1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2014-2021 Netronome Systems, Inc. 3 * All rights reserved. 4 * 5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 6 */ 7 8 #include <unistd.h> 9 10 #include <eal_firmware.h> 11 #include <rte_alarm.h> 12 13 #include "flower/nfp_flower.h" 14 #include "nfd3/nfp_nfd3.h" 15 #include "nfdk/nfp_nfdk.h" 16 #include "nfpcore/nfp_cpp.h" 17 #include "nfpcore/nfp_hwinfo.h" 18 #include "nfpcore/nfp_rtsym.h" 19 #include "nfpcore/nfp_nsp.h" 20 #include "nfpcore/nfp6000_pcie.h" 21 #include "nfpcore/nfp_resource.h" 22 23 #include "nfp_cpp_bridge.h" 24 #include "nfp_ipsec.h" 25 #include "nfp_logs.h" 26 #include "nfp_net_flow.h" 27 28 /* 64-bit per app capabilities */ 29 #define NFP_NET_APP_CAP_SP_INDIFF RTE_BIT64(0) /* Indifferent to port speed */ 30 31 #define NFP_PF_DRIVER_NAME net_nfp_pf 32 33 static void 34 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, 35 uint16_t port) 36 { 37 struct nfp_net_hw *hw; 38 struct nfp_eth_table *nfp_eth_table; 39 40 /* Grab a pointer to the correct physical port */ 41 hw = app_fw_nic->ports[port]; 42 43 nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table; 44 45 rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr); 46 } 47 48 static uint32_t 49 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap) 50 { 51 switch (speeds_bitmap) { 52 case RTE_ETH_LINK_SPEED_10M_HD: 53 return RTE_ETH_SPEED_NUM_10M; 54 case RTE_ETH_LINK_SPEED_10M: 55 return RTE_ETH_SPEED_NUM_10M; 56 case RTE_ETH_LINK_SPEED_100M_HD: 57 return RTE_ETH_SPEED_NUM_100M; 58 case RTE_ETH_LINK_SPEED_100M: 59 return RTE_ETH_SPEED_NUM_100M; 60 case RTE_ETH_LINK_SPEED_1G: 61 return RTE_ETH_SPEED_NUM_1G; 62 case RTE_ETH_LINK_SPEED_2_5G: 63 return RTE_ETH_SPEED_NUM_2_5G; 64 case RTE_ETH_LINK_SPEED_5G: 65 return RTE_ETH_SPEED_NUM_5G; 66 case RTE_ETH_LINK_SPEED_10G: 67 return RTE_ETH_SPEED_NUM_10G; 68 case RTE_ETH_LINK_SPEED_20G: 69 return RTE_ETH_SPEED_NUM_20G; 70 case RTE_ETH_LINK_SPEED_25G: 71 return RTE_ETH_SPEED_NUM_25G; 72 case RTE_ETH_LINK_SPEED_40G: 73 return RTE_ETH_SPEED_NUM_40G; 74 case RTE_ETH_LINK_SPEED_50G: 75 return RTE_ETH_SPEED_NUM_50G; 76 case RTE_ETH_LINK_SPEED_56G: 77 return RTE_ETH_SPEED_NUM_56G; 78 case RTE_ETH_LINK_SPEED_100G: 79 return RTE_ETH_SPEED_NUM_100G; 80 case RTE_ETH_LINK_SPEED_200G: 81 return RTE_ETH_SPEED_NUM_200G; 82 case RTE_ETH_LINK_SPEED_400G: 83 return RTE_ETH_SPEED_NUM_400G; 84 default: 85 return RTE_ETH_SPEED_NUM_NONE; 86 } 87 } 88 89 static int 90 nfp_net_nfp4000_speed_configure_check(uint16_t port_id, 91 uint32_t configure_speed, 92 struct nfp_eth_table *nfp_eth_table) 93 { 94 switch (port_id) { 95 case 0: 96 if (configure_speed == RTE_ETH_SPEED_NUM_25G && 97 nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) { 98 PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000."); 99 return -ENOTSUP; 100 } 101 break; 102 case 1: 103 if (configure_speed == RTE_ETH_SPEED_NUM_10G && 104 nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) { 105 PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000."); 106 return -ENOTSUP; 107 } 108 break; 109 default: 110 PMD_DRV_LOG(ERR, "The port id is invalid."); 111 return -EINVAL; 112 } 113 114 return 0; 115 } 116 117 static int 118 nfp_net_speed_configure(struct rte_eth_dev *dev, 119 struct nfp_net_hw *net_hw) 120 { 121 int ret; 122 uint32_t speed_capa; 123 struct nfp_nsp *nsp; 124 uint32_t link_speeds; 125 uint32_t configure_speed; 126 struct nfp_eth_table_port *eth_port; 127 struct nfp_eth_table *nfp_eth_table; 128 129 nfp_eth_table = net_hw->pf_dev->nfp_eth_table; 130 eth_port = &nfp_eth_table->ports[net_hw->idx]; 131 132 speed_capa = net_hw->pf_dev->speed_capa; 133 if (speed_capa == 0) { 134 PMD_DRV_LOG(ERR, "Speed_capa is invalid."); 135 return -EINVAL; 136 } 137 138 link_speeds = dev->data->dev_conf.link_speeds; 139 configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds); 140 if (configure_speed == RTE_ETH_SPEED_NUM_NONE && 141 link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 142 PMD_DRV_LOG(ERR, "Configured speed is invalid."); 143 return -EINVAL; 144 } 145 146 /* NFP4000 does not allow the port 0 25Gbps and port 1 10Gbps at the same time. */ 147 if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) { 148 ret = nfp_net_nfp4000_speed_configure_check(net_hw->idx, 149 configure_speed, nfp_eth_table); 150 if (ret != 0) { 151 PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000."); 152 return ret; 153 } 154 } 155 156 nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index); 157 if (nsp == NULL) { 158 PMD_DRV_LOG(ERR, "Couldn't get NSP."); 159 return -EIO; 160 } 161 162 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 163 if (eth_port->supp_aneg) { 164 ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO); 165 if (ret != 0) { 166 PMD_DRV_LOG(ERR, "Failed to set ANEG enable."); 167 goto config_cleanup; 168 } 169 } 170 } else { 171 ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED); 172 if (ret != 0) { 173 PMD_DRV_LOG(ERR, "Failed to set ANEG disable."); 174 goto config_cleanup; 175 } 176 177 ret = nfp_eth_set_speed(nsp, configure_speed); 178 if (ret != 0) { 179 PMD_DRV_LOG(ERR, "Failed to set speed."); 180 goto config_cleanup; 181 } 182 } 183 184 return nfp_eth_config_commit_end(nsp); 185 186 config_cleanup: 187 nfp_eth_config_cleanup_end(nsp); 188 189 return ret; 190 } 191 192 static int 193 nfp_net_start(struct rte_eth_dev *dev) 194 { 195 int ret; 196 uint16_t i; 197 struct nfp_hw *hw; 198 uint32_t new_ctrl; 199 struct nfp_cpp *cpp; 200 uint32_t update = 0; 201 uint32_t cap_extend; 202 uint32_t intr_vector; 203 uint32_t ctrl_extend = 0; 204 struct nfp_net_hw *net_hw; 205 struct nfp_pf_dev *pf_dev; 206 struct rte_eth_rxmode *rxmode; 207 struct nfp_app_fw_nic *app_fw_nic; 208 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 209 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 210 211 net_hw = dev->data->dev_private; 212 pf_dev = net_hw->pf_dev; 213 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 214 hw = &net_hw->super; 215 216 /* Disabling queues just in case... */ 217 nfp_net_disable_queues(dev); 218 219 /* Enabling the required queues in the device */ 220 nfp_net_enable_queues(dev); 221 222 /* Configure the port speed and the auto-negotiation mode. */ 223 ret = nfp_net_speed_configure(dev, net_hw); 224 if (ret < 0) { 225 PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode."); 226 return ret; 227 } 228 229 /* Check and configure queue intr-vector mapping */ 230 if (dev->data->dev_conf.intr_conf.rxq != 0) { 231 if (app_fw_nic->multiport) { 232 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 233 "with NFP multiport PF"); 234 return -EINVAL; 235 } 236 237 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { 238 /* 239 * Better not to share LSC with RX interrupts. 240 * Unregistering LSC interrupt handler. 241 */ 242 rte_intr_callback_unregister(intr_handle, 243 nfp_net_dev_interrupt_handler, (void *)dev); 244 245 if (dev->data->nb_rx_queues > 1) { 246 PMD_INIT_LOG(ERR, "PMD rx interrupt only " 247 "supports 1 queue with UIO"); 248 return -EIO; 249 } 250 } 251 252 intr_vector = dev->data->nb_rx_queues; 253 if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) 254 return -1; 255 256 nfp_configure_rx_interrupt(dev, intr_handle); 257 update = NFP_NET_CFG_UPDATE_MSIX; 258 } 259 260 /* Checking MTU set */ 261 if (dev->data->mtu > net_hw->flbufsz) { 262 PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", 263 dev->data->mtu, net_hw->flbufsz); 264 return -ERANGE; 265 } 266 267 rte_intr_enable(intr_handle); 268 269 new_ctrl = nfp_check_offloads(dev); 270 271 /* Writing configuration parameters in the device */ 272 nfp_net_params_setup(net_hw); 273 274 rxmode = &dev->data->dev_conf.rxmode; 275 if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { 276 nfp_net_rss_config_default(dev); 277 update |= NFP_NET_CFG_UPDATE_RSS; 278 new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); 279 } 280 281 /* Enable device */ 282 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 283 284 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 285 286 /* Enable vxlan */ 287 if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { 288 new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; 289 update |= NFP_NET_CFG_UPDATE_VXLAN; 290 } 291 292 if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) 293 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 294 295 if (nfp_reconfig(hw, new_ctrl, update) != 0) 296 return -EIO; 297 298 hw->ctrl = new_ctrl; 299 300 /* Enable packet type offload by extend ctrl word1. */ 301 cap_extend = hw->cap_ext; 302 if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) 303 ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; 304 305 if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) 306 ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC | 307 NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP | 308 NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; 309 310 /* Enable flow steer by extend ctrl word1. */ 311 if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) 312 ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER; 313 314 update = NFP_NET_CFG_UPDATE_GEN; 315 if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0) 316 return -EIO; 317 318 hw->ctrl_ext = ctrl_extend; 319 320 /* 321 * Allocating rte mbufs for configured rx queues. 322 * This requires queues being enabled before. 323 */ 324 if (nfp_net_rx_freelist_setup(dev) != 0) { 325 ret = -ENOMEM; 326 goto error; 327 } 328 329 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 330 cpp = net_hw->cpp; 331 else 332 cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp; 333 334 /* Configure the physical port up */ 335 nfp_eth_set_configured(cpp, net_hw->nfp_idx, 1); 336 337 for (i = 0; i < dev->data->nb_rx_queues; i++) 338 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 339 for (i = 0; i < dev->data->nb_tx_queues; i++) 340 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 341 342 return 0; 343 344 error: 345 /* 346 * An error returned by this function should mean the app 347 * exiting and then the system releasing all the memory 348 * allocated even memory coming from hugepages. 349 * 350 * The device could be enabled at this point with some queues 351 * ready for getting packets. This is true if the call to 352 * nfp_net_rx_freelist_setup() succeeds for some queues but 353 * fails for subsequent queues. 354 * 355 * This should make the app exiting but better if we tell the 356 * device first. 357 */ 358 nfp_net_disable_queues(dev); 359 360 return ret; 361 } 362 363 /* Set the link up. */ 364 static int 365 nfp_net_set_link_up(struct rte_eth_dev *dev) 366 { 367 struct nfp_cpp *cpp; 368 struct nfp_net_hw *hw; 369 370 hw = dev->data->dev_private; 371 372 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 373 cpp = hw->cpp; 374 else 375 cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp; 376 377 return nfp_eth_set_configured(cpp, hw->nfp_idx, 1); 378 } 379 380 /* Set the link down. */ 381 static int 382 nfp_net_set_link_down(struct rte_eth_dev *dev) 383 { 384 struct nfp_cpp *cpp; 385 struct nfp_net_hw *hw; 386 387 hw = dev->data->dev_private; 388 389 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 390 cpp = hw->cpp; 391 else 392 cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp; 393 394 return nfp_eth_set_configured(cpp, hw->nfp_idx, 0); 395 } 396 397 static uint8_t 398 nfp_function_id_get(const struct nfp_pf_dev *pf_dev, 399 uint8_t phy_port) 400 { 401 if (pf_dev->multi_pf.enabled) 402 return pf_dev->multi_pf.function_id; 403 404 return phy_port; 405 } 406 407 static void 408 nfp_net_beat_timer(void *arg) 409 { 410 uint64_t cur_sec; 411 struct nfp_multi_pf *multi_pf = arg; 412 413 cur_sec = rte_rdtsc(); 414 nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id)); 415 416 /* Beat once per second. */ 417 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 418 (void *)multi_pf) < 0) { 419 PMD_DRV_LOG(ERR, "Error setting alarm"); 420 } 421 } 422 423 static int 424 nfp_net_keepalive_init(struct nfp_cpp *cpp, 425 struct nfp_multi_pf *multi_pf) 426 { 427 uint8_t *base; 428 uint64_t addr; 429 uint32_t size; 430 uint32_t cpp_id; 431 struct nfp_resource *res; 432 433 res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE); 434 if (res == NULL) 435 return -EIO; 436 437 cpp_id = nfp_resource_cpp_id(res); 438 addr = nfp_resource_address(res); 439 size = nfp_resource_size(res); 440 441 nfp_resource_release(res); 442 443 /* Allocate a fixed area for keepalive. */ 444 base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area); 445 if (base == NULL) { 446 PMD_DRV_LOG(ERR, "Failed to map area for keepalive."); 447 return -EIO; 448 } 449 450 multi_pf->beat_addr = base; 451 452 return 0; 453 } 454 455 static void 456 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf) 457 { 458 nfp_cpp_area_release_free(multi_pf->beat_area); 459 } 460 461 static int 462 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf) 463 { 464 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 465 (void *)multi_pf) < 0) { 466 PMD_DRV_LOG(ERR, "Error setting alarm"); 467 return -EIO; 468 } 469 470 return 0; 471 } 472 473 static void 474 nfp_net_keepalive_clear(uint8_t *beat_addr, 475 uint8_t function_id) 476 { 477 nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id)); 478 } 479 480 static void 481 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info, 482 struct nfp_multi_pf *multi_pf) 483 { 484 uint8_t port_num; 485 486 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 487 if (port_num == multi_pf->function_id) 488 continue; 489 490 nfp_net_keepalive_clear(multi_pf->beat_addr, port_num); 491 } 492 } 493 494 static void 495 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) 496 { 497 /* Cancel keepalive for multiple PF setup */ 498 rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); 499 } 500 501 static void 502 nfp_net_uninit(struct rte_eth_dev *eth_dev) 503 { 504 struct nfp_net_hw *net_hw; 505 506 net_hw = eth_dev->data->dev_private; 507 508 if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) 509 nfp_net_flow_priv_uninit(net_hw->pf_dev, net_hw->idx); 510 511 rte_free(net_hw->eth_xstats_base); 512 nfp_ipsec_uninit(eth_dev); 513 if (net_hw->mac_stats_area != NULL) 514 nfp_cpp_area_release_free(net_hw->mac_stats_area); 515 } 516 517 static void 518 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, 519 uint8_t id) 520 { 521 struct rte_eth_dev *eth_dev; 522 struct nfp_app_fw_nic *app_fw_nic; 523 524 app_fw_nic = pf_dev->app_fw_priv; 525 if (app_fw_nic->ports[id] != NULL) { 526 eth_dev = app_fw_nic->ports[id]->eth_dev; 527 if (eth_dev != NULL) 528 nfp_net_uninit(eth_dev); 529 530 app_fw_nic->ports[id] = NULL; 531 } 532 } 533 534 static void 535 nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) 536 { 537 nfp_cpp_area_release_free(pf_dev->ctrl_area); 538 rte_free(pf_dev->app_fw_priv); 539 } 540 541 void 542 nfp_pf_uninit(struct nfp_pf_dev *pf_dev) 543 { 544 nfp_cpp_area_release_free(pf_dev->qc_area); 545 free(pf_dev->sym_tbl); 546 if (pf_dev->multi_pf.enabled) { 547 nfp_net_keepalive_stop(&pf_dev->multi_pf); 548 nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id); 549 nfp_net_keepalive_uninit(&pf_dev->multi_pf); 550 } 551 free(pf_dev->nfp_eth_table); 552 free(pf_dev->hwinfo); 553 nfp_cpp_free(pf_dev->cpp); 554 rte_free(pf_dev); 555 } 556 557 static int 558 nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev) 559 { 560 free(pf_dev->sym_tbl); 561 nfp_cpp_free(pf_dev->cpp); 562 rte_free(pf_dev); 563 564 return 0; 565 } 566 567 /* Reset and stop device. The device can not be restarted. */ 568 static int 569 nfp_net_close(struct rte_eth_dev *dev) 570 { 571 uint8_t i; 572 uint8_t id; 573 struct nfp_net_hw *hw; 574 struct nfp_pf_dev *pf_dev; 575 struct rte_pci_device *pci_dev; 576 struct nfp_app_fw_nic *app_fw_nic; 577 578 /* 579 * In secondary process, a released eth device can be found by its name 580 * in shared memory. 581 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the 582 * eth device has been released. 583 */ 584 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 585 if (dev->state == RTE_ETH_DEV_UNUSED) 586 return 0; 587 588 nfp_pf_secondary_uninit(dev->process_private); 589 return 0; 590 } 591 592 hw = dev->data->dev_private; 593 pf_dev = hw->pf_dev; 594 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 595 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 596 597 /* 598 * We assume that the DPDK application is stopping all the 599 * threads/queues before calling the device close function. 600 */ 601 nfp_net_disable_queues(dev); 602 603 /* Clear queues */ 604 nfp_net_close_tx_queue(dev); 605 nfp_net_close_rx_queue(dev); 606 607 /* Cancel possible impending LSC work here before releasing the port */ 608 rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); 609 610 /* Only free PF resources after all physical ports have been closed */ 611 /* Mark this port as unused and free device priv resources */ 612 nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); 613 614 if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) 615 return -EINVAL; 616 617 nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx); 618 619 for (i = 0; i < app_fw_nic->total_phyports; i++) { 620 id = nfp_function_id_get(pf_dev, i); 621 622 /* Check to see if ports are still in use */ 623 if (app_fw_nic->ports[id] != NULL) 624 return 0; 625 } 626 627 /* Enable in nfp_net_start() */ 628 rte_intr_disable(pci_dev->intr_handle); 629 630 /* Register in nfp_net_init() */ 631 rte_intr_callback_unregister(pci_dev->intr_handle, 632 nfp_net_dev_interrupt_handler, (void *)dev); 633 634 nfp_uninit_app_fw_nic(pf_dev); 635 nfp_pf_uninit(pf_dev); 636 637 return 0; 638 } 639 640 static int 641 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw, 642 uint16_t port, 643 uint32_t *idx) 644 { 645 uint32_t i; 646 int free_idx = -1; 647 648 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 649 if (hw->vxlan_ports[i] == port) { 650 free_idx = i; 651 break; 652 } 653 654 if (hw->vxlan_usecnt[i] == 0) { 655 free_idx = i; 656 break; 657 } 658 } 659 660 if (free_idx == -1) 661 return -EINVAL; 662 663 *idx = free_idx; 664 665 return 0; 666 } 667 668 static int 669 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, 670 struct rte_eth_udp_tunnel *tunnel_udp) 671 { 672 int ret; 673 uint32_t idx; 674 uint16_t vxlan_port; 675 struct nfp_net_hw *hw; 676 enum rte_eth_tunnel_type tnl_type; 677 678 hw = dev->data->dev_private; 679 vxlan_port = tunnel_udp->udp_port; 680 tnl_type = tunnel_udp->prot_type; 681 682 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 683 PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 684 return -ENOTSUP; 685 } 686 687 ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 688 if (ret != 0) { 689 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 690 return -EINVAL; 691 } 692 693 if (hw->vxlan_usecnt[idx] == 0) { 694 ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port); 695 if (ret != 0) { 696 PMD_DRV_LOG(ERR, "Failed set vxlan port"); 697 return -EINVAL; 698 } 699 } 700 701 hw->vxlan_usecnt[idx]++; 702 703 return 0; 704 } 705 706 static int 707 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, 708 struct rte_eth_udp_tunnel *tunnel_udp) 709 { 710 int ret; 711 uint32_t idx; 712 uint16_t vxlan_port; 713 struct nfp_net_hw *hw; 714 enum rte_eth_tunnel_type tnl_type; 715 716 hw = dev->data->dev_private; 717 vxlan_port = tunnel_udp->udp_port; 718 tnl_type = tunnel_udp->prot_type; 719 720 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 721 PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 722 return -ENOTSUP; 723 } 724 725 ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 726 if (ret != 0 || hw->vxlan_usecnt[idx] == 0) { 727 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 728 return -EINVAL; 729 } 730 731 hw->vxlan_usecnt[idx]--; 732 733 if (hw->vxlan_usecnt[idx] == 0) { 734 ret = nfp_net_set_vxlan_port(hw, idx, 0); 735 if (ret != 0) { 736 PMD_DRV_LOG(ERR, "Failed set vxlan port"); 737 return -EINVAL; 738 } 739 } 740 741 return 0; 742 } 743 744 /* Initialise and register driver with DPDK Application */ 745 static const struct eth_dev_ops nfp_net_eth_dev_ops = { 746 .dev_configure = nfp_net_configure, 747 .dev_start = nfp_net_start, 748 .dev_stop = nfp_net_stop, 749 .dev_set_link_up = nfp_net_set_link_up, 750 .dev_set_link_down = nfp_net_set_link_down, 751 .dev_close = nfp_net_close, 752 .promiscuous_enable = nfp_net_promisc_enable, 753 .promiscuous_disable = nfp_net_promisc_disable, 754 .allmulticast_enable = nfp_net_allmulticast_enable, 755 .allmulticast_disable = nfp_net_allmulticast_disable, 756 .link_update = nfp_net_link_update, 757 .stats_get = nfp_net_stats_get, 758 .stats_reset = nfp_net_stats_reset, 759 .xstats_get = nfp_net_xstats_get, 760 .xstats_reset = nfp_net_xstats_reset, 761 .xstats_get_names = nfp_net_xstats_get_names, 762 .xstats_get_by_id = nfp_net_xstats_get_by_id, 763 .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, 764 .dev_infos_get = nfp_net_infos_get, 765 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 766 .mtu_set = nfp_net_dev_mtu_set, 767 .mac_addr_set = nfp_net_set_mac_addr, 768 .vlan_offload_set = nfp_net_vlan_offload_set, 769 .reta_update = nfp_net_reta_update, 770 .reta_query = nfp_net_reta_query, 771 .rss_hash_update = nfp_net_rss_hash_update, 772 .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 773 .rx_queue_setup = nfp_net_rx_queue_setup, 774 .rx_queue_release = nfp_net_rx_queue_release, 775 .tx_queue_setup = nfp_net_tx_queue_setup, 776 .tx_queue_release = nfp_net_tx_queue_release, 777 .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 778 .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 779 .udp_tunnel_port_add = nfp_udp_tunnel_port_add, 780 .udp_tunnel_port_del = nfp_udp_tunnel_port_del, 781 .fw_version_get = nfp_net_firmware_version_get, 782 .flow_ctrl_get = nfp_net_flow_ctrl_get, 783 .flow_ctrl_set = nfp_net_flow_ctrl_set, 784 .flow_ops_get = nfp_net_flow_ops_get, 785 .fec_get_capability = nfp_net_fec_get_capability, 786 .fec_get = nfp_net_fec_get, 787 .fec_set = nfp_net_fec_set, 788 }; 789 790 static inline void 791 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, 792 struct rte_eth_dev *eth_dev) 793 { 794 if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3) 795 eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts; 796 else 797 eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts; 798 799 eth_dev->dev_ops = &nfp_net_eth_dev_ops; 800 eth_dev->rx_queue_count = nfp_net_rx_queue_count; 801 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; 802 } 803 804 static int 805 nfp_net_init(struct rte_eth_dev *eth_dev) 806 { 807 int err; 808 uint16_t port; 809 uint64_t rx_base; 810 uint64_t tx_base; 811 struct nfp_hw *hw; 812 struct nfp_net_hw *net_hw; 813 struct nfp_pf_dev *pf_dev; 814 struct rte_pci_device *pci_dev; 815 struct nfp_app_fw_nic *app_fw_nic; 816 817 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 818 net_hw = eth_dev->data->dev_private; 819 820 /* Use backpointer here to the PF of this eth_dev */ 821 pf_dev = net_hw->pf_dev; 822 823 /* Use backpointer to the CoreNIC app struct */ 824 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 825 826 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; 827 if (port > 7) { 828 PMD_DRV_LOG(ERR, "Port value is wrong"); 829 return -ENODEV; 830 } 831 832 hw = &net_hw->super; 833 834 PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " 835 "NFP internal port number: %d", port, net_hw->nfp_idx); 836 837 rte_eth_copy_pci_info(eth_dev, pci_dev); 838 839 if (port == 0 || pf_dev->multi_pf.enabled) { 840 uint32_t min_size; 841 842 hw->ctrl_bar = pf_dev->ctrl_bar; 843 min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index; 844 net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats", 845 min_size, &net_hw->mac_stats_area); 846 if (net_hw->mac_stats_bar == NULL) { 847 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); 848 return -EIO; 849 } 850 851 net_hw->mac_stats = net_hw->mac_stats_bar; 852 } else { 853 /* Use port offset in pf ctrl_bar for this ports control bar */ 854 hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); 855 net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + 856 (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); 857 } 858 859 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); 860 PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); 861 862 err = nfp_net_common_init(pci_dev, net_hw); 863 if (err != 0) 864 goto free_area; 865 866 err = nfp_net_tlv_caps_parse(eth_dev); 867 if (err != 0) { 868 PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); 869 return err; 870 goto free_area; 871 } 872 873 err = nfp_ipsec_init(eth_dev); 874 if (err != 0) { 875 PMD_INIT_LOG(ERR, "Failed to init IPsec module"); 876 goto free_area; 877 } 878 879 nfp_net_ethdev_ops_mount(net_hw, eth_dev); 880 881 net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * 882 nfp_net_xstats_size(eth_dev), 0); 883 if (net_hw->eth_xstats_base == NULL) { 884 PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", 885 pci_dev->device.name); 886 err = -ENOMEM; 887 goto ipsec_exit; 888 } 889 890 /* Work out where in the BAR the queues start. */ 891 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 892 rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 893 894 net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 895 net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; 896 eth_dev->data->dev_private = net_hw; 897 898 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", 899 hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); 900 901 nfp_net_cfg_queue_setup(net_hw); 902 net_hw->mtu = RTE_ETHER_MTU; 903 904 /* VLAN insertion is incompatible with LSOv2 */ 905 if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) 906 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 907 908 nfp_net_log_device_information(net_hw); 909 910 /* Initializing spinlock for reconfigs */ 911 rte_spinlock_init(&hw->reconfig_lock); 912 913 /* Allocating memory for mac addr */ 914 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); 915 if (eth_dev->data->mac_addrs == NULL) { 916 PMD_INIT_LOG(ERR, "Failed to space for MAC address"); 917 err = -ENOMEM; 918 goto xstats_free; 919 } 920 921 nfp_net_pf_read_mac(app_fw_nic, port); 922 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 923 924 if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { 925 PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); 926 /* Using random mac addresses for VFs */ 927 rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); 928 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 929 } 930 931 /* Copying mac address to DPDK eth_dev struct */ 932 rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs); 933 934 if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) 935 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 936 937 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 938 939 PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x " 940 "mac=" RTE_ETHER_ADDR_PRT_FMT, 941 eth_dev->data->port_id, pci_dev->id.vendor_id, 942 pci_dev->id.device_id, 943 RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); 944 945 /* Registering LSC interrupt handler */ 946 rte_intr_callback_register(pci_dev->intr_handle, 947 nfp_net_dev_interrupt_handler, (void *)eth_dev); 948 /* Telling the firmware about the LSC interrupt entry */ 949 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 950 /* Unmask the LSC interrupt */ 951 nfp_net_irq_unmask(eth_dev); 952 /* Recording current stats counters values */ 953 nfp_net_stats_reset(eth_dev); 954 955 if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) { 956 err = nfp_net_flow_priv_init(pf_dev, port); 957 if (err != 0) { 958 PMD_INIT_LOG(ERR, "Init net flow priv failed"); 959 goto xstats_free; 960 } 961 } 962 963 return 0; 964 965 xstats_free: 966 rte_free(net_hw->eth_xstats_base); 967 ipsec_exit: 968 nfp_ipsec_uninit(eth_dev); 969 free_area: 970 if (net_hw->mac_stats_area != NULL) 971 nfp_cpp_area_release_free(net_hw->mac_stats_area); 972 973 return err; 974 } 975 976 #define DEFAULT_FW_PATH "/lib/firmware/netronome" 977 978 static int 979 nfp_fw_get_name(struct rte_pci_device *dev, 980 struct nfp_nsp *nsp, 981 char *card, 982 char *fw_name, 983 size_t fw_size) 984 { 985 char serial[40]; 986 uint16_t interface; 987 uint32_t cpp_serial_len; 988 const uint8_t *cpp_serial; 989 struct nfp_cpp *cpp = nfp_nsp_cpp(nsp); 990 991 cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial); 992 if (cpp_serial_len != NFP_SERIAL_LEN) 993 return -ERANGE; 994 995 interface = nfp_cpp_interface(cpp); 996 997 /* Looking for firmware file in order of priority */ 998 999 /* First try to find a firmware image specific for this device */ 1000 snprintf(serial, sizeof(serial), 1001 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 1002 cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], 1003 cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); 1004 snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial); 1005 1006 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 1007 if (access(fw_name, F_OK) == 0) 1008 return 0; 1009 1010 /* Then try the PCI name */ 1011 snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH, 1012 dev->name); 1013 1014 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 1015 if (access(fw_name, F_OK) == 0) 1016 return 0; 1017 1018 /* Finally try the card type and media */ 1019 snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card); 1020 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 1021 if (access(fw_name, F_OK) == 0) 1022 return 0; 1023 1024 return -ENOENT; 1025 } 1026 1027 static int 1028 nfp_fw_upload(struct nfp_nsp *nsp, 1029 char *fw_name) 1030 { 1031 int err; 1032 void *fw_buf; 1033 size_t fsize; 1034 1035 err = rte_firmware_read(fw_name, &fw_buf, &fsize); 1036 if (err != 0) { 1037 PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name); 1038 return -ENOENT; 1039 } 1040 1041 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", 1042 fw_name, fsize); 1043 PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 1044 if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) { 1045 free(fw_buf); 1046 PMD_DRV_LOG(ERR, "Firmware load failed."); 1047 return -EIO; 1048 } 1049 1050 PMD_DRV_LOG(INFO, "Done"); 1051 1052 free(fw_buf); 1053 1054 return 0; 1055 } 1056 1057 static void 1058 nfp_fw_unload(struct nfp_cpp *cpp) 1059 { 1060 struct nfp_nsp *nsp; 1061 1062 nsp = nfp_nsp_open(cpp); 1063 if (nsp == NULL) 1064 return; 1065 1066 nfp_nsp_device_soft_reset(nsp); 1067 nfp_nsp_close(nsp); 1068 } 1069 1070 static int 1071 nfp_fw_reload(struct nfp_nsp *nsp, 1072 char *fw_name) 1073 { 1074 int err; 1075 1076 nfp_nsp_device_soft_reset(nsp); 1077 err = nfp_fw_upload(nsp, fw_name); 1078 if (err != 0) 1079 PMD_DRV_LOG(ERR, "NFP firmware load failed"); 1080 1081 return err; 1082 } 1083 1084 static bool 1085 nfp_fw_skip_load(const struct nfp_dev_info *dev_info, 1086 struct nfp_multi_pf *multi_pf) 1087 { 1088 uint8_t i; 1089 uint64_t tmp_beat; 1090 uint32_t port_num; 1091 uint8_t in_use = 0; 1092 uint64_t beat[dev_info->pf_num_per_unit]; 1093 uint32_t offset[dev_info->pf_num_per_unit]; 1094 uint8_t abnormal = dev_info->pf_num_per_unit; 1095 1096 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 1097 offset[port_num] = NFP_BEAT_OFFSET(port_num); 1098 beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]); 1099 if (beat[port_num] == 0) 1100 abnormal--; 1101 } 1102 1103 if (abnormal == 0) 1104 return true; 1105 1106 for (i = 0; i < 3; i++) { 1107 sleep(1); 1108 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 1109 if (port_num == multi_pf->function_id) 1110 continue; 1111 1112 if (beat[port_num] == 0) 1113 continue; 1114 1115 tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]); 1116 if (tmp_beat != beat[port_num]) { 1117 in_use++; 1118 abnormal--; 1119 beat[port_num] = 0; 1120 } 1121 } 1122 1123 if (abnormal == 0) 1124 return true; 1125 } 1126 1127 if (in_use != 0) { 1128 PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the nic has port which is exit abnormally.", 1129 abnormal); 1130 return true; 1131 } 1132 1133 return false; 1134 } 1135 1136 static int 1137 nfp_fw_reload_for_multipf(struct nfp_nsp *nsp, 1138 char *fw_name, 1139 struct nfp_cpp *cpp, 1140 const struct nfp_dev_info *dev_info, 1141 struct nfp_multi_pf *multi_pf) 1142 { 1143 int err; 1144 bool skip_load_fw = false; 1145 1146 err = nfp_net_keepalive_init(cpp, multi_pf); 1147 if (err != 0) { 1148 PMD_DRV_LOG(ERR, "NFP init beat failed"); 1149 return err; 1150 } 1151 1152 err = nfp_net_keepalive_start(multi_pf); 1153 if (err != 0) { 1154 nfp_net_keepalive_uninit(multi_pf); 1155 PMD_DRV_LOG(ERR, "NFP write beat failed"); 1156 return err; 1157 } 1158 1159 if (nfp_nsp_fw_loaded(nsp)) 1160 skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf); 1161 1162 if (skip_load_fw) 1163 return 0; 1164 1165 err = nfp_fw_reload(nsp, fw_name); 1166 if (err != 0) { 1167 nfp_net_keepalive_stop(multi_pf); 1168 nfp_net_keepalive_uninit(multi_pf); 1169 return err; 1170 } 1171 1172 nfp_net_keepalive_clear_others(dev_info, multi_pf); 1173 1174 return 0; 1175 } 1176 1177 static int 1178 nfp_fw_setup(struct rte_pci_device *dev, 1179 struct nfp_cpp *cpp, 1180 struct nfp_eth_table *nfp_eth_table, 1181 struct nfp_hwinfo *hwinfo, 1182 const struct nfp_dev_info *dev_info, 1183 struct nfp_multi_pf *multi_pf) 1184 { 1185 int err; 1186 char fw_name[125]; 1187 char card_desc[100]; 1188 struct nfp_nsp *nsp; 1189 const char *nfp_fw_model; 1190 1191 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); 1192 if (nfp_fw_model == NULL) 1193 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); 1194 1195 if (nfp_fw_model != NULL) { 1196 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); 1197 } else { 1198 PMD_DRV_LOG(ERR, "firmware model NOT found"); 1199 return -EIO; 1200 } 1201 1202 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 1203 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", 1204 nfp_eth_table->count); 1205 return -EIO; 1206 } 1207 1208 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", 1209 nfp_eth_table->count); 1210 1211 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); 1212 1213 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 1214 nfp_fw_model, nfp_eth_table->count, 1215 nfp_eth_table->ports[0].speed / 1000); 1216 1217 nsp = nfp_nsp_open(cpp); 1218 if (nsp == NULL) { 1219 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 1220 return -EIO; 1221 } 1222 1223 err = nfp_fw_get_name(dev, nsp, card_desc, fw_name, sizeof(fw_name)); 1224 if (err != 0) { 1225 PMD_DRV_LOG(ERR, "Can't find suitable firmware."); 1226 nfp_nsp_close(nsp); 1227 return err; 1228 } 1229 1230 if (multi_pf->enabled) 1231 err = nfp_fw_reload_for_multipf(nsp, fw_name, cpp, dev_info, multi_pf); 1232 else 1233 err = nfp_fw_reload(nsp, fw_name); 1234 1235 nfp_nsp_close(nsp); 1236 return err; 1237 } 1238 1239 static inline bool 1240 nfp_check_multi_pf_from_fw(uint32_t total_vnics) 1241 { 1242 if (total_vnics == 1) 1243 return true; 1244 1245 return false; 1246 } 1247 1248 static inline bool 1249 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev, 1250 struct nfp_cpp *cpp) 1251 { 1252 bool flag; 1253 struct nfp_nsp *nsp; 1254 1255 nsp = nfp_nsp_open(cpp); 1256 if (nsp == NULL) { 1257 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 1258 return false; 1259 } 1260 1261 flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) && 1262 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC); 1263 1264 nfp_nsp_close(nsp); 1265 return flag; 1266 } 1267 1268 static int 1269 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) 1270 { 1271 int err = 0; 1272 uint64_t tx_base; 1273 uint8_t *ctrl_bar; 1274 struct nfp_hw *hw; 1275 uint32_t cap_extend; 1276 struct nfp_net_hw net_hw; 1277 struct nfp_cpp_area *area; 1278 char name[RTE_ETH_NAME_MAX_LEN]; 1279 1280 memset(&net_hw, 0, sizeof(struct nfp_net_hw)); 1281 1282 /* Map the symbol table */ 1283 snprintf(name, sizeof(name), "_pf%u_net_bar0", 1284 pf_dev->multi_pf.function_id); 1285 ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ, 1286 &area); 1287 if (ctrl_bar == NULL) { 1288 PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol"); 1289 return -ENODEV; 1290 } 1291 1292 hw = &net_hw.super; 1293 hw->ctrl_bar = ctrl_bar; 1294 1295 cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); 1296 if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) { 1297 PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF"); 1298 err = -EINVAL; 1299 goto end; 1300 } 1301 1302 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 1303 net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 1304 nfp_net_cfg_queue_setup(&net_hw); 1305 rte_spinlock_init(&hw->reconfig_lock); 1306 nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN); 1307 end: 1308 nfp_cpp_area_release_free(area); 1309 return err; 1310 } 1311 1312 static int 1313 nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev, 1314 const struct nfp_dev_info *dev_info) 1315 { 1316 uint8_t i; 1317 uint8_t id; 1318 int ret = 0; 1319 uint32_t total_vnics; 1320 struct nfp_net_hw *hw; 1321 unsigned int numa_node; 1322 struct rte_eth_dev *eth_dev; 1323 struct nfp_app_fw_nic *app_fw_nic; 1324 struct nfp_eth_table *nfp_eth_table; 1325 char bar_name[RTE_ETH_NAME_MAX_LEN]; 1326 char port_name[RTE_ETH_NAME_MAX_LEN]; 1327 char vnic_name[RTE_ETH_NAME_MAX_LEN]; 1328 1329 nfp_eth_table = pf_dev->nfp_eth_table; 1330 PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count); 1331 id = nfp_function_id_get(pf_dev, 0); 1332 1333 /* Allocate memory for the CoreNIC app */ 1334 app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0); 1335 if (app_fw_nic == NULL) 1336 return -ENOMEM; 1337 1338 /* Point the app_fw_priv pointer in the PF to the coreNIC app */ 1339 pf_dev->app_fw_priv = app_fw_nic; 1340 1341 /* Read the number of vNIC's created for the PF */ 1342 snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id); 1343 total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret); 1344 if (ret != 0 || total_vnics == 0 || total_vnics > 8) { 1345 PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name); 1346 ret = -ENODEV; 1347 goto app_cleanup; 1348 } 1349 1350 if (pf_dev->multi_pf.enabled) { 1351 if (!nfp_check_multi_pf_from_fw(total_vnics)) { 1352 PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf"); 1353 ret = -ENODEV; 1354 goto app_cleanup; 1355 } 1356 } else { 1357 /* 1358 * For coreNIC the number of vNICs exposed should be the same as the 1359 * number of physical ports. 1360 */ 1361 if (total_vnics != nfp_eth_table->count) { 1362 PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); 1363 ret = -ENODEV; 1364 goto app_cleanup; 1365 } 1366 } 1367 1368 /* Populate coreNIC app properties */ 1369 app_fw_nic->total_phyports = total_vnics; 1370 app_fw_nic->pf_dev = pf_dev; 1371 if (total_vnics > 1) 1372 app_fw_nic->multiport = true; 1373 1374 /* Map the symbol table */ 1375 snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id); 1376 pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name, 1377 app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ, 1378 &pf_dev->ctrl_area); 1379 if (pf_dev->ctrl_bar == NULL) { 1380 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name); 1381 ret = -EIO; 1382 goto app_cleanup; 1383 } 1384 1385 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); 1386 1387 /* Loop through all physical ports on PF */ 1388 numa_node = rte_socket_id(); 1389 for (i = 0; i < app_fw_nic->total_phyports; i++) { 1390 if (pf_dev->multi_pf.enabled) 1391 snprintf(port_name, sizeof(port_name), "%s", 1392 pf_dev->pci_dev->device.name); 1393 else 1394 snprintf(port_name, sizeof(port_name), "%s_port%u", 1395 pf_dev->pci_dev->device.name, i); 1396 1397 /* Allocate a eth_dev for this phyport */ 1398 eth_dev = rte_eth_dev_allocate(port_name); 1399 if (eth_dev == NULL) { 1400 ret = -ENODEV; 1401 goto port_cleanup; 1402 } 1403 1404 /* Allocate memory for this phyport */ 1405 eth_dev->data->dev_private = rte_zmalloc_socket(port_name, 1406 sizeof(struct nfp_net_hw), 1407 RTE_CACHE_LINE_SIZE, numa_node); 1408 if (eth_dev->data->dev_private == NULL) { 1409 ret = -ENOMEM; 1410 rte_eth_dev_release_port(eth_dev); 1411 goto port_cleanup; 1412 } 1413 1414 hw = eth_dev->data->dev_private; 1415 id = nfp_function_id_get(pf_dev, i); 1416 1417 /* Add this device to the PF's array of physical ports */ 1418 app_fw_nic->ports[id] = hw; 1419 1420 hw->dev_info = dev_info; 1421 hw->pf_dev = pf_dev; 1422 hw->cpp = pf_dev->cpp; 1423 hw->eth_dev = eth_dev; 1424 hw->idx = id; 1425 hw->nfp_idx = nfp_eth_table->ports[id].index; 1426 1427 eth_dev->device = &pf_dev->pci_dev->device; 1428 1429 /* 1430 * Ctrl/tx/rx BAR mappings and remaining init happens in 1431 * @nfp_net_init() 1432 */ 1433 ret = nfp_net_init(eth_dev); 1434 if (ret != 0) { 1435 ret = -ENODEV; 1436 goto port_cleanup; 1437 } 1438 1439 rte_eth_dev_probing_finish(eth_dev); 1440 1441 } /* End loop, all ports on this PF */ 1442 1443 return 0; 1444 1445 port_cleanup: 1446 for (i = 0; i < app_fw_nic->total_phyports; i++) { 1447 id = nfp_function_id_get(pf_dev, i); 1448 hw = app_fw_nic->ports[id]; 1449 1450 if (hw != NULL && hw->eth_dev != NULL) { 1451 nfp_net_uninit(hw->eth_dev); 1452 rte_eth_dev_release_port(hw->eth_dev); 1453 } 1454 } 1455 nfp_cpp_area_release_free(pf_dev->ctrl_area); 1456 app_cleanup: 1457 rte_free(app_fw_nic); 1458 1459 return ret; 1460 } 1461 1462 static int 1463 nfp_net_hwinfo_set(uint8_t function_id, 1464 struct nfp_rtsym_table *sym_tbl, 1465 struct nfp_cpp *cpp) 1466 { 1467 int ret = 0; 1468 uint64_t app_cap; 1469 uint8_t sp_indiff; 1470 struct nfp_nsp *nsp; 1471 char hw_info[RTE_ETH_NAME_MAX_LEN]; 1472 char app_cap_name[RTE_ETH_NAME_MAX_LEN]; 1473 1474 /* Read the app capabilities of the firmware loaded */ 1475 snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id); 1476 app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret); 1477 if (ret != 0) { 1478 PMD_INIT_LOG(ERR, "Couldn't read app_fw_cap from firmware."); 1479 return ret; 1480 } 1481 1482 /* Calculate the value of sp_indiff and write to hw_info */ 1483 sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF; 1484 snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff); 1485 1486 nsp = nfp_nsp_open(cpp); 1487 if (nsp == NULL) { 1488 PMD_INIT_LOG(ERR, "Couldn't get NSP."); 1489 return -EIO; 1490 } 1491 1492 ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info)); 1493 nfp_nsp_close(nsp); 1494 if (ret != 0) { 1495 PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo."); 1496 return ret; 1497 } 1498 1499 return 0; 1500 } 1501 1502 const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = { 1503 [NFP_MEDIA_W0_RJ45_10M] = RTE_ETH_LINK_SPEED_10M, 1504 [NFP_MEDIA_W0_RJ45_10M_HD] = RTE_ETH_LINK_SPEED_10M_HD, 1505 [NFP_MEDIA_W0_RJ45_100M] = RTE_ETH_LINK_SPEED_100M, 1506 [NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD, 1507 [NFP_MEDIA_W0_RJ45_1G] = RTE_ETH_LINK_SPEED_1G, 1508 [NFP_MEDIA_W0_RJ45_2P5G] = RTE_ETH_LINK_SPEED_2_5G, 1509 [NFP_MEDIA_W0_RJ45_5G] = RTE_ETH_LINK_SPEED_5G, 1510 [NFP_MEDIA_W0_RJ45_10G] = RTE_ETH_LINK_SPEED_10G, 1511 [NFP_MEDIA_1000BASE_CX] = RTE_ETH_LINK_SPEED_1G, 1512 [NFP_MEDIA_1000BASE_KX] = RTE_ETH_LINK_SPEED_1G, 1513 [NFP_MEDIA_10GBASE_KX4] = RTE_ETH_LINK_SPEED_10G, 1514 [NFP_MEDIA_10GBASE_KR] = RTE_ETH_LINK_SPEED_10G, 1515 [NFP_MEDIA_10GBASE_CX4] = RTE_ETH_LINK_SPEED_10G, 1516 [NFP_MEDIA_10GBASE_CR] = RTE_ETH_LINK_SPEED_10G, 1517 [NFP_MEDIA_10GBASE_SR] = RTE_ETH_LINK_SPEED_10G, 1518 [NFP_MEDIA_10GBASE_ER] = RTE_ETH_LINK_SPEED_10G, 1519 [NFP_MEDIA_25GBASE_KR] = RTE_ETH_LINK_SPEED_25G, 1520 [NFP_MEDIA_25GBASE_KR_S] = RTE_ETH_LINK_SPEED_25G, 1521 [NFP_MEDIA_25GBASE_CR] = RTE_ETH_LINK_SPEED_25G, 1522 [NFP_MEDIA_25GBASE_CR_S] = RTE_ETH_LINK_SPEED_25G, 1523 [NFP_MEDIA_25GBASE_SR] = RTE_ETH_LINK_SPEED_25G, 1524 [NFP_MEDIA_40GBASE_CR4] = RTE_ETH_LINK_SPEED_40G, 1525 [NFP_MEDIA_40GBASE_KR4] = RTE_ETH_LINK_SPEED_40G, 1526 [NFP_MEDIA_40GBASE_SR4] = RTE_ETH_LINK_SPEED_40G, 1527 [NFP_MEDIA_40GBASE_LR4] = RTE_ETH_LINK_SPEED_40G, 1528 [NFP_MEDIA_50GBASE_KR] = RTE_ETH_LINK_SPEED_50G, 1529 [NFP_MEDIA_50GBASE_SR] = RTE_ETH_LINK_SPEED_50G, 1530 [NFP_MEDIA_50GBASE_CR] = RTE_ETH_LINK_SPEED_50G, 1531 [NFP_MEDIA_50GBASE_LR] = RTE_ETH_LINK_SPEED_50G, 1532 [NFP_MEDIA_50GBASE_ER] = RTE_ETH_LINK_SPEED_50G, 1533 [NFP_MEDIA_50GBASE_FR] = RTE_ETH_LINK_SPEED_50G, 1534 [NFP_MEDIA_100GBASE_KR4] = RTE_ETH_LINK_SPEED_100G, 1535 [NFP_MEDIA_100GBASE_SR4] = RTE_ETH_LINK_SPEED_100G, 1536 [NFP_MEDIA_100GBASE_CR4] = RTE_ETH_LINK_SPEED_100G, 1537 [NFP_MEDIA_100GBASE_KP4] = RTE_ETH_LINK_SPEED_100G, 1538 [NFP_MEDIA_100GBASE_CR10] = RTE_ETH_LINK_SPEED_100G, 1539 [NFP_MEDIA_10GBASE_LR] = RTE_ETH_LINK_SPEED_10G, 1540 [NFP_MEDIA_25GBASE_LR] = RTE_ETH_LINK_SPEED_25G, 1541 [NFP_MEDIA_25GBASE_ER] = RTE_ETH_LINK_SPEED_25G 1542 }; 1543 1544 static int 1545 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf, 1546 struct nfp_pf_dev *pf_dev) 1547 { 1548 uint32_t i; 1549 uint32_t j; 1550 uint32_t offset; 1551 uint32_t speed_capa = 0; 1552 uint64_t supported_modes; 1553 1554 for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) { 1555 supported_modes = media_buf->supported_modes[i]; 1556 offset = i * UINT64_BIT; 1557 for (j = 0; j < UINT64_BIT; j++) { 1558 if (supported_modes == 0) 1559 break; 1560 1561 if ((supported_modes & 1) != 0) { 1562 if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) { 1563 PMD_DRV_LOG(ERR, "Invalid offset of media table."); 1564 return -EINVAL; 1565 } 1566 1567 speed_capa |= nfp_eth_media_table[j + offset]; 1568 } 1569 1570 supported_modes = supported_modes >> 1; 1571 } 1572 } 1573 1574 pf_dev->speed_capa = speed_capa; 1575 1576 return pf_dev->speed_capa == 0 ? -EINVAL : 0; 1577 } 1578 1579 static int 1580 nfp_net_speed_capa_get(struct nfp_pf_dev *pf_dev, 1581 uint32_t port_id) 1582 { 1583 int ret; 1584 struct nfp_nsp *nsp; 1585 struct nfp_eth_media_buf media_buf; 1586 1587 media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index; 1588 pf_dev->speed_capa = 0; 1589 1590 nsp = nfp_nsp_open(pf_dev->cpp); 1591 if (nsp == NULL) { 1592 PMD_DRV_LOG(ERR, "Couldn't get NSP."); 1593 return -EIO; 1594 } 1595 1596 ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf)); 1597 nfp_nsp_close(nsp); 1598 if (ret != 0) { 1599 PMD_DRV_LOG(ERR, "Failed to read media."); 1600 return ret; 1601 } 1602 1603 ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev); 1604 if (ret < 0) { 1605 PMD_DRV_LOG(ERR, "Speed capability is invalid."); 1606 return ret; 1607 } 1608 1609 return 0; 1610 } 1611 1612 static int 1613 nfp_pf_init(struct rte_pci_device *pci_dev) 1614 { 1615 uint32_t i; 1616 uint32_t id; 1617 int ret = 0; 1618 uint64_t addr; 1619 uint32_t index; 1620 uint32_t cpp_id; 1621 uint8_t function_id; 1622 struct nfp_cpp *cpp; 1623 struct nfp_pf_dev *pf_dev; 1624 struct nfp_hwinfo *hwinfo; 1625 enum nfp_app_fw_id app_fw_id; 1626 char name[RTE_ETH_NAME_MAX_LEN]; 1627 struct nfp_rtsym_table *sym_tbl; 1628 char app_name[RTE_ETH_NAME_MAX_LEN]; 1629 struct nfp_eth_table *nfp_eth_table; 1630 const struct nfp_dev_info *dev_info; 1631 1632 if (pci_dev == NULL) 1633 return -ENODEV; 1634 1635 if (pci_dev->mem_resource[0].addr == NULL) { 1636 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 1637 return -ENODEV; 1638 } 1639 1640 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 1641 if (dev_info == NULL) { 1642 PMD_INIT_LOG(ERR, "Not supported device ID"); 1643 return -ENODEV; 1644 } 1645 1646 /* Allocate memory for the PF "device" */ 1647 function_id = (pci_dev->addr.function) & 0x07; 1648 snprintf(name, sizeof(name), "nfp_pf%u", function_id); 1649 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 1650 if (pf_dev == NULL) { 1651 PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 1652 return -ENOMEM; 1653 } 1654 1655 /* 1656 * When device bound to UIO, the device could be used, by mistake, 1657 * by two DPDK apps, and the UIO driver does not avoid it. This 1658 * could lead to a serious problem when configuring the NFP CPP 1659 * interface. Here we avoid this telling to the CPP init code to 1660 * use a lock file if UIO is being used. 1661 */ 1662 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 1663 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1664 else 1665 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1666 1667 if (cpp == NULL) { 1668 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1669 ret = -EIO; 1670 goto pf_cleanup; 1671 } 1672 1673 hwinfo = nfp_hwinfo_read(cpp); 1674 if (hwinfo == NULL) { 1675 PMD_INIT_LOG(ERR, "Error reading hwinfo table"); 1676 ret = -EIO; 1677 goto cpp_cleanup; 1678 } 1679 1680 /* Read the number of physical ports from hardware */ 1681 nfp_eth_table = nfp_eth_read_ports(cpp); 1682 if (nfp_eth_table == NULL) { 1683 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 1684 ret = -EIO; 1685 goto hwinfo_cleanup; 1686 } 1687 1688 pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); 1689 pf_dev->multi_pf.function_id = function_id; 1690 1691 /* Force the physical port down to clear the possible DMA error */ 1692 for (i = 0; i < nfp_eth_table->count; i++) { 1693 id = nfp_function_id_get(pf_dev, i); 1694 index = nfp_eth_table->ports[id].index; 1695 nfp_eth_set_configured(cpp, index, 0); 1696 } 1697 1698 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo, 1699 dev_info, &pf_dev->multi_pf) != 0) { 1700 PMD_INIT_LOG(ERR, "Error when uploading firmware"); 1701 ret = -EIO; 1702 goto eth_table_cleanup; 1703 } 1704 1705 /* Now the symbol table should be there */ 1706 sym_tbl = nfp_rtsym_table_read(cpp); 1707 if (sym_tbl == NULL) { 1708 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1709 ret = -EIO; 1710 goto fw_cleanup; 1711 } 1712 1713 /* Read the app ID of the firmware loaded */ 1714 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 1715 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1716 if (ret != 0) { 1717 PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name); 1718 ret = -EIO; 1719 goto sym_tbl_cleanup; 1720 } 1721 1722 /* Write sp_indiff to hw_info */ 1723 ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp); 1724 if (ret != 0) { 1725 PMD_INIT_LOG(ERR, "Failed to set hwinfo."); 1726 ret = -EIO; 1727 goto sym_tbl_cleanup; 1728 } 1729 1730 /* Populate the newly created PF device */ 1731 pf_dev->app_fw_id = app_fw_id; 1732 pf_dev->cpp = cpp; 1733 pf_dev->hwinfo = hwinfo; 1734 pf_dev->sym_tbl = sym_tbl; 1735 pf_dev->pci_dev = pci_dev; 1736 pf_dev->nfp_eth_table = nfp_eth_table; 1737 1738 /* Get the speed capability */ 1739 for (i = 0; i < nfp_eth_table->count; i++) { 1740 id = nfp_function_id_get(pf_dev, i); 1741 ret = nfp_net_speed_capa_get(pf_dev, id); 1742 if (ret != 0) { 1743 PMD_INIT_LOG(ERR, "Failed to get speed capability."); 1744 ret = -EIO; 1745 goto sym_tbl_cleanup; 1746 } 1747 } 1748 1749 /* Configure access to tx/rx vNIC BARs */ 1750 addr = nfp_qcp_queue_offset(dev_info, 0); 1751 cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); 1752 1753 pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, 1754 addr, dev_info->qc_area_sz, &pf_dev->qc_area); 1755 if (pf_dev->qc_bar == NULL) { 1756 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); 1757 ret = -EIO; 1758 goto sym_tbl_cleanup; 1759 } 1760 1761 PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); 1762 1763 /* 1764 * PF initialization has been done at this point. Call app specific 1765 * init code now. 1766 */ 1767 switch (pf_dev->app_fw_id) { 1768 case NFP_APP_FW_CORE_NIC: 1769 if (pf_dev->multi_pf.enabled) { 1770 ret = nfp_enable_multi_pf(pf_dev); 1771 if (ret != 0) 1772 goto hwqueues_cleanup; 1773 } 1774 1775 PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1776 ret = nfp_init_app_fw_nic(pf_dev, dev_info); 1777 if (ret != 0) { 1778 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1779 goto hwqueues_cleanup; 1780 } 1781 break; 1782 case NFP_APP_FW_FLOWER_NIC: 1783 PMD_INIT_LOG(INFO, "Initializing Flower"); 1784 ret = nfp_init_app_fw_flower(pf_dev, dev_info); 1785 if (ret != 0) { 1786 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1787 goto hwqueues_cleanup; 1788 } 1789 break; 1790 default: 1791 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1792 ret = -EINVAL; 1793 goto hwqueues_cleanup; 1794 } 1795 1796 /* Register the CPP bridge service here for primary use */ 1797 ret = nfp_enable_cpp_service(pf_dev); 1798 if (ret != 0) 1799 PMD_INIT_LOG(INFO, "Enable cpp service failed."); 1800 1801 return 0; 1802 1803 hwqueues_cleanup: 1804 nfp_cpp_area_release_free(pf_dev->qc_area); 1805 sym_tbl_cleanup: 1806 free(sym_tbl); 1807 fw_cleanup: 1808 nfp_fw_unload(cpp); 1809 nfp_net_keepalive_stop(&pf_dev->multi_pf); 1810 nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id); 1811 nfp_net_keepalive_uninit(&pf_dev->multi_pf); 1812 eth_table_cleanup: 1813 free(nfp_eth_table); 1814 hwinfo_cleanup: 1815 free(hwinfo); 1816 cpp_cleanup: 1817 nfp_cpp_free(cpp); 1818 pf_cleanup: 1819 rte_free(pf_dev); 1820 1821 return ret; 1822 } 1823 1824 static int 1825 nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev) 1826 { 1827 uint32_t i; 1828 int err = 0; 1829 int ret = 0; 1830 uint8_t function_id; 1831 uint32_t total_vnics; 1832 struct nfp_net_hw *hw; 1833 char pf_name[RTE_ETH_NAME_MAX_LEN]; 1834 1835 /* Read the number of vNIC's created for the PF */ 1836 function_id = (pf_dev->pci_dev->addr.function) & 0x07; 1837 snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id); 1838 total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err); 1839 if (err != 0 || total_vnics == 0 || total_vnics > 8) { 1840 PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name); 1841 return -ENODEV; 1842 } 1843 1844 for (i = 0; i < total_vnics; i++) { 1845 struct rte_eth_dev *eth_dev; 1846 char port_name[RTE_ETH_NAME_MAX_LEN]; 1847 1848 if (nfp_check_multi_pf_from_fw(total_vnics)) 1849 snprintf(port_name, sizeof(port_name), "%s", 1850 pf_dev->pci_dev->device.name); 1851 else 1852 snprintf(port_name, sizeof(port_name), "%s_port%u", 1853 pf_dev->pci_dev->device.name, i); 1854 1855 PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); 1856 eth_dev = rte_eth_dev_attach_secondary(port_name); 1857 if (eth_dev == NULL) { 1858 PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); 1859 ret = -ENODEV; 1860 break; 1861 } 1862 1863 eth_dev->process_private = pf_dev; 1864 hw = eth_dev->data->dev_private; 1865 nfp_net_ethdev_ops_mount(hw, eth_dev); 1866 1867 rte_eth_dev_probing_finish(eth_dev); 1868 } 1869 1870 return ret; 1871 } 1872 1873 /* 1874 * When attaching to the NFP4000/6000 PF on a secondary process there 1875 * is no need to initialise the PF again. Only minimal work is required 1876 * here. 1877 */ 1878 static int 1879 nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 1880 { 1881 int ret = 0; 1882 struct nfp_cpp *cpp; 1883 uint8_t function_id; 1884 struct nfp_pf_dev *pf_dev; 1885 enum nfp_app_fw_id app_fw_id; 1886 char name[RTE_ETH_NAME_MAX_LEN]; 1887 struct nfp_rtsym_table *sym_tbl; 1888 const struct nfp_dev_info *dev_info; 1889 char app_name[RTE_ETH_NAME_MAX_LEN]; 1890 1891 if (pci_dev == NULL) 1892 return -ENODEV; 1893 1894 if (pci_dev->mem_resource[0].addr == NULL) { 1895 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 1896 return -ENODEV; 1897 } 1898 1899 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 1900 if (dev_info == NULL) { 1901 PMD_INIT_LOG(ERR, "Not supported device ID"); 1902 return -ENODEV; 1903 } 1904 1905 /* Allocate memory for the PF "device" */ 1906 snprintf(name, sizeof(name), "nfp_pf%d", 0); 1907 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 1908 if (pf_dev == NULL) { 1909 PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 1910 return -ENOMEM; 1911 } 1912 1913 /* 1914 * When device bound to UIO, the device could be used, by mistake, 1915 * by two DPDK apps, and the UIO driver does not avoid it. This 1916 * could lead to a serious problem when configuring the NFP CPP 1917 * interface. Here we avoid this telling to the CPP init code to 1918 * use a lock file if UIO is being used. 1919 */ 1920 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 1921 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1922 else 1923 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1924 1925 if (cpp == NULL) { 1926 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1927 ret = -EIO; 1928 goto pf_cleanup; 1929 } 1930 1931 /* 1932 * We don't have access to the PF created in the primary process 1933 * here so we have to read the number of ports from firmware. 1934 */ 1935 sym_tbl = nfp_rtsym_table_read(cpp); 1936 if (sym_tbl == NULL) { 1937 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1938 ret = -EIO; 1939 goto pf_cleanup; 1940 } 1941 1942 /* Read the app ID of the firmware loaded */ 1943 function_id = pci_dev->addr.function & 0x7; 1944 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 1945 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1946 if (ret != 0) { 1947 PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name); 1948 ret = -EIO; 1949 goto sym_tbl_cleanup; 1950 } 1951 1952 /* Populate the newly created PF device */ 1953 pf_dev->app_fw_id = app_fw_id; 1954 pf_dev->cpp = cpp; 1955 pf_dev->sym_tbl = sym_tbl; 1956 pf_dev->pci_dev = pci_dev; 1957 1958 /* Call app specific init code now */ 1959 switch (app_fw_id) { 1960 case NFP_APP_FW_CORE_NIC: 1961 PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1962 ret = nfp_secondary_init_app_fw_nic(pf_dev); 1963 if (ret != 0) { 1964 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1965 goto sym_tbl_cleanup; 1966 } 1967 break; 1968 case NFP_APP_FW_FLOWER_NIC: 1969 PMD_INIT_LOG(INFO, "Initializing Flower"); 1970 ret = nfp_secondary_init_app_fw_flower(pf_dev); 1971 if (ret != 0) { 1972 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1973 goto sym_tbl_cleanup; 1974 } 1975 break; 1976 default: 1977 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1978 ret = -EINVAL; 1979 goto sym_tbl_cleanup; 1980 } 1981 1982 return 0; 1983 1984 sym_tbl_cleanup: 1985 free(sym_tbl); 1986 pf_cleanup: 1987 rte_free(pf_dev); 1988 1989 return ret; 1990 } 1991 1992 static int 1993 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1994 struct rte_pci_device *dev) 1995 { 1996 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1997 return nfp_pf_init(dev); 1998 else 1999 return nfp_pf_secondary_init(dev); 2000 } 2001 2002 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 2003 { 2004 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2005 PCI_DEVICE_ID_NFP3800_PF_NIC) 2006 }, 2007 { 2008 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2009 PCI_DEVICE_ID_NFP4000_PF_NIC) 2010 }, 2011 { 2012 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2013 PCI_DEVICE_ID_NFP6000_PF_NIC) 2014 }, 2015 { 2016 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2017 PCI_DEVICE_ID_NFP3800_PF_NIC) 2018 }, 2019 { 2020 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2021 PCI_DEVICE_ID_NFP4000_PF_NIC) 2022 }, 2023 { 2024 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2025 PCI_DEVICE_ID_NFP6000_PF_NIC) 2026 }, 2027 { 2028 .vendor_id = 0, 2029 }, 2030 }; 2031 2032 static int 2033 nfp_pci_uninit(struct rte_eth_dev *eth_dev) 2034 { 2035 uint16_t port_id; 2036 struct rte_pci_device *pci_dev; 2037 2038 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2039 2040 /* Free up all physical ports under PF */ 2041 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 2042 rte_eth_dev_close(port_id); 2043 /* 2044 * Ports can be closed and freed but hotplugging is not 2045 * currently supported. 2046 */ 2047 return -ENOTSUP; 2048 } 2049 2050 static int 2051 eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 2052 { 2053 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 2054 } 2055 2056 static struct rte_pci_driver rte_nfp_net_pf_pmd = { 2057 .id_table = pci_id_nfp_pf_net_map, 2058 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2059 .probe = nfp_pf_pci_probe, 2060 .remove = eth_nfp_pci_remove, 2061 }; 2062 2063 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd); 2064 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map); 2065 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio"); 2066