1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2014-2021 Netronome Systems, Inc. 3 * All rights reserved. 4 * 5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 6 */ 7 8 #include <unistd.h> 9 10 #include <eal_firmware.h> 11 #include <rte_alarm.h> 12 #include <rte_kvargs.h> 13 #include <rte_pci.h> 14 15 #include "flower/nfp_flower.h" 16 #include "nfd3/nfp_nfd3.h" 17 #include "nfdk/nfp_nfdk.h" 18 #include "nfpcore/nfp_cpp.h" 19 #include "nfpcore/nfp_elf.h" 20 #include "nfpcore/nfp_hwinfo.h" 21 #include "nfpcore/nfp_rtsym.h" 22 #include "nfpcore/nfp_nsp.h" 23 #include "nfpcore/nfp6000_pcie.h" 24 #include "nfpcore/nfp_resource.h" 25 #include "nfpcore/nfp_sync.h" 26 27 #include "nfp_cpp_bridge.h" 28 #include "nfp_ipsec.h" 29 #include "nfp_logs.h" 30 #include "nfp_net_flow.h" 31 #include "nfp_rxtx_vec.h" 32 33 /* 64-bit per app capabilities */ 34 #define NFP_NET_APP_CAP_SP_INDIFF RTE_BIT64(0) /* Indifferent to port speed */ 35 36 #define NFP_PF_DRIVER_NAME net_nfp_pf 37 #define NFP_PF_FORCE_RELOAD_FW "force_reload_fw" 38 #define NFP_CPP_SERVICE_ENABLE "cpp_service_enable" 39 #define NFP_QUEUE_PER_VF 1 40 41 struct nfp_net_init { 42 /** Sequential physical port number, only valid for CoreNIC firmware */ 43 uint8_t idx; 44 45 /** Internal port number as seen from NFP */ 46 uint8_t nfp_idx; 47 48 struct nfp_net_hw_priv *hw_priv; 49 }; 50 51 static int 52 nfp_devarg_handle_int(const char *key, 53 const char *value, 54 void *extra_args) 55 { 56 char *end_ptr; 57 uint64_t *num = extra_args; 58 59 if (value == NULL) 60 return -EPERM; 61 62 *num = strtoul(value, &end_ptr, 10); 63 if (*num == ULONG_MAX) { 64 PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param.", key, value); 65 return -ERANGE; 66 } else if (value == end_ptr) { 67 return -EPERM; 68 } 69 70 return 0; 71 } 72 73 static int 74 nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist, 75 const char *key_match, 76 bool *value_ret) 77 { 78 int ret; 79 uint32_t count; 80 uint64_t value; 81 82 count = rte_kvargs_count(kvlist, key_match); 83 if (count == 0) 84 return 0; 85 86 if (count > 1) { 87 PMD_DRV_LOG(ERR, "Too much bool arguments: %s.", key_match); 88 return -EINVAL; 89 } 90 91 ret = rte_kvargs_process(kvlist, key_match, &nfp_devarg_handle_int, &value); 92 if (ret != 0) 93 return -EINVAL; 94 95 if (value == 1) { 96 *value_ret = true; 97 } else if (value == 0) { 98 *value_ret = false; 99 } else { 100 PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1.", 101 key_match); 102 return -EINVAL; 103 } 104 105 return 0; 106 } 107 108 static int 109 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param, 110 const struct rte_devargs *devargs) 111 { 112 int ret; 113 struct rte_kvargs *kvlist; 114 115 if (devargs == NULL) 116 return 0; 117 118 kvlist = rte_kvargs_parse(devargs->args, NULL); 119 if (kvlist == NULL) 120 return -EINVAL; 121 122 ret = nfp_devarg_parse_bool_para(kvlist, NFP_PF_FORCE_RELOAD_FW, 123 &nfp_devargs_param->force_reload_fw); 124 if (ret != 0) 125 goto exit; 126 127 ret = nfp_devarg_parse_bool_para(kvlist, NFP_CPP_SERVICE_ENABLE, 128 &nfp_devargs_param->cpp_service_enable); 129 if (ret != 0) 130 goto exit; 131 132 exit: 133 rte_kvargs_free(kvlist); 134 135 return ret; 136 } 137 138 static void 139 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, 140 uint16_t port, 141 struct nfp_net_hw_priv *hw_priv) 142 { 143 struct nfp_net_hw *hw; 144 struct nfp_eth_table *nfp_eth_table; 145 146 /* Grab a pointer to the correct physical port */ 147 hw = app_fw_nic->ports[port]; 148 149 nfp_eth_table = hw_priv->pf_dev->nfp_eth_table; 150 151 rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr); 152 } 153 154 static uint32_t 155 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap) 156 { 157 switch (speeds_bitmap) { 158 case RTE_ETH_LINK_SPEED_10M_HD: 159 return RTE_ETH_SPEED_NUM_10M; 160 case RTE_ETH_LINK_SPEED_10M: 161 return RTE_ETH_SPEED_NUM_10M; 162 case RTE_ETH_LINK_SPEED_100M_HD: 163 return RTE_ETH_SPEED_NUM_100M; 164 case RTE_ETH_LINK_SPEED_100M: 165 return RTE_ETH_SPEED_NUM_100M; 166 case RTE_ETH_LINK_SPEED_1G: 167 return RTE_ETH_SPEED_NUM_1G; 168 case RTE_ETH_LINK_SPEED_2_5G: 169 return RTE_ETH_SPEED_NUM_2_5G; 170 case RTE_ETH_LINK_SPEED_5G: 171 return RTE_ETH_SPEED_NUM_5G; 172 case RTE_ETH_LINK_SPEED_10G: 173 return RTE_ETH_SPEED_NUM_10G; 174 case RTE_ETH_LINK_SPEED_20G: 175 return RTE_ETH_SPEED_NUM_20G; 176 case RTE_ETH_LINK_SPEED_25G: 177 return RTE_ETH_SPEED_NUM_25G; 178 case RTE_ETH_LINK_SPEED_40G: 179 return RTE_ETH_SPEED_NUM_40G; 180 case RTE_ETH_LINK_SPEED_50G: 181 return RTE_ETH_SPEED_NUM_50G; 182 case RTE_ETH_LINK_SPEED_56G: 183 return RTE_ETH_SPEED_NUM_56G; 184 case RTE_ETH_LINK_SPEED_100G: 185 return RTE_ETH_SPEED_NUM_100G; 186 case RTE_ETH_LINK_SPEED_200G: 187 return RTE_ETH_SPEED_NUM_200G; 188 case RTE_ETH_LINK_SPEED_400G: 189 return RTE_ETH_SPEED_NUM_400G; 190 default: 191 return RTE_ETH_SPEED_NUM_NONE; 192 } 193 } 194 195 static int 196 nfp_net_nfp4000_speed_configure_check(uint16_t port_id, 197 uint32_t configure_speed, 198 struct nfp_eth_table *nfp_eth_table) 199 { 200 switch (port_id) { 201 case 0: 202 if (configure_speed == RTE_ETH_SPEED_NUM_25G && 203 nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) { 204 PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000."); 205 return -ENOTSUP; 206 } 207 break; 208 case 1: 209 if (configure_speed == RTE_ETH_SPEED_NUM_10G && 210 nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) { 211 PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000."); 212 return -ENOTSUP; 213 } 214 break; 215 default: 216 PMD_DRV_LOG(ERR, "The port id is invalid."); 217 return -EINVAL; 218 } 219 220 return 0; 221 } 222 223 static int 224 nfp_net_speed_autoneg_set(struct nfp_net_hw_priv *hw_priv, 225 struct nfp_eth_table_port *eth_port) 226 { 227 int ret; 228 struct nfp_nsp *nsp; 229 230 nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index); 231 if (nsp == NULL) { 232 PMD_DRV_LOG(ERR, "Could not get NSP."); 233 return -EIO; 234 } 235 236 ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO); 237 if (ret != 0) { 238 PMD_DRV_LOG(ERR, "Failed to set ANEG enable."); 239 nfp_eth_config_cleanup_end(nsp); 240 return ret; 241 } 242 243 return nfp_eth_config_commit_end(nsp); 244 } 245 246 static int 247 nfp_net_speed_fixed_set(struct nfp_net_hw_priv *hw_priv, 248 struct nfp_eth_table_port *eth_port, 249 uint32_t configure_speed) 250 { 251 int ret; 252 struct nfp_nsp *nsp; 253 254 nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index); 255 if (nsp == NULL) { 256 PMD_DRV_LOG(ERR, "Could not get NSP."); 257 return -EIO; 258 } 259 260 ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED); 261 if (ret != 0) { 262 PMD_DRV_LOG(ERR, "Failed to set ANEG disable."); 263 goto config_cleanup; 264 } 265 266 ret = nfp_eth_set_speed(nsp, configure_speed); 267 if (ret != 0) { 268 PMD_DRV_LOG(ERR, "Failed to set speed."); 269 goto config_cleanup; 270 } 271 272 return nfp_eth_config_commit_end(nsp); 273 274 config_cleanup: 275 nfp_eth_config_cleanup_end(nsp); 276 277 return ret; 278 } 279 280 static int 281 nfp_net_speed_configure(struct rte_eth_dev *dev) 282 { 283 int ret; 284 uint8_t idx; 285 uint32_t speed_capa; 286 uint32_t link_speeds; 287 uint32_t configure_speed; 288 struct nfp_eth_table_port *eth_port; 289 struct nfp_eth_table *nfp_eth_table; 290 struct nfp_net_hw *net_hw = dev->data->dev_private; 291 struct nfp_net_hw_priv *hw_priv = dev->process_private; 292 293 idx = nfp_net_get_idx(dev); 294 nfp_eth_table = hw_priv->pf_dev->nfp_eth_table; 295 eth_port = &nfp_eth_table->ports[idx]; 296 297 speed_capa = hw_priv->pf_dev->speed_capa; 298 if (speed_capa == 0) { 299 PMD_DRV_LOG(ERR, "Speed_capa is invalid."); 300 return -EINVAL; 301 } 302 303 link_speeds = dev->data->dev_conf.link_speeds; 304 configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds); 305 if (configure_speed == RTE_ETH_SPEED_NUM_NONE && 306 link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 307 PMD_DRV_LOG(ERR, "Configured speed is invalid."); 308 return -EINVAL; 309 } 310 311 /* NFP4000 does not allow the port 0 25Gbps and port 1 10Gbps at the same time. */ 312 if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) { 313 ret = nfp_net_nfp4000_speed_configure_check(idx, 314 configure_speed, nfp_eth_table); 315 if (ret != 0) { 316 PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000."); 317 return ret; 318 } 319 } 320 321 if (configure_speed == RTE_ETH_LINK_SPEED_AUTONEG) { 322 if (!eth_port->supp_aneg) 323 return 0; 324 325 if (eth_port->aneg == NFP_ANEG_AUTO) 326 return 0; 327 328 ret = nfp_net_speed_autoneg_set(hw_priv, eth_port); 329 if (ret != 0) { 330 PMD_DRV_LOG(ERR, "Failed to set speed autoneg."); 331 return ret; 332 } 333 } else { 334 if (eth_port->aneg == NFP_ANEG_DISABLED && configure_speed == eth_port->speed) 335 return 0; 336 337 ret = nfp_net_speed_fixed_set(hw_priv, eth_port, configure_speed); 338 if (ret != 0) { 339 PMD_DRV_LOG(ERR, "Failed to set speed fixed."); 340 return ret; 341 } 342 } 343 344 hw_priv->pf_dev->speed_updated = true; 345 346 return 0; 347 } 348 349 static int 350 nfp_net_start(struct rte_eth_dev *dev) 351 { 352 int ret; 353 uint16_t i; 354 struct nfp_hw *hw; 355 uint32_t new_ctrl; 356 uint32_t update = 0; 357 uint32_t cap_extend; 358 uint32_t intr_vector; 359 uint32_t ctrl_extend = 0; 360 struct nfp_net_hw *net_hw; 361 struct nfp_pf_dev *pf_dev; 362 struct rte_eth_rxmode *rxmode; 363 struct rte_eth_txmode *txmode; 364 struct nfp_net_hw_priv *hw_priv; 365 struct nfp_app_fw_nic *app_fw_nic; 366 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 367 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 368 369 net_hw = dev->data->dev_private; 370 hw_priv = dev->process_private; 371 pf_dev = hw_priv->pf_dev; 372 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 373 hw = &net_hw->super; 374 375 /* Disabling queues just in case... */ 376 nfp_net_disable_queues(dev); 377 378 /* Enabling the required queues in the device */ 379 nfp_net_enable_queues(dev); 380 381 /* Configure the port speed and the auto-negotiation mode. */ 382 ret = nfp_net_speed_configure(dev); 383 if (ret < 0) { 384 PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode."); 385 return ret; 386 } 387 388 /* Check and configure queue intr-vector mapping */ 389 if (dev->data->dev_conf.intr_conf.rxq != 0) { 390 if (app_fw_nic->multiport) { 391 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 392 "with NFP multiport PF."); 393 return -EINVAL; 394 } 395 396 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { 397 /* 398 * Better not to share LSC with RX interrupts. 399 * Unregistering LSC interrupt handler. 400 */ 401 rte_intr_callback_unregister(intr_handle, 402 nfp_net_dev_interrupt_handler, (void *)dev); 403 404 if (dev->data->nb_rx_queues > 1) { 405 PMD_INIT_LOG(ERR, "PMD rx interrupt only " 406 "supports 1 queue with UIO."); 407 return -EIO; 408 } 409 } 410 411 intr_vector = dev->data->nb_rx_queues; 412 if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) 413 return -1; 414 415 nfp_configure_rx_interrupt(dev, intr_handle); 416 update = NFP_NET_CFG_UPDATE_MSIX; 417 } 418 419 /* Checking MTU set */ 420 if (dev->data->mtu > net_hw->flbufsz) { 421 PMD_INIT_LOG(ERR, "MTU (%u) can not be larger than the current NFP_FRAME_SIZE (%u).", 422 dev->data->mtu, net_hw->flbufsz); 423 return -ERANGE; 424 } 425 426 rte_intr_enable(intr_handle); 427 428 new_ctrl = nfp_check_offloads(dev); 429 430 /* Writing configuration parameters in the device */ 431 nfp_net_params_setup(net_hw); 432 433 rxmode = &dev->data->dev_conf.rxmode; 434 if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) != 0) { 435 nfp_net_rss_config_default(dev); 436 update |= NFP_NET_CFG_UPDATE_RSS; 437 new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); 438 } 439 440 /* Enable device */ 441 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 442 443 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 444 445 txmode = &dev->data->dev_conf.txmode; 446 447 if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) 448 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 449 450 if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) 451 new_ctrl |= NFP_NET_CFG_CTRL_TXRWB; 452 453 if (nfp_reconfig(hw, new_ctrl, update) != 0) 454 return -EIO; 455 456 hw->ctrl = new_ctrl; 457 458 /* Enable packet type offload by extend ctrl word1. */ 459 cap_extend = hw->cap_ext; 460 if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) 461 ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; 462 463 if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SECURITY) != 0 || 464 (txmode->offloads & RTE_ETH_TX_OFFLOAD_SECURITY) != 0) { 465 if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) 466 ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC | 467 NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP | 468 NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; 469 } 470 471 /* Enable flow steer by extend ctrl word1. */ 472 if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) 473 ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER; 474 475 update = NFP_NET_CFG_UPDATE_GEN; 476 if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0) 477 return -EIO; 478 479 hw->ctrl_ext = ctrl_extend; 480 481 /* 482 * Allocating rte mbufs for configured rx queues. 483 * This requires queues being enabled before. 484 */ 485 if (nfp_net_rx_freelist_setup(dev) != 0) { 486 ret = -ENOMEM; 487 goto error; 488 } 489 490 /* Configure the physical port up */ 491 ret = nfp_eth_set_configured(pf_dev->cpp, net_hw->nfp_idx, 1); 492 if (ret < 0) 493 goto error; 494 495 for (i = 0; i < dev->data->nb_rx_queues; i++) 496 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 497 for (i = 0; i < dev->data->nb_tx_queues; i++) 498 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 499 500 return 0; 501 502 error: 503 /* 504 * An error returned by this function should mean the app 505 * exiting and then the system releasing all the memory 506 * allocated even memory coming from hugepages. 507 * 508 * The device could be enabled at this point with some queues 509 * ready for getting packets. This is true if the call to 510 * nfp_net_rx_freelist_setup() succeeds for some queues but 511 * fails for subsequent queues. 512 * 513 * This should make the app exiting but better if we tell the 514 * device first. 515 */ 516 nfp_net_disable_queues(dev); 517 518 return ret; 519 } 520 521 /* Set the link up. */ 522 static int 523 nfp_net_set_link_up(struct rte_eth_dev *dev) 524 { 525 int ret; 526 struct nfp_net_hw *hw; 527 struct nfp_net_hw_priv *hw_priv; 528 529 hw = dev->data->dev_private; 530 hw_priv = dev->process_private; 531 532 ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 1); 533 if (ret < 0) 534 return ret; 535 536 return 0; 537 } 538 539 /* Set the link down. */ 540 static int 541 nfp_net_set_link_down(struct rte_eth_dev *dev) 542 { 543 int ret; 544 struct nfp_net_hw *hw; 545 struct nfp_net_hw_priv *hw_priv; 546 547 hw = dev->data->dev_private; 548 hw_priv = dev->process_private; 549 550 ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0); 551 if (ret < 0) 552 return ret; 553 554 return 0; 555 } 556 557 static void 558 nfp_net_beat_timer(void *arg) 559 { 560 uint64_t cur_sec; 561 struct nfp_multi_pf *multi_pf = arg; 562 563 cur_sec = rte_rdtsc(); 564 nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id)); 565 566 /* Beat once per second. */ 567 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 568 (void *)multi_pf) < 0) { 569 PMD_DRV_LOG(ERR, "Error setting alarm."); 570 } 571 } 572 573 static int 574 nfp_net_keepalive_init(struct nfp_cpp *cpp, 575 struct nfp_multi_pf *multi_pf) 576 { 577 uint8_t *base; 578 uint64_t addr; 579 uint32_t size; 580 uint32_t cpp_id; 581 struct nfp_resource *res; 582 583 res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE); 584 if (res == NULL) 585 return -EIO; 586 587 cpp_id = nfp_resource_cpp_id(res); 588 addr = nfp_resource_address(res); 589 size = nfp_resource_size(res); 590 591 nfp_resource_release(res); 592 593 /* Allocate a fixed area for keepalive. */ 594 base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area); 595 if (base == NULL) { 596 PMD_DRV_LOG(ERR, "Failed to map area for keepalive."); 597 return -EIO; 598 } 599 600 multi_pf->beat_addr = base; 601 602 return 0; 603 } 604 605 static void 606 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf) 607 { 608 nfp_cpp_area_release_free(multi_pf->beat_area); 609 } 610 611 static int 612 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf) 613 { 614 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 615 (void *)multi_pf) < 0) { 616 PMD_DRV_LOG(ERR, "Error setting alarm."); 617 return -EIO; 618 } 619 620 return 0; 621 } 622 623 static void 624 nfp_net_keepalive_clear(uint8_t *beat_addr, 625 uint8_t function_id) 626 { 627 nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id)); 628 } 629 630 static void 631 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info, 632 struct nfp_multi_pf *multi_pf) 633 { 634 uint8_t port_num; 635 636 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 637 if (port_num == multi_pf->function_id) 638 continue; 639 640 nfp_net_keepalive_clear(multi_pf->beat_addr, port_num); 641 } 642 } 643 644 static void 645 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) 646 { 647 /* Cancel keepalive for multiple PF setup */ 648 rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); 649 } 650 651 static int 652 nfp_net_uninit(struct rte_eth_dev *eth_dev) 653 { 654 struct nfp_net_hw *net_hw; 655 struct nfp_net_hw_priv *hw_priv; 656 657 net_hw = eth_dev->data->dev_private; 658 hw_priv = eth_dev->process_private; 659 660 if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) 661 nfp_net_flow_priv_uninit(hw_priv->pf_dev, net_hw->idx); 662 663 rte_free(net_hw->eth_xstats_base); 664 if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0) 665 nfp_net_txrwb_free(eth_dev); 666 nfp_ipsec_uninit(eth_dev); 667 668 return 0; 669 } 670 671 static void 672 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, 673 uint8_t id, 674 struct rte_eth_dev *eth_dev) 675 { 676 struct nfp_app_fw_nic *app_fw_nic; 677 678 app_fw_nic = pf_dev->app_fw_priv; 679 if (app_fw_nic->ports[id] != NULL) { 680 nfp_net_uninit(eth_dev); 681 app_fw_nic->ports[id] = NULL; 682 } 683 } 684 685 static void 686 nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) 687 { 688 nfp_cpp_area_release_free(pf_dev->ctrl_area); 689 rte_free(pf_dev->app_fw_priv); 690 } 691 692 static void 693 nfp_net_vf_config_uninit(struct nfp_pf_dev *pf_dev) 694 { 695 if (pf_dev->sriov_vf == 0) 696 return; 697 698 nfp_cpp_area_release_free(pf_dev->vf_cfg_tbl_area); 699 nfp_cpp_area_release_free(pf_dev->vf_area); 700 } 701 702 void 703 nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv) 704 { 705 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 706 707 if (pf_dev->devargs.cpp_service_enable) 708 nfp_disable_cpp_service(pf_dev); 709 nfp_net_vf_config_uninit(pf_dev); 710 nfp_cpp_area_release_free(pf_dev->mac_stats_area); 711 nfp_cpp_area_release_free(pf_dev->qc_area); 712 free(pf_dev->sym_tbl); 713 if (pf_dev->multi_pf.enabled) { 714 nfp_net_keepalive_stop(&pf_dev->multi_pf); 715 nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id); 716 nfp_net_keepalive_uninit(&pf_dev->multi_pf); 717 } 718 free(pf_dev->nfp_eth_table); 719 free(pf_dev->hwinfo); 720 nfp_cpp_free(pf_dev->cpp); 721 nfp_sync_free(pf_dev->sync); 722 rte_free(pf_dev); 723 rte_free(hw_priv); 724 } 725 726 static int 727 nfp_pf_secondary_uninit(struct nfp_net_hw_priv *hw_priv) 728 { 729 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 730 731 free(pf_dev->sym_tbl); 732 nfp_cpp_free(pf_dev->cpp); 733 nfp_sync_free(pf_dev->sync); 734 rte_free(pf_dev); 735 rte_free(hw_priv); 736 737 return 0; 738 } 739 740 /* Reset and stop device. The device can not be restarted. */ 741 static int 742 nfp_net_close(struct rte_eth_dev *dev) 743 { 744 uint8_t i; 745 uint8_t id; 746 struct nfp_net_hw *hw; 747 struct nfp_pf_dev *pf_dev; 748 struct rte_pci_device *pci_dev; 749 struct nfp_net_hw_priv *hw_priv; 750 struct nfp_app_fw_nic *app_fw_nic; 751 752 hw_priv = dev->process_private; 753 754 /* 755 * In secondary process, a released eth device can be found by its name 756 * in shared memory. 757 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the 758 * eth device has been released. 759 */ 760 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 761 if (dev->state == RTE_ETH_DEV_UNUSED) 762 return 0; 763 764 nfp_pf_secondary_uninit(hw_priv); 765 return 0; 766 } 767 768 hw = dev->data->dev_private; 769 pf_dev = hw_priv->pf_dev; 770 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 771 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 772 773 /* 774 * We assume that the DPDK application is stopping all the 775 * threads/queues before calling the device close function. 776 */ 777 nfp_net_disable_queues(dev); 778 779 /* Clear queues */ 780 nfp_net_close_tx_queue(dev); 781 nfp_net_close_rx_queue(dev); 782 783 /* Cancel possible impending LSC work here before releasing the port */ 784 rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); 785 786 /* Only free PF resources after all physical ports have been closed */ 787 /* Mark this port as unused and free device priv resources */ 788 nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); 789 790 if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) 791 return -EINVAL; 792 793 nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx, dev); 794 795 for (i = 0; i < pf_dev->total_phyports; i++) { 796 id = nfp_function_id_get(pf_dev, i); 797 798 /* Check to see if ports are still in use */ 799 if (app_fw_nic->ports[id] != NULL) 800 return 0; 801 } 802 803 /* Enable in nfp_net_start() */ 804 rte_intr_disable(pci_dev->intr_handle); 805 806 /* Register in nfp_net_init() */ 807 rte_intr_callback_unregister(pci_dev->intr_handle, 808 nfp_net_dev_interrupt_handler, (void *)dev); 809 810 nfp_uninit_app_fw_nic(pf_dev); 811 nfp_pf_uninit(hw_priv); 812 813 return 0; 814 } 815 816 static int 817 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw, 818 uint16_t port, 819 uint32_t *idx) 820 { 821 uint32_t i; 822 int free_idx = -1; 823 824 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 825 if (hw->vxlan_ports[i] == port) { 826 free_idx = i; 827 break; 828 } 829 830 if (hw->vxlan_usecnt[i] == 0) { 831 free_idx = i; 832 break; 833 } 834 } 835 836 if (free_idx == -1) 837 return -EINVAL; 838 839 *idx = free_idx; 840 841 return 0; 842 } 843 844 static int 845 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, 846 struct rte_eth_udp_tunnel *tunnel_udp) 847 { 848 int ret; 849 uint32_t idx; 850 uint32_t ctrl; 851 struct nfp_hw *hw; 852 uint16_t vxlan_port; 853 struct nfp_net_hw *net_hw; 854 enum rte_eth_tunnel_type tnl_type; 855 856 net_hw = dev->data->dev_private; 857 vxlan_port = tunnel_udp->udp_port; 858 tnl_type = tunnel_udp->prot_type; 859 860 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 861 PMD_DRV_LOG(ERR, "Not VXLAN tunnel."); 862 return -ENOTSUP; 863 } 864 865 ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx); 866 if (ret != 0) { 867 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx."); 868 return -EINVAL; 869 } 870 871 if (net_hw->vxlan_usecnt[idx] == 0) { 872 hw = &net_hw->super; 873 ctrl = hw->ctrl | NFP_NET_CFG_CTRL_VXLAN; 874 875 ret = nfp_net_set_vxlan_port(net_hw, idx, vxlan_port, ctrl); 876 if (ret != 0) { 877 PMD_DRV_LOG(ERR, "Failed set vxlan port."); 878 return -EINVAL; 879 } 880 881 hw->ctrl = ctrl; 882 } 883 884 net_hw->vxlan_usecnt[idx]++; 885 886 return 0; 887 } 888 889 static int 890 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, 891 struct rte_eth_udp_tunnel *tunnel_udp) 892 { 893 int ret; 894 uint32_t idx; 895 uint32_t ctrl; 896 struct nfp_hw *hw; 897 uint16_t vxlan_port; 898 struct nfp_net_hw *net_hw; 899 enum rte_eth_tunnel_type tnl_type; 900 901 net_hw = dev->data->dev_private; 902 vxlan_port = tunnel_udp->udp_port; 903 tnl_type = tunnel_udp->prot_type; 904 905 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 906 PMD_DRV_LOG(ERR, "Not VXLAN tunnel."); 907 return -ENOTSUP; 908 } 909 910 ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx); 911 if (ret != 0 || net_hw->vxlan_usecnt[idx] == 0) { 912 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx."); 913 return -EINVAL; 914 } 915 916 net_hw->vxlan_usecnt[idx]--; 917 918 if (net_hw->vxlan_usecnt[idx] == 0) { 919 hw = &net_hw->super; 920 ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_VXLAN; 921 922 ret = nfp_net_set_vxlan_port(net_hw, idx, 0, ctrl); 923 if (ret != 0) { 924 PMD_DRV_LOG(ERR, "Failed set vxlan port."); 925 return -EINVAL; 926 } 927 928 hw->ctrl = ctrl; 929 } 930 931 return 0; 932 } 933 934 /* Initialise and register driver with DPDK Application */ 935 static const struct eth_dev_ops nfp_net_eth_dev_ops = { 936 .dev_configure = nfp_net_configure, 937 .dev_start = nfp_net_start, 938 .dev_stop = nfp_net_stop, 939 .dev_set_link_up = nfp_net_set_link_up, 940 .dev_set_link_down = nfp_net_set_link_down, 941 .dev_close = nfp_net_close, 942 .promiscuous_enable = nfp_net_promisc_enable, 943 .promiscuous_disable = nfp_net_promisc_disable, 944 .allmulticast_enable = nfp_net_allmulticast_enable, 945 .allmulticast_disable = nfp_net_allmulticast_disable, 946 .link_update = nfp_net_link_update, 947 .stats_get = nfp_net_stats_get, 948 .stats_reset = nfp_net_stats_reset, 949 .xstats_get = nfp_net_xstats_get, 950 .xstats_reset = nfp_net_xstats_reset, 951 .xstats_get_names = nfp_net_xstats_get_names, 952 .xstats_get_by_id = nfp_net_xstats_get_by_id, 953 .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, 954 .dev_infos_get = nfp_net_infos_get, 955 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 956 .dev_ptypes_set = nfp_net_ptypes_set, 957 .mtu_set = nfp_net_dev_mtu_set, 958 .mac_addr_set = nfp_net_set_mac_addr, 959 .vlan_offload_set = nfp_net_vlan_offload_set, 960 .reta_update = nfp_net_reta_update, 961 .reta_query = nfp_net_reta_query, 962 .rss_hash_update = nfp_net_rss_hash_update, 963 .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 964 .rx_queue_setup = nfp_net_rx_queue_setup, 965 .rx_queue_release = nfp_net_rx_queue_release, 966 .rxq_info_get = nfp_net_rx_queue_info_get, 967 .tx_queue_setup = nfp_net_tx_queue_setup, 968 .tx_queue_release = nfp_net_tx_queue_release, 969 .txq_info_get = nfp_net_tx_queue_info_get, 970 .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 971 .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 972 .udp_tunnel_port_add = nfp_udp_tunnel_port_add, 973 .udp_tunnel_port_del = nfp_udp_tunnel_port_del, 974 .fw_version_get = nfp_net_firmware_version_get, 975 .flow_ctrl_get = nfp_net_flow_ctrl_get, 976 .flow_ctrl_set = nfp_net_flow_ctrl_set, 977 .flow_ops_get = nfp_net_flow_ops_get, 978 .fec_get_capability = nfp_net_fec_get_capability, 979 .fec_get = nfp_net_fec_get, 980 .fec_set = nfp_net_fec_set, 981 }; 982 983 static inline void 984 nfp_net_ethdev_ops_mount(struct nfp_pf_dev *pf_dev, 985 struct rte_eth_dev *eth_dev) 986 { 987 if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3) 988 eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts; 989 else 990 nfp_net_nfdk_xmit_pkts_set(eth_dev); 991 992 eth_dev->dev_ops = &nfp_net_eth_dev_ops; 993 eth_dev->rx_queue_count = nfp_net_rx_queue_count; 994 nfp_net_recv_pkts_set(eth_dev); 995 } 996 997 static int 998 nfp_net_init(struct rte_eth_dev *eth_dev, 999 void *para) 1000 { 1001 int err; 1002 uint16_t port; 1003 uint64_t rx_base; 1004 uint64_t tx_base; 1005 struct nfp_hw *hw; 1006 struct nfp_net_hw *net_hw; 1007 struct nfp_pf_dev *pf_dev; 1008 struct nfp_net_init *hw_init; 1009 struct rte_pci_device *pci_dev; 1010 struct nfp_net_hw_priv *hw_priv; 1011 struct nfp_app_fw_nic *app_fw_nic; 1012 1013 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1014 net_hw = eth_dev->data->dev_private; 1015 1016 hw_init = para; 1017 net_hw->idx = hw_init->idx; 1018 net_hw->nfp_idx = hw_init->nfp_idx; 1019 eth_dev->process_private = hw_init->hw_priv; 1020 1021 /* Use backpointer here to the PF of this eth_dev */ 1022 hw_priv = eth_dev->process_private; 1023 pf_dev = hw_priv->pf_dev; 1024 1025 /* Use backpointer to the CoreNIC app struct */ 1026 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 1027 1028 /* Add this device to the PF's array of physical ports */ 1029 app_fw_nic->ports[net_hw->idx] = net_hw; 1030 1031 port = net_hw->idx; 1032 if (port > 7) { 1033 PMD_DRV_LOG(ERR, "Port value is wrong."); 1034 return -ENODEV; 1035 } 1036 1037 hw = &net_hw->super; 1038 1039 PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " 1040 "NFP internal port number: %d.", port, net_hw->nfp_idx); 1041 1042 rte_eth_copy_pci_info(eth_dev, pci_dev); 1043 1044 if (pf_dev->multi_pf.enabled) 1045 hw->ctrl_bar = pf_dev->ctrl_bar; 1046 else 1047 hw->ctrl_bar = pf_dev->ctrl_bar + (port * pf_dev->ctrl_bar_size); 1048 1049 net_hw->mac_stats = pf_dev->mac_stats_bar + 1050 (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); 1051 1052 PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar); 1053 PMD_INIT_LOG(DEBUG, "MAC stats: %p.", net_hw->mac_stats); 1054 1055 err = nfp_net_common_init(pf_dev, net_hw); 1056 if (err != 0) 1057 return err; 1058 1059 err = nfp_net_tlv_caps_parse(eth_dev); 1060 if (err != 0) { 1061 PMD_INIT_LOG(ERR, "Failed to parser TLV caps."); 1062 return err; 1063 } 1064 1065 err = nfp_ipsec_init(eth_dev); 1066 if (err != 0) { 1067 PMD_INIT_LOG(ERR, "Failed to init IPsec module."); 1068 return err; 1069 } 1070 1071 nfp_net_ethdev_ops_mount(pf_dev, eth_dev); 1072 1073 net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * 1074 nfp_net_xstats_size(eth_dev), 0); 1075 if (net_hw->eth_xstats_base == NULL) { 1076 PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!", 1077 pci_dev->device.name); 1078 err = -ENOMEM; 1079 goto ipsec_exit; 1080 } 1081 1082 /* Work out where in the BAR the queues start. */ 1083 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 1084 rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 1085 1086 net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 1087 net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; 1088 1089 PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.", 1090 hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); 1091 1092 nfp_net_cfg_queue_setup(net_hw); 1093 net_hw->mtu = RTE_ETHER_MTU; 1094 1095 /* VLAN insertion is incompatible with LSOv2 */ 1096 if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) 1097 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 1098 1099 nfp_net_log_device_information(net_hw, pf_dev); 1100 1101 /* Initializing spinlock for reconfigs */ 1102 rte_spinlock_init(&hw->reconfig_lock); 1103 1104 if ((port == 0 || pf_dev->multi_pf.enabled)) { 1105 err = nfp_net_vf_config_app_init(net_hw, pf_dev); 1106 if (err != 0) { 1107 PMD_INIT_LOG(ERR, "Failed to init sriov module."); 1108 goto xstats_free; 1109 } 1110 } 1111 1112 /* Allocating memory for mac addr */ 1113 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); 1114 if (eth_dev->data->mac_addrs == NULL) { 1115 PMD_INIT_LOG(ERR, "Failed to space for MAC address."); 1116 err = -ENOMEM; 1117 goto xstats_free; 1118 } 1119 1120 if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) { 1121 err = nfp_net_txrwb_alloc(eth_dev); 1122 if (err != 0) 1123 goto xstats_free; 1124 } 1125 1126 nfp_net_pf_read_mac(app_fw_nic, port, hw_priv); 1127 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 1128 1129 if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { 1130 PMD_INIT_LOG(INFO, "Using random mac address for port %d.", port); 1131 /* Using random mac addresses for VFs */ 1132 rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); 1133 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 1134 } 1135 1136 /* Copying mac address to DPDK eth_dev struct */ 1137 rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs); 1138 1139 if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) 1140 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 1141 1142 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1143 1144 PMD_INIT_LOG(INFO, "Port %d VendorID=%#x DeviceID=%#x " 1145 "mac=" RTE_ETHER_ADDR_PRT_FMT, 1146 eth_dev->data->port_id, pci_dev->id.vendor_id, 1147 pci_dev->id.device_id, 1148 RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); 1149 1150 /* Registering LSC interrupt handler */ 1151 rte_intr_callback_register(pci_dev->intr_handle, 1152 nfp_net_dev_interrupt_handler, (void *)eth_dev); 1153 /* Telling the firmware about the LSC interrupt entry */ 1154 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 1155 /* Unmask the LSC interrupt */ 1156 nfp_net_irq_unmask(eth_dev); 1157 /* Recording current stats counters values */ 1158 nfp_net_stats_reset(eth_dev); 1159 1160 if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) { 1161 err = nfp_net_flow_priv_init(pf_dev, port); 1162 if (err != 0) { 1163 PMD_INIT_LOG(ERR, "Init net flow priv failed."); 1164 goto txrwb_free; 1165 } 1166 } 1167 1168 return 0; 1169 1170 txrwb_free: 1171 if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) 1172 nfp_net_txrwb_free(eth_dev); 1173 xstats_free: 1174 rte_free(net_hw->eth_xstats_base); 1175 ipsec_exit: 1176 nfp_ipsec_uninit(eth_dev); 1177 1178 return err; 1179 } 1180 1181 static int 1182 nfp_net_device_activate(struct nfp_pf_dev *pf_dev) 1183 { 1184 int ret; 1185 struct nfp_nsp *nsp; 1186 struct nfp_multi_pf *multi_pf; 1187 1188 multi_pf = &pf_dev->multi_pf; 1189 if (multi_pf->enabled && multi_pf->function_id != 0) { 1190 nsp = nfp_nsp_open(pf_dev->cpp); 1191 if (nsp == NULL) { 1192 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); 1193 return -EIO; 1194 } 1195 1196 ret = nfp_nsp_device_activate(nsp); 1197 nfp_nsp_close(nsp); 1198 if (ret != 0 && ret != -EOPNOTSUPP) 1199 return ret; 1200 } 1201 1202 return 0; 1203 } 1204 1205 #define DEFAULT_FW_PATH "/lib/firmware/netronome" 1206 1207 static int 1208 nfp_fw_get_name(struct nfp_pf_dev *pf_dev, 1209 char *fw_name, 1210 size_t fw_size) 1211 { 1212 char serial[40]; 1213 uint16_t interface; 1214 char card_desc[100]; 1215 uint32_t cpp_serial_len; 1216 const char *nfp_fw_model; 1217 const uint8_t *cpp_serial; 1218 1219 cpp_serial_len = nfp_cpp_serial(pf_dev->cpp, &cpp_serial); 1220 if (cpp_serial_len != NFP_SERIAL_LEN) 1221 return -ERANGE; 1222 1223 interface = nfp_cpp_interface(pf_dev->cpp); 1224 1225 /* Looking for firmware file in order of priority */ 1226 1227 /* First try to find a firmware image specific for this device */ 1228 snprintf(serial, sizeof(serial), 1229 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 1230 cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], 1231 cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); 1232 snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial); 1233 1234 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); 1235 if (access(fw_name, F_OK) == 0) 1236 return 0; 1237 1238 /* Then try the PCI name */ 1239 snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH, 1240 pf_dev->pci_dev->name); 1241 1242 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); 1243 if (access(fw_name, F_OK) == 0) 1244 return 0; 1245 1246 nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "nffw.partno"); 1247 if (nfp_fw_model == NULL) { 1248 nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "assembly.partno"); 1249 if (nfp_fw_model == NULL) { 1250 PMD_DRV_LOG(ERR, "Firmware model NOT found."); 1251 return -EIO; 1252 } 1253 } 1254 1255 /* And then try the model name */ 1256 snprintf(card_desc, sizeof(card_desc), "%s.nffw", nfp_fw_model); 1257 snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc); 1258 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); 1259 if (access(fw_name, F_OK) == 0) 1260 return 0; 1261 1262 /* Finally try the card type and media */ 1263 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 1264 nfp_fw_model, pf_dev->nfp_eth_table->count, 1265 pf_dev->nfp_eth_table->ports[0].speed / 1000); 1266 snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc); 1267 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); 1268 if (access(fw_name, F_OK) == 0) 1269 return 0; 1270 1271 return -ENOENT; 1272 } 1273 1274 static int 1275 nfp_fw_upload(struct nfp_nsp *nsp, 1276 char *fw_name) 1277 { 1278 int err; 1279 void *fw_buf; 1280 size_t fsize; 1281 1282 err = rte_firmware_read(fw_name, &fw_buf, &fsize); 1283 if (err != 0) { 1284 PMD_DRV_LOG(ERR, "Firmware %s not found!", fw_name); 1285 return -ENOENT; 1286 } 1287 1288 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu.", 1289 fw_name, fsize); 1290 PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 1291 if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) { 1292 free(fw_buf); 1293 PMD_DRV_LOG(ERR, "Firmware load failed."); 1294 return -EIO; 1295 } 1296 1297 PMD_DRV_LOG(INFO, "Done."); 1298 1299 free(fw_buf); 1300 1301 return 0; 1302 } 1303 1304 static void 1305 nfp_fw_unload(struct nfp_cpp *cpp) 1306 { 1307 int err; 1308 struct nfp_nsp *nsp; 1309 1310 nsp = nfp_nsp_open(cpp); 1311 if (nsp == NULL) 1312 return; 1313 1314 err = nfp_nsp_device_soft_reset(nsp); 1315 if (err != 0) 1316 PMD_DRV_LOG(WARNING, "Failed to do soft reset when nfp fw unload."); 1317 1318 nfp_nsp_close(nsp); 1319 } 1320 1321 static int 1322 nfp_fw_check_change(struct nfp_cpp *cpp, 1323 char *fw_name, 1324 bool *fw_changed) 1325 { 1326 int ret; 1327 uint32_t new_version = 0; 1328 uint32_t old_version = 0; 1329 1330 ret = nfp_elf_get_fw_version(&new_version, fw_name); 1331 if (ret != 0) 1332 return ret; 1333 1334 nfp_net_get_fw_version(cpp, &old_version); 1335 1336 if (new_version != old_version) { 1337 PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u.", 1338 new_version, old_version); 1339 *fw_changed = true; 1340 } else { 1341 PMD_DRV_LOG(INFO, "FW version is not changed and is %u.", new_version); 1342 *fw_changed = false; 1343 } 1344 1345 return 0; 1346 } 1347 1348 static void 1349 nfp_pcie_reg32_write_clear(struct rte_pci_device *pci_dev, 1350 int position) 1351 { 1352 int ret; 1353 uint32_t capability; 1354 1355 ret = rte_pci_read_config(pci_dev, &capability, 4, position); 1356 if (ret < 0) 1357 capability = 0xffffffff; 1358 1359 (void)rte_pci_write_config(pci_dev, &capability, 4, position); 1360 } 1361 1362 static void 1363 nfp_pcie_aer_clear(struct rte_pci_device *pci_dev) 1364 { 1365 int pos; 1366 1367 pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_ERR); 1368 if (pos <= 0) 1369 return; 1370 1371 nfp_pcie_reg32_write_clear(pci_dev, pos + RTE_PCI_ERR_UNCOR_STATUS); 1372 nfp_pcie_reg32_write_clear(pci_dev, pos + RTE_PCI_ERR_COR_STATUS); 1373 } 1374 1375 static int 1376 nfp_fw_reload(struct nfp_nsp *nsp, 1377 char *fw_name, 1378 struct rte_pci_device *pci_dev, 1379 int reset) 1380 { 1381 int err; 1382 bool reset_flag; 1383 1384 reset_flag = (reset == NFP_NSP_DRV_RESET_ALWAYS) || 1385 (reset == NFP_NSP_DRV_RESET_DISK); 1386 1387 if (reset_flag) { 1388 err = nfp_nsp_device_soft_reset(nsp); 1389 if (err != 0) { 1390 PMD_DRV_LOG(ERR, "NFP firmware soft reset failed."); 1391 return err; 1392 } 1393 } 1394 1395 /* 1396 * Accessing device memory during soft reset may result in some 1397 * errors being recorded in PCIE's AER register, which is normal. 1398 * Therefore, after the soft reset is completed, these errors 1399 * should be cleared. 1400 */ 1401 nfp_pcie_aer_clear(pci_dev); 1402 1403 err = nfp_fw_upload(nsp, fw_name); 1404 if (err != 0) { 1405 PMD_DRV_LOG(ERR, "NFP firmware load failed."); 1406 return err; 1407 } 1408 1409 return 0; 1410 } 1411 1412 static bool 1413 nfp_fw_skip_load(const struct nfp_dev_info *dev_info, 1414 struct nfp_multi_pf *multi_pf, 1415 bool *reload_fw) 1416 { 1417 uint8_t i; 1418 uint64_t tmp_beat; 1419 uint32_t port_num; 1420 uint8_t in_use = 0; 1421 uint64_t beat[dev_info->pf_num_per_unit]; 1422 uint32_t offset[dev_info->pf_num_per_unit]; 1423 uint8_t abnormal = dev_info->pf_num_per_unit; 1424 1425 sleep(1); 1426 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 1427 if (port_num == multi_pf->function_id) { 1428 abnormal--; 1429 continue; 1430 } 1431 1432 offset[port_num] = NFP_BEAT_OFFSET(port_num); 1433 beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]); 1434 if (beat[port_num] == 0) 1435 abnormal--; 1436 } 1437 1438 if (abnormal == 0) 1439 return true; 1440 1441 for (i = 0; i < 3; i++) { 1442 sleep(1); 1443 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 1444 if (port_num == multi_pf->function_id) 1445 continue; 1446 1447 if (beat[port_num] == 0) 1448 continue; 1449 1450 tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]); 1451 if (tmp_beat != beat[port_num]) { 1452 in_use++; 1453 abnormal--; 1454 beat[port_num] = 0; 1455 if (*reload_fw) { 1456 *reload_fw = false; 1457 PMD_DRV_LOG(ERR, "The param %s does not work.", 1458 NFP_PF_FORCE_RELOAD_FW); 1459 } 1460 } 1461 } 1462 1463 if (abnormal == 0) 1464 return true; 1465 } 1466 1467 if (in_use != 0) { 1468 PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the nic has port which is exit abnormally.", 1469 abnormal); 1470 return true; 1471 } 1472 1473 return false; 1474 } 1475 1476 static int 1477 nfp_fw_reload_from_flash(struct nfp_nsp *nsp) 1478 { 1479 int ret; 1480 1481 ret = nfp_nsp_load_stored_fw(nsp); 1482 if (ret != 0) { 1483 PMD_DRV_LOG(ERR, "Load firmware from flash failed."); 1484 return -EACCES; 1485 } 1486 1487 return 0; 1488 } 1489 1490 static int 1491 nfp_fw_reload_for_single_pf_from_disk(struct nfp_nsp *nsp, 1492 char *fw_name, 1493 struct nfp_pf_dev *pf_dev, 1494 int reset) 1495 { 1496 int ret; 1497 bool fw_changed = true; 1498 1499 if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) && 1500 !pf_dev->devargs.force_reload_fw) { 1501 ret = nfp_fw_check_change(pf_dev->cpp, fw_name, &fw_changed); 1502 if (ret != 0) 1503 return ret; 1504 } 1505 1506 if (!fw_changed) 1507 return 0; 1508 1509 ret = nfp_fw_reload(nsp, fw_name, pf_dev->pci_dev, reset); 1510 if (ret != 0) 1511 return ret; 1512 1513 return 0; 1514 } 1515 1516 static int 1517 nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp, 1518 char *fw_name, 1519 struct nfp_pf_dev *pf_dev, 1520 int reset, 1521 int policy) 1522 { 1523 int ret; 1524 1525 if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) { 1526 ret = nfp_fw_reload_from_flash(nsp); 1527 if (ret != 0) { 1528 PMD_DRV_LOG(ERR, "Load single PF firmware from flash failed."); 1529 return ret; 1530 } 1531 } else if (fw_name[0] != 0) { 1532 ret = nfp_fw_reload_for_single_pf_from_disk(nsp, fw_name, pf_dev, reset); 1533 if (ret != 0) { 1534 PMD_DRV_LOG(ERR, "Load single PF firmware from disk failed."); 1535 return ret; 1536 } 1537 } else { 1538 PMD_DRV_LOG(ERR, "Not load firmware, please update flash or recofigure card."); 1539 return -ENODATA; 1540 } 1541 1542 return 0; 1543 } 1544 1545 static int 1546 nfp_fw_reload_for_multi_pf_from_disk(struct nfp_nsp *nsp, 1547 char *fw_name, 1548 const struct nfp_dev_info *dev_info, 1549 struct nfp_pf_dev *pf_dev, 1550 int reset) 1551 { 1552 int err; 1553 bool fw_changed = true; 1554 bool skip_load_fw = false; 1555 bool reload_fw = pf_dev->devargs.force_reload_fw; 1556 1557 if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) && !reload_fw) { 1558 err = nfp_fw_check_change(pf_dev->cpp, fw_name, &fw_changed); 1559 if (err != 0) 1560 return err; 1561 } 1562 1563 if (!fw_changed || reload_fw) 1564 skip_load_fw = nfp_fw_skip_load(dev_info, &pf_dev->multi_pf, &reload_fw); 1565 1566 if (skip_load_fw && !reload_fw) 1567 return 0; 1568 1569 err = nfp_fw_reload(nsp, fw_name, pf_dev->pci_dev, reset); 1570 if (err != 0) 1571 return err; 1572 1573 return 0; 1574 } 1575 1576 static int 1577 nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp, 1578 char *fw_name, 1579 const struct nfp_dev_info *dev_info, 1580 struct nfp_pf_dev *pf_dev, 1581 int reset, 1582 int policy) 1583 { 1584 int err; 1585 struct nfp_multi_pf *multi_pf; 1586 1587 multi_pf = &pf_dev->multi_pf; 1588 1589 err = nfp_net_keepalive_init(pf_dev->cpp, multi_pf); 1590 if (err != 0) { 1591 PMD_DRV_LOG(ERR, "NFP init beat failed."); 1592 return err; 1593 } 1594 1595 err = nfp_net_keepalive_start(multi_pf); 1596 if (err != 0) { 1597 PMD_DRV_LOG(ERR, "NFP write beat failed."); 1598 goto keepalive_uninit; 1599 } 1600 1601 if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) { 1602 err = nfp_fw_reload_from_flash(nsp); 1603 if (err != 0) { 1604 PMD_DRV_LOG(ERR, "Load multi PF firmware from flash failed."); 1605 goto keepalive_stop; 1606 } 1607 } else if (fw_name[0] != 0) { 1608 err = nfp_fw_reload_for_multi_pf_from_disk(nsp, fw_name, dev_info, 1609 pf_dev, reset); 1610 if (err != 0) { 1611 PMD_DRV_LOG(ERR, "Load multi PF firmware from disk failed."); 1612 goto keepalive_stop; 1613 } 1614 } else { 1615 PMD_DRV_LOG(ERR, "Not load firmware, please update flash or recofigure card."); 1616 err = -ENODATA; 1617 goto keepalive_stop; 1618 } 1619 1620 nfp_net_keepalive_clear_others(dev_info, multi_pf); 1621 1622 return 0; 1623 1624 keepalive_stop: 1625 nfp_net_keepalive_stop(multi_pf); 1626 keepalive_uninit: 1627 nfp_net_keepalive_uninit(multi_pf); 1628 1629 return err; 1630 } 1631 1632 static int 1633 nfp_strtol(const char *buf, 1634 int base, 1635 long *value) 1636 { 1637 long val; 1638 char *tmp; 1639 1640 if (value == NULL) 1641 return -EINVAL; 1642 1643 val = strtol(buf, &tmp, base); 1644 if (tmp == NULL || *tmp != 0) 1645 return -EINVAL; 1646 1647 *value = val; 1648 1649 return 0; 1650 } 1651 1652 static int 1653 nfp_fw_policy_value_get(struct nfp_nsp *nsp, 1654 const char *key, 1655 const char *default_val, 1656 int max_val, 1657 int *value) 1658 { 1659 int ret; 1660 int64_t val; 1661 char buf[64]; 1662 1663 snprintf(buf, sizeof(buf), "%s", key); 1664 ret = nfp_nsp_hwinfo_lookup_optional(nsp, buf, sizeof(buf), default_val); 1665 if (ret != 0) 1666 return ret; 1667 1668 ret = nfp_strtol(buf, 0, &val); 1669 if (ret != 0 || val < 0 || val > max_val) { 1670 PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring.", 1671 buf, key); 1672 /* Fall back to the default value */ 1673 ret = nfp_strtol(default_val, 0, &val); 1674 if (ret != 0) 1675 return ret; 1676 } 1677 1678 *value = val; 1679 1680 return 0; 1681 } 1682 1683 static int 1684 nfp_fw_setup(struct nfp_pf_dev *pf_dev, 1685 const struct nfp_dev_info *dev_info) 1686 { 1687 int err; 1688 int reset; 1689 int policy; 1690 char fw_name[125]; 1691 struct nfp_nsp *nsp; 1692 1693 nsp = nfp_nsp_open(pf_dev->cpp); 1694 if (nsp == NULL) { 1695 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); 1696 return -EIO; 1697 } 1698 1699 err = nfp_fw_policy_value_get(nsp, "abi_drv_reset", 1700 NFP_NSP_DRV_RESET_DEFAULT, NFP_NSP_DRV_RESET_NEVER, 1701 &reset); 1702 if (err != 0) { 1703 PMD_DRV_LOG(ERR, "Get 'abi_drv_reset' from HWinfo failed."); 1704 goto close_nsp; 1705 } 1706 1707 err = nfp_fw_policy_value_get(nsp, "app_fw_from_flash", 1708 NFP_NSP_APP_FW_LOAD_DEFAULT, NFP_NSP_APP_FW_LOAD_PREF, 1709 &policy); 1710 if (err != 0) { 1711 PMD_DRV_LOG(ERR, "Get 'app_fw_from_flash' from HWinfo failed."); 1712 goto close_nsp; 1713 } 1714 1715 fw_name[0] = 0; 1716 if (policy != NFP_NSP_APP_FW_LOAD_FLASH) { 1717 err = nfp_fw_get_name(pf_dev, fw_name, sizeof(fw_name)); 1718 if (err != 0) { 1719 PMD_DRV_LOG(ERR, "Can not find suitable firmware."); 1720 goto close_nsp; 1721 } 1722 } 1723 1724 if (pf_dev->multi_pf.enabled) 1725 err = nfp_fw_reload_for_multi_pf(nsp, fw_name, dev_info, 1726 pf_dev, reset, policy); 1727 else 1728 err = nfp_fw_reload_for_single_pf(nsp, fw_name, pf_dev, 1729 reset, policy); 1730 1731 close_nsp: 1732 nfp_nsp_close(nsp); 1733 return err; 1734 } 1735 1736 static inline bool 1737 nfp_check_multi_pf_from_fw(uint32_t total_vnics) 1738 { 1739 if (total_vnics == 1) 1740 return true; 1741 1742 return false; 1743 } 1744 1745 static inline bool 1746 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev, 1747 struct nfp_cpp *cpp) 1748 { 1749 bool flag; 1750 struct nfp_nsp *nsp; 1751 1752 nsp = nfp_nsp_open(cpp); 1753 if (nsp == NULL) { 1754 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); 1755 return false; 1756 } 1757 1758 flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) && 1759 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC); 1760 1761 nfp_nsp_close(nsp); 1762 return flag; 1763 } 1764 1765 static int 1766 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) 1767 { 1768 int err = 0; 1769 uint64_t tx_base; 1770 uint8_t *ctrl_bar; 1771 struct nfp_hw *hw; 1772 uint32_t cap_extend; 1773 struct nfp_net_hw net_hw; 1774 struct nfp_cpp_area *area; 1775 char name[RTE_ETH_NAME_MAX_LEN]; 1776 1777 memset(&net_hw, 0, sizeof(struct nfp_net_hw)); 1778 1779 /* Map the symbol table */ 1780 pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_MIN; 1781 snprintf(name, sizeof(name), "_pf%u_net_bar0", 1782 pf_dev->multi_pf.function_id); 1783 ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, pf_dev->ctrl_bar_size, 1784 &area); 1785 if (ctrl_bar == NULL) { 1786 PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol."); 1787 return -ENODEV; 1788 } 1789 1790 hw = &net_hw.super; 1791 hw->ctrl_bar = ctrl_bar; 1792 1793 /* Check the version from firmware */ 1794 if (!nfp_net_version_check(hw, pf_dev)) { 1795 PMD_INIT_LOG(ERR, "Not the valid version."); 1796 err = -EINVAL; 1797 goto end; 1798 } 1799 1800 /* Set the ctrl bar size */ 1801 nfp_net_ctrl_bar_size_set(pf_dev); 1802 1803 if (!pf_dev->multi_pf.enabled) 1804 goto end; 1805 1806 cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); 1807 if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) { 1808 PMD_INIT_LOG(ERR, "Loaded firmware does not support multiple PF."); 1809 err = -EINVAL; 1810 goto end; 1811 } 1812 1813 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 1814 net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 1815 nfp_net_cfg_queue_setup(&net_hw); 1816 rte_spinlock_init(&hw->reconfig_lock); 1817 err = nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, 1818 NFP_NET_CFG_UPDATE_GEN); 1819 if (err != 0) { 1820 PMD_INIT_LOG(ERR, "Configure multiple PF failed."); 1821 goto end; 1822 } 1823 1824 end: 1825 nfp_cpp_area_release_free(area); 1826 return err; 1827 } 1828 1829 static bool 1830 nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev) 1831 { 1832 uint8_t total_phyports; 1833 1834 total_phyports = nfp_net_get_phyports_from_fw(pf_dev); 1835 1836 if (pf_dev->multi_pf.enabled) { 1837 if (!nfp_check_multi_pf_from_fw(total_phyports)) { 1838 PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf."); 1839 return false; 1840 } 1841 } else { 1842 /* 1843 * For single PF the number of vNICs exposed should be the same as the 1844 * number of physical ports. 1845 */ 1846 if (total_phyports != pf_dev->nfp_eth_table->count) { 1847 PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs."); 1848 return false; 1849 } 1850 } 1851 1852 return true; 1853 } 1854 1855 static void 1856 nfp_port_name_generate(char *port_name, 1857 size_t length, 1858 int port_id, 1859 struct nfp_pf_dev *pf_dev) 1860 { 1861 const char *name = pf_dev->pci_dev->device.name; 1862 1863 if (pf_dev->multi_pf.enabled) 1864 snprintf(port_name, length, "%s", name); 1865 else 1866 snprintf(port_name, length, "%s_port%u", name, port_id); 1867 } 1868 1869 static int 1870 nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv) 1871 { 1872 uint8_t i; 1873 uint8_t id; 1874 int ret = 0; 1875 struct nfp_app_fw_nic *app_fw_nic; 1876 struct nfp_eth_table *nfp_eth_table; 1877 char bar_name[RTE_ETH_NAME_MAX_LEN]; 1878 char port_name[RTE_ETH_NAME_MAX_LEN]; 1879 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 1880 struct nfp_net_init hw_init = { 1881 .hw_priv = hw_priv, 1882 }; 1883 1884 nfp_eth_table = pf_dev->nfp_eth_table; 1885 PMD_INIT_LOG(INFO, "Total physical ports: %d.", nfp_eth_table->count); 1886 id = nfp_function_id_get(pf_dev, 0); 1887 1888 /* Allocate memory for the CoreNIC app */ 1889 app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0); 1890 if (app_fw_nic == NULL) 1891 return -ENOMEM; 1892 1893 /* Point the app_fw_priv pointer in the PF to the coreNIC app */ 1894 pf_dev->app_fw_priv = app_fw_nic; 1895 1896 /* Check the number of vNIC's created for the PF */ 1897 if (!nfp_app_fw_nic_total_phyports_check(pf_dev)) { 1898 ret = -ENODEV; 1899 goto app_cleanup; 1900 } 1901 1902 /* Populate coreNIC app properties */ 1903 if (pf_dev->total_phyports > 1) 1904 app_fw_nic->multiport = true; 1905 1906 /* Map the symbol table */ 1907 snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id); 1908 pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name, 1909 pf_dev->total_phyports * pf_dev->ctrl_bar_size, 1910 &pf_dev->ctrl_area); 1911 if (pf_dev->ctrl_bar == NULL) { 1912 PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for %s.", bar_name); 1913 ret = -EIO; 1914 goto app_cleanup; 1915 } 1916 1917 PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", pf_dev->ctrl_bar); 1918 1919 /* Loop through all physical ports on PF */ 1920 for (i = 0; i < pf_dev->total_phyports; i++) { 1921 nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev); 1922 1923 id = nfp_function_id_get(pf_dev, i); 1924 hw_init.idx = id; 1925 hw_init.nfp_idx = nfp_eth_table->ports[id].index; 1926 ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 1927 sizeof(struct nfp_net_hw), NULL, NULL, 1928 nfp_net_init, &hw_init); 1929 if (ret != 0) 1930 goto port_cleanup; 1931 1932 } /* End loop, all ports on this PF */ 1933 1934 return 0; 1935 1936 port_cleanup: 1937 for (uint32_t j = 0; j < i; j++) { 1938 struct rte_eth_dev *eth_dev; 1939 1940 nfp_port_name_generate(port_name, sizeof(port_name), j, pf_dev); 1941 eth_dev = rte_eth_dev_get_by_name(port_name); 1942 if (eth_dev != NULL) 1943 rte_eth_dev_destroy(eth_dev, nfp_net_uninit); 1944 } 1945 nfp_cpp_area_release_free(pf_dev->ctrl_area); 1946 app_cleanup: 1947 rte_free(app_fw_nic); 1948 1949 return ret; 1950 } 1951 1952 static int 1953 nfp_net_hwinfo_set(uint8_t function_id, 1954 struct nfp_rtsym_table *sym_tbl, 1955 struct nfp_cpp *cpp, 1956 enum nfp_app_fw_id app_fw_id) 1957 { 1958 int ret = 0; 1959 uint64_t app_cap; 1960 struct nfp_nsp *nsp; 1961 uint8_t sp_indiff = 1; 1962 char hw_info[RTE_ETH_NAME_MAX_LEN]; 1963 char app_cap_name[RTE_ETH_NAME_MAX_LEN]; 1964 1965 if (app_fw_id != NFP_APP_FW_FLOWER_NIC) { 1966 /* Read the app capabilities of the firmware loaded */ 1967 snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id); 1968 app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret); 1969 if (ret != 0) { 1970 PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware."); 1971 return ret; 1972 } 1973 1974 /* Calculate the value of sp_indiff and write to hw_info */ 1975 sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF; 1976 } 1977 1978 snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff); 1979 1980 nsp = nfp_nsp_open(cpp); 1981 if (nsp == NULL) { 1982 PMD_INIT_LOG(ERR, "Could not get NSP."); 1983 return -EIO; 1984 } 1985 1986 ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info)); 1987 nfp_nsp_close(nsp); 1988 if (ret != 0) { 1989 PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo."); 1990 return ret; 1991 } 1992 1993 return 0; 1994 } 1995 1996 const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = { 1997 [NFP_MEDIA_W0_RJ45_10M] = RTE_ETH_LINK_SPEED_10M, 1998 [NFP_MEDIA_W0_RJ45_10M_HD] = RTE_ETH_LINK_SPEED_10M_HD, 1999 [NFP_MEDIA_W0_RJ45_100M] = RTE_ETH_LINK_SPEED_100M, 2000 [NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD, 2001 [NFP_MEDIA_W0_RJ45_1G] = RTE_ETH_LINK_SPEED_1G, 2002 [NFP_MEDIA_W0_RJ45_2P5G] = RTE_ETH_LINK_SPEED_2_5G, 2003 [NFP_MEDIA_W0_RJ45_5G] = RTE_ETH_LINK_SPEED_5G, 2004 [NFP_MEDIA_W0_RJ45_10G] = RTE_ETH_LINK_SPEED_10G, 2005 [NFP_MEDIA_1000BASE_CX] = RTE_ETH_LINK_SPEED_1G, 2006 [NFP_MEDIA_1000BASE_KX] = RTE_ETH_LINK_SPEED_1G, 2007 [NFP_MEDIA_10GBASE_KX4] = RTE_ETH_LINK_SPEED_10G, 2008 [NFP_MEDIA_10GBASE_KR] = RTE_ETH_LINK_SPEED_10G, 2009 [NFP_MEDIA_10GBASE_CX4] = RTE_ETH_LINK_SPEED_10G, 2010 [NFP_MEDIA_10GBASE_CR] = RTE_ETH_LINK_SPEED_10G, 2011 [NFP_MEDIA_10GBASE_SR] = RTE_ETH_LINK_SPEED_10G, 2012 [NFP_MEDIA_10GBASE_ER] = RTE_ETH_LINK_SPEED_10G, 2013 [NFP_MEDIA_25GBASE_KR] = RTE_ETH_LINK_SPEED_25G, 2014 [NFP_MEDIA_25GBASE_KR_S] = RTE_ETH_LINK_SPEED_25G, 2015 [NFP_MEDIA_25GBASE_CR] = RTE_ETH_LINK_SPEED_25G, 2016 [NFP_MEDIA_25GBASE_CR_S] = RTE_ETH_LINK_SPEED_25G, 2017 [NFP_MEDIA_25GBASE_SR] = RTE_ETH_LINK_SPEED_25G, 2018 [NFP_MEDIA_40GBASE_CR4] = RTE_ETH_LINK_SPEED_40G, 2019 [NFP_MEDIA_40GBASE_KR4] = RTE_ETH_LINK_SPEED_40G, 2020 [NFP_MEDIA_40GBASE_SR4] = RTE_ETH_LINK_SPEED_40G, 2021 [NFP_MEDIA_40GBASE_LR4] = RTE_ETH_LINK_SPEED_40G, 2022 [NFP_MEDIA_50GBASE_KR] = RTE_ETH_LINK_SPEED_50G, 2023 [NFP_MEDIA_50GBASE_SR] = RTE_ETH_LINK_SPEED_50G, 2024 [NFP_MEDIA_50GBASE_CR] = RTE_ETH_LINK_SPEED_50G, 2025 [NFP_MEDIA_50GBASE_LR] = RTE_ETH_LINK_SPEED_50G, 2026 [NFP_MEDIA_50GBASE_ER] = RTE_ETH_LINK_SPEED_50G, 2027 [NFP_MEDIA_50GBASE_FR] = RTE_ETH_LINK_SPEED_50G, 2028 [NFP_MEDIA_100GBASE_KR4] = RTE_ETH_LINK_SPEED_100G, 2029 [NFP_MEDIA_100GBASE_SR4] = RTE_ETH_LINK_SPEED_100G, 2030 [NFP_MEDIA_100GBASE_CR4] = RTE_ETH_LINK_SPEED_100G, 2031 [NFP_MEDIA_100GBASE_KP4] = RTE_ETH_LINK_SPEED_100G, 2032 [NFP_MEDIA_100GBASE_CR10] = RTE_ETH_LINK_SPEED_100G, 2033 [NFP_MEDIA_10GBASE_LR] = RTE_ETH_LINK_SPEED_10G, 2034 [NFP_MEDIA_25GBASE_LR] = RTE_ETH_LINK_SPEED_25G, 2035 [NFP_MEDIA_25GBASE_ER] = RTE_ETH_LINK_SPEED_25G 2036 }; 2037 2038 static int 2039 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf, 2040 struct nfp_pf_dev *pf_dev) 2041 { 2042 uint32_t i; 2043 uint32_t j; 2044 uint32_t offset; 2045 uint32_t speed_capa = 0; 2046 uint64_t supported_modes; 2047 2048 for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) { 2049 supported_modes = media_buf->supported_modes[i]; 2050 offset = i * UINT64_BIT; 2051 for (j = 0; j < UINT64_BIT; j++) { 2052 if (supported_modes == 0) 2053 break; 2054 2055 if ((supported_modes & 1) != 0) { 2056 if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) { 2057 PMD_DRV_LOG(ERR, "Invalid offset of media table."); 2058 return -EINVAL; 2059 } 2060 2061 speed_capa |= nfp_eth_media_table[j + offset]; 2062 } 2063 2064 supported_modes = supported_modes >> 1; 2065 } 2066 } 2067 2068 pf_dev->speed_capa = speed_capa; 2069 2070 return pf_dev->speed_capa == 0 ? -EINVAL : 0; 2071 } 2072 2073 static int 2074 nfp_net_speed_cap_get_one(struct nfp_pf_dev *pf_dev, 2075 uint32_t port_id) 2076 { 2077 int ret; 2078 struct nfp_nsp *nsp; 2079 struct nfp_eth_media_buf media_buf; 2080 2081 media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index; 2082 pf_dev->speed_capa = 0; 2083 2084 nsp = nfp_nsp_open(pf_dev->cpp); 2085 if (nsp == NULL) { 2086 PMD_DRV_LOG(ERR, "Could not get NSP."); 2087 return -EIO; 2088 } 2089 2090 ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf)); 2091 nfp_nsp_close(nsp); 2092 if (ret != 0) { 2093 PMD_DRV_LOG(ERR, "Failed to read media."); 2094 return ret; 2095 } 2096 2097 ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev); 2098 if (ret < 0) { 2099 PMD_DRV_LOG(ERR, "Speed capability is invalid."); 2100 return ret; 2101 } 2102 2103 return 0; 2104 } 2105 2106 static int 2107 nfp_net_speed_cap_get(struct nfp_pf_dev *pf_dev) 2108 { 2109 int ret; 2110 uint32_t i; 2111 uint32_t id; 2112 uint32_t count; 2113 2114 count = pf_dev->total_phyports; 2115 for (i = 0; i < count; i++) { 2116 id = nfp_function_id_get(pf_dev, i); 2117 ret = nfp_net_speed_cap_get_one(pf_dev, id); 2118 if (ret != 0) { 2119 PMD_INIT_LOG(ERR, "Failed to get port %d speed capability.", id); 2120 return ret; 2121 } 2122 } 2123 2124 return 0; 2125 } 2126 2127 /* Force the physical port down to clear the possible DMA error */ 2128 static int 2129 nfp_net_force_port_down(struct nfp_pf_dev *pf_dev) 2130 { 2131 int ret; 2132 uint32_t i; 2133 uint32_t id; 2134 uint32_t index; 2135 uint32_t count; 2136 2137 count = pf_dev->total_phyports; 2138 for (i = 0; i < count; i++) { 2139 id = nfp_function_id_get(pf_dev, i); 2140 index = pf_dev->nfp_eth_table->ports[id].index; 2141 ret = nfp_eth_set_configured(pf_dev->cpp, index, 0); 2142 if (ret < 0) 2143 return ret; 2144 } 2145 2146 return 0; 2147 } 2148 2149 static int 2150 nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv) 2151 { 2152 int ret; 2153 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 2154 2155 switch (pf_dev->app_fw_id) { 2156 case NFP_APP_FW_CORE_NIC: 2157 PMD_INIT_LOG(INFO, "Initializing coreNIC."); 2158 ret = nfp_init_app_fw_nic(hw_priv); 2159 if (ret != 0) { 2160 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 2161 return ret; 2162 } 2163 break; 2164 case NFP_APP_FW_FLOWER_NIC: 2165 PMD_INIT_LOG(INFO, "Initializing Flower."); 2166 ret = nfp_init_app_fw_flower(hw_priv); 2167 if (ret != 0) { 2168 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 2169 return ret; 2170 } 2171 break; 2172 default: 2173 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded."); 2174 ret = -EINVAL; 2175 return ret; 2176 } 2177 2178 return 0; 2179 } 2180 2181 static int 2182 nfp_pf_get_max_vf(struct nfp_pf_dev *pf_dev) 2183 { 2184 int ret; 2185 uint32_t max_vfs; 2186 2187 max_vfs = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_vf_cfg_max_vfs", &ret); 2188 if (ret != 0) 2189 return ret; 2190 2191 pf_dev->max_vfs = max_vfs; 2192 2193 return 0; 2194 } 2195 2196 static int 2197 nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev, 2198 const struct nfp_dev_info *dev_info) 2199 { 2200 int ret; 2201 off_t pos; 2202 uint16_t offset; 2203 uint16_t sriov_vf; 2204 2205 /* For 3800 single-PF and 4000 card */ 2206 if (!pf_dev->multi_pf.enabled) { 2207 pf_dev->sriov_vf = pf_dev->max_vfs; 2208 return 0; 2209 } 2210 2211 pos = rte_pci_find_ext_capability(pf_dev->pci_dev, RTE_PCI_EXT_CAP_ID_SRIOV); 2212 if (pos == 0) { 2213 PMD_INIT_LOG(ERR, "Can not get the pci sriov cap."); 2214 return -EIO; 2215 } 2216 2217 /* 2218 * Management firmware ensures that sriov capability registers 2219 * are initialized correctly. 2220 */ 2221 ret = rte_pci_read_config(pf_dev->pci_dev, &sriov_vf, sizeof(sriov_vf), 2222 pos + RTE_PCI_SRIOV_TOTAL_VF); 2223 if (ret < 0) { 2224 PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF."); 2225 return -EIO; 2226 } 2227 2228 /* Offset of first VF is relative to its PF. */ 2229 ret = rte_pci_read_config(pf_dev->pci_dev, &offset, sizeof(offset), 2230 pos + RTE_PCI_SRIOV_VF_OFFSET); 2231 if (ret < 0) { 2232 PMD_INIT_LOG(ERR, "Can not get the VF offset."); 2233 return -EIO; 2234 } 2235 2236 offset += pf_dev->multi_pf.function_id; 2237 if (offset < dev_info->pf_num_per_unit) 2238 return -ERANGE; 2239 2240 offset -= dev_info->pf_num_per_unit; 2241 if (offset >= pf_dev->max_vfs || offset + sriov_vf > pf_dev->max_vfs) { 2242 PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF."); 2243 return -ERANGE; 2244 } 2245 2246 pf_dev->vf_base_id = offset; 2247 pf_dev->sriov_vf = sriov_vf; 2248 2249 return 0; 2250 } 2251 2252 static int 2253 nfp_net_get_vf_info(struct nfp_pf_dev *pf_dev, 2254 const struct nfp_dev_info *dev_info) 2255 { 2256 int ret; 2257 2258 ret = nfp_pf_get_max_vf(pf_dev); 2259 if (ret != 0) { 2260 if (ret != -ENOENT) { 2261 PMD_INIT_LOG(ERR, "Read max VFs failed."); 2262 return ret; 2263 } 2264 2265 PMD_INIT_LOG(WARNING, "The firmware can not support read max VFs."); 2266 return 0; 2267 } 2268 2269 if (pf_dev->max_vfs == 0) 2270 return 0; 2271 2272 ret = nfp_pf_get_sriov_vf(pf_dev, dev_info); 2273 if (ret < 0) 2274 return ret; 2275 2276 pf_dev->queue_per_vf = NFP_QUEUE_PER_VF; 2277 2278 return 0; 2279 } 2280 2281 static int 2282 nfp_net_vf_config_init(struct nfp_pf_dev *pf_dev) 2283 { 2284 int ret = 0; 2285 uint32_t min_size; 2286 char vf_bar_name[RTE_ETH_NAME_MAX_LEN]; 2287 char vf_cfg_name[RTE_ETH_NAME_MAX_LEN]; 2288 2289 if (pf_dev->sriov_vf == 0) 2290 return 0; 2291 2292 min_size = pf_dev->ctrl_bar_size * pf_dev->sriov_vf; 2293 snprintf(vf_bar_name, sizeof(vf_bar_name), "_pf%d_net_vf_bar", 2294 pf_dev->multi_pf.function_id); 2295 pf_dev->vf_bar = nfp_rtsym_map_offset(pf_dev->sym_tbl, vf_bar_name, 2296 pf_dev->ctrl_bar_size * pf_dev->vf_base_id, 2297 min_size, &pf_dev->vf_area); 2298 if (pf_dev->vf_bar == NULL) { 2299 PMD_INIT_LOG(ERR, "Failed to get vf cfg."); 2300 return -EIO; 2301 } 2302 2303 min_size = NFP_NET_VF_CFG_SZ * pf_dev->sriov_vf + NFP_NET_VF_CFG_MB_SZ; 2304 snprintf(vf_cfg_name, sizeof(vf_cfg_name), "_pf%d_net_vf_cfg2", 2305 pf_dev->multi_pf.function_id); 2306 pf_dev->vf_cfg_tbl_bar = nfp_rtsym_map(pf_dev->sym_tbl, vf_cfg_name, 2307 min_size, &pf_dev->vf_cfg_tbl_area); 2308 if (pf_dev->vf_cfg_tbl_bar == NULL) { 2309 PMD_INIT_LOG(ERR, "Failed to get vf configure table."); 2310 ret = -EIO; 2311 goto vf_bar_cleanup; 2312 } 2313 2314 return 0; 2315 2316 vf_bar_cleanup: 2317 nfp_cpp_area_release_free(pf_dev->vf_area); 2318 2319 return ret; 2320 } 2321 2322 static int 2323 nfp_pf_init(struct rte_pci_device *pci_dev) 2324 { 2325 void *sync; 2326 int ret = 0; 2327 uint64_t addr; 2328 uint32_t cpp_id; 2329 uint8_t function_id; 2330 struct nfp_cpp *cpp; 2331 struct nfp_pf_dev *pf_dev; 2332 struct nfp_hwinfo *hwinfo; 2333 enum nfp_app_fw_id app_fw_id; 2334 char name[RTE_ETH_NAME_MAX_LEN]; 2335 struct nfp_rtsym_table *sym_tbl; 2336 struct nfp_net_hw_priv *hw_priv; 2337 char app_name[RTE_ETH_NAME_MAX_LEN]; 2338 struct nfp_eth_table *nfp_eth_table; 2339 const struct nfp_dev_info *dev_info; 2340 2341 if (pci_dev == NULL) 2342 return -ENODEV; 2343 2344 if (pci_dev->mem_resource[0].addr == NULL) { 2345 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 2346 return -ENODEV; 2347 } 2348 2349 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 2350 if (dev_info == NULL) { 2351 PMD_INIT_LOG(ERR, "Not supported device ID."); 2352 return -ENODEV; 2353 } 2354 2355 hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0); 2356 if (hw_priv == NULL) { 2357 PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data."); 2358 return -ENOMEM; 2359 } 2360 2361 /* Allocate memory for the PF "device" */ 2362 function_id = (pci_dev->addr.function) & 0x07; 2363 snprintf(name, sizeof(name), "nfp_pf%u", function_id); 2364 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 2365 if (pf_dev == NULL) { 2366 PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device."); 2367 ret = -ENOMEM; 2368 goto hw_priv_free; 2369 } 2370 2371 hw_priv->dev_info = dev_info; 2372 hw_priv->pf_dev = pf_dev; 2373 2374 sync = nfp_sync_alloc(); 2375 if (sync == NULL) { 2376 PMD_INIT_LOG(ERR, "Failed to alloc sync zone."); 2377 ret = -ENOMEM; 2378 goto pf_cleanup; 2379 } 2380 2381 pf_dev->sync = sync; 2382 2383 /* 2384 * When device bound to UIO, the device could be used, by mistake, 2385 * by two DPDK apps, and the UIO driver does not avoid it. This 2386 * could lead to a serious problem when configuring the NFP CPP 2387 * interface. Here we avoid this telling to the CPP init code to 2388 * use a lock file if UIO is being used. 2389 */ 2390 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 2391 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 2392 else 2393 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 2394 2395 if (cpp == NULL) { 2396 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained."); 2397 ret = -EIO; 2398 goto sync_free; 2399 } 2400 2401 pf_dev->cpp = cpp; 2402 pf_dev->pci_dev = pci_dev; 2403 2404 hwinfo = nfp_hwinfo_read(cpp); 2405 if (hwinfo == NULL) { 2406 PMD_INIT_LOG(ERR, "Error reading hwinfo table."); 2407 ret = -EIO; 2408 goto cpp_cleanup; 2409 } 2410 2411 pf_dev->hwinfo = hwinfo; 2412 2413 /* Read the number of physical ports from hardware */ 2414 nfp_eth_table = nfp_eth_read_ports(cpp); 2415 if (nfp_eth_table == NULL) { 2416 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table."); 2417 ret = -EIO; 2418 goto hwinfo_cleanup; 2419 } 2420 2421 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 2422 PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u.", 2423 nfp_eth_table->count); 2424 ret = -EIO; 2425 goto eth_table_cleanup; 2426 } 2427 2428 pf_dev->nfp_eth_table = nfp_eth_table; 2429 pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); 2430 pf_dev->multi_pf.function_id = function_id; 2431 pf_dev->total_phyports = nfp_net_get_phyports_from_nsp(pf_dev); 2432 2433 ret = nfp_net_force_port_down(pf_dev); 2434 if (ret != 0) { 2435 PMD_INIT_LOG(ERR, "Failed to force port down."); 2436 ret = -EIO; 2437 goto eth_table_cleanup; 2438 } 2439 2440 ret = nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs); 2441 if (ret != 0) { 2442 PMD_INIT_LOG(ERR, "Error when parsing device args."); 2443 ret = -EINVAL; 2444 goto eth_table_cleanup; 2445 } 2446 2447 ret = nfp_net_device_activate(pf_dev); 2448 if (ret != 0) { 2449 PMD_INIT_LOG(ERR, "Failed to activate the NFP device."); 2450 ret = -EIO; 2451 goto eth_table_cleanup; 2452 } 2453 2454 ret = nfp_fw_setup(pf_dev, dev_info); 2455 if (ret != 0) { 2456 PMD_INIT_LOG(ERR, "Error when uploading firmware."); 2457 ret = -EIO; 2458 goto eth_table_cleanup; 2459 } 2460 2461 /* Now the symbol table should be there */ 2462 sym_tbl = nfp_rtsym_table_read(cpp); 2463 if (sym_tbl == NULL) { 2464 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table."); 2465 ret = -EIO; 2466 goto fw_cleanup; 2467 } 2468 2469 pf_dev->sym_tbl = sym_tbl; 2470 2471 /* Read the app ID of the firmware loaded */ 2472 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 2473 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 2474 if (ret != 0) { 2475 PMD_INIT_LOG(ERR, "Could not read %s from firmware.", app_name); 2476 ret = -EIO; 2477 goto sym_tbl_cleanup; 2478 } 2479 2480 pf_dev->app_fw_id = app_fw_id; 2481 2482 /* Write sp_indiff to hw_info */ 2483 ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id); 2484 if (ret != 0) { 2485 PMD_INIT_LOG(ERR, "Failed to set hwinfo."); 2486 ret = -EIO; 2487 goto sym_tbl_cleanup; 2488 } 2489 2490 ret = nfp_net_speed_cap_get(pf_dev); 2491 if (ret != 0) { 2492 PMD_INIT_LOG(ERR, "Failed to get speed capability."); 2493 ret = -EIO; 2494 goto sym_tbl_cleanup; 2495 } 2496 2497 /* Get the VF info */ 2498 ret = nfp_net_get_vf_info(pf_dev, dev_info); 2499 if (ret != 0) { 2500 PMD_INIT_LOG(ERR, "Failed to get VF info."); 2501 ret = -EIO; 2502 goto sym_tbl_cleanup; 2503 } 2504 2505 /* Configure access to tx/rx vNIC BARs */ 2506 addr = nfp_qcp_queue_offset(dev_info, 0); 2507 cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); 2508 2509 pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, 2510 addr, dev_info->qc_area_sz, &pf_dev->qc_area); 2511 if (pf_dev->qc_bar == NULL) { 2512 PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for net.qc."); 2513 ret = -EIO; 2514 goto sym_tbl_cleanup; 2515 } 2516 2517 PMD_INIT_LOG(DEBUG, "The qc_bar address: %p.", pf_dev->qc_bar); 2518 2519 pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats", 2520 NFP_MAC_STATS_SIZE * nfp_eth_table->max_index, 2521 &pf_dev->mac_stats_area); 2522 if (pf_dev->mac_stats_bar == NULL) { 2523 PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for _mac_stats."); 2524 goto hwqueues_cleanup; 2525 } 2526 2527 ret = nfp_enable_multi_pf(pf_dev); 2528 if (ret != 0) 2529 goto mac_stats_cleanup; 2530 2531 ret = nfp_net_vf_config_init(pf_dev); 2532 if (ret != 0) { 2533 PMD_INIT_LOG(ERR, "Failed to init VF config."); 2534 goto vf_cfg_tbl_cleanup; 2535 } 2536 2537 hw_priv->is_pf = true; 2538 2539 if (!nfp_net_recv_pkt_meta_check_register(hw_priv)) { 2540 PMD_INIT_LOG(ERR, "PF register meta check function failed."); 2541 ret = -EIO; 2542 goto hw_priv_free; 2543 } 2544 2545 /* 2546 * PF initialization has been done at this point. Call app specific 2547 * init code now. 2548 */ 2549 ret = nfp_fw_app_primary_init(hw_priv); 2550 if (ret != 0) { 2551 PMD_INIT_LOG(ERR, "Failed to init hw app primary."); 2552 goto vf_cfg_tbl_cleanup; 2553 } 2554 2555 /* Register the CPP bridge service here for primary use */ 2556 if (pf_dev->devargs.cpp_service_enable) { 2557 ret = nfp_enable_cpp_service(pf_dev); 2558 if (ret != 0) { 2559 PMD_INIT_LOG(ERR, "Enable CPP service failed."); 2560 goto vf_cfg_tbl_cleanup; 2561 } 2562 } 2563 2564 return 0; 2565 2566 vf_cfg_tbl_cleanup: 2567 nfp_net_vf_config_uninit(pf_dev); 2568 mac_stats_cleanup: 2569 nfp_cpp_area_release_free(pf_dev->mac_stats_area); 2570 hwqueues_cleanup: 2571 nfp_cpp_area_release_free(pf_dev->qc_area); 2572 sym_tbl_cleanup: 2573 free(sym_tbl); 2574 fw_cleanup: 2575 nfp_fw_unload(cpp); 2576 if (pf_dev->multi_pf.enabled) { 2577 nfp_net_keepalive_stop(&pf_dev->multi_pf); 2578 nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id); 2579 nfp_net_keepalive_uninit(&pf_dev->multi_pf); 2580 } 2581 eth_table_cleanup: 2582 free(nfp_eth_table); 2583 hwinfo_cleanup: 2584 free(hwinfo); 2585 cpp_cleanup: 2586 nfp_cpp_free(cpp); 2587 sync_free: 2588 nfp_sync_free(sync); 2589 pf_cleanup: 2590 rte_free(pf_dev); 2591 hw_priv_free: 2592 rte_free(hw_priv); 2593 2594 return ret; 2595 } 2596 2597 static int 2598 nfp_secondary_net_init(struct rte_eth_dev *eth_dev, 2599 void *para) 2600 { 2601 struct nfp_net_hw_priv *hw_priv; 2602 2603 hw_priv = para; 2604 nfp_net_ethdev_ops_mount(hw_priv->pf_dev, eth_dev); 2605 2606 eth_dev->process_private = para; 2607 2608 return 0; 2609 } 2610 2611 static int 2612 nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv) 2613 { 2614 uint32_t i; 2615 int ret = 0; 2616 uint32_t total_vnics; 2617 char port_name[RTE_ETH_NAME_MAX_LEN]; 2618 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 2619 2620 total_vnics = nfp_net_get_phyports_from_fw(pf_dev); 2621 2622 for (i = 0; i < total_vnics; i++) { 2623 nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev); 2624 2625 PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name); 2626 ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0, 2627 NULL, NULL, nfp_secondary_net_init, hw_priv); 2628 if (ret != 0) { 2629 PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name); 2630 goto port_cleanup; 2631 } 2632 } 2633 2634 return 0; 2635 2636 port_cleanup: 2637 for (uint32_t j = 0; j < i; j++) { 2638 struct rte_eth_dev *eth_dev; 2639 2640 nfp_port_name_generate(port_name, sizeof(port_name), j, pf_dev); 2641 eth_dev = rte_eth_dev_get_by_name(port_name); 2642 if (eth_dev != NULL) 2643 rte_eth_dev_destroy(eth_dev, NULL); 2644 } 2645 2646 return ret; 2647 } 2648 2649 static int 2650 nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv) 2651 { 2652 int ret; 2653 struct nfp_pf_dev *pf_dev = hw_priv->pf_dev; 2654 2655 switch (pf_dev->app_fw_id) { 2656 case NFP_APP_FW_CORE_NIC: 2657 PMD_INIT_LOG(INFO, "Initializing coreNIC."); 2658 ret = nfp_secondary_init_app_fw_nic(hw_priv); 2659 if (ret != 0) { 2660 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 2661 return ret; 2662 } 2663 break; 2664 case NFP_APP_FW_FLOWER_NIC: 2665 PMD_INIT_LOG(INFO, "Initializing Flower."); 2666 ret = nfp_secondary_init_app_fw_flower(hw_priv); 2667 if (ret != 0) { 2668 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 2669 return ret; 2670 } 2671 break; 2672 default: 2673 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded."); 2674 ret = -EINVAL; 2675 return ret; 2676 } 2677 2678 return 0; 2679 } 2680 2681 /* 2682 * When attaching to the NFP4000/6000 PF on a secondary process there 2683 * is no need to initialise the PF again. Only minimal work is required 2684 * here. 2685 */ 2686 static int 2687 nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 2688 { 2689 void *sync; 2690 int ret = 0; 2691 struct nfp_cpp *cpp; 2692 uint8_t function_id; 2693 struct nfp_pf_dev *pf_dev; 2694 enum nfp_app_fw_id app_fw_id; 2695 char name[RTE_ETH_NAME_MAX_LEN]; 2696 struct nfp_rtsym_table *sym_tbl; 2697 struct nfp_net_hw_priv *hw_priv; 2698 const struct nfp_dev_info *dev_info; 2699 char app_name[RTE_ETH_NAME_MAX_LEN]; 2700 2701 if (pci_dev == NULL) 2702 return -ENODEV; 2703 2704 if (pci_dev->mem_resource[0].addr == NULL) { 2705 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 2706 return -ENODEV; 2707 } 2708 2709 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 2710 if (dev_info == NULL) { 2711 PMD_INIT_LOG(ERR, "Not supported device ID."); 2712 return -ENODEV; 2713 } 2714 2715 hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0); 2716 if (hw_priv == NULL) { 2717 PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data."); 2718 return -ENOMEM; 2719 } 2720 2721 /* Allocate memory for the PF "device" */ 2722 function_id = pci_dev->addr.function & 0x7; 2723 snprintf(name, sizeof(name), "nfp_pf%d", 0); 2724 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 2725 if (pf_dev == NULL) { 2726 PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device."); 2727 ret = -ENOMEM; 2728 goto hw_priv_free; 2729 } 2730 2731 hw_priv->pf_dev = pf_dev; 2732 hw_priv->dev_info = dev_info; 2733 2734 sync = nfp_sync_alloc(); 2735 if (sync == NULL) { 2736 PMD_INIT_LOG(ERR, "Failed to alloc sync zone."); 2737 ret = -ENOMEM; 2738 goto pf_cleanup; 2739 } 2740 2741 pf_dev->sync = sync; 2742 2743 /* 2744 * When device bound to UIO, the device could be used, by mistake, 2745 * by two DPDK apps, and the UIO driver does not avoid it. This 2746 * could lead to a serious problem when configuring the NFP CPP 2747 * interface. Here we avoid this telling to the CPP init code to 2748 * use a lock file if UIO is being used. 2749 */ 2750 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 2751 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 2752 else 2753 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 2754 2755 if (cpp == NULL) { 2756 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained."); 2757 ret = -EIO; 2758 goto sync_free; 2759 } 2760 2761 pf_dev->cpp = cpp; 2762 pf_dev->pci_dev = pci_dev; 2763 2764 /* 2765 * We don't have access to the PF created in the primary process 2766 * here so we have to read the number of ports from firmware. 2767 */ 2768 sym_tbl = nfp_rtsym_table_read(cpp); 2769 if (sym_tbl == NULL) { 2770 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table."); 2771 ret = -EIO; 2772 goto cpp_cleanup; 2773 } 2774 2775 pf_dev->sym_tbl = sym_tbl; 2776 2777 /* Read the number of physical ports from firmware */ 2778 pf_dev->multi_pf.function_id = function_id; 2779 pf_dev->total_phyports = nfp_net_get_phyports_from_fw(pf_dev); 2780 pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_fw(pf_dev->total_phyports); 2781 2782 /* Read the app ID of the firmware loaded */ 2783 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 2784 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 2785 if (ret != 0) { 2786 PMD_INIT_LOG(ERR, "Could not read %s from fw.", app_name); 2787 ret = -EIO; 2788 goto sym_tbl_cleanup; 2789 } 2790 2791 pf_dev->app_fw_id = app_fw_id; 2792 2793 hw_priv->is_pf = true; 2794 2795 /* Call app specific init code now */ 2796 ret = nfp_fw_app_secondary_init(hw_priv); 2797 if (ret != 0) { 2798 PMD_INIT_LOG(ERR, "Failed to init hw app primary."); 2799 goto sym_tbl_cleanup; 2800 } 2801 2802 return 0; 2803 2804 sym_tbl_cleanup: 2805 free(sym_tbl); 2806 cpp_cleanup: 2807 nfp_cpp_free(cpp); 2808 sync_free: 2809 nfp_sync_free(sync); 2810 pf_cleanup: 2811 rte_free(pf_dev); 2812 hw_priv_free: 2813 rte_free(hw_priv); 2814 2815 return ret; 2816 } 2817 2818 static int 2819 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2820 struct rte_pci_device *dev) 2821 { 2822 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2823 return nfp_pf_init(dev); 2824 else 2825 return nfp_pf_secondary_init(dev); 2826 } 2827 2828 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 2829 { 2830 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2831 PCI_DEVICE_ID_NFP3800_PF_NIC) 2832 }, 2833 { 2834 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2835 PCI_DEVICE_ID_NFP4000_PF_NIC) 2836 }, 2837 { 2838 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 2839 PCI_DEVICE_ID_NFP6000_PF_NIC) 2840 }, 2841 { 2842 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2843 PCI_DEVICE_ID_NFP3800_PF_NIC) 2844 }, 2845 { 2846 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2847 PCI_DEVICE_ID_NFP4000_PF_NIC) 2848 }, 2849 { 2850 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 2851 PCI_DEVICE_ID_NFP6000_PF_NIC) 2852 }, 2853 { 2854 .vendor_id = 0, 2855 }, 2856 }; 2857 2858 static int 2859 nfp_pci_uninit(struct rte_eth_dev *eth_dev) 2860 { 2861 uint16_t port_id; 2862 struct rte_pci_device *pci_dev; 2863 2864 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2865 2866 /* Free up all physical ports under PF */ 2867 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 2868 rte_eth_dev_close(port_id); 2869 /* 2870 * Ports can be closed and freed but hotplugging is not 2871 * currently supported. 2872 */ 2873 return -ENOTSUP; 2874 } 2875 2876 static int 2877 eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 2878 { 2879 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 2880 } 2881 2882 static struct rte_pci_driver rte_nfp_net_pf_pmd = { 2883 .id_table = pci_id_nfp_pf_net_map, 2884 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2885 .probe = nfp_pf_pci_probe, 2886 .remove = eth_nfp_pci_remove, 2887 }; 2888 2889 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd); 2890 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map); 2891 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio"); 2892 RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME, 2893 NFP_PF_FORCE_RELOAD_FW "=<0|1>" 2894 NFP_CPP_SERVICE_ENABLE "=<0|1>"); 2895