1646ea79cSHeinrich Kuhn /* SPDX-License-Identifier: BSD-3-Clause 2646ea79cSHeinrich Kuhn * Copyright (c) 2014-2021 Netronome Systems, Inc. 3646ea79cSHeinrich Kuhn * All rights reserved. 4646ea79cSHeinrich Kuhn * 5646ea79cSHeinrich Kuhn * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 6646ea79cSHeinrich Kuhn */ 7646ea79cSHeinrich Kuhn 8*8ba461d1SPeng Zhang #include <unistd.h> 9*8ba461d1SPeng Zhang 105a95b024SChaoyong He #include <eal_firmware.h> 11851f03e1SHeinrich Kuhn #include <rte_alarm.h> 12646ea79cSHeinrich Kuhn 135a95b024SChaoyong He #include "flower/nfp_flower.h" 145a95b024SChaoyong He #include "nfd3/nfp_nfd3.h" 155a95b024SChaoyong He #include "nfdk/nfp_nfdk.h" 16646ea79cSHeinrich Kuhn #include "nfpcore/nfp_cpp.h" 17646ea79cSHeinrich Kuhn #include "nfpcore/nfp_hwinfo.h" 18646ea79cSHeinrich Kuhn #include "nfpcore/nfp_rtsym.h" 19646ea79cSHeinrich Kuhn #include "nfpcore/nfp_nsp.h" 20796f1aecSChaoyong He #include "nfpcore/nfp6000_pcie.h" 21*8ba461d1SPeng Zhang #include "nfpcore/nfp_resource.h" 22646ea79cSHeinrich Kuhn 23646ea79cSHeinrich Kuhn #include "nfp_cpp_bridge.h" 2454713740SChang Miao #include "nfp_ipsec.h" 255a95b024SChaoyong He #include "nfp_logs.h" 26b1880421SChaoyong He 27d505ee1dSChaoyong He #define NFP_PF_DRIVER_NAME net_nfp_pf 28d505ee1dSChaoyong He 29a243128bSChaoyong He static void 30f4d24fe9SChaoyong He nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, 318ceb85c3SChaoyong He uint16_t port) 32646ea79cSHeinrich Kuhn { 3349952141SChaoyong He struct nfp_net_hw *hw; 34646ea79cSHeinrich Kuhn struct nfp_eth_table *nfp_eth_table; 35646ea79cSHeinrich Kuhn 36646ea79cSHeinrich Kuhn /* Grab a pointer to the correct physical port */ 37968ec1c3SChaoyong He hw = app_fw_nic->ports[port]; 38646ea79cSHeinrich Kuhn 39a243128bSChaoyong He nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table; 40646ea79cSHeinrich Kuhn 41ef759759SChaoyong He rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr); 42646ea79cSHeinrich Kuhn } 43646ea79cSHeinrich Kuhn 44646ea79cSHeinrich Kuhn static int 45646ea79cSHeinrich Kuhn nfp_net_start(struct rte_eth_dev *dev) 46646ea79cSHeinrich Kuhn { 4749952141SChaoyong He int ret; 4849952141SChaoyong He uint16_t i; 4972d1dea6SChaoyong He struct nfp_hw *hw; 5049952141SChaoyong He uint32_t new_ctrl; 5149952141SChaoyong He uint32_t update = 0; 522e7c3612SQin Ke uint32_t cap_extend; 5349952141SChaoyong He uint32_t intr_vector; 5449952141SChaoyong He uint32_t ctrl_extend = 0; 5572d1dea6SChaoyong He struct nfp_net_hw *net_hw; 56646ea79cSHeinrich Kuhn struct nfp_pf_dev *pf_dev; 57646ea79cSHeinrich Kuhn struct rte_eth_rxmode *rxmode; 5849952141SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 5949952141SChaoyong He struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6049952141SChaoyong He struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 61646ea79cSHeinrich Kuhn 629d723baaSChaoyong He net_hw = dev->data->dev_private; 6365f6915dSChaoyong He pf_dev = net_hw->pf_dev; 64968ec1c3SChaoyong He app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 6572d1dea6SChaoyong He hw = &net_hw->super; 66646ea79cSHeinrich Kuhn 67646ea79cSHeinrich Kuhn /* Disabling queues just in case... */ 68646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 69646ea79cSHeinrich Kuhn 70646ea79cSHeinrich Kuhn /* Enabling the required queues in the device */ 71646ea79cSHeinrich Kuhn nfp_net_enable_queues(dev); 72646ea79cSHeinrich Kuhn 7340688372SChaoyong He /* Check and configure queue intr-vector mapping */ 74646ea79cSHeinrich Kuhn if (dev->data->dev_conf.intr_conf.rxq != 0) { 75968ec1c3SChaoyong He if (app_fw_nic->multiport) { 76646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 77646ea79cSHeinrich Kuhn "with NFP multiport PF"); 78646ea79cSHeinrich Kuhn return -EINVAL; 79646ea79cSHeinrich Kuhn } 80b0c496abSChaoyong He 81f4d24fe9SChaoyong He if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { 82646ea79cSHeinrich Kuhn /* 83646ea79cSHeinrich Kuhn * Better not to share LSC with RX interrupts. 8440688372SChaoyong He * Unregistering LSC interrupt handler. 85646ea79cSHeinrich Kuhn */ 86e7978635SChaoyong He rte_intr_callback_unregister(intr_handle, 87646ea79cSHeinrich Kuhn nfp_net_dev_interrupt_handler, (void *)dev); 88646ea79cSHeinrich Kuhn 89646ea79cSHeinrich Kuhn if (dev->data->nb_rx_queues > 1) { 90646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt only " 91646ea79cSHeinrich Kuhn "supports 1 queue with UIO"); 92646ea79cSHeinrich Kuhn return -EIO; 93646ea79cSHeinrich Kuhn } 94646ea79cSHeinrich Kuhn } 95b0c496abSChaoyong He 96646ea79cSHeinrich Kuhn intr_vector = dev->data->nb_rx_queues; 97c01e5c0cSChaoyong He if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) 98646ea79cSHeinrich Kuhn return -1; 99646ea79cSHeinrich Kuhn 100646ea79cSHeinrich Kuhn nfp_configure_rx_interrupt(dev, intr_handle); 101646ea79cSHeinrich Kuhn update = NFP_NET_CFG_UPDATE_MSIX; 102646ea79cSHeinrich Kuhn } 103646ea79cSHeinrich Kuhn 104dbad6f64SPeng Zhang /* Checking MTU set */ 10572d1dea6SChaoyong He if (dev->data->mtu > net_hw->flbufsz) { 106dbad6f64SPeng Zhang PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", 10772d1dea6SChaoyong He dev->data->mtu, net_hw->flbufsz); 108dbad6f64SPeng Zhang return -ERANGE; 109dbad6f64SPeng Zhang } 110dbad6f64SPeng Zhang 111646ea79cSHeinrich Kuhn rte_intr_enable(intr_handle); 112646ea79cSHeinrich Kuhn 113646ea79cSHeinrich Kuhn new_ctrl = nfp_check_offloads(dev); 114646ea79cSHeinrich Kuhn 115646ea79cSHeinrich Kuhn /* Writing configuration parameters in the device */ 11672d1dea6SChaoyong He nfp_net_params_setup(net_hw); 117646ea79cSHeinrich Kuhn 118c4de52ecSChaoyong He rxmode = &dev->data->dev_conf.rxmode; 119c01e5c0cSChaoyong He if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { 120646ea79cSHeinrich Kuhn nfp_net_rss_config_default(dev); 121646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_RSS; 12272d1dea6SChaoyong He new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); 123646ea79cSHeinrich Kuhn } 124646ea79cSHeinrich Kuhn 125646ea79cSHeinrich Kuhn /* Enable device */ 126646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 127646ea79cSHeinrich Kuhn 128646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 129646ea79cSHeinrich Kuhn 130c55abf61SChaoyong He /* Enable vxlan */ 13172d1dea6SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { 132c55abf61SChaoyong He new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; 133c55abf61SChaoyong He update |= NFP_NET_CFG_UPDATE_VXLAN; 134c925a157SFei Qin } 135c55abf61SChaoyong He 13672d1dea6SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) 137646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 138646ea79cSHeinrich Kuhn 13972d1dea6SChaoyong He if (nfp_reconfig(hw, new_ctrl, update) != 0) 140646ea79cSHeinrich Kuhn return -EIO; 141646ea79cSHeinrich Kuhn 1421e80c074SChaoyong He hw->ctrl = new_ctrl; 1431e80c074SChaoyong He 1442e7c3612SQin Ke /* Enable packet type offload by extend ctrl word1. */ 14572d1dea6SChaoyong He cap_extend = hw->cap_ext; 1462e7c3612SQin Ke if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) 1472e7c3612SQin Ke ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; 1482e7c3612SQin Ke 14954713740SChang Miao if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) 15054713740SChang Miao ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP 15154713740SChang Miao | NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; 15254713740SChang Miao 1532e7c3612SQin Ke update = NFP_NET_CFG_UPDATE_GEN; 15472d1dea6SChaoyong He if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0) 1552e7c3612SQin Ke return -EIO; 1562e7c3612SQin Ke 15772d1dea6SChaoyong He hw->ctrl_ext = ctrl_extend; 158b4b6988aSChaoyong He 159646ea79cSHeinrich Kuhn /* 160646ea79cSHeinrich Kuhn * Allocating rte mbufs for configured rx queues. 16140688372SChaoyong He * This requires queues being enabled before. 162646ea79cSHeinrich Kuhn */ 163c01e5c0cSChaoyong He if (nfp_net_rx_freelist_setup(dev) != 0) { 164646ea79cSHeinrich Kuhn ret = -ENOMEM; 165646ea79cSHeinrich Kuhn goto error; 166646ea79cSHeinrich Kuhn } 167646ea79cSHeinrich Kuhn 168646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 169646ea79cSHeinrich Kuhn /* Configure the physical port up */ 17072d1dea6SChaoyong He nfp_eth_set_configured(net_hw->cpp, net_hw->nfp_idx, 1); 171646ea79cSHeinrich Kuhn else 17272d1dea6SChaoyong He nfp_eth_set_configured(dev->process_private, net_hw->nfp_idx, 1); 173646ea79cSHeinrich Kuhn 174c46216e7SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 175c46216e7SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 176c46216e7SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 177c46216e7SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 178c46216e7SJie Hai 179646ea79cSHeinrich Kuhn return 0; 180646ea79cSHeinrich Kuhn 181646ea79cSHeinrich Kuhn error: 182646ea79cSHeinrich Kuhn /* 183646ea79cSHeinrich Kuhn * An error returned by this function should mean the app 184646ea79cSHeinrich Kuhn * exiting and then the system releasing all the memory 185646ea79cSHeinrich Kuhn * allocated even memory coming from hugepages. 186646ea79cSHeinrich Kuhn * 187646ea79cSHeinrich Kuhn * The device could be enabled at this point with some queues 188646ea79cSHeinrich Kuhn * ready for getting packets. This is true if the call to 189646ea79cSHeinrich Kuhn * nfp_net_rx_freelist_setup() succeeds for some queues but 190646ea79cSHeinrich Kuhn * fails for subsequent queues. 191646ea79cSHeinrich Kuhn * 192646ea79cSHeinrich Kuhn * This should make the app exiting but better if we tell the 193646ea79cSHeinrich Kuhn * device first. 194646ea79cSHeinrich Kuhn */ 195646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 196646ea79cSHeinrich Kuhn 197646ea79cSHeinrich Kuhn return ret; 198646ea79cSHeinrich Kuhn } 199646ea79cSHeinrich Kuhn 200646ea79cSHeinrich Kuhn /* Set the link up. */ 201646ea79cSHeinrich Kuhn static int 202646ea79cSHeinrich Kuhn nfp_net_set_link_up(struct rte_eth_dev *dev) 203646ea79cSHeinrich Kuhn { 204646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 205646ea79cSHeinrich Kuhn 2069d723baaSChaoyong He hw = dev->data->dev_private; 207646ea79cSHeinrich Kuhn 208646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 209646ea79cSHeinrich Kuhn /* Configure the physical port down */ 210646ea79cSHeinrich Kuhn return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); 211646ea79cSHeinrich Kuhn else 212f4d24fe9SChaoyong He return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); 213646ea79cSHeinrich Kuhn } 214646ea79cSHeinrich Kuhn 215646ea79cSHeinrich Kuhn /* Set the link down. */ 216646ea79cSHeinrich Kuhn static int 217646ea79cSHeinrich Kuhn nfp_net_set_link_down(struct rte_eth_dev *dev) 218646ea79cSHeinrich Kuhn { 219646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 220646ea79cSHeinrich Kuhn 2219d723baaSChaoyong He hw = dev->data->dev_private; 222646ea79cSHeinrich Kuhn 223646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 224646ea79cSHeinrich Kuhn /* Configure the physical port down */ 225646ea79cSHeinrich Kuhn return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); 226646ea79cSHeinrich Kuhn else 227f4d24fe9SChaoyong He return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); 228646ea79cSHeinrich Kuhn } 229646ea79cSHeinrich Kuhn 2303b00109dSPeng Zhang static uint8_t 2313b00109dSPeng Zhang nfp_function_id_get(const struct nfp_pf_dev *pf_dev, 2323b00109dSPeng Zhang uint8_t phy_port) 2333b00109dSPeng Zhang { 2343b00109dSPeng Zhang if (pf_dev->multi_pf.enabled) 2353b00109dSPeng Zhang return pf_dev->multi_pf.function_id; 2363b00109dSPeng Zhang 2373b00109dSPeng Zhang return phy_port; 2383b00109dSPeng Zhang } 2393b00109dSPeng Zhang 240*8ba461d1SPeng Zhang static void 241*8ba461d1SPeng Zhang nfp_net_beat_timer(void *arg) 242*8ba461d1SPeng Zhang { 243*8ba461d1SPeng Zhang uint64_t cur_sec; 244*8ba461d1SPeng Zhang struct nfp_multi_pf *multi_pf = arg; 245*8ba461d1SPeng Zhang 246*8ba461d1SPeng Zhang cur_sec = rte_rdtsc(); 247*8ba461d1SPeng Zhang nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id)); 248*8ba461d1SPeng Zhang 249*8ba461d1SPeng Zhang /* Beat once per second. */ 250*8ba461d1SPeng Zhang if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 251*8ba461d1SPeng Zhang (void *)multi_pf) < 0) { 252*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "Error setting alarm"); 253*8ba461d1SPeng Zhang } 254*8ba461d1SPeng Zhang } 255*8ba461d1SPeng Zhang 256*8ba461d1SPeng Zhang static int 257*8ba461d1SPeng Zhang nfp_net_keepalive_init(struct nfp_cpp *cpp, 258*8ba461d1SPeng Zhang struct nfp_multi_pf *multi_pf) 259*8ba461d1SPeng Zhang { 260*8ba461d1SPeng Zhang uint8_t *base; 261*8ba461d1SPeng Zhang uint64_t addr; 262*8ba461d1SPeng Zhang uint32_t size; 263*8ba461d1SPeng Zhang uint32_t cpp_id; 264*8ba461d1SPeng Zhang struct nfp_resource *res; 265*8ba461d1SPeng Zhang 266*8ba461d1SPeng Zhang res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE); 267*8ba461d1SPeng Zhang if (res == NULL) 268*8ba461d1SPeng Zhang return -EIO; 269*8ba461d1SPeng Zhang 270*8ba461d1SPeng Zhang cpp_id = nfp_resource_cpp_id(res); 271*8ba461d1SPeng Zhang addr = nfp_resource_address(res); 272*8ba461d1SPeng Zhang size = nfp_resource_size(res); 273*8ba461d1SPeng Zhang 274*8ba461d1SPeng Zhang nfp_resource_release(res); 275*8ba461d1SPeng Zhang 276*8ba461d1SPeng Zhang /* Allocate a fixed area for keepalive. */ 277*8ba461d1SPeng Zhang base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area); 278*8ba461d1SPeng Zhang if (base == NULL) { 279*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "Failed to map area for keepalive."); 280*8ba461d1SPeng Zhang return -EIO; 281*8ba461d1SPeng Zhang } 282*8ba461d1SPeng Zhang 283*8ba461d1SPeng Zhang multi_pf->beat_addr = base; 284*8ba461d1SPeng Zhang 285*8ba461d1SPeng Zhang return 0; 286*8ba461d1SPeng Zhang } 287*8ba461d1SPeng Zhang 288*8ba461d1SPeng Zhang static void 289*8ba461d1SPeng Zhang nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf) 290*8ba461d1SPeng Zhang { 291*8ba461d1SPeng Zhang nfp_cpp_area_release_free(multi_pf->beat_area); 292*8ba461d1SPeng Zhang } 293*8ba461d1SPeng Zhang 294*8ba461d1SPeng Zhang static int 295*8ba461d1SPeng Zhang nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf) 296*8ba461d1SPeng Zhang { 297*8ba461d1SPeng Zhang if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 298*8ba461d1SPeng Zhang (void *)multi_pf) < 0) { 299*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "Error setting alarm"); 300*8ba461d1SPeng Zhang return -EIO; 301*8ba461d1SPeng Zhang } 302*8ba461d1SPeng Zhang 303*8ba461d1SPeng Zhang return 0; 304*8ba461d1SPeng Zhang } 305*8ba461d1SPeng Zhang 306*8ba461d1SPeng Zhang static void 307*8ba461d1SPeng Zhang nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) 308*8ba461d1SPeng Zhang { 309*8ba461d1SPeng Zhang /* Cancel keepalive for multiple PF setup */ 310*8ba461d1SPeng Zhang rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); 311*8ba461d1SPeng Zhang } 312*8ba461d1SPeng Zhang 313646ea79cSHeinrich Kuhn /* Reset and stop device. The device can not be restarted. */ 314646ea79cSHeinrich Kuhn static int 315646ea79cSHeinrich Kuhn nfp_net_close(struct rte_eth_dev *dev) 316646ea79cSHeinrich Kuhn { 3178ceb85c3SChaoyong He uint8_t i; 3183b00109dSPeng Zhang uint8_t id; 31949952141SChaoyong He struct nfp_net_hw *hw; 32049952141SChaoyong He struct nfp_pf_dev *pf_dev; 32149952141SChaoyong He struct rte_pci_device *pci_dev; 32249952141SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 323646ea79cSHeinrich Kuhn 324646ea79cSHeinrich Kuhn if (rte_eal_process_type() != RTE_PROC_PRIMARY) 325646ea79cSHeinrich Kuhn return 0; 326646ea79cSHeinrich Kuhn 3279d723baaSChaoyong He hw = dev->data->dev_private; 32865f6915dSChaoyong He pf_dev = hw->pf_dev; 329646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(dev); 330968ec1c3SChaoyong He app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 331646ea79cSHeinrich Kuhn 332646ea79cSHeinrich Kuhn /* 333646ea79cSHeinrich Kuhn * We assume that the DPDK application is stopping all the 334646ea79cSHeinrich Kuhn * threads/queues before calling the device close function. 335646ea79cSHeinrich Kuhn */ 336646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 337646ea79cSHeinrich Kuhn 338646ea79cSHeinrich Kuhn /* Clear queues */ 3391c8d02bbSJin Liu nfp_net_close_tx_queue(dev); 3401c8d02bbSJin Liu nfp_net_close_rx_queue(dev); 341646ea79cSHeinrich Kuhn 34254713740SChang Miao /* Clear ipsec */ 34354713740SChang Miao nfp_ipsec_uninit(dev); 34454713740SChang Miao 345851f03e1SHeinrich Kuhn /* Cancel possible impending LSC work here before releasing the port */ 346f4d24fe9SChaoyong He rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); 347851f03e1SHeinrich Kuhn 348646ea79cSHeinrich Kuhn /* Only free PF resources after all physical ports have been closed */ 349646ea79cSHeinrich Kuhn /* Mark this port as unused and free device priv resources */ 350f58bde00SChaoyong He nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); 351968ec1c3SChaoyong He app_fw_nic->ports[hw->idx] = NULL; 352646ea79cSHeinrich Kuhn 353968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 3543b00109dSPeng Zhang id = nfp_function_id_get(pf_dev, i); 3553b00109dSPeng Zhang 356646ea79cSHeinrich Kuhn /* Check to see if ports are still in use */ 3573b00109dSPeng Zhang if (app_fw_nic->ports[id] != NULL) 358646ea79cSHeinrich Kuhn return 0; 359646ea79cSHeinrich Kuhn } 360646ea79cSHeinrich Kuhn 361646ea79cSHeinrich Kuhn /* Now it is safe to free all PF resources */ 362646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "Freeing PF resources"); 363*8ba461d1SPeng Zhang if (pf_dev->multi_pf.enabled) { 364*8ba461d1SPeng Zhang nfp_net_keepalive_stop(&pf_dev->multi_pf); 365*8ba461d1SPeng Zhang nfp_net_keepalive_uninit(&pf_dev->multi_pf); 366*8ba461d1SPeng Zhang } 367646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->ctrl_area); 368711e4559SChaoyong He nfp_cpp_area_free(pf_dev->qc_area); 369646ea79cSHeinrich Kuhn free(pf_dev->hwinfo); 370646ea79cSHeinrich Kuhn free(pf_dev->sym_tbl); 371646ea79cSHeinrich Kuhn nfp_cpp_free(pf_dev->cpp); 372968ec1c3SChaoyong He rte_free(app_fw_nic); 373646ea79cSHeinrich Kuhn rte_free(pf_dev); 374646ea79cSHeinrich Kuhn 375d61138d4SHarman Kalra rte_intr_disable(pci_dev->intr_handle); 376646ea79cSHeinrich Kuhn 37740688372SChaoyong He /* Unregister callback func from eal lib */ 378d61138d4SHarman Kalra rte_intr_callback_unregister(pci_dev->intr_handle, 379a6189a67SJin Liu nfp_net_dev_interrupt_handler, (void *)dev); 380646ea79cSHeinrich Kuhn 381646ea79cSHeinrich Kuhn return 0; 382646ea79cSHeinrich Kuhn } 383646ea79cSHeinrich Kuhn 384c55abf61SChaoyong He static int 385c55abf61SChaoyong He nfp_net_find_vxlan_idx(struct nfp_net_hw *hw, 386c55abf61SChaoyong He uint16_t port, 387c55abf61SChaoyong He uint32_t *idx) 388c55abf61SChaoyong He { 389c55abf61SChaoyong He uint32_t i; 390c55abf61SChaoyong He int free_idx = -1; 391c55abf61SChaoyong He 392c55abf61SChaoyong He for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 393c55abf61SChaoyong He if (hw->vxlan_ports[i] == port) { 394c55abf61SChaoyong He free_idx = i; 395c55abf61SChaoyong He break; 396c55abf61SChaoyong He } 397c55abf61SChaoyong He 398c55abf61SChaoyong He if (hw->vxlan_usecnt[i] == 0) { 399c55abf61SChaoyong He free_idx = i; 400c55abf61SChaoyong He break; 401c55abf61SChaoyong He } 402c55abf61SChaoyong He } 403c55abf61SChaoyong He 404c55abf61SChaoyong He if (free_idx == -1) 405c55abf61SChaoyong He return -EINVAL; 406c55abf61SChaoyong He 407c55abf61SChaoyong He *idx = free_idx; 408c55abf61SChaoyong He 409c55abf61SChaoyong He return 0; 410c55abf61SChaoyong He } 411c55abf61SChaoyong He 412c55abf61SChaoyong He static int 413c55abf61SChaoyong He nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, 414c55abf61SChaoyong He struct rte_eth_udp_tunnel *tunnel_udp) 415c55abf61SChaoyong He { 416c55abf61SChaoyong He int ret; 417c55abf61SChaoyong He uint32_t idx; 418c55abf61SChaoyong He uint16_t vxlan_port; 419c55abf61SChaoyong He struct nfp_net_hw *hw; 420c55abf61SChaoyong He enum rte_eth_tunnel_type tnl_type; 421c55abf61SChaoyong He 4229d723baaSChaoyong He hw = dev->data->dev_private; 423c55abf61SChaoyong He vxlan_port = tunnel_udp->udp_port; 424c55abf61SChaoyong He tnl_type = tunnel_udp->prot_type; 425c55abf61SChaoyong He 426c55abf61SChaoyong He if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 427c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 428c55abf61SChaoyong He return -ENOTSUP; 429c55abf61SChaoyong He } 430c55abf61SChaoyong He 431c55abf61SChaoyong He ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 432c55abf61SChaoyong He if (ret != 0) { 433c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 434c55abf61SChaoyong He return -EINVAL; 435c55abf61SChaoyong He } 436c55abf61SChaoyong He 437c55abf61SChaoyong He if (hw->vxlan_usecnt[idx] == 0) { 438c55abf61SChaoyong He ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port); 439c55abf61SChaoyong He if (ret != 0) { 440c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed set vxlan port"); 441c55abf61SChaoyong He return -EINVAL; 442c55abf61SChaoyong He } 443c55abf61SChaoyong He } 444c55abf61SChaoyong He 445c55abf61SChaoyong He hw->vxlan_usecnt[idx]++; 446c55abf61SChaoyong He 447c55abf61SChaoyong He return 0; 448c55abf61SChaoyong He } 449c55abf61SChaoyong He 450c55abf61SChaoyong He static int 451c55abf61SChaoyong He nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, 452c55abf61SChaoyong He struct rte_eth_udp_tunnel *tunnel_udp) 453c55abf61SChaoyong He { 454c55abf61SChaoyong He int ret; 455c55abf61SChaoyong He uint32_t idx; 456c55abf61SChaoyong He uint16_t vxlan_port; 457c55abf61SChaoyong He struct nfp_net_hw *hw; 458c55abf61SChaoyong He enum rte_eth_tunnel_type tnl_type; 459c55abf61SChaoyong He 4609d723baaSChaoyong He hw = dev->data->dev_private; 461c55abf61SChaoyong He vxlan_port = tunnel_udp->udp_port; 462c55abf61SChaoyong He tnl_type = tunnel_udp->prot_type; 463c55abf61SChaoyong He 464c55abf61SChaoyong He if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 465c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 466c55abf61SChaoyong He return -ENOTSUP; 467c55abf61SChaoyong He } 468c55abf61SChaoyong He 469c55abf61SChaoyong He ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 470c55abf61SChaoyong He if (ret != 0 || hw->vxlan_usecnt[idx] == 0) { 471c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 472c55abf61SChaoyong He return -EINVAL; 473c55abf61SChaoyong He } 474c55abf61SChaoyong He 475c55abf61SChaoyong He hw->vxlan_usecnt[idx]--; 476c55abf61SChaoyong He 477c55abf61SChaoyong He if (hw->vxlan_usecnt[idx] == 0) { 478c55abf61SChaoyong He ret = nfp_net_set_vxlan_port(hw, idx, 0); 479c55abf61SChaoyong He if (ret != 0) { 480c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed set vxlan port"); 481c55abf61SChaoyong He return -EINVAL; 482c55abf61SChaoyong He } 483c55abf61SChaoyong He } 484c55abf61SChaoyong He 485c55abf61SChaoyong He return 0; 486c55abf61SChaoyong He } 487c55abf61SChaoyong He 488646ea79cSHeinrich Kuhn /* Initialise and register driver with DPDK Application */ 4898d961320SJin Liu static const struct eth_dev_ops nfp_net_eth_dev_ops = { 490646ea79cSHeinrich Kuhn .dev_configure = nfp_net_configure, 491646ea79cSHeinrich Kuhn .dev_start = nfp_net_start, 492646ea79cSHeinrich Kuhn .dev_stop = nfp_net_stop, 493646ea79cSHeinrich Kuhn .dev_set_link_up = nfp_net_set_link_up, 494646ea79cSHeinrich Kuhn .dev_set_link_down = nfp_net_set_link_down, 495646ea79cSHeinrich Kuhn .dev_close = nfp_net_close, 496646ea79cSHeinrich Kuhn .promiscuous_enable = nfp_net_promisc_enable, 497646ea79cSHeinrich Kuhn .promiscuous_disable = nfp_net_promisc_disable, 4984a86c36bSQin Ke .allmulticast_enable = nfp_net_allmulticast_enable, 4994a86c36bSQin Ke .allmulticast_disable = nfp_net_allmulticast_disable, 500646ea79cSHeinrich Kuhn .link_update = nfp_net_link_update, 501646ea79cSHeinrich Kuhn .stats_get = nfp_net_stats_get, 502646ea79cSHeinrich Kuhn .stats_reset = nfp_net_stats_reset, 503f26e8239SJames Hershaw .xstats_get = nfp_net_xstats_get, 504f26e8239SJames Hershaw .xstats_reset = nfp_net_xstats_reset, 505f26e8239SJames Hershaw .xstats_get_names = nfp_net_xstats_get_names, 506f26e8239SJames Hershaw .xstats_get_by_id = nfp_net_xstats_get_by_id, 507f26e8239SJames Hershaw .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, 508646ea79cSHeinrich Kuhn .dev_infos_get = nfp_net_infos_get, 509646ea79cSHeinrich Kuhn .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 510646ea79cSHeinrich Kuhn .mtu_set = nfp_net_dev_mtu_set, 5110a94d6bcSJin Liu .mac_addr_set = nfp_net_set_mac_addr, 512646ea79cSHeinrich Kuhn .vlan_offload_set = nfp_net_vlan_offload_set, 513646ea79cSHeinrich Kuhn .reta_update = nfp_net_reta_update, 514646ea79cSHeinrich Kuhn .reta_query = nfp_net_reta_query, 515646ea79cSHeinrich Kuhn .rss_hash_update = nfp_net_rss_hash_update, 516646ea79cSHeinrich Kuhn .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 517646ea79cSHeinrich Kuhn .rx_queue_setup = nfp_net_rx_queue_setup, 518646ea79cSHeinrich Kuhn .rx_queue_release = nfp_net_rx_queue_release, 5198d961320SJin Liu .tx_queue_setup = nfp_net_tx_queue_setup, 52052ddc4c2SJin Liu .tx_queue_release = nfp_net_tx_queue_release, 52152ddc4c2SJin Liu .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 52252ddc4c2SJin Liu .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 523c55abf61SChaoyong He .udp_tunnel_port_add = nfp_udp_tunnel_port_add, 524c55abf61SChaoyong He .udp_tunnel_port_del = nfp_udp_tunnel_port_del, 525128c8ad9SChaoyong He .fw_version_get = nfp_net_firmware_version_get, 52652ddc4c2SJin Liu }; 52752ddc4c2SJin Liu 528ee8ca64eSChaoyong He static inline void 529ee8ca64eSChaoyong He nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, 530ee8ca64eSChaoyong He struct rte_eth_dev *eth_dev) 531266470b2SJin Liu { 532ee8ca64eSChaoyong He if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3) 533ee8ca64eSChaoyong He eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts; 534ee8ca64eSChaoyong He else 535ee8ca64eSChaoyong He eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts; 536266470b2SJin Liu 5378d961320SJin Liu eth_dev->dev_ops = &nfp_net_eth_dev_ops; 538266470b2SJin Liu eth_dev->rx_queue_count = nfp_net_rx_queue_count; 539266470b2SJin Liu eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; 540266470b2SJin Liu } 541266470b2SJin Liu 542646ea79cSHeinrich Kuhn static int 543646ea79cSHeinrich Kuhn nfp_net_init(struct rte_eth_dev *eth_dev) 544646ea79cSHeinrich Kuhn { 54549952141SChaoyong He int err; 54649952141SChaoyong He uint16_t port; 5470314a8ffSChaoyong He uint64_t rx_base; 5480314a8ffSChaoyong He uint64_t tx_base; 5494a9bb682SChaoyong He struct nfp_hw *hw; 5504a9bb682SChaoyong He struct nfp_net_hw *net_hw; 55149952141SChaoyong He struct nfp_pf_dev *pf_dev; 55249952141SChaoyong He struct rte_pci_device *pci_dev; 55349952141SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 554646ea79cSHeinrich Kuhn 555646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 55665f6915dSChaoyong He net_hw = eth_dev->data->dev_private; 557646ea79cSHeinrich Kuhn 558646ea79cSHeinrich Kuhn /* Use backpointer here to the PF of this eth_dev */ 55965f6915dSChaoyong He pf_dev = net_hw->pf_dev; 560646ea79cSHeinrich Kuhn 561968ec1c3SChaoyong He /* Use backpointer to the CoreNIC app struct */ 562968ec1c3SChaoyong He app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 563968ec1c3SChaoyong He 564646ea79cSHeinrich Kuhn port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; 5658ceb85c3SChaoyong He if (port > 7) { 566646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "Port value is wrong"); 567646ea79cSHeinrich Kuhn return -ENODEV; 568646ea79cSHeinrich Kuhn } 569646ea79cSHeinrich Kuhn 5704a9bb682SChaoyong He hw = &net_hw->super; 571646ea79cSHeinrich Kuhn 572030b2b19SChaoyong He PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " 5734a9bb682SChaoyong He "NFP internal port number: %d", port, net_hw->nfp_idx); 574646ea79cSHeinrich Kuhn 575646ea79cSHeinrich Kuhn rte_eth_copy_pci_info(eth_dev, pci_dev); 576646ea79cSHeinrich Kuhn 5778ad2cc8fSPeng Zhang if (port == 0 || pf_dev->multi_pf.enabled) { 578f26e8239SJames Hershaw uint32_t min_size; 579f26e8239SJames Hershaw 5804a9bb682SChaoyong He hw->ctrl_bar = pf_dev->ctrl_bar; 5814a9bb682SChaoyong He min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index; 5824a9bb682SChaoyong He net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats", 5834a9bb682SChaoyong He min_size, &net_hw->mac_stats_area); 5844a9bb682SChaoyong He if (net_hw->mac_stats_bar == NULL) { 585f26e8239SJames Hershaw PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); 586f26e8239SJames Hershaw return -EIO; 587f26e8239SJames Hershaw } 588b0c496abSChaoyong He 5894a9bb682SChaoyong He net_hw->mac_stats = net_hw->mac_stats_bar; 590646ea79cSHeinrich Kuhn } else { 591a6189a67SJin Liu if (pf_dev->ctrl_bar == NULL) 592646ea79cSHeinrich Kuhn return -ENODEV; 593b0c496abSChaoyong He 594a6189a67SJin Liu /* Use port offset in pf ctrl_bar for this ports control bar */ 5954a9bb682SChaoyong He hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); 5964a9bb682SChaoyong He net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + 5974a9bb682SChaoyong He (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); 598646ea79cSHeinrich Kuhn } 599646ea79cSHeinrich Kuhn 6004a9bb682SChaoyong He PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); 6014a9bb682SChaoyong He PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); 602646ea79cSHeinrich Kuhn 6034a9bb682SChaoyong He err = nfp_net_common_init(pci_dev, net_hw); 604cd4397ebSPeng Zhang if (err != 0) 605cd4397ebSPeng Zhang return err; 606fd392f84SPeng Zhang 607eecdfcc1SShihong Wang err = nfp_net_tlv_caps_parse(eth_dev); 608eecdfcc1SShihong Wang if (err != 0) { 609eecdfcc1SShihong Wang PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); 610eecdfcc1SShihong Wang return err; 611eecdfcc1SShihong Wang } 612eecdfcc1SShihong Wang 61354713740SChang Miao err = nfp_ipsec_init(eth_dev); 61454713740SChang Miao if (err != 0) { 61554713740SChang Miao PMD_INIT_LOG(ERR, "Failed to init IPsec module"); 61654713740SChang Miao return err; 61754713740SChang Miao } 61854713740SChang Miao 6194a9bb682SChaoyong He nfp_net_ethdev_ops_mount(net_hw, eth_dev); 620266470b2SJin Liu 6214a9bb682SChaoyong He net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * 622f26e8239SJames Hershaw nfp_net_xstats_size(eth_dev), 0); 6234a9bb682SChaoyong He if (net_hw->eth_xstats_base == NULL) { 624f26e8239SJames Hershaw PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", 625f26e8239SJames Hershaw pci_dev->device.name); 626f26e8239SJames Hershaw return -ENOMEM; 627f26e8239SJames Hershaw } 628f26e8239SJames Hershaw 629646ea79cSHeinrich Kuhn /* Work out where in the BAR the queues start. */ 6304a9bb682SChaoyong He tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 6314a9bb682SChaoyong He rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 632646ea79cSHeinrich Kuhn 6334a9bb682SChaoyong He net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 6344a9bb682SChaoyong He net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; 6354a9bb682SChaoyong He eth_dev->data->dev_private = net_hw; 636646ea79cSHeinrich Kuhn 637646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", 6384a9bb682SChaoyong He hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); 639646ea79cSHeinrich Kuhn 6404a9bb682SChaoyong He nfp_net_cfg_queue_setup(net_hw); 6414a9bb682SChaoyong He net_hw->mtu = RTE_ETHER_MTU; 642646ea79cSHeinrich Kuhn 643646ea79cSHeinrich Kuhn /* VLAN insertion is incompatible with LSOv2 */ 6444a9bb682SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) 6454a9bb682SChaoyong He hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 646646ea79cSHeinrich Kuhn 6474a9bb682SChaoyong He nfp_net_log_device_information(net_hw); 648646ea79cSHeinrich Kuhn 649646ea79cSHeinrich Kuhn /* Initializing spinlock for reconfigs */ 6504a9bb682SChaoyong He rte_spinlock_init(&hw->reconfig_lock); 651646ea79cSHeinrich Kuhn 652646ea79cSHeinrich Kuhn /* Allocating memory for mac addr */ 653f4d24fe9SChaoyong He eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); 654646ea79cSHeinrich Kuhn if (eth_dev->data->mac_addrs == NULL) { 655646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Failed to space for MAC address"); 6567feb8909SChaoyong He return -ENOMEM; 657646ea79cSHeinrich Kuhn } 658646ea79cSHeinrich Kuhn 659968ec1c3SChaoyong He nfp_net_pf_read_mac(app_fw_nic, port); 660503ac807SChaoyong He nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 661646ea79cSHeinrich Kuhn 6624a9bb682SChaoyong He if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { 663a6189a67SJin Liu PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); 664646ea79cSHeinrich Kuhn /* Using random mac addresses for VFs */ 6654a9bb682SChaoyong He rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); 666503ac807SChaoyong He nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 667646ea79cSHeinrich Kuhn } 668646ea79cSHeinrich Kuhn 669646ea79cSHeinrich Kuhn /* Copying mac address to DPDK eth_dev struct */ 6704a9bb682SChaoyong He rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs); 671646ea79cSHeinrich Kuhn 6724a9bb682SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) 673646ea79cSHeinrich Kuhn eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 674646ea79cSHeinrich Kuhn 675646ea79cSHeinrich Kuhn eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 676646ea79cSHeinrich Kuhn 677030b2b19SChaoyong He PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x " 678c2c4f87bSAman Deep Singh "mac=" RTE_ETHER_ADDR_PRT_FMT, 679646ea79cSHeinrich Kuhn eth_dev->data->port_id, pci_dev->id.vendor_id, 680646ea79cSHeinrich Kuhn pci_dev->id.device_id, 6814a9bb682SChaoyong He RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); 682646ea79cSHeinrich Kuhn 683646ea79cSHeinrich Kuhn /* Registering LSC interrupt handler */ 684d61138d4SHarman Kalra rte_intr_callback_register(pci_dev->intr_handle, 685a6189a67SJin Liu nfp_net_dev_interrupt_handler, (void *)eth_dev); 686646ea79cSHeinrich Kuhn /* Telling the firmware about the LSC interrupt entry */ 6874a9bb682SChaoyong He nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 68894d0631aSPeng Zhang /* Unmask the LSC interrupt */ 68994d0631aSPeng Zhang nfp_net_irq_unmask(eth_dev); 690646ea79cSHeinrich Kuhn /* Recording current stats counters values */ 691646ea79cSHeinrich Kuhn nfp_net_stats_reset(eth_dev); 692646ea79cSHeinrich Kuhn 693646ea79cSHeinrich Kuhn return 0; 694646ea79cSHeinrich Kuhn } 695646ea79cSHeinrich Kuhn 696646ea79cSHeinrich Kuhn #define DEFAULT_FW_PATH "/lib/firmware/netronome" 697646ea79cSHeinrich Kuhn 698646ea79cSHeinrich Kuhn static int 699f4d24fe9SChaoyong He nfp_fw_upload(struct rte_pci_device *dev, 700f4d24fe9SChaoyong He struct nfp_nsp *nsp, 701f4d24fe9SChaoyong He char *card) 702646ea79cSHeinrich Kuhn { 703646ea79cSHeinrich Kuhn void *fw_buf; 704646ea79cSHeinrich Kuhn size_t fsize; 70549952141SChaoyong He char serial[40]; 70649952141SChaoyong He char fw_name[125]; 707ff627b74SChaoyong He uint16_t interface; 708ff627b74SChaoyong He uint32_t cpp_serial_len; 709ff627b74SChaoyong He const uint8_t *cpp_serial; 71049952141SChaoyong He struct nfp_cpp *cpp = nfp_nsp_cpp(nsp); 711ff627b74SChaoyong He 712ff627b74SChaoyong He cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial); 713ff627b74SChaoyong He if (cpp_serial_len != NFP_SERIAL_LEN) 714ff627b74SChaoyong He return -ERANGE; 715ff627b74SChaoyong He 716ff627b74SChaoyong He interface = nfp_cpp_interface(cpp); 717646ea79cSHeinrich Kuhn 718646ea79cSHeinrich Kuhn /* Looking for firmware file in order of priority */ 719646ea79cSHeinrich Kuhn 720646ea79cSHeinrich Kuhn /* First try to find a firmware image specific for this device */ 721646ea79cSHeinrich Kuhn snprintf(serial, sizeof(serial), 722646ea79cSHeinrich Kuhn "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 723ff627b74SChaoyong He cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], 724ff627b74SChaoyong He cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); 725f4d24fe9SChaoyong He snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial); 726646ea79cSHeinrich Kuhn 727646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 728646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 729646ea79cSHeinrich Kuhn goto load_fw; 730b0c496abSChaoyong He 731646ea79cSHeinrich Kuhn /* Then try the PCI name */ 732646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, 7333ddb4cc0SPeng Zhang dev->name); 734646ea79cSHeinrich Kuhn 735646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 736646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 737646ea79cSHeinrich Kuhn goto load_fw; 738646ea79cSHeinrich Kuhn 739646ea79cSHeinrich Kuhn /* Finally try the card type and media */ 740646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); 741646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 742c01e5c0cSChaoyong He if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 743c01e5c0cSChaoyong He goto load_fw; 744c01e5c0cSChaoyong He 745c01e5c0cSChaoyong He PMD_DRV_LOG(ERR, "Can't find suitable firmware."); 746646ea79cSHeinrich Kuhn return -ENOENT; 747646ea79cSHeinrich Kuhn 748646ea79cSHeinrich Kuhn load_fw: 749646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", 750646ea79cSHeinrich Kuhn fw_name, fsize); 751646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 7522e634b03SPeng Zhang if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) { 7532e634b03SPeng Zhang free(fw_buf); 7542e634b03SPeng Zhang PMD_DRV_LOG(ERR, "Firmware load failed."); 7552e634b03SPeng Zhang return -EIO; 7562e634b03SPeng Zhang } 7572e634b03SPeng Zhang 758646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Done"); 759646ea79cSHeinrich Kuhn 760646ea79cSHeinrich Kuhn free(fw_buf); 761646ea79cSHeinrich Kuhn 762646ea79cSHeinrich Kuhn return 0; 763646ea79cSHeinrich Kuhn } 764646ea79cSHeinrich Kuhn 7653b00109dSPeng Zhang static void 7663b00109dSPeng Zhang nfp_fw_unload(struct nfp_cpp *cpp) 7673b00109dSPeng Zhang { 7683b00109dSPeng Zhang struct nfp_nsp *nsp; 7693b00109dSPeng Zhang 7703b00109dSPeng Zhang nsp = nfp_nsp_open(cpp); 7713b00109dSPeng Zhang if (nsp == NULL) 7723b00109dSPeng Zhang return; 7733b00109dSPeng Zhang 7743b00109dSPeng Zhang nfp_nsp_device_soft_reset(nsp); 7753b00109dSPeng Zhang nfp_nsp_close(nsp); 7763b00109dSPeng Zhang } 7773b00109dSPeng Zhang 778646ea79cSHeinrich Kuhn static int 779*8ba461d1SPeng Zhang nfp_fw_reload(struct rte_pci_device *dev, 780*8ba461d1SPeng Zhang struct nfp_nsp *nsp, 781*8ba461d1SPeng Zhang char *card_desc) 782*8ba461d1SPeng Zhang { 783*8ba461d1SPeng Zhang int err; 784*8ba461d1SPeng Zhang 785*8ba461d1SPeng Zhang nfp_nsp_device_soft_reset(nsp); 786*8ba461d1SPeng Zhang err = nfp_fw_upload(dev, nsp, card_desc); 787*8ba461d1SPeng Zhang if (err != 0) 788*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "NFP firmware load failed"); 789*8ba461d1SPeng Zhang 790*8ba461d1SPeng Zhang return err; 791*8ba461d1SPeng Zhang } 792*8ba461d1SPeng Zhang 793*8ba461d1SPeng Zhang static int 794*8ba461d1SPeng Zhang nfp_fw_loaded_check_alive(struct rte_pci_device *dev, 795*8ba461d1SPeng Zhang struct nfp_nsp *nsp, 796*8ba461d1SPeng Zhang char *card_desc, 797*8ba461d1SPeng Zhang const struct nfp_dev_info *dev_info, 798*8ba461d1SPeng Zhang struct nfp_multi_pf *multi_pf) 799*8ba461d1SPeng Zhang { 800*8ba461d1SPeng Zhang int offset; 801*8ba461d1SPeng Zhang uint32_t i; 802*8ba461d1SPeng Zhang uint64_t beat; 803*8ba461d1SPeng Zhang uint32_t port_num; 804*8ba461d1SPeng Zhang 805*8ba461d1SPeng Zhang /* 806*8ba461d1SPeng Zhang * If the beats of any other port changed in 3s, 807*8ba461d1SPeng Zhang * we should not reload the firmware. 808*8ba461d1SPeng Zhang */ 809*8ba461d1SPeng Zhang for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 810*8ba461d1SPeng Zhang if (port_num == multi_pf->function_id) 811*8ba461d1SPeng Zhang continue; 812*8ba461d1SPeng Zhang 813*8ba461d1SPeng Zhang offset = NFP_BEAT_OFFSET(port_num); 814*8ba461d1SPeng Zhang beat = nn_readq(multi_pf->beat_addr + offset); 815*8ba461d1SPeng Zhang for (i = 0; i < 3; i++) { 816*8ba461d1SPeng Zhang sleep(1); 817*8ba461d1SPeng Zhang if (nn_readq(multi_pf->beat_addr + offset) != beat) 818*8ba461d1SPeng Zhang return 0; 819*8ba461d1SPeng Zhang } 820*8ba461d1SPeng Zhang } 821*8ba461d1SPeng Zhang 822*8ba461d1SPeng Zhang return nfp_fw_reload(dev, nsp, card_desc); 823*8ba461d1SPeng Zhang } 824*8ba461d1SPeng Zhang 825*8ba461d1SPeng Zhang static int 826*8ba461d1SPeng Zhang nfp_fw_reload_for_multipf(struct rte_pci_device *dev, 827*8ba461d1SPeng Zhang struct nfp_nsp *nsp, 828*8ba461d1SPeng Zhang char *card_desc, 829*8ba461d1SPeng Zhang struct nfp_cpp *cpp, 830*8ba461d1SPeng Zhang const struct nfp_dev_info *dev_info, 831*8ba461d1SPeng Zhang struct nfp_multi_pf *multi_pf) 832*8ba461d1SPeng Zhang { 833*8ba461d1SPeng Zhang int err; 834*8ba461d1SPeng Zhang 835*8ba461d1SPeng Zhang err = nfp_net_keepalive_init(cpp, multi_pf); 836*8ba461d1SPeng Zhang if (err != 0) 837*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "NFP write beat failed"); 838*8ba461d1SPeng Zhang 839*8ba461d1SPeng Zhang if (nfp_nsp_fw_loaded(nsp)) 840*8ba461d1SPeng Zhang err = nfp_fw_loaded_check_alive(dev, nsp, card_desc, dev_info, multi_pf); 841*8ba461d1SPeng Zhang else 842*8ba461d1SPeng Zhang err = nfp_fw_reload(dev, nsp, card_desc); 843*8ba461d1SPeng Zhang if (err != 0) { 844*8ba461d1SPeng Zhang nfp_net_keepalive_uninit(multi_pf); 845*8ba461d1SPeng Zhang return err; 846*8ba461d1SPeng Zhang } 847*8ba461d1SPeng Zhang 848*8ba461d1SPeng Zhang err = nfp_net_keepalive_start(multi_pf); 849*8ba461d1SPeng Zhang if (err != 0) { 850*8ba461d1SPeng Zhang nfp_net_keepalive_uninit(multi_pf); 851*8ba461d1SPeng Zhang PMD_DRV_LOG(ERR, "NFP write beat failed"); 852*8ba461d1SPeng Zhang } 853*8ba461d1SPeng Zhang 854*8ba461d1SPeng Zhang return err; 855*8ba461d1SPeng Zhang } 856*8ba461d1SPeng Zhang 857*8ba461d1SPeng Zhang static int 858a6189a67SJin Liu nfp_fw_setup(struct rte_pci_device *dev, 859a6189a67SJin Liu struct nfp_cpp *cpp, 860a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table, 861*8ba461d1SPeng Zhang struct nfp_hwinfo *hwinfo, 862*8ba461d1SPeng Zhang const struct nfp_dev_info *dev_info, 863*8ba461d1SPeng Zhang struct nfp_multi_pf *multi_pf) 864646ea79cSHeinrich Kuhn { 86549952141SChaoyong He int err; 86649952141SChaoyong He char card_desc[100]; 867646ea79cSHeinrich Kuhn struct nfp_nsp *nsp; 868646ea79cSHeinrich Kuhn const char *nfp_fw_model; 869646ea79cSHeinrich Kuhn 87006be30d4SPeng Zhang nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); 87106be30d4SPeng Zhang if (nfp_fw_model == NULL) 872646ea79cSHeinrich Kuhn nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); 873646ea79cSHeinrich Kuhn 874c01e5c0cSChaoyong He if (nfp_fw_model != NULL) { 875646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); 876646ea79cSHeinrich Kuhn } else { 877646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "firmware model NOT found"); 878646ea79cSHeinrich Kuhn return -EIO; 879646ea79cSHeinrich Kuhn } 880646ea79cSHeinrich Kuhn 881646ea79cSHeinrich Kuhn if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 882646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", 883646ea79cSHeinrich Kuhn nfp_eth_table->count); 884646ea79cSHeinrich Kuhn return -EIO; 885646ea79cSHeinrich Kuhn } 886646ea79cSHeinrich Kuhn 887646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", 888646ea79cSHeinrich Kuhn nfp_eth_table->count); 889646ea79cSHeinrich Kuhn 890646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); 891646ea79cSHeinrich Kuhn 892646ea79cSHeinrich Kuhn snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 893646ea79cSHeinrich Kuhn nfp_fw_model, nfp_eth_table->count, 894646ea79cSHeinrich Kuhn nfp_eth_table->ports[0].speed / 1000); 895646ea79cSHeinrich Kuhn 896646ea79cSHeinrich Kuhn nsp = nfp_nsp_open(cpp); 897a6189a67SJin Liu if (nsp == NULL) { 898646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 899646ea79cSHeinrich Kuhn return -EIO; 900646ea79cSHeinrich Kuhn } 901646ea79cSHeinrich Kuhn 902*8ba461d1SPeng Zhang if (multi_pf->enabled) 903*8ba461d1SPeng Zhang err = nfp_fw_reload_for_multipf(dev, nsp, card_desc, cpp, dev_info, multi_pf); 904*8ba461d1SPeng Zhang else 905*8ba461d1SPeng Zhang err = nfp_fw_reload(dev, nsp, card_desc); 906646ea79cSHeinrich Kuhn 907646ea79cSHeinrich Kuhn nfp_nsp_close(nsp); 908646ea79cSHeinrich Kuhn return err; 909646ea79cSHeinrich Kuhn } 910646ea79cSHeinrich Kuhn 9118ad2cc8fSPeng Zhang static inline bool 912a508fa23SPeng Zhang nfp_check_multi_pf_from_fw(uint32_t total_vnics) 913a508fa23SPeng Zhang { 914a508fa23SPeng Zhang if (total_vnics == 1) 915a508fa23SPeng Zhang return true; 916a508fa23SPeng Zhang 917a508fa23SPeng Zhang return false; 918a508fa23SPeng Zhang } 919a508fa23SPeng Zhang 920a508fa23SPeng Zhang static inline bool 9218ad2cc8fSPeng Zhang nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev, 9228ad2cc8fSPeng Zhang struct nfp_cpp *cpp) 9238ad2cc8fSPeng Zhang { 9248ad2cc8fSPeng Zhang bool flag; 9258ad2cc8fSPeng Zhang struct nfp_nsp *nsp; 9268ad2cc8fSPeng Zhang 9278ad2cc8fSPeng Zhang nsp = nfp_nsp_open(cpp); 9288ad2cc8fSPeng Zhang if (nsp == NULL) { 9298ad2cc8fSPeng Zhang PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 9308ad2cc8fSPeng Zhang return false; 9318ad2cc8fSPeng Zhang } 9328ad2cc8fSPeng Zhang 9338ad2cc8fSPeng Zhang flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) && 9348ad2cc8fSPeng Zhang (pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC); 9358ad2cc8fSPeng Zhang 9368ad2cc8fSPeng Zhang nfp_nsp_close(nsp); 9378ad2cc8fSPeng Zhang return flag; 9388ad2cc8fSPeng Zhang } 9398ad2cc8fSPeng Zhang 940a6189a67SJin Liu static int 9410314a8ffSChaoyong He nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev, 9420314a8ffSChaoyong He const struct nfp_dev_info *dev_info) 943646ea79cSHeinrich Kuhn { 9448ceb85c3SChaoyong He uint8_t i; 9453b00109dSPeng Zhang uint8_t id; 946e7978635SChaoyong He int ret = 0; 9478ceb85c3SChaoyong He uint32_t total_vnics; 948646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 949968ec1c3SChaoyong He unsigned int numa_node; 950646ea79cSHeinrich Kuhn struct rte_eth_dev *eth_dev; 951968ec1c3SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 952a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 9533b00109dSPeng Zhang char bar_name[RTE_ETH_NAME_MAX_LEN]; 954646ea79cSHeinrich Kuhn char port_name[RTE_ETH_NAME_MAX_LEN]; 9553b00109dSPeng Zhang char vnic_name[RTE_ETH_NAME_MAX_LEN]; 956646ea79cSHeinrich Kuhn 957968ec1c3SChaoyong He nfp_eth_table = pf_dev->nfp_eth_table; 958968ec1c3SChaoyong He PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count); 9593b00109dSPeng Zhang id = nfp_function_id_get(pf_dev, 0); 960968ec1c3SChaoyong He 961968ec1c3SChaoyong He /* Allocate memory for the CoreNIC app */ 962968ec1c3SChaoyong He app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0); 963968ec1c3SChaoyong He if (app_fw_nic == NULL) 964968ec1c3SChaoyong He return -ENOMEM; 965968ec1c3SChaoyong He 966968ec1c3SChaoyong He /* Point the app_fw_priv pointer in the PF to the coreNIC app */ 967968ec1c3SChaoyong He pf_dev->app_fw_priv = app_fw_nic; 968968ec1c3SChaoyong He 969968ec1c3SChaoyong He /* Read the number of vNIC's created for the PF */ 9703b00109dSPeng Zhang snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id); 9713b00109dSPeng Zhang total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret); 972e7978635SChaoyong He if (ret != 0 || total_vnics == 0 || total_vnics > 8) { 9733b00109dSPeng Zhang PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name); 974968ec1c3SChaoyong He ret = -ENODEV; 975968ec1c3SChaoyong He goto app_cleanup; 976968ec1c3SChaoyong He } 977968ec1c3SChaoyong He 978a508fa23SPeng Zhang if (pf_dev->multi_pf.enabled) { 979a508fa23SPeng Zhang if (!nfp_check_multi_pf_from_fw(total_vnics)) { 980a508fa23SPeng Zhang PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf"); 981a508fa23SPeng Zhang ret = -ENODEV; 982a508fa23SPeng Zhang goto app_cleanup; 983a508fa23SPeng Zhang } 984a508fa23SPeng Zhang } else { 985968ec1c3SChaoyong He /* 986968ec1c3SChaoyong He * For coreNIC the number of vNICs exposed should be the same as the 98740688372SChaoyong He * number of physical ports. 988968ec1c3SChaoyong He */ 9898ceb85c3SChaoyong He if (total_vnics != nfp_eth_table->count) { 990968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); 991968ec1c3SChaoyong He ret = -ENODEV; 992968ec1c3SChaoyong He goto app_cleanup; 993968ec1c3SChaoyong He } 994a508fa23SPeng Zhang } 995968ec1c3SChaoyong He 996968ec1c3SChaoyong He /* Populate coreNIC app properties */ 997968ec1c3SChaoyong He app_fw_nic->total_phyports = total_vnics; 998968ec1c3SChaoyong He app_fw_nic->pf_dev = pf_dev; 999968ec1c3SChaoyong He if (total_vnics > 1) 1000968ec1c3SChaoyong He app_fw_nic->multiport = true; 1001968ec1c3SChaoyong He 1002968ec1c3SChaoyong He /* Map the symbol table */ 10033b00109dSPeng Zhang snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id); 10043b00109dSPeng Zhang pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name, 1005d5e9fc86SChaoyong He app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ, 1006d5e9fc86SChaoyong He &pf_dev->ctrl_area); 1007968ec1c3SChaoyong He if (pf_dev->ctrl_bar == NULL) { 10083b00109dSPeng Zhang PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name); 1009968ec1c3SChaoyong He ret = -EIO; 1010968ec1c3SChaoyong He goto app_cleanup; 1011968ec1c3SChaoyong He } 1012968ec1c3SChaoyong He 1013968ec1c3SChaoyong He PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); 1014968ec1c3SChaoyong He 1015968ec1c3SChaoyong He /* Loop through all physical ports on PF */ 1016968ec1c3SChaoyong He numa_node = rte_socket_id(); 1017968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 10183b00109dSPeng Zhang id = nfp_function_id_get(pf_dev, i); 10193b00109dSPeng Zhang snprintf(port_name, sizeof(port_name), "%s_port%u", 10203b00109dSPeng Zhang pf_dev->pci_dev->device.name, id); 1021646ea79cSHeinrich Kuhn 1022646ea79cSHeinrich Kuhn /* Allocate a eth_dev for this phyport */ 1023646ea79cSHeinrich Kuhn eth_dev = rte_eth_dev_allocate(port_name); 1024a6189a67SJin Liu if (eth_dev == NULL) { 1025646ea79cSHeinrich Kuhn ret = -ENODEV; 1026646ea79cSHeinrich Kuhn goto port_cleanup; 1027646ea79cSHeinrich Kuhn } 1028646ea79cSHeinrich Kuhn 1029646ea79cSHeinrich Kuhn /* Allocate memory for this phyport */ 1030f4d24fe9SChaoyong He eth_dev->data->dev_private = rte_zmalloc_socket(port_name, 1031f4d24fe9SChaoyong He sizeof(struct nfp_net_hw), 1032646ea79cSHeinrich Kuhn RTE_CACHE_LINE_SIZE, numa_node); 1033a6189a67SJin Liu if (eth_dev->data->dev_private == NULL) { 1034646ea79cSHeinrich Kuhn ret = -ENOMEM; 1035646ea79cSHeinrich Kuhn rte_eth_dev_release_port(eth_dev); 1036646ea79cSHeinrich Kuhn goto port_cleanup; 1037646ea79cSHeinrich Kuhn } 1038646ea79cSHeinrich Kuhn 10399d723baaSChaoyong He hw = eth_dev->data->dev_private; 1040646ea79cSHeinrich Kuhn 1041646ea79cSHeinrich Kuhn /* Add this device to the PF's array of physical ports */ 10423b00109dSPeng Zhang app_fw_nic->ports[id] = hw; 1043646ea79cSHeinrich Kuhn 10440314a8ffSChaoyong He hw->dev_info = dev_info; 1045646ea79cSHeinrich Kuhn hw->pf_dev = pf_dev; 1046646ea79cSHeinrich Kuhn hw->cpp = pf_dev->cpp; 1047646ea79cSHeinrich Kuhn hw->eth_dev = eth_dev; 10483b00109dSPeng Zhang hw->idx = id; 10493b00109dSPeng Zhang hw->nfp_idx = nfp_eth_table->ports[id].index; 1050646ea79cSHeinrich Kuhn 1051646ea79cSHeinrich Kuhn eth_dev->device = &pf_dev->pci_dev->device; 1052646ea79cSHeinrich Kuhn 105340688372SChaoyong He /* 105440688372SChaoyong He * Ctrl/tx/rx BAR mappings and remaining init happens in 105540688372SChaoyong He * @nfp_net_init() 1056646ea79cSHeinrich Kuhn */ 1057646ea79cSHeinrich Kuhn ret = nfp_net_init(eth_dev); 1058c01e5c0cSChaoyong He if (ret != 0) { 1059646ea79cSHeinrich Kuhn ret = -ENODEV; 1060646ea79cSHeinrich Kuhn goto port_cleanup; 1061646ea79cSHeinrich Kuhn } 1062646ea79cSHeinrich Kuhn 1063646ea79cSHeinrich Kuhn rte_eth_dev_probing_finish(eth_dev); 1064646ea79cSHeinrich Kuhn 1065646ea79cSHeinrich Kuhn } /* End loop, all ports on this PF */ 1066968ec1c3SChaoyong He 1067968ec1c3SChaoyong He return 0; 1068646ea79cSHeinrich Kuhn 1069646ea79cSHeinrich Kuhn port_cleanup: 1070968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 10713b00109dSPeng Zhang id = nfp_function_id_get(pf_dev, i); 10723b00109dSPeng Zhang 10733b00109dSPeng Zhang if (app_fw_nic->ports[id] != NULL && 10743b00109dSPeng Zhang app_fw_nic->ports[id]->eth_dev != NULL) { 1075646ea79cSHeinrich Kuhn struct rte_eth_dev *tmp_dev; 10763b00109dSPeng Zhang tmp_dev = app_fw_nic->ports[id]->eth_dev; 107754713740SChang Miao nfp_ipsec_uninit(tmp_dev); 1078646ea79cSHeinrich Kuhn rte_eth_dev_release_port(tmp_dev); 10793b00109dSPeng Zhang app_fw_nic->ports[id] = NULL; 1080646ea79cSHeinrich Kuhn } 1081646ea79cSHeinrich Kuhn } 1082968ec1c3SChaoyong He nfp_cpp_area_free(pf_dev->ctrl_area); 1083968ec1c3SChaoyong He app_cleanup: 1084968ec1c3SChaoyong He rte_free(app_fw_nic); 1085a6189a67SJin Liu 1086646ea79cSHeinrich Kuhn return ret; 1087646ea79cSHeinrich Kuhn } 1088646ea79cSHeinrich Kuhn 1089a6189a67SJin Liu static int 1090a6189a67SJin Liu nfp_pf_init(struct rte_pci_device *pci_dev) 1091646ea79cSHeinrich Kuhn { 10929e442599SShihong Wang uint32_t i; 10933b00109dSPeng Zhang uint32_t id; 1094e7978635SChaoyong He int ret = 0; 10955c464d6aSJin Liu uint64_t addr; 10963b00109dSPeng Zhang uint32_t index; 1097925c27ecSChaoyong He uint32_t cpp_id; 10983b00109dSPeng Zhang uint8_t function_id; 1099a6189a67SJin Liu struct nfp_cpp *cpp; 1100a6189a67SJin Liu struct nfp_pf_dev *pf_dev; 1101a6189a67SJin Liu struct nfp_hwinfo *hwinfo; 110249952141SChaoyong He enum nfp_app_fw_id app_fw_id; 1103a6189a67SJin Liu char name[RTE_ETH_NAME_MAX_LEN]; 1104a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 11053b00109dSPeng Zhang char app_name[RTE_ETH_NAME_MAX_LEN]; 1106a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 11070314a8ffSChaoyong He const struct nfp_dev_info *dev_info; 1108646ea79cSHeinrich Kuhn 1109a6189a67SJin Liu if (pci_dev == NULL) 1110a6189a67SJin Liu return -ENODEV; 1111646ea79cSHeinrich Kuhn 111284aaba5aSChaoyong He if (pci_dev->mem_resource[0].addr == NULL) { 111384aaba5aSChaoyong He PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 111484aaba5aSChaoyong He return -ENODEV; 111584aaba5aSChaoyong He } 111684aaba5aSChaoyong He 11170314a8ffSChaoyong He dev_info = nfp_dev_info_get(pci_dev->id.device_id); 11180314a8ffSChaoyong He if (dev_info == NULL) { 11190314a8ffSChaoyong He PMD_INIT_LOG(ERR, "Not supported device ID"); 11200314a8ffSChaoyong He return -ENODEV; 11210314a8ffSChaoyong He } 11220314a8ffSChaoyong He 11238ad2cc8fSPeng Zhang /* Allocate memory for the PF "device" */ 11243b00109dSPeng Zhang function_id = (pci_dev->addr.function) & 0x07; 11253b00109dSPeng Zhang snprintf(name, sizeof(name), "nfp_pf%u", function_id); 11268ad2cc8fSPeng Zhang pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 11278ad2cc8fSPeng Zhang if (pf_dev == NULL) { 11288ad2cc8fSPeng Zhang PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 11298ad2cc8fSPeng Zhang return -ENOMEM; 11308ad2cc8fSPeng Zhang } 11318ad2cc8fSPeng Zhang 1132646ea79cSHeinrich Kuhn /* 1133646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 1134646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 1135646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 1136646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 1137646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 1138646ea79cSHeinrich Kuhn */ 1139646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 11401fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1141646ea79cSHeinrich Kuhn else 11421fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1143646ea79cSHeinrich Kuhn 1144a6189a67SJin Liu if (cpp == NULL) { 1145646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 11468ad2cc8fSPeng Zhang ret = -EIO; 11478ad2cc8fSPeng Zhang goto pf_cleanup; 1148646ea79cSHeinrich Kuhn } 1149646ea79cSHeinrich Kuhn 1150646ea79cSHeinrich Kuhn hwinfo = nfp_hwinfo_read(cpp); 1151a6189a67SJin Liu if (hwinfo == NULL) { 1152646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading hwinfo table"); 1153646ea79cSHeinrich Kuhn ret = -EIO; 1154968ec1c3SChaoyong He goto cpp_cleanup; 1155646ea79cSHeinrich Kuhn } 1156646ea79cSHeinrich Kuhn 1157968ec1c3SChaoyong He /* Read the number of physical ports from hardware */ 1158646ea79cSHeinrich Kuhn nfp_eth_table = nfp_eth_read_ports(cpp); 1159a6189a67SJin Liu if (nfp_eth_table == NULL) { 1160646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 1161646ea79cSHeinrich Kuhn ret = -EIO; 1162646ea79cSHeinrich Kuhn goto hwinfo_cleanup; 1163646ea79cSHeinrich Kuhn } 1164646ea79cSHeinrich Kuhn 11658ad2cc8fSPeng Zhang pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); 11663b00109dSPeng Zhang pf_dev->multi_pf.function_id = function_id; 11678ad2cc8fSPeng Zhang 11689e442599SShihong Wang /* Force the physical port down to clear the possible DMA error */ 11693b00109dSPeng Zhang for (i = 0; i < nfp_eth_table->count; i++) { 11703b00109dSPeng Zhang id = nfp_function_id_get(pf_dev, i); 11713b00109dSPeng Zhang index = nfp_eth_table->ports[id].index; 11723b00109dSPeng Zhang nfp_eth_set_configured(cpp, index, 0); 11733b00109dSPeng Zhang } 11749e442599SShihong Wang 1175*8ba461d1SPeng Zhang if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo, 1176*8ba461d1SPeng Zhang dev_info, &pf_dev->multi_pf) != 0) { 1177646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error when uploading firmware"); 1178646ea79cSHeinrich Kuhn ret = -EIO; 1179646ea79cSHeinrich Kuhn goto eth_table_cleanup; 1180646ea79cSHeinrich Kuhn } 1181646ea79cSHeinrich Kuhn 1182646ea79cSHeinrich Kuhn /* Now the symbol table should be there */ 1183646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 1184a6189a67SJin Liu if (sym_tbl == NULL) { 1185f4d24fe9SChaoyong He PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1186646ea79cSHeinrich Kuhn ret = -EIO; 11873b00109dSPeng Zhang goto fw_cleanup; 1188646ea79cSHeinrich Kuhn } 1189646ea79cSHeinrich Kuhn 1190968ec1c3SChaoyong He /* Read the app ID of the firmware loaded */ 11913b00109dSPeng Zhang snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 11923b00109dSPeng Zhang app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1193e7978635SChaoyong He if (ret != 0) { 11943b00109dSPeng Zhang PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name); 1195646ea79cSHeinrich Kuhn ret = -EIO; 1196646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 1197646ea79cSHeinrich Kuhn } 1198646ea79cSHeinrich Kuhn 1199646ea79cSHeinrich Kuhn /* Populate the newly created PF device */ 1200968ec1c3SChaoyong He pf_dev->app_fw_id = app_fw_id; 1201646ea79cSHeinrich Kuhn pf_dev->cpp = cpp; 1202646ea79cSHeinrich Kuhn pf_dev->hwinfo = hwinfo; 1203646ea79cSHeinrich Kuhn pf_dev->sym_tbl = sym_tbl; 1204646ea79cSHeinrich Kuhn pf_dev->pci_dev = pci_dev; 1205968ec1c3SChaoyong He pf_dev->nfp_eth_table = nfp_eth_table; 1206646ea79cSHeinrich Kuhn 120740688372SChaoyong He /* Configure access to tx/rx vNIC BARs */ 12080314a8ffSChaoyong He addr = nfp_qcp_queue_offset(dev_info, 0); 1209925c27ecSChaoyong He cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); 12100314a8ffSChaoyong He 1211711e4559SChaoyong He pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, 12120314a8ffSChaoyong He addr, dev_info->qc_area_sz, &pf_dev->qc_area); 1213711e4559SChaoyong He if (pf_dev->qc_bar == NULL) { 1214646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); 1215646ea79cSHeinrich Kuhn ret = -EIO; 12168ad2cc8fSPeng Zhang goto sym_tbl_cleanup; 1217646ea79cSHeinrich Kuhn } 1218646ea79cSHeinrich Kuhn 1219030b2b19SChaoyong He PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); 1220646ea79cSHeinrich Kuhn 1221a6189a67SJin Liu /* 1222968ec1c3SChaoyong He * PF initialization has been done at this point. Call app specific 122340688372SChaoyong He * init code now. 1224646ea79cSHeinrich Kuhn */ 1225968ec1c3SChaoyong He switch (pf_dev->app_fw_id) { 1226968ec1c3SChaoyong He case NFP_APP_FW_CORE_NIC: 1227968ec1c3SChaoyong He PMD_INIT_LOG(INFO, "Initializing coreNIC"); 12280314a8ffSChaoyong He ret = nfp_init_app_fw_nic(pf_dev, dev_info); 1229968ec1c3SChaoyong He if (ret != 0) { 1230968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1231968ec1c3SChaoyong He goto hwqueues_cleanup; 1232968ec1c3SChaoyong He } 1233968ec1c3SChaoyong He break; 1234b1880421SChaoyong He case NFP_APP_FW_FLOWER_NIC: 1235b1880421SChaoyong He PMD_INIT_LOG(INFO, "Initializing Flower"); 12360314a8ffSChaoyong He ret = nfp_init_app_fw_flower(pf_dev, dev_info); 1237b1880421SChaoyong He if (ret != 0) { 1238b1880421SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1239b1880421SChaoyong He goto hwqueues_cleanup; 1240b1880421SChaoyong He } 1241b1880421SChaoyong He break; 1242968ec1c3SChaoyong He default: 1243968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1244968ec1c3SChaoyong He ret = -EINVAL; 1245646ea79cSHeinrich Kuhn goto hwqueues_cleanup; 1246646ea79cSHeinrich Kuhn } 1247646ea79cSHeinrich Kuhn 124840688372SChaoyong He /* Register the CPP bridge service here for primary use */ 1249bab0e6f4SChaoyong He ret = nfp_enable_cpp_service(pf_dev); 1250dee23e6cSChaoyong He if (ret != 0) 1251dee23e6cSChaoyong He PMD_INIT_LOG(INFO, "Enable cpp service failed."); 1252646ea79cSHeinrich Kuhn 1253646ea79cSHeinrich Kuhn return 0; 1254646ea79cSHeinrich Kuhn 1255646ea79cSHeinrich Kuhn hwqueues_cleanup: 1256711e4559SChaoyong He nfp_cpp_area_free(pf_dev->qc_area); 1257646ea79cSHeinrich Kuhn sym_tbl_cleanup: 1258646ea79cSHeinrich Kuhn free(sym_tbl); 12593b00109dSPeng Zhang fw_cleanup: 12603b00109dSPeng Zhang nfp_fw_unload(cpp); 1261*8ba461d1SPeng Zhang nfp_net_keepalive_stop(&pf_dev->multi_pf); 1262646ea79cSHeinrich Kuhn eth_table_cleanup: 1263646ea79cSHeinrich Kuhn free(nfp_eth_table); 1264646ea79cSHeinrich Kuhn hwinfo_cleanup: 1265646ea79cSHeinrich Kuhn free(hwinfo); 1266968ec1c3SChaoyong He cpp_cleanup: 1267968ec1c3SChaoyong He nfp_cpp_free(cpp); 12688ad2cc8fSPeng Zhang pf_cleanup: 12698ad2cc8fSPeng Zhang rte_free(pf_dev); 12707feb8909SChaoyong He 1271646ea79cSHeinrich Kuhn return ret; 1272646ea79cSHeinrich Kuhn } 1273646ea79cSHeinrich Kuhn 1274d5f39e07SChaoyong He static int 1275016141b1SChaoyong He nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev) 1276d5f39e07SChaoyong He { 12778ceb85c3SChaoyong He uint32_t i; 1278d5f39e07SChaoyong He int err = 0; 1279d5f39e07SChaoyong He int ret = 0; 12803b00109dSPeng Zhang uint8_t function_id; 12818ceb85c3SChaoyong He uint32_t total_vnics; 1282d5f39e07SChaoyong He struct nfp_net_hw *hw; 12833b00109dSPeng Zhang char pf_name[RTE_ETH_NAME_MAX_LEN]; 1284d5f39e07SChaoyong He 1285d5f39e07SChaoyong He /* Read the number of vNIC's created for the PF */ 12863b00109dSPeng Zhang function_id = (pf_dev->pci_dev->addr.function) & 0x07; 12873b00109dSPeng Zhang snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id); 12883b00109dSPeng Zhang total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err); 12898ceb85c3SChaoyong He if (err != 0 || total_vnics == 0 || total_vnics > 8) { 12903b00109dSPeng Zhang PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name); 1291d5f39e07SChaoyong He return -ENODEV; 1292d5f39e07SChaoyong He } 1293d5f39e07SChaoyong He 1294d5f39e07SChaoyong He for (i = 0; i < total_vnics; i++) { 12953b00109dSPeng Zhang uint32_t id = i; 1296d5f39e07SChaoyong He struct rte_eth_dev *eth_dev; 1297d5f39e07SChaoyong He char port_name[RTE_ETH_NAME_MAX_LEN]; 12983b00109dSPeng Zhang 12993b00109dSPeng Zhang if (nfp_check_multi_pf_from_fw(total_vnics)) 13003b00109dSPeng Zhang id = function_id; 13018ceb85c3SChaoyong He snprintf(port_name, sizeof(port_name), "%s_port%u", 13023b00109dSPeng Zhang pf_dev->pci_dev->device.name, id); 1303d5f39e07SChaoyong He 1304d5f39e07SChaoyong He PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); 1305d5f39e07SChaoyong He eth_dev = rte_eth_dev_attach_secondary(port_name); 1306d5f39e07SChaoyong He if (eth_dev == NULL) { 1307d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); 1308d5f39e07SChaoyong He ret = -ENODEV; 1309d5f39e07SChaoyong He break; 1310d5f39e07SChaoyong He } 1311d5f39e07SChaoyong He 1312016141b1SChaoyong He eth_dev->process_private = pf_dev->cpp; 13139d723baaSChaoyong He hw = eth_dev->data->dev_private; 1314ee8ca64eSChaoyong He nfp_net_ethdev_ops_mount(hw, eth_dev); 1315d5f39e07SChaoyong He 1316d5f39e07SChaoyong He rte_eth_dev_probing_finish(eth_dev); 1317d5f39e07SChaoyong He } 1318d5f39e07SChaoyong He 1319d5f39e07SChaoyong He return ret; 1320d5f39e07SChaoyong He } 1321d5f39e07SChaoyong He 1322646ea79cSHeinrich Kuhn /* 1323646ea79cSHeinrich Kuhn * When attaching to the NFP4000/6000 PF on a secondary process there 1324646ea79cSHeinrich Kuhn * is no need to initialise the PF again. Only minimal work is required 132540688372SChaoyong He * here. 1326646ea79cSHeinrich Kuhn */ 1327a6189a67SJin Liu static int 1328a6189a67SJin Liu nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 1329646ea79cSHeinrich Kuhn { 1330968ec1c3SChaoyong He int ret = 0; 1331a6189a67SJin Liu struct nfp_cpp *cpp; 13323b00109dSPeng Zhang uint8_t function_id; 1333016141b1SChaoyong He struct nfp_pf_dev *pf_dev; 1334d5f39e07SChaoyong He enum nfp_app_fw_id app_fw_id; 1335016141b1SChaoyong He char name[RTE_ETH_NAME_MAX_LEN]; 1336a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 13370314a8ffSChaoyong He const struct nfp_dev_info *dev_info; 13383b00109dSPeng Zhang char app_name[RTE_ETH_NAME_MAX_LEN]; 1339646ea79cSHeinrich Kuhn 1340a6189a67SJin Liu if (pci_dev == NULL) 1341646ea79cSHeinrich Kuhn return -ENODEV; 1342646ea79cSHeinrich Kuhn 134384aaba5aSChaoyong He if (pci_dev->mem_resource[0].addr == NULL) { 134484aaba5aSChaoyong He PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 134584aaba5aSChaoyong He return -ENODEV; 134684aaba5aSChaoyong He } 134784aaba5aSChaoyong He 13480314a8ffSChaoyong He dev_info = nfp_dev_info_get(pci_dev->id.device_id); 13490314a8ffSChaoyong He if (dev_info == NULL) { 13500314a8ffSChaoyong He PMD_INIT_LOG(ERR, "Not supported device ID"); 13510314a8ffSChaoyong He return -ENODEV; 13520314a8ffSChaoyong He } 13530314a8ffSChaoyong He 1354016141b1SChaoyong He /* Allocate memory for the PF "device" */ 1355016141b1SChaoyong He snprintf(name, sizeof(name), "nfp_pf%d", 0); 1356016141b1SChaoyong He pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 1357016141b1SChaoyong He if (pf_dev == NULL) { 1358016141b1SChaoyong He PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 1359016141b1SChaoyong He return -ENOMEM; 1360016141b1SChaoyong He } 1361016141b1SChaoyong He 1362646ea79cSHeinrich Kuhn /* 1363646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 1364646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 1365646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 1366646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 1367646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 1368646ea79cSHeinrich Kuhn */ 1369646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 13701fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1371646ea79cSHeinrich Kuhn else 13721fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1373646ea79cSHeinrich Kuhn 1374a6189a67SJin Liu if (cpp == NULL) { 1375646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1376016141b1SChaoyong He ret = -EIO; 1377016141b1SChaoyong He goto pf_cleanup; 1378646ea79cSHeinrich Kuhn } 1379646ea79cSHeinrich Kuhn 1380646ea79cSHeinrich Kuhn /* 1381646ea79cSHeinrich Kuhn * We don't have access to the PF created in the primary process 138240688372SChaoyong He * here so we have to read the number of ports from firmware. 1383646ea79cSHeinrich Kuhn */ 1384646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 1385a6189a67SJin Liu if (sym_tbl == NULL) { 1386f4d24fe9SChaoyong He PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1387016141b1SChaoyong He ret = -EIO; 1388016141b1SChaoyong He goto pf_cleanup; 1389646ea79cSHeinrich Kuhn } 1390646ea79cSHeinrich Kuhn 1391d5f39e07SChaoyong He /* Read the app ID of the firmware loaded */ 13923b00109dSPeng Zhang function_id = pci_dev->addr.function & 0x7; 13933b00109dSPeng Zhang snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 13943b00109dSPeng Zhang app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1395e7978635SChaoyong He if (ret != 0) { 13963b00109dSPeng Zhang PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name); 1397016141b1SChaoyong He ret = -EIO; 1398968ec1c3SChaoyong He goto sym_tbl_cleanup; 1399968ec1c3SChaoyong He } 1400646ea79cSHeinrich Kuhn 1401016141b1SChaoyong He /* Populate the newly created PF device */ 1402016141b1SChaoyong He pf_dev->app_fw_id = app_fw_id; 1403016141b1SChaoyong He pf_dev->cpp = cpp; 1404016141b1SChaoyong He pf_dev->sym_tbl = sym_tbl; 1405016141b1SChaoyong He pf_dev->pci_dev = pci_dev; 1406016141b1SChaoyong He 1407016141b1SChaoyong He /* Call app specific init code now */ 1408d5f39e07SChaoyong He switch (app_fw_id) { 1409d5f39e07SChaoyong He case NFP_APP_FW_CORE_NIC: 1410d5f39e07SChaoyong He PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1411016141b1SChaoyong He ret = nfp_secondary_init_app_fw_nic(pf_dev); 1412d5f39e07SChaoyong He if (ret != 0) { 1413d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1414d5f39e07SChaoyong He goto sym_tbl_cleanup; 1415646ea79cSHeinrich Kuhn } 1416d5f39e07SChaoyong He break; 1417b1880421SChaoyong He case NFP_APP_FW_FLOWER_NIC: 1418b1880421SChaoyong He PMD_INIT_LOG(INFO, "Initializing Flower"); 1419016141b1SChaoyong He ret = nfp_secondary_init_app_fw_flower(pf_dev); 1420b1880421SChaoyong He if (ret != 0) { 1421b1880421SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1422b1880421SChaoyong He goto sym_tbl_cleanup; 1423b1880421SChaoyong He } 1424b1880421SChaoyong He break; 1425d5f39e07SChaoyong He default: 1426d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1427d5f39e07SChaoyong He ret = -EINVAL; 1428d5f39e07SChaoyong He goto sym_tbl_cleanup; 1429646ea79cSHeinrich Kuhn } 1430646ea79cSHeinrich Kuhn 1431016141b1SChaoyong He return 0; 1432016141b1SChaoyong He 1433968ec1c3SChaoyong He sym_tbl_cleanup: 1434968ec1c3SChaoyong He free(sym_tbl); 1435016141b1SChaoyong He pf_cleanup: 1436016141b1SChaoyong He rte_free(pf_dev); 1437968ec1c3SChaoyong He 1438968ec1c3SChaoyong He return ret; 1439646ea79cSHeinrich Kuhn } 1440646ea79cSHeinrich Kuhn 1441a6189a67SJin Liu static int 1442a6189a67SJin Liu nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1443646ea79cSHeinrich Kuhn struct rte_pci_device *dev) 1444646ea79cSHeinrich Kuhn { 1445646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1446646ea79cSHeinrich Kuhn return nfp_pf_init(dev); 1447646ea79cSHeinrich Kuhn else 1448646ea79cSHeinrich Kuhn return nfp_pf_secondary_init(dev); 1449646ea79cSHeinrich Kuhn } 1450646ea79cSHeinrich Kuhn 1451646ea79cSHeinrich Kuhn static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 1452646ea79cSHeinrich Kuhn { 1453646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 14545c464d6aSJin Liu PCI_DEVICE_ID_NFP3800_PF_NIC) 14555c464d6aSJin Liu }, 14565c464d6aSJin Liu { 14575c464d6aSJin Liu RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1458646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP4000_PF_NIC) 1459646ea79cSHeinrich Kuhn }, 1460646ea79cSHeinrich Kuhn { 1461646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1462646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP6000_PF_NIC) 1463646ea79cSHeinrich Kuhn }, 1464646ea79cSHeinrich Kuhn { 14655aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 14665aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP3800_PF_NIC) 14675aedd4c3SJames Hershaw }, 14685aedd4c3SJames Hershaw { 14695aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 14705aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP4000_PF_NIC) 14715aedd4c3SJames Hershaw }, 14725aedd4c3SJames Hershaw { 14735aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 14745aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP6000_PF_NIC) 14755aedd4c3SJames Hershaw }, 14765aedd4c3SJames Hershaw { 1477646ea79cSHeinrich Kuhn .vendor_id = 0, 1478646ea79cSHeinrich Kuhn }, 1479646ea79cSHeinrich Kuhn }; 1480646ea79cSHeinrich Kuhn 1481a6189a67SJin Liu static int 1482a6189a67SJin Liu nfp_pci_uninit(struct rte_eth_dev *eth_dev) 1483646ea79cSHeinrich Kuhn { 1484646ea79cSHeinrich Kuhn uint16_t port_id; 148549952141SChaoyong He struct rte_pci_device *pci_dev; 1486646ea79cSHeinrich Kuhn 1487646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1488646ea79cSHeinrich Kuhn 1489646ea79cSHeinrich Kuhn /* Free up all physical ports under PF */ 1490646ea79cSHeinrich Kuhn RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 1491646ea79cSHeinrich Kuhn rte_eth_dev_close(port_id); 1492646ea79cSHeinrich Kuhn /* 1493646ea79cSHeinrich Kuhn * Ports can be closed and freed but hotplugging is not 149440688372SChaoyong He * currently supported. 1495646ea79cSHeinrich Kuhn */ 1496646ea79cSHeinrich Kuhn return -ENOTSUP; 1497646ea79cSHeinrich Kuhn } 1498646ea79cSHeinrich Kuhn 1499a6189a67SJin Liu static int 1500a6189a67SJin Liu eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 1501646ea79cSHeinrich Kuhn { 1502646ea79cSHeinrich Kuhn return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 1503646ea79cSHeinrich Kuhn } 1504646ea79cSHeinrich Kuhn 1505646ea79cSHeinrich Kuhn static struct rte_pci_driver rte_nfp_net_pf_pmd = { 1506646ea79cSHeinrich Kuhn .id_table = pci_id_nfp_pf_net_map, 1507646ea79cSHeinrich Kuhn .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1508646ea79cSHeinrich Kuhn .probe = nfp_pf_pci_probe, 1509646ea79cSHeinrich Kuhn .remove = eth_nfp_pci_remove, 1510646ea79cSHeinrich Kuhn }; 1511646ea79cSHeinrich Kuhn 1512d505ee1dSChaoyong He RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd); 1513d505ee1dSChaoyong He RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map); 1514d505ee1dSChaoyong He RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio"); 1515