/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "flower/nfp_flower.h"

static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->mac_addr);

	free(nfp_eth_table);
	return 0;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	uint32_t cap_extend;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_net_ext_reconfig(hw, ctrl_extend, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * the system then releases all the memory allocated, even memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			(void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->qc_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;
	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_net_start,
	.dev_stop		= nfp_net_stop,
	.dev_set_link_up	= nfp_net_set_link_up,
	.dev_set_link_down	= nfp_net_set_link_down,
	.dev_close		= nfp_net_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.xstats_get		= nfp_net_xstats_get,
	.xstats_reset		= nfp_net_xstats_reset,
	.xstats_get_names	= nfp_net_xstats_get_names,
	.xstats_get_by_id	= nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id	= nfp_net_xstats_get_names_by_id,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add	= nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del	= nfp_udp_tunnel_port_del,
	.fw_version_get		= nfp_net_firmware_version_get,
};

static inline void
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
		struct rte_eth_dev *eth_dev)
{
	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
}

static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_base;
	uint64_t tx_base;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use PF array of physical ports to get pointer to
	 * this specific port
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
				"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		uint32_t min_size;

		hw->ctrl_bar = pf_dev->ctrl_bar;
		min_size = NFP_MAC_STATS_SIZE * hw->pf_dev->nfp_eth_table->max_index;
		hw->mac_stats_bar = nfp_rtsym_map(hw->pf_dev->sym_tbl, "_mac_stats",
				min_size, &hw->mac_stats_area);
		if (hw->mac_stats_bar == NULL) {
			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
			return -EIO;
		}
		hw->mac_stats = hw->mac_stats_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use port offset in pf ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
		hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + (port * NFP_MAC_STATS_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
	PMD_INIT_LOG(DEBUG, "MAC stats: %p", hw->mac_stats);

	err = nfp_net_common_init(pci_dev, hw);
	if (err != 0)
		return err;

	nfp_net_ethdev_ops_mount(hw, eth_dev);

	hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
			nfp_net_xstats_size(eth_dev), 0);
	if (hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
				pci_dev->device.name);
		return -ENOMEM;
	}

	/* Work out where in the BAR the queues start. */
	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);

	PMD_INIT_LOG(DEBUG, "tx_base: 0x%" PRIx64 "", tx_base);
	PMD_INIT_LOG(DEBUG, "rx_base: 0x%" PRIx64 "", rx_base);

	hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);

	tmp_ether_addr = &hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random mac address for this port */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);

	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH "/lib/firmware/netronome"

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;
	uint16_t interface;
	uint32_t cpp_serial_len;
	const uint8_t *cpp_serial;

	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
	if (cpp_serial_len != NFP_SERIAL_LEN)
		return -ERANGE;

	interface = nfp_cpp_interface(cpp);

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
			fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
				nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
		const struct nfp_dev_info *dev_info)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNIC's created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
				rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
						RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->dev_info = dev_info;
		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/tx/rx BAR mappings and remaining init happens in
		 * nfp_net_init()
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	uint32_t cpp_id;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;
	const struct nfp_dev_info *dev_info;

	if (pci_dev == NULL)
		return -ENODEV;

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does not prevent that.
	 * This could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this by telling the CPP init code to use
	 * a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* configure access to tx/rx vNIC BARs */
	addr = nfp_qcp_queue_offset(dev_info, 0);
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);

	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
	if (pf_dev->qc_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "qc_bar address: 0x%p", pf_dev->qc_bar);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->qc_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNIC's created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		nfp_net_ethdev_ops_mount(hw, eth_dev);

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;
	const struct nfp_dev_info *dev_info;

	if (pci_dev == NULL)
		return -ENODEV;

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps at once, and the UIO driver does not prevent that.
	 * This could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this by telling the CPP init code to use
	 * a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		return -EIO;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

sym_tbl_cleanup:
	free(sym_tbl);

	return ret;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);

	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported.
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");