1646ea79cSHeinrich Kuhn /* SPDX-License-Identifier: BSD-3-Clause 2646ea79cSHeinrich Kuhn * Copyright (c) 2014-2021 Netronome Systems, Inc. 3646ea79cSHeinrich Kuhn * All rights reserved. 4646ea79cSHeinrich Kuhn * 5646ea79cSHeinrich Kuhn * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 6646ea79cSHeinrich Kuhn */ 7646ea79cSHeinrich Kuhn 8646ea79cSHeinrich Kuhn /* 9646ea79cSHeinrich Kuhn * vim:shiftwidth=8:noexpandtab 10646ea79cSHeinrich Kuhn * 11646ea79cSHeinrich Kuhn * @file dpdk/pmd/nfp_ethdev.c 12646ea79cSHeinrich Kuhn * 13646ea79cSHeinrich Kuhn * Netronome vNIC DPDK Poll-Mode Driver: Main entry point 14646ea79cSHeinrich Kuhn */ 15646ea79cSHeinrich Kuhn 16646ea79cSHeinrich Kuhn #include <rte_common.h> 17646ea79cSHeinrich Kuhn #include <ethdev_driver.h> 18646ea79cSHeinrich Kuhn #include <ethdev_pci.h> 191acb7f54SDavid Marchand #include <dev_driver.h> 20646ea79cSHeinrich Kuhn #include <rte_ether.h> 21646ea79cSHeinrich Kuhn #include <rte_malloc.h> 22646ea79cSHeinrich Kuhn #include <rte_memzone.h> 23646ea79cSHeinrich Kuhn #include <rte_mempool.h> 24646ea79cSHeinrich Kuhn #include <rte_service_component.h> 25851f03e1SHeinrich Kuhn #include <rte_alarm.h> 26646ea79cSHeinrich Kuhn #include "eal_firmware.h" 27646ea79cSHeinrich Kuhn 28646ea79cSHeinrich Kuhn #include "nfpcore/nfp_cpp.h" 29646ea79cSHeinrich Kuhn #include "nfpcore/nfp_nffw.h" 30646ea79cSHeinrich Kuhn #include "nfpcore/nfp_hwinfo.h" 31646ea79cSHeinrich Kuhn #include "nfpcore/nfp_mip.h" 32646ea79cSHeinrich Kuhn #include "nfpcore/nfp_rtsym.h" 33646ea79cSHeinrich Kuhn #include "nfpcore/nfp_nsp.h" 34646ea79cSHeinrich Kuhn 358d7a59f1SHeinrich Kuhn #include "nfp_common.h" 36646ea79cSHeinrich Kuhn #include "nfp_rxtx.h" 378d7a59f1SHeinrich Kuhn #include "nfp_logs.h" 388d7a59f1SHeinrich Kuhn #include "nfp_ctrl.h" 39646ea79cSHeinrich Kuhn #include "nfp_cpp_bridge.h" 40646ea79cSHeinrich Kuhn 41646ea79cSHeinrich Kuhn static int 42646ea79cSHeinrich Kuhn nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port) 43646ea79cSHeinrich Kuhn { 44646ea79cSHeinrich Kuhn struct nfp_eth_table *nfp_eth_table; 45646ea79cSHeinrich Kuhn struct nfp_net_hw *hw = NULL; 46646ea79cSHeinrich Kuhn 47646ea79cSHeinrich Kuhn /* Grab a pointer to the correct physical port */ 48646ea79cSHeinrich Kuhn hw = pf_dev->ports[port]; 49646ea79cSHeinrich Kuhn 50646ea79cSHeinrich Kuhn nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp); 51646ea79cSHeinrich Kuhn 52646ea79cSHeinrich Kuhn nfp_eth_copy_mac((uint8_t *)&hw->mac_addr, 53646ea79cSHeinrich Kuhn (uint8_t *)&nfp_eth_table->ports[port].mac_addr); 54646ea79cSHeinrich Kuhn 55646ea79cSHeinrich Kuhn free(nfp_eth_table); 56646ea79cSHeinrich Kuhn return 0; 57646ea79cSHeinrich Kuhn } 58646ea79cSHeinrich Kuhn 59646ea79cSHeinrich Kuhn static int 60646ea79cSHeinrich Kuhn nfp_net_start(struct rte_eth_dev *dev) 61646ea79cSHeinrich Kuhn { 62646ea79cSHeinrich Kuhn struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 63d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 64646ea79cSHeinrich Kuhn uint32_t new_ctrl, update = 0; 65646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 66646ea79cSHeinrich Kuhn struct nfp_pf_dev *pf_dev; 67646ea79cSHeinrich Kuhn struct rte_eth_conf *dev_conf; 68646ea79cSHeinrich Kuhn struct rte_eth_rxmode *rxmode; 69646ea79cSHeinrich Kuhn uint32_t intr_vector; 70646ea79cSHeinrich Kuhn int ret; 71646ea79cSHeinrich Kuhn 72646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 73646ea79cSHeinrich Kuhn pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); 74646ea79cSHeinrich Kuhn 75646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "Start"); 76646ea79cSHeinrich Kuhn 77646ea79cSHeinrich Kuhn /* Disabling queues just in case... */ 78646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 79646ea79cSHeinrich Kuhn 80646ea79cSHeinrich Kuhn /* Enabling the required queues in the device */ 81646ea79cSHeinrich Kuhn nfp_net_enable_queues(dev); 82646ea79cSHeinrich Kuhn 83646ea79cSHeinrich Kuhn /* check and configure queue intr-vector mapping */ 84646ea79cSHeinrich Kuhn if (dev->data->dev_conf.intr_conf.rxq != 0) { 85646ea79cSHeinrich Kuhn if (pf_dev->multiport) { 86646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 87646ea79cSHeinrich Kuhn "with NFP multiport PF"); 88646ea79cSHeinrich Kuhn return -EINVAL; 89646ea79cSHeinrich Kuhn } 90d61138d4SHarman Kalra if (rte_intr_type_get(intr_handle) == 91d61138d4SHarman Kalra RTE_INTR_HANDLE_UIO) { 92646ea79cSHeinrich Kuhn /* 93646ea79cSHeinrich Kuhn * Better not to share LSC with RX interrupts. 94646ea79cSHeinrich Kuhn * Unregistering LSC interrupt handler 95646ea79cSHeinrich Kuhn */ 96d61138d4SHarman Kalra rte_intr_callback_unregister(pci_dev->intr_handle, 97646ea79cSHeinrich Kuhn nfp_net_dev_interrupt_handler, (void *)dev); 98646ea79cSHeinrich Kuhn 99646ea79cSHeinrich Kuhn if (dev->data->nb_rx_queues > 1) { 100646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt only " 101646ea79cSHeinrich Kuhn "supports 1 queue with UIO"); 102646ea79cSHeinrich Kuhn return -EIO; 103646ea79cSHeinrich Kuhn } 104646ea79cSHeinrich Kuhn } 105646ea79cSHeinrich Kuhn intr_vector = dev->data->nb_rx_queues; 106646ea79cSHeinrich Kuhn if (rte_intr_efd_enable(intr_handle, intr_vector)) 107646ea79cSHeinrich Kuhn return -1; 108646ea79cSHeinrich Kuhn 109646ea79cSHeinrich Kuhn nfp_configure_rx_interrupt(dev, intr_handle); 110646ea79cSHeinrich Kuhn update = NFP_NET_CFG_UPDATE_MSIX; 111646ea79cSHeinrich Kuhn } 112646ea79cSHeinrich Kuhn 113646ea79cSHeinrich Kuhn rte_intr_enable(intr_handle); 114646ea79cSHeinrich Kuhn 115646ea79cSHeinrich Kuhn new_ctrl = nfp_check_offloads(dev); 116646ea79cSHeinrich Kuhn 117646ea79cSHeinrich Kuhn /* Writing configuration parameters in the device */ 118646ea79cSHeinrich Kuhn nfp_net_params_setup(hw); 119646ea79cSHeinrich Kuhn 120646ea79cSHeinrich Kuhn dev_conf = &dev->data->dev_conf; 121646ea79cSHeinrich Kuhn rxmode = &dev_conf->rxmode; 122646ea79cSHeinrich Kuhn 123295968d1SFerruh Yigit if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) { 124646ea79cSHeinrich Kuhn nfp_net_rss_config_default(dev); 125646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_RSS; 1263ece835aSJin Liu if (hw->cap & NFP_NET_CFG_CTRL_RSS2) 1273ece835aSJin Liu new_ctrl |= NFP_NET_CFG_CTRL_RSS2; 1283ece835aSJin Liu else 129646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_RSS; 130646ea79cSHeinrich Kuhn } 131646ea79cSHeinrich Kuhn 132646ea79cSHeinrich Kuhn /* Enable device */ 133646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 134646ea79cSHeinrich Kuhn 135646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 136646ea79cSHeinrich Kuhn 137646ea79cSHeinrich Kuhn if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) 138646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 139646ea79cSHeinrich Kuhn 140646ea79cSHeinrich Kuhn nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); 141646ea79cSHeinrich Kuhn if (nfp_net_reconfig(hw, new_ctrl, update) < 0) 142646ea79cSHeinrich Kuhn return -EIO; 143646ea79cSHeinrich Kuhn 144646ea79cSHeinrich Kuhn /* 145646ea79cSHeinrich Kuhn * Allocating rte mbufs for configured rx queues. 146646ea79cSHeinrich Kuhn * This requires queues being enabled before 147646ea79cSHeinrich Kuhn */ 148646ea79cSHeinrich Kuhn if (nfp_net_rx_freelist_setup(dev) < 0) { 149646ea79cSHeinrich Kuhn ret = -ENOMEM; 150646ea79cSHeinrich Kuhn goto error; 151646ea79cSHeinrich Kuhn } 152646ea79cSHeinrich Kuhn 153646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 154646ea79cSHeinrich Kuhn /* Configure the physical port up */ 155646ea79cSHeinrich Kuhn nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); 156646ea79cSHeinrich Kuhn else 157646ea79cSHeinrich Kuhn nfp_eth_set_configured(dev->process_private, 158646ea79cSHeinrich Kuhn hw->nfp_idx, 1); 159646ea79cSHeinrich Kuhn 160646ea79cSHeinrich Kuhn hw->ctrl = new_ctrl; 161646ea79cSHeinrich Kuhn 162646ea79cSHeinrich Kuhn return 0; 163646ea79cSHeinrich Kuhn 164646ea79cSHeinrich Kuhn error: 165646ea79cSHeinrich Kuhn /* 166646ea79cSHeinrich Kuhn * An error returned by this function should mean the app 167646ea79cSHeinrich Kuhn * exiting and then the system releasing all the memory 168646ea79cSHeinrich Kuhn * allocated even memory coming from hugepages. 169646ea79cSHeinrich Kuhn * 170646ea79cSHeinrich Kuhn * The device could be enabled at this point with some queues 171646ea79cSHeinrich Kuhn * ready for getting packets. This is true if the call to 172646ea79cSHeinrich Kuhn * nfp_net_rx_freelist_setup() succeeds for some queues but 173646ea79cSHeinrich Kuhn * fails for subsequent queues. 174646ea79cSHeinrich Kuhn * 175646ea79cSHeinrich Kuhn * This should make the app exiting but better if we tell the 176646ea79cSHeinrich Kuhn * device first. 177646ea79cSHeinrich Kuhn */ 178646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 179646ea79cSHeinrich Kuhn 180646ea79cSHeinrich Kuhn return ret; 181646ea79cSHeinrich Kuhn } 182646ea79cSHeinrich Kuhn 183646ea79cSHeinrich Kuhn /* Stop device: disable rx and tx functions to allow for reconfiguring. */ 184646ea79cSHeinrich Kuhn static int 185646ea79cSHeinrich Kuhn nfp_net_stop(struct rte_eth_dev *dev) 186646ea79cSHeinrich Kuhn { 187646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 188646ea79cSHeinrich Kuhn 189646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "Stop"); 190646ea79cSHeinrich Kuhn 191646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 192646ea79cSHeinrich Kuhn 193646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 194646ea79cSHeinrich Kuhn 195646ea79cSHeinrich Kuhn /* Clear queues */ 1961c8d02bbSJin Liu nfp_net_stop_tx_queue(dev); 197646ea79cSHeinrich Kuhn 1981c8d02bbSJin Liu nfp_net_stop_rx_queue(dev); 199646ea79cSHeinrich Kuhn 200646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 201646ea79cSHeinrich Kuhn /* Configure the physical port down */ 202646ea79cSHeinrich Kuhn nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); 203646ea79cSHeinrich Kuhn else 204646ea79cSHeinrich Kuhn nfp_eth_set_configured(dev->process_private, 205646ea79cSHeinrich Kuhn hw->nfp_idx, 0); 206646ea79cSHeinrich Kuhn 207646ea79cSHeinrich Kuhn return 0; 208646ea79cSHeinrich Kuhn } 209646ea79cSHeinrich Kuhn 210646ea79cSHeinrich Kuhn /* Set the link up. */ 211646ea79cSHeinrich Kuhn static int 212646ea79cSHeinrich Kuhn nfp_net_set_link_up(struct rte_eth_dev *dev) 213646ea79cSHeinrich Kuhn { 214646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 215646ea79cSHeinrich Kuhn 216646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Set link up"); 217646ea79cSHeinrich Kuhn 218646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 219646ea79cSHeinrich Kuhn 220646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 221646ea79cSHeinrich Kuhn /* Configure the physical port down */ 222646ea79cSHeinrich Kuhn return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); 223646ea79cSHeinrich Kuhn else 224646ea79cSHeinrich Kuhn return nfp_eth_set_configured(dev->process_private, 225646ea79cSHeinrich Kuhn hw->nfp_idx, 1); 226646ea79cSHeinrich Kuhn } 227646ea79cSHeinrich Kuhn 228646ea79cSHeinrich Kuhn /* Set the link down. */ 229646ea79cSHeinrich Kuhn static int 230646ea79cSHeinrich Kuhn nfp_net_set_link_down(struct rte_eth_dev *dev) 231646ea79cSHeinrich Kuhn { 232646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 233646ea79cSHeinrich Kuhn 234646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Set link down"); 235646ea79cSHeinrich Kuhn 236646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 237646ea79cSHeinrich Kuhn 238646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 239646ea79cSHeinrich Kuhn /* Configure the physical port down */ 240646ea79cSHeinrich Kuhn return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); 241646ea79cSHeinrich Kuhn else 242646ea79cSHeinrich Kuhn return nfp_eth_set_configured(dev->process_private, 243646ea79cSHeinrich Kuhn hw->nfp_idx, 0); 244646ea79cSHeinrich Kuhn } 245646ea79cSHeinrich Kuhn 246646ea79cSHeinrich Kuhn /* Reset and stop device. The device can not be restarted. */ 247646ea79cSHeinrich Kuhn static int 248646ea79cSHeinrich Kuhn nfp_net_close(struct rte_eth_dev *dev) 249646ea79cSHeinrich Kuhn { 250646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 251646ea79cSHeinrich Kuhn struct rte_pci_device *pci_dev; 252646ea79cSHeinrich Kuhn struct nfp_pf_dev *pf_dev; 253646ea79cSHeinrich Kuhn int i; 254646ea79cSHeinrich Kuhn 255646ea79cSHeinrich Kuhn if (rte_eal_process_type() != RTE_PROC_PRIMARY) 256646ea79cSHeinrich Kuhn return 0; 257646ea79cSHeinrich Kuhn 258646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "Close"); 259646ea79cSHeinrich Kuhn 260646ea79cSHeinrich Kuhn pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); 261646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 262646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(dev); 263646ea79cSHeinrich Kuhn 264646ea79cSHeinrich Kuhn /* 265646ea79cSHeinrich Kuhn * We assume that the DPDK application is stopping all the 266646ea79cSHeinrich Kuhn * threads/queues before calling the device close function. 267646ea79cSHeinrich Kuhn */ 268646ea79cSHeinrich Kuhn 269646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 270646ea79cSHeinrich Kuhn 271646ea79cSHeinrich Kuhn /* Clear queues */ 2721c8d02bbSJin Liu nfp_net_close_tx_queue(dev); 273646ea79cSHeinrich Kuhn 2741c8d02bbSJin Liu nfp_net_close_rx_queue(dev); 275646ea79cSHeinrich Kuhn 276851f03e1SHeinrich Kuhn /* Cancel possible impending LSC work here before releasing the port*/ 277851f03e1SHeinrich Kuhn rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, 278851f03e1SHeinrich Kuhn (void *)dev); 279851f03e1SHeinrich Kuhn 280646ea79cSHeinrich Kuhn /* Only free PF resources after all physical ports have been closed */ 281646ea79cSHeinrich Kuhn /* Mark this port as unused and free device priv resources*/ 282646ea79cSHeinrich Kuhn nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); 283646ea79cSHeinrich Kuhn pf_dev->ports[hw->idx] = NULL; 284646ea79cSHeinrich Kuhn rte_eth_dev_release_port(dev); 285646ea79cSHeinrich Kuhn 286646ea79cSHeinrich Kuhn for (i = 0; i < pf_dev->total_phyports; i++) { 287646ea79cSHeinrich Kuhn /* Check to see if ports are still in use */ 288646ea79cSHeinrich Kuhn if (pf_dev->ports[i]) 289646ea79cSHeinrich Kuhn return 0; 290646ea79cSHeinrich Kuhn } 291646ea79cSHeinrich Kuhn 292646ea79cSHeinrich Kuhn /* Now it is safe to free all PF resources */ 293646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "Freeing PF resources"); 294646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->ctrl_area); 295646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->hwqueues_area); 296646ea79cSHeinrich Kuhn free(pf_dev->hwinfo); 297646ea79cSHeinrich Kuhn free(pf_dev->sym_tbl); 298646ea79cSHeinrich Kuhn nfp_cpp_free(pf_dev->cpp); 299646ea79cSHeinrich Kuhn rte_free(pf_dev); 300646ea79cSHeinrich Kuhn 301d61138d4SHarman Kalra rte_intr_disable(pci_dev->intr_handle); 302646ea79cSHeinrich Kuhn 303646ea79cSHeinrich Kuhn /* unregister callback func from eal lib */ 304d61138d4SHarman Kalra rte_intr_callback_unregister(pci_dev->intr_handle, 305a6189a67SJin Liu nfp_net_dev_interrupt_handler, (void *)dev); 306646ea79cSHeinrich Kuhn 307646ea79cSHeinrich Kuhn /* 308f8dbaebbSSean Morrissey * The ixgbe PMD disables the pcie master on the 309646ea79cSHeinrich Kuhn * device. The i40e does not... 310646ea79cSHeinrich Kuhn */ 311646ea79cSHeinrich Kuhn 312646ea79cSHeinrich Kuhn return 0; 313646ea79cSHeinrich Kuhn } 314646ea79cSHeinrich Kuhn 315646ea79cSHeinrich Kuhn /* Initialise and register driver with DPDK Application */ 3163ddd127dSJin Liu static const struct eth_dev_ops nfp_net_nfd3_eth_dev_ops = { 317646ea79cSHeinrich Kuhn .dev_configure = nfp_net_configure, 318646ea79cSHeinrich Kuhn .dev_start = nfp_net_start, 319646ea79cSHeinrich Kuhn .dev_stop = nfp_net_stop, 320646ea79cSHeinrich Kuhn .dev_set_link_up = nfp_net_set_link_up, 321646ea79cSHeinrich Kuhn .dev_set_link_down = nfp_net_set_link_down, 322646ea79cSHeinrich Kuhn .dev_close = nfp_net_close, 323646ea79cSHeinrich Kuhn .promiscuous_enable = nfp_net_promisc_enable, 324646ea79cSHeinrich Kuhn .promiscuous_disable = nfp_net_promisc_disable, 325646ea79cSHeinrich Kuhn .link_update = nfp_net_link_update, 326646ea79cSHeinrich Kuhn .stats_get = nfp_net_stats_get, 327646ea79cSHeinrich Kuhn .stats_reset = nfp_net_stats_reset, 328646ea79cSHeinrich Kuhn .dev_infos_get = nfp_net_infos_get, 329646ea79cSHeinrich Kuhn .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 330646ea79cSHeinrich Kuhn .mtu_set = nfp_net_dev_mtu_set, 3310a94d6bcSJin Liu .mac_addr_set = nfp_net_set_mac_addr, 332646ea79cSHeinrich Kuhn .vlan_offload_set = nfp_net_vlan_offload_set, 333646ea79cSHeinrich Kuhn .reta_update = nfp_net_reta_update, 334646ea79cSHeinrich Kuhn .reta_query = nfp_net_reta_query, 335646ea79cSHeinrich Kuhn .rss_hash_update = nfp_net_rss_hash_update, 336646ea79cSHeinrich Kuhn .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 337646ea79cSHeinrich Kuhn .rx_queue_setup = nfp_net_rx_queue_setup, 338646ea79cSHeinrich Kuhn .rx_queue_release = nfp_net_rx_queue_release, 3393ddd127dSJin Liu .tx_queue_setup = nfp_net_nfd3_tx_queue_setup, 340646ea79cSHeinrich Kuhn .tx_queue_release = nfp_net_tx_queue_release, 341646ea79cSHeinrich Kuhn .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 342646ea79cSHeinrich Kuhn .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 343646ea79cSHeinrich Kuhn }; 344646ea79cSHeinrich Kuhn 34552ddc4c2SJin Liu static const struct eth_dev_ops nfp_net_nfdk_eth_dev_ops = { 34652ddc4c2SJin Liu .dev_configure = nfp_net_configure, 34752ddc4c2SJin Liu .dev_start = nfp_net_start, 34852ddc4c2SJin Liu .dev_stop = nfp_net_stop, 34952ddc4c2SJin Liu .dev_set_link_up = nfp_net_set_link_up, 35052ddc4c2SJin Liu .dev_set_link_down = nfp_net_set_link_down, 35152ddc4c2SJin Liu .dev_close = nfp_net_close, 35252ddc4c2SJin Liu .promiscuous_enable = nfp_net_promisc_enable, 35352ddc4c2SJin Liu .promiscuous_disable = nfp_net_promisc_disable, 35452ddc4c2SJin Liu .link_update = nfp_net_link_update, 35552ddc4c2SJin Liu .stats_get = nfp_net_stats_get, 35652ddc4c2SJin Liu .stats_reset = nfp_net_stats_reset, 35752ddc4c2SJin Liu .dev_infos_get = nfp_net_infos_get, 35852ddc4c2SJin Liu .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 35952ddc4c2SJin Liu .mtu_set = nfp_net_dev_mtu_set, 36052ddc4c2SJin Liu .mac_addr_set = nfp_net_set_mac_addr, 36152ddc4c2SJin Liu .vlan_offload_set = nfp_net_vlan_offload_set, 36252ddc4c2SJin Liu .reta_update = nfp_net_reta_update, 36352ddc4c2SJin Liu .reta_query = nfp_net_reta_query, 36452ddc4c2SJin Liu .rss_hash_update = nfp_net_rss_hash_update, 36552ddc4c2SJin Liu .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 36652ddc4c2SJin Liu .rx_queue_setup = nfp_net_rx_queue_setup, 36752ddc4c2SJin Liu .rx_queue_release = nfp_net_rx_queue_release, 36852ddc4c2SJin Liu .tx_queue_setup = nfp_net_nfdk_tx_queue_setup, 36952ddc4c2SJin Liu .tx_queue_release = nfp_net_tx_queue_release, 37052ddc4c2SJin Liu .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 37152ddc4c2SJin Liu .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 37252ddc4c2SJin Liu }; 37352ddc4c2SJin Liu 374266470b2SJin Liu static inline int 375266470b2SJin Liu nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev) 376266470b2SJin Liu { 377266470b2SJin Liu switch (NFD_CFG_CLASS_VER_of(hw->ver)) { 378266470b2SJin Liu case NFP_NET_CFG_VERSION_DP_NFD3: 37952ddc4c2SJin Liu eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops; 380c73dced4SJin Liu eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts; 381266470b2SJin Liu break; 382266470b2SJin Liu case NFP_NET_CFG_VERSION_DP_NFDK: 383266470b2SJin Liu if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) { 384266470b2SJin Liu PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d", 385266470b2SJin Liu NFD_CFG_MAJOR_VERSION_of(hw->ver)); 386266470b2SJin Liu return -EINVAL; 387266470b2SJin Liu } 38852ddc4c2SJin Liu eth_dev->dev_ops = &nfp_net_nfdk_eth_dev_ops; 389c73dced4SJin Liu eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts; 390266470b2SJin Liu break; 391266470b2SJin Liu default: 392266470b2SJin Liu PMD_DRV_LOG(ERR, "The version of firmware is not correct."); 393266470b2SJin Liu return -EINVAL; 394266470b2SJin Liu } 395266470b2SJin Liu 396266470b2SJin Liu eth_dev->rx_queue_count = nfp_net_rx_queue_count; 397266470b2SJin Liu eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; 398266470b2SJin Liu 399266470b2SJin Liu return 0; 400266470b2SJin Liu } 401266470b2SJin Liu 402646ea79cSHeinrich Kuhn static int 403646ea79cSHeinrich Kuhn nfp_net_init(struct rte_eth_dev *eth_dev) 404646ea79cSHeinrich Kuhn { 405646ea79cSHeinrich Kuhn struct rte_pci_device *pci_dev; 406646ea79cSHeinrich Kuhn struct nfp_pf_dev *pf_dev; 407646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 408646ea79cSHeinrich Kuhn struct rte_ether_addr *tmp_ether_addr; 409a6189a67SJin Liu uint64_t rx_bar_off = 0; 410a6189a67SJin Liu uint64_t tx_bar_off = 0; 411646ea79cSHeinrich Kuhn uint32_t start_q; 412646ea79cSHeinrich Kuhn int stride = 4; 413646ea79cSHeinrich Kuhn int port = 0; 414646ea79cSHeinrich Kuhn int err; 415646ea79cSHeinrich Kuhn 416646ea79cSHeinrich Kuhn PMD_INIT_FUNC_TRACE(); 417646ea79cSHeinrich Kuhn 418646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 419646ea79cSHeinrich Kuhn 420646ea79cSHeinrich Kuhn /* Use backpointer here to the PF of this eth_dev */ 421646ea79cSHeinrich Kuhn pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private); 422646ea79cSHeinrich Kuhn 423646ea79cSHeinrich Kuhn /* NFP can not handle DMA addresses requiring more than 40 bits */ 424646ea79cSHeinrich Kuhn if (rte_mem_check_dma_mask(40)) { 425a6189a67SJin Liu RTE_LOG(ERR, PMD, 426a6189a67SJin Liu "device %s can not be used: restricted dma mask to 40 bits!\n", 427646ea79cSHeinrich Kuhn pci_dev->device.name); 428646ea79cSHeinrich Kuhn return -ENODEV; 429a6189a67SJin Liu } 430646ea79cSHeinrich Kuhn 431646ea79cSHeinrich Kuhn port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; 432646ea79cSHeinrich Kuhn if (port < 0 || port > 7) { 433646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "Port value is wrong"); 434646ea79cSHeinrich Kuhn return -ENODEV; 435646ea79cSHeinrich Kuhn } 436646ea79cSHeinrich Kuhn 437a6189a67SJin Liu /* 438a6189a67SJin Liu * Use PF array of physical ports to get pointer to 439646ea79cSHeinrich Kuhn * this specific port 440646ea79cSHeinrich Kuhn */ 441646ea79cSHeinrich Kuhn hw = pf_dev->ports[port]; 442646ea79cSHeinrich Kuhn 443646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, " 444a6189a67SJin Liu "NFP internal port number: %d", port, hw->nfp_idx); 445646ea79cSHeinrich Kuhn 446646ea79cSHeinrich Kuhn /* For secondary processes, the primary has done all the work */ 447646ea79cSHeinrich Kuhn if (rte_eal_process_type() != RTE_PROC_PRIMARY) 448646ea79cSHeinrich Kuhn return 0; 449646ea79cSHeinrich Kuhn 450646ea79cSHeinrich Kuhn rte_eth_copy_pci_info(eth_dev, pci_dev); 451646ea79cSHeinrich Kuhn 452646ea79cSHeinrich Kuhn hw->device_id = pci_dev->id.device_id; 453646ea79cSHeinrich Kuhn hw->vendor_id = pci_dev->id.vendor_id; 454646ea79cSHeinrich Kuhn hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 455646ea79cSHeinrich Kuhn hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 456646ea79cSHeinrich Kuhn 457646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u", 458646ea79cSHeinrich Kuhn pci_dev->id.vendor_id, pci_dev->id.device_id, 459646ea79cSHeinrich Kuhn pci_dev->addr.domain, pci_dev->addr.bus, 460646ea79cSHeinrich Kuhn pci_dev->addr.devid, pci_dev->addr.function); 461646ea79cSHeinrich Kuhn 462646ea79cSHeinrich Kuhn hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; 463646ea79cSHeinrich Kuhn if (hw->ctrl_bar == NULL) { 464646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, 465646ea79cSHeinrich Kuhn "hw->ctrl_bar is NULL. BAR0 not configured"); 466646ea79cSHeinrich Kuhn return -ENODEV; 467646ea79cSHeinrich Kuhn } 468646ea79cSHeinrich Kuhn 469646ea79cSHeinrich Kuhn if (port == 0) { 470646ea79cSHeinrich Kuhn hw->ctrl_bar = pf_dev->ctrl_bar; 471646ea79cSHeinrich Kuhn } else { 472a6189a67SJin Liu if (pf_dev->ctrl_bar == NULL) 473646ea79cSHeinrich Kuhn return -ENODEV; 474a6189a67SJin Liu /* Use port offset in pf ctrl_bar for this ports control bar */ 475a6189a67SJin Liu hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE); 476646ea79cSHeinrich Kuhn } 477646ea79cSHeinrich Kuhn 478646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); 479646ea79cSHeinrich Kuhn 480266470b2SJin Liu hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); 481266470b2SJin Liu 482266470b2SJin Liu if (nfp_net_ethdev_ops_mount(hw, eth_dev)) 483266470b2SJin Liu return -EINVAL; 484266470b2SJin Liu 485646ea79cSHeinrich Kuhn hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); 486646ea79cSHeinrich Kuhn hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); 487646ea79cSHeinrich Kuhn 488646ea79cSHeinrich Kuhn /* Work out where in the BAR the queues start. */ 489646ea79cSHeinrich Kuhn switch (pci_dev->id.device_id) { 4905c464d6aSJin Liu case PCI_DEVICE_ID_NFP3800_PF_NIC: 491646ea79cSHeinrich Kuhn case PCI_DEVICE_ID_NFP4000_PF_NIC: 492646ea79cSHeinrich Kuhn case PCI_DEVICE_ID_NFP6000_PF_NIC: 493646ea79cSHeinrich Kuhn start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 4945c464d6aSJin Liu tx_bar_off = nfp_pci_queue(pci_dev, start_q); 495646ea79cSHeinrich Kuhn start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 4965c464d6aSJin Liu rx_bar_off = nfp_pci_queue(pci_dev, start_q); 497646ea79cSHeinrich Kuhn break; 498646ea79cSHeinrich Kuhn default: 499646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); 500646ea79cSHeinrich Kuhn err = -ENODEV; 501646ea79cSHeinrich Kuhn goto dev_err_ctrl_map; 502646ea79cSHeinrich Kuhn } 503646ea79cSHeinrich Kuhn 504646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); 505646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); 506646ea79cSHeinrich Kuhn 507646ea79cSHeinrich Kuhn hw->tx_bar = pf_dev->hw_queues + tx_bar_off; 508646ea79cSHeinrich Kuhn hw->rx_bar = pf_dev->hw_queues + rx_bar_off; 509646ea79cSHeinrich Kuhn eth_dev->data->dev_private = hw; 510646ea79cSHeinrich Kuhn 511646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", 512646ea79cSHeinrich Kuhn hw->ctrl_bar, hw->tx_bar, hw->rx_bar); 513646ea79cSHeinrich Kuhn 514646ea79cSHeinrich Kuhn nfp_net_cfg_queue_setup(hw); 515646ea79cSHeinrich Kuhn 516646ea79cSHeinrich Kuhn /* Get some of the read-only fields from the config BAR */ 517646ea79cSHeinrich Kuhn hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); 518646ea79cSHeinrich Kuhn hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); 519646ea79cSHeinrich Kuhn hw->mtu = RTE_ETHER_MTU; 5205c305e21SPeng Zhang hw->flbufsz = RTE_ETHER_MTU; 521646ea79cSHeinrich Kuhn 522646ea79cSHeinrich Kuhn /* VLAN insertion is incompatible with LSOv2 */ 523646ea79cSHeinrich Kuhn if (hw->cap & NFP_NET_CFG_CTRL_LSO2) 524646ea79cSHeinrich Kuhn hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 525646ea79cSHeinrich Kuhn 526646ea79cSHeinrich Kuhn if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) 527646ea79cSHeinrich Kuhn hw->rx_offset = NFP_NET_RX_OFFSET; 528646ea79cSHeinrich Kuhn else 529646ea79cSHeinrich Kuhn hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); 530646ea79cSHeinrich Kuhn 531646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", 532646ea79cSHeinrich Kuhn NFD_CFG_MAJOR_VERSION_of(hw->ver), 533646ea79cSHeinrich Kuhn NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); 534646ea79cSHeinrich Kuhn 535646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, 536646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", 537646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", 538646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", 539646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", 540646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", 541646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", 542646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", 543646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", 544646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", 545646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", 546646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", 547646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", 548646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", 549646ea79cSHeinrich Kuhn hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : ""); 550646ea79cSHeinrich Kuhn 551646ea79cSHeinrich Kuhn hw->ctrl = 0; 552646ea79cSHeinrich Kuhn 553646ea79cSHeinrich Kuhn hw->stride_rx = stride; 554646ea79cSHeinrich Kuhn hw->stride_tx = stride; 555646ea79cSHeinrich Kuhn 556646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u", 557646ea79cSHeinrich Kuhn hw->max_rx_queues, hw->max_tx_queues); 558646ea79cSHeinrich Kuhn 559646ea79cSHeinrich Kuhn /* Initializing spinlock for reconfigs */ 560646ea79cSHeinrich Kuhn rte_spinlock_init(&hw->reconfig_lock); 561646ea79cSHeinrich Kuhn 562646ea79cSHeinrich Kuhn /* Allocating memory for mac addr */ 563646ea79cSHeinrich Kuhn eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 564646ea79cSHeinrich Kuhn RTE_ETHER_ADDR_LEN, 0); 565646ea79cSHeinrich Kuhn if (eth_dev->data->mac_addrs == NULL) { 566646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Failed to space for MAC address"); 567646ea79cSHeinrich Kuhn err = -ENOMEM; 568646ea79cSHeinrich Kuhn goto dev_err_queues_map; 569646ea79cSHeinrich Kuhn } 570646ea79cSHeinrich Kuhn 571646ea79cSHeinrich Kuhn nfp_net_pf_read_mac(pf_dev, port); 572646ea79cSHeinrich Kuhn nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); 573646ea79cSHeinrich Kuhn 574646ea79cSHeinrich Kuhn tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr; 575646ea79cSHeinrich Kuhn if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) { 576a6189a67SJin Liu PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); 577646ea79cSHeinrich Kuhn /* Using random mac addresses for VFs */ 578646ea79cSHeinrich Kuhn rte_eth_random_addr(&hw->mac_addr[0]); 579646ea79cSHeinrich Kuhn nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); 580646ea79cSHeinrich Kuhn } 581646ea79cSHeinrich Kuhn 582646ea79cSHeinrich Kuhn /* Copying mac address to DPDK eth_dev struct */ 583646ea79cSHeinrich Kuhn rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, 584646ea79cSHeinrich Kuhn ð_dev->data->mac_addrs[0]); 585646ea79cSHeinrich Kuhn 586646ea79cSHeinrich Kuhn if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) 587646ea79cSHeinrich Kuhn eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 588646ea79cSHeinrich Kuhn 589646ea79cSHeinrich Kuhn eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 590646ea79cSHeinrich Kuhn 591646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " 592c2c4f87bSAman Deep Singh "mac=" RTE_ETHER_ADDR_PRT_FMT, 593646ea79cSHeinrich Kuhn eth_dev->data->port_id, pci_dev->id.vendor_id, 594646ea79cSHeinrich Kuhn pci_dev->id.device_id, 595646ea79cSHeinrich Kuhn hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], 596646ea79cSHeinrich Kuhn hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); 597646ea79cSHeinrich Kuhn 598646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 599646ea79cSHeinrich Kuhn /* Registering LSC interrupt handler */ 600d61138d4SHarman Kalra rte_intr_callback_register(pci_dev->intr_handle, 601a6189a67SJin Liu nfp_net_dev_interrupt_handler, (void *)eth_dev); 602646ea79cSHeinrich Kuhn /* Telling the firmware about the LSC interrupt entry */ 603646ea79cSHeinrich Kuhn nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 604646ea79cSHeinrich Kuhn /* Recording current stats counters values */ 605646ea79cSHeinrich Kuhn nfp_net_stats_reset(eth_dev); 606646ea79cSHeinrich Kuhn } 607646ea79cSHeinrich Kuhn 608646ea79cSHeinrich Kuhn return 0; 609646ea79cSHeinrich Kuhn 610646ea79cSHeinrich Kuhn dev_err_queues_map: 611646ea79cSHeinrich Kuhn nfp_cpp_area_free(hw->hwqueues_area); 612646ea79cSHeinrich Kuhn dev_err_ctrl_map: 613646ea79cSHeinrich Kuhn nfp_cpp_area_free(hw->ctrl_area); 614646ea79cSHeinrich Kuhn 615646ea79cSHeinrich Kuhn return err; 616646ea79cSHeinrich Kuhn } 617646ea79cSHeinrich Kuhn 618646ea79cSHeinrich Kuhn #define DEFAULT_FW_PATH "/lib/firmware/netronome" 619646ea79cSHeinrich Kuhn 620646ea79cSHeinrich Kuhn static int 621646ea79cSHeinrich Kuhn nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) 622646ea79cSHeinrich Kuhn { 623646ea79cSHeinrich Kuhn struct nfp_cpp *cpp = nsp->cpp; 624646ea79cSHeinrich Kuhn void *fw_buf; 625646ea79cSHeinrich Kuhn char fw_name[125]; 626646ea79cSHeinrich Kuhn char serial[40]; 627646ea79cSHeinrich Kuhn size_t fsize; 628646ea79cSHeinrich Kuhn 629646ea79cSHeinrich Kuhn /* Looking for firmware file in order of priority */ 630646ea79cSHeinrich Kuhn 631646ea79cSHeinrich Kuhn /* First try to find a firmware image specific for this device */ 632646ea79cSHeinrich Kuhn snprintf(serial, sizeof(serial), 633646ea79cSHeinrich Kuhn "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 634646ea79cSHeinrich Kuhn cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], 635646ea79cSHeinrich Kuhn cpp->serial[4], cpp->serial[5], cpp->interface >> 8, 636646ea79cSHeinrich Kuhn cpp->interface & 0xff); 637646ea79cSHeinrich Kuhn 638646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, 639646ea79cSHeinrich Kuhn serial); 640646ea79cSHeinrich Kuhn 641646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 642646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 643646ea79cSHeinrich Kuhn goto load_fw; 644646ea79cSHeinrich Kuhn /* Then try the PCI name */ 645646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, 646646ea79cSHeinrich Kuhn dev->device.name); 647646ea79cSHeinrich Kuhn 648646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 649646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 650646ea79cSHeinrich Kuhn goto load_fw; 651646ea79cSHeinrich Kuhn 652646ea79cSHeinrich Kuhn /* Finally try the card type and media */ 653646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); 654646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 655646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) { 656646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); 657646ea79cSHeinrich Kuhn return -ENOENT; 658646ea79cSHeinrich Kuhn } 659646ea79cSHeinrich Kuhn 660646ea79cSHeinrich Kuhn load_fw: 661646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", 662646ea79cSHeinrich Kuhn fw_name, fsize); 663646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 664646ea79cSHeinrich Kuhn nfp_nsp_load_fw(nsp, fw_buf, fsize); 665646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Done"); 666646ea79cSHeinrich Kuhn 667646ea79cSHeinrich Kuhn free(fw_buf); 668646ea79cSHeinrich Kuhn 669646ea79cSHeinrich Kuhn return 0; 670646ea79cSHeinrich Kuhn } 671646ea79cSHeinrich Kuhn 672646ea79cSHeinrich Kuhn static int 673a6189a67SJin Liu nfp_fw_setup(struct rte_pci_device *dev, 674a6189a67SJin Liu struct nfp_cpp *cpp, 675a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table, 676a6189a67SJin Liu struct nfp_hwinfo *hwinfo) 677646ea79cSHeinrich Kuhn { 678646ea79cSHeinrich Kuhn struct nfp_nsp *nsp; 679646ea79cSHeinrich Kuhn const char *nfp_fw_model; 680646ea79cSHeinrich Kuhn char card_desc[100]; 681646ea79cSHeinrich Kuhn int err = 0; 682646ea79cSHeinrich Kuhn 68306be30d4SPeng Zhang nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); 68406be30d4SPeng Zhang if (nfp_fw_model == NULL) 685646ea79cSHeinrich Kuhn nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); 686646ea79cSHeinrich Kuhn 687646ea79cSHeinrich Kuhn if (nfp_fw_model) { 688646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); 689646ea79cSHeinrich Kuhn } else { 690646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "firmware model NOT found"); 691646ea79cSHeinrich Kuhn return -EIO; 692646ea79cSHeinrich Kuhn } 693646ea79cSHeinrich Kuhn 694646ea79cSHeinrich Kuhn if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 695646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", 696646ea79cSHeinrich Kuhn nfp_eth_table->count); 697646ea79cSHeinrich Kuhn return -EIO; 698646ea79cSHeinrich Kuhn } 699646ea79cSHeinrich Kuhn 700646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", 701646ea79cSHeinrich Kuhn nfp_eth_table->count); 702646ea79cSHeinrich Kuhn 703646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); 704646ea79cSHeinrich Kuhn 705646ea79cSHeinrich Kuhn snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 706646ea79cSHeinrich Kuhn nfp_fw_model, nfp_eth_table->count, 707646ea79cSHeinrich Kuhn nfp_eth_table->ports[0].speed / 1000); 708646ea79cSHeinrich Kuhn 709646ea79cSHeinrich Kuhn nsp = nfp_nsp_open(cpp); 710a6189a67SJin Liu if (nsp == NULL) { 711646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 712646ea79cSHeinrich Kuhn return -EIO; 713646ea79cSHeinrich Kuhn } 714646ea79cSHeinrich Kuhn 715646ea79cSHeinrich Kuhn nfp_nsp_device_soft_reset(nsp); 716646ea79cSHeinrich Kuhn err = nfp_fw_upload(dev, nsp, card_desc); 717646ea79cSHeinrich Kuhn 718646ea79cSHeinrich Kuhn nfp_nsp_close(nsp); 719646ea79cSHeinrich Kuhn return err; 720646ea79cSHeinrich Kuhn } 721646ea79cSHeinrich Kuhn 722a6189a67SJin Liu static int 723a6189a67SJin Liu nfp_init_phyports(struct nfp_pf_dev *pf_dev) 724646ea79cSHeinrich Kuhn { 725a6189a67SJin Liu int i; 726a6189a67SJin Liu int ret = 0; 727646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 728646ea79cSHeinrich Kuhn struct rte_eth_dev *eth_dev; 729a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 730646ea79cSHeinrich Kuhn 731646ea79cSHeinrich Kuhn nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp); 732a6189a67SJin Liu if (nfp_eth_table == NULL) { 733646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 734a6189a67SJin Liu return -EIO; 735646ea79cSHeinrich Kuhn } 736646ea79cSHeinrich Kuhn 737646ea79cSHeinrich Kuhn /* Loop through all physical ports on PF */ 738646ea79cSHeinrich Kuhn for (i = 0; i < pf_dev->total_phyports; i++) { 739646ea79cSHeinrich Kuhn const unsigned int numa_node = rte_socket_id(); 740646ea79cSHeinrich Kuhn char port_name[RTE_ETH_NAME_MAX_LEN]; 741646ea79cSHeinrich Kuhn 742646ea79cSHeinrich Kuhn snprintf(port_name, sizeof(port_name), "%s_port%d", 743646ea79cSHeinrich Kuhn pf_dev->pci_dev->device.name, i); 744646ea79cSHeinrich Kuhn 745646ea79cSHeinrich Kuhn /* Allocate a eth_dev for this phyport */ 746646ea79cSHeinrich Kuhn eth_dev = rte_eth_dev_allocate(port_name); 747a6189a67SJin Liu if (eth_dev == NULL) { 748646ea79cSHeinrich Kuhn ret = -ENODEV; 749646ea79cSHeinrich Kuhn goto port_cleanup; 750646ea79cSHeinrich Kuhn } 751646ea79cSHeinrich Kuhn 752646ea79cSHeinrich Kuhn /* Allocate memory for this phyport */ 753646ea79cSHeinrich Kuhn eth_dev->data->dev_private = 754646ea79cSHeinrich Kuhn rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw), 755646ea79cSHeinrich Kuhn RTE_CACHE_LINE_SIZE, numa_node); 756a6189a67SJin Liu if (eth_dev->data->dev_private == NULL) { 757646ea79cSHeinrich Kuhn ret = -ENOMEM; 758646ea79cSHeinrich Kuhn rte_eth_dev_release_port(eth_dev); 759646ea79cSHeinrich Kuhn goto port_cleanup; 760646ea79cSHeinrich Kuhn } 761646ea79cSHeinrich Kuhn 762646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 763646ea79cSHeinrich Kuhn 764646ea79cSHeinrich Kuhn /* Add this device to the PF's array of physical ports */ 765646ea79cSHeinrich Kuhn pf_dev->ports[i] = hw; 766646ea79cSHeinrich Kuhn 767646ea79cSHeinrich Kuhn hw->pf_dev = pf_dev; 768646ea79cSHeinrich Kuhn hw->cpp = pf_dev->cpp; 769646ea79cSHeinrich Kuhn hw->eth_dev = eth_dev; 770646ea79cSHeinrich Kuhn hw->idx = i; 771646ea79cSHeinrich Kuhn hw->nfp_idx = nfp_eth_table->ports[i].index; 772646ea79cSHeinrich Kuhn hw->is_phyport = true; 773646ea79cSHeinrich Kuhn 774646ea79cSHeinrich Kuhn eth_dev->device = &pf_dev->pci_dev->device; 775646ea79cSHeinrich Kuhn 776646ea79cSHeinrich Kuhn /* ctrl/tx/rx BAR mappings and remaining init happens in 777646ea79cSHeinrich Kuhn * nfp_net_init 778646ea79cSHeinrich Kuhn */ 779646ea79cSHeinrich Kuhn ret = nfp_net_init(eth_dev); 780646ea79cSHeinrich Kuhn if (ret) { 781646ea79cSHeinrich Kuhn ret = -ENODEV; 782646ea79cSHeinrich Kuhn goto port_cleanup; 783646ea79cSHeinrich Kuhn } 784646ea79cSHeinrich Kuhn 785646ea79cSHeinrich Kuhn rte_eth_dev_probing_finish(eth_dev); 786646ea79cSHeinrich Kuhn 787646ea79cSHeinrich Kuhn } /* End loop, all ports on this PF */ 788646ea79cSHeinrich Kuhn ret = 0; 789646ea79cSHeinrich Kuhn goto eth_table_cleanup; 790646ea79cSHeinrich Kuhn 791646ea79cSHeinrich Kuhn port_cleanup: 792646ea79cSHeinrich Kuhn for (i = 0; i < pf_dev->total_phyports; i++) { 793646ea79cSHeinrich Kuhn if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) { 794646ea79cSHeinrich Kuhn struct rte_eth_dev *tmp_dev; 795646ea79cSHeinrich Kuhn tmp_dev = pf_dev->ports[i]->eth_dev; 796646ea79cSHeinrich Kuhn rte_eth_dev_release_port(tmp_dev); 797646ea79cSHeinrich Kuhn pf_dev->ports[i] = NULL; 798646ea79cSHeinrich Kuhn } 799646ea79cSHeinrich Kuhn } 800646ea79cSHeinrich Kuhn eth_table_cleanup: 801646ea79cSHeinrich Kuhn free(nfp_eth_table); 802a6189a67SJin Liu 803646ea79cSHeinrich Kuhn return ret; 804646ea79cSHeinrich Kuhn } 805646ea79cSHeinrich Kuhn 806a6189a67SJin Liu static int 807a6189a67SJin Liu nfp_pf_init(struct rte_pci_device *pci_dev) 808646ea79cSHeinrich Kuhn { 809646ea79cSHeinrich Kuhn int err; 810a6189a67SJin Liu int ret = 0; 8115c464d6aSJin Liu uint64_t addr; 812a6189a67SJin Liu int total_ports; 813a6189a67SJin Liu struct nfp_cpp *cpp; 814a6189a67SJin Liu struct nfp_pf_dev *pf_dev; 815a6189a67SJin Liu struct nfp_hwinfo *hwinfo; 816a6189a67SJin Liu char name[RTE_ETH_NAME_MAX_LEN]; 817a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 818a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 819646ea79cSHeinrich Kuhn 820a6189a67SJin Liu if (pci_dev == NULL) 821a6189a67SJin Liu return -ENODEV; 822646ea79cSHeinrich Kuhn 823646ea79cSHeinrich Kuhn /* 824646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 825646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 826646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 827646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 828646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 829646ea79cSHeinrich Kuhn */ 830646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 831646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 0); 832646ea79cSHeinrich Kuhn else 833646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 1); 834646ea79cSHeinrich Kuhn 835a6189a67SJin Liu if (cpp == NULL) { 836646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 837646ea79cSHeinrich Kuhn ret = -EIO; 838646ea79cSHeinrich Kuhn goto error; 839646ea79cSHeinrich Kuhn } 840646ea79cSHeinrich Kuhn 841646ea79cSHeinrich Kuhn hwinfo = nfp_hwinfo_read(cpp); 842a6189a67SJin Liu if (hwinfo == NULL) { 843646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading hwinfo table"); 844646ea79cSHeinrich Kuhn ret = -EIO; 845646ea79cSHeinrich Kuhn goto error; 846646ea79cSHeinrich Kuhn } 847646ea79cSHeinrich Kuhn 848646ea79cSHeinrich Kuhn nfp_eth_table = nfp_eth_read_ports(cpp); 849a6189a67SJin Liu if (nfp_eth_table == NULL) { 850646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 851646ea79cSHeinrich Kuhn ret = -EIO; 852646ea79cSHeinrich Kuhn goto hwinfo_cleanup; 853646ea79cSHeinrich Kuhn } 854646ea79cSHeinrich Kuhn 855646ea79cSHeinrich Kuhn if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { 856646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error when uploading firmware"); 857646ea79cSHeinrich Kuhn ret = -EIO; 858646ea79cSHeinrich Kuhn goto eth_table_cleanup; 859646ea79cSHeinrich Kuhn } 860646ea79cSHeinrich Kuhn 861646ea79cSHeinrich Kuhn /* Now the symbol table should be there */ 862646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 863a6189a67SJin Liu if (sym_tbl == NULL) { 864646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Something is wrong with the firmware" 865646ea79cSHeinrich Kuhn " symbol table"); 866646ea79cSHeinrich Kuhn ret = -EIO; 867646ea79cSHeinrich Kuhn goto eth_table_cleanup; 868646ea79cSHeinrich Kuhn } 869646ea79cSHeinrich Kuhn 870646ea79cSHeinrich Kuhn total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); 871646ea79cSHeinrich Kuhn if (total_ports != (int)nfp_eth_table->count) { 872646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "Inconsistent number of ports"); 873646ea79cSHeinrich Kuhn ret = -EIO; 874646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 875646ea79cSHeinrich Kuhn } 876646ea79cSHeinrich Kuhn 877646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports); 878646ea79cSHeinrich Kuhn 879646ea79cSHeinrich Kuhn if (total_ports <= 0 || total_ports > 8) { 880646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); 881646ea79cSHeinrich Kuhn ret = -ENODEV; 882646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 883646ea79cSHeinrich Kuhn } 884646ea79cSHeinrich Kuhn /* Allocate memory for the PF "device" */ 885646ea79cSHeinrich Kuhn snprintf(name, sizeof(name), "nfp_pf%d", 0); 886646ea79cSHeinrich Kuhn pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 887a6189a67SJin Liu if (pf_dev == NULL) { 888646ea79cSHeinrich Kuhn ret = -ENOMEM; 889646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 890646ea79cSHeinrich Kuhn } 891646ea79cSHeinrich Kuhn 892646ea79cSHeinrich Kuhn /* Populate the newly created PF device */ 893646ea79cSHeinrich Kuhn pf_dev->cpp = cpp; 894646ea79cSHeinrich Kuhn pf_dev->hwinfo = hwinfo; 895646ea79cSHeinrich Kuhn pf_dev->sym_tbl = sym_tbl; 896646ea79cSHeinrich Kuhn pf_dev->total_phyports = total_ports; 897646ea79cSHeinrich Kuhn 898646ea79cSHeinrich Kuhn if (total_ports > 1) 899646ea79cSHeinrich Kuhn pf_dev->multiport = true; 900646ea79cSHeinrich Kuhn 901646ea79cSHeinrich Kuhn pf_dev->pci_dev = pci_dev; 902646ea79cSHeinrich Kuhn 903646ea79cSHeinrich Kuhn /* Map the symbol table */ 904646ea79cSHeinrich Kuhn pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0", 905a6189a67SJin Liu pf_dev->total_phyports * 32768, &pf_dev->ctrl_area); 906a6189a67SJin Liu if (pf_dev->ctrl_bar == NULL) { 907646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar"); 908646ea79cSHeinrich Kuhn ret = -EIO; 909646ea79cSHeinrich Kuhn goto pf_cleanup; 910646ea79cSHeinrich Kuhn } 911646ea79cSHeinrich Kuhn 912646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); 913646ea79cSHeinrich Kuhn 914646ea79cSHeinrich Kuhn /* configure access to tx/rx vNIC BARs */ 9155c464d6aSJin Liu switch (pci_dev->id.device_id) { 9165c464d6aSJin Liu case PCI_DEVICE_ID_NFP3800_PF_NIC: 9175c464d6aSJin Liu addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET, 9185c464d6aSJin Liu 0, NFP_PCIE_QUEUE_NFP3800_MASK); 9195c464d6aSJin Liu break; 9205c464d6aSJin Liu case PCI_DEVICE_ID_NFP4000_PF_NIC: 9215c464d6aSJin Liu case PCI_DEVICE_ID_NFP6000_PF_NIC: 9225c464d6aSJin Liu addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET, 9235c464d6aSJin Liu 0, NFP_PCIE_QUEUE_NFP6000_MASK); 9245c464d6aSJin Liu break; 9255c464d6aSJin Liu default: 9265c464d6aSJin Liu PMD_INIT_LOG(ERR, "nfp_net: no device ID matching"); 927*bb7f8aa5SJin Liu ret = -ENODEV; 9285c464d6aSJin Liu goto ctrl_area_cleanup; 9295c464d6aSJin Liu } 9305c464d6aSJin Liu 931646ea79cSHeinrich Kuhn pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0, 9325c464d6aSJin Liu addr, NFP_QCP_QUEUE_AREA_SZ, 933646ea79cSHeinrich Kuhn &pf_dev->hwqueues_area); 934a6189a67SJin Liu if (pf_dev->hw_queues == NULL) { 935646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); 936646ea79cSHeinrich Kuhn ret = -EIO; 937646ea79cSHeinrich Kuhn goto ctrl_area_cleanup; 938646ea79cSHeinrich Kuhn } 939646ea79cSHeinrich Kuhn 940646ea79cSHeinrich Kuhn PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues); 941646ea79cSHeinrich Kuhn 942a6189a67SJin Liu /* 943a6189a67SJin Liu * Initialize and prep physical ports now 944646ea79cSHeinrich Kuhn * This will loop through all physical ports 945646ea79cSHeinrich Kuhn */ 946646ea79cSHeinrich Kuhn ret = nfp_init_phyports(pf_dev); 947646ea79cSHeinrich Kuhn if (ret) { 948646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Could not create physical ports"); 949646ea79cSHeinrich Kuhn goto hwqueues_cleanup; 950646ea79cSHeinrich Kuhn } 951646ea79cSHeinrich Kuhn 952646ea79cSHeinrich Kuhn /* register the CPP bridge service here for primary use */ 953646ea79cSHeinrich Kuhn nfp_register_cpp_service(pf_dev->cpp); 954646ea79cSHeinrich Kuhn 955646ea79cSHeinrich Kuhn return 0; 956646ea79cSHeinrich Kuhn 957646ea79cSHeinrich Kuhn hwqueues_cleanup: 958646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->hwqueues_area); 959646ea79cSHeinrich Kuhn ctrl_area_cleanup: 960646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->ctrl_area); 961646ea79cSHeinrich Kuhn pf_cleanup: 962646ea79cSHeinrich Kuhn rte_free(pf_dev); 963646ea79cSHeinrich Kuhn sym_tbl_cleanup: 964646ea79cSHeinrich Kuhn free(sym_tbl); 965646ea79cSHeinrich Kuhn eth_table_cleanup: 966646ea79cSHeinrich Kuhn free(nfp_eth_table); 967646ea79cSHeinrich Kuhn hwinfo_cleanup: 968646ea79cSHeinrich Kuhn free(hwinfo); 969646ea79cSHeinrich Kuhn error: 970646ea79cSHeinrich Kuhn return ret; 971646ea79cSHeinrich Kuhn } 972646ea79cSHeinrich Kuhn 973646ea79cSHeinrich Kuhn /* 974646ea79cSHeinrich Kuhn * When attaching to the NFP4000/6000 PF on a secondary process there 975646ea79cSHeinrich Kuhn * is no need to initialise the PF again. Only minimal work is required 976646ea79cSHeinrich Kuhn * here 977646ea79cSHeinrich Kuhn */ 978a6189a67SJin Liu static int 979a6189a67SJin Liu nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 980646ea79cSHeinrich Kuhn { 981646ea79cSHeinrich Kuhn int i; 982646ea79cSHeinrich Kuhn int err; 983a6189a67SJin Liu int total_ports; 984a6189a67SJin Liu struct nfp_cpp *cpp; 985266470b2SJin Liu struct nfp_net_hw *hw; 986a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 987646ea79cSHeinrich Kuhn 988a6189a67SJin Liu if (pci_dev == NULL) 989646ea79cSHeinrich Kuhn return -ENODEV; 990646ea79cSHeinrich Kuhn 991646ea79cSHeinrich Kuhn /* 992646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 993646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 994646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 995646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 996646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 997646ea79cSHeinrich Kuhn */ 998646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 999646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 0); 1000646ea79cSHeinrich Kuhn else 1001646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 1); 1002646ea79cSHeinrich Kuhn 1003a6189a67SJin Liu if (cpp == NULL) { 1004646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1005646ea79cSHeinrich Kuhn return -EIO; 1006646ea79cSHeinrich Kuhn } 1007646ea79cSHeinrich Kuhn 1008646ea79cSHeinrich Kuhn /* 1009646ea79cSHeinrich Kuhn * We don't have access to the PF created in the primary process 1010646ea79cSHeinrich Kuhn * here so we have to read the number of ports from firmware 1011646ea79cSHeinrich Kuhn */ 1012646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 1013a6189a67SJin Liu if (sym_tbl == NULL) { 1014646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Something is wrong with the firmware" 1015646ea79cSHeinrich Kuhn " symbol table"); 1016646ea79cSHeinrich Kuhn return -EIO; 1017646ea79cSHeinrich Kuhn } 1018646ea79cSHeinrich Kuhn 1019646ea79cSHeinrich Kuhn total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); 1020646ea79cSHeinrich Kuhn 1021646ea79cSHeinrich Kuhn for (i = 0; i < total_ports; i++) { 1022646ea79cSHeinrich Kuhn struct rte_eth_dev *eth_dev; 1023646ea79cSHeinrich Kuhn char port_name[RTE_ETH_NAME_MAX_LEN]; 1024646ea79cSHeinrich Kuhn 1025646ea79cSHeinrich Kuhn snprintf(port_name, sizeof(port_name), "%s_port%d", 1026646ea79cSHeinrich Kuhn pci_dev->device.name, i); 1027646ea79cSHeinrich Kuhn 1028a6189a67SJin Liu PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s", port_name); 1029646ea79cSHeinrich Kuhn eth_dev = rte_eth_dev_attach_secondary(port_name); 1030a6189a67SJin Liu if (eth_dev == NULL) { 1031646ea79cSHeinrich Kuhn RTE_LOG(ERR, EAL, 1032a6189a67SJin Liu "secondary process attach failed, ethdev doesn't exist"); 1033646ea79cSHeinrich Kuhn return -ENODEV; 1034646ea79cSHeinrich Kuhn } 1035266470b2SJin Liu 1036266470b2SJin Liu hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1037266470b2SJin Liu 1038266470b2SJin Liu if (nfp_net_ethdev_ops_mount(hw, eth_dev)) 1039266470b2SJin Liu return -EINVAL; 1040266470b2SJin Liu 1041646ea79cSHeinrich Kuhn eth_dev->process_private = cpp; 1042266470b2SJin Liu 1043646ea79cSHeinrich Kuhn rte_eth_dev_probing_finish(eth_dev); 1044646ea79cSHeinrich Kuhn } 1045646ea79cSHeinrich Kuhn 1046646ea79cSHeinrich Kuhn /* Register the CPP bridge service for the secondary too */ 1047646ea79cSHeinrich Kuhn nfp_register_cpp_service(cpp); 1048646ea79cSHeinrich Kuhn 1049646ea79cSHeinrich Kuhn return 0; 1050646ea79cSHeinrich Kuhn } 1051646ea79cSHeinrich Kuhn 1052a6189a67SJin Liu static int 1053a6189a67SJin Liu nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1054646ea79cSHeinrich Kuhn struct rte_pci_device *dev) 1055646ea79cSHeinrich Kuhn { 1056646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1057646ea79cSHeinrich Kuhn return nfp_pf_init(dev); 1058646ea79cSHeinrich Kuhn else 1059646ea79cSHeinrich Kuhn return nfp_pf_secondary_init(dev); 1060646ea79cSHeinrich Kuhn } 1061646ea79cSHeinrich Kuhn 1062646ea79cSHeinrich Kuhn static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 1063646ea79cSHeinrich Kuhn { 1064646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 10655c464d6aSJin Liu PCI_DEVICE_ID_NFP3800_PF_NIC) 10665c464d6aSJin Liu }, 10675c464d6aSJin Liu { 10685c464d6aSJin Liu RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1069646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP4000_PF_NIC) 1070646ea79cSHeinrich Kuhn }, 1071646ea79cSHeinrich Kuhn { 1072646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1073646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP6000_PF_NIC) 1074646ea79cSHeinrich Kuhn }, 1075646ea79cSHeinrich Kuhn { 10765aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 10775aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP3800_PF_NIC) 10785aedd4c3SJames Hershaw }, 10795aedd4c3SJames Hershaw { 10805aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 10815aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP4000_PF_NIC) 10825aedd4c3SJames Hershaw }, 10835aedd4c3SJames Hershaw { 10845aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 10855aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP6000_PF_NIC) 10865aedd4c3SJames Hershaw }, 10875aedd4c3SJames Hershaw { 1088646ea79cSHeinrich Kuhn .vendor_id = 0, 1089646ea79cSHeinrich Kuhn }, 1090646ea79cSHeinrich Kuhn }; 1091646ea79cSHeinrich Kuhn 1092a6189a67SJin Liu static int 1093a6189a67SJin Liu nfp_pci_uninit(struct rte_eth_dev *eth_dev) 1094646ea79cSHeinrich Kuhn { 1095646ea79cSHeinrich Kuhn struct rte_pci_device *pci_dev; 1096646ea79cSHeinrich Kuhn uint16_t port_id; 1097646ea79cSHeinrich Kuhn 1098646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1099646ea79cSHeinrich Kuhn 1100646ea79cSHeinrich Kuhn /* Free up all physical ports under PF */ 1101646ea79cSHeinrich Kuhn RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 1102646ea79cSHeinrich Kuhn rte_eth_dev_close(port_id); 1103646ea79cSHeinrich Kuhn /* 1104646ea79cSHeinrich Kuhn * Ports can be closed and freed but hotplugging is not 1105646ea79cSHeinrich Kuhn * currently supported 1106646ea79cSHeinrich Kuhn */ 1107646ea79cSHeinrich Kuhn return -ENOTSUP; 1108646ea79cSHeinrich Kuhn } 1109646ea79cSHeinrich Kuhn 1110a6189a67SJin Liu static int 1111a6189a67SJin Liu eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 1112646ea79cSHeinrich Kuhn { 1113646ea79cSHeinrich Kuhn return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 1114646ea79cSHeinrich Kuhn } 1115646ea79cSHeinrich Kuhn 1116646ea79cSHeinrich Kuhn static struct rte_pci_driver rte_nfp_net_pf_pmd = { 1117646ea79cSHeinrich Kuhn .id_table = pci_id_nfp_pf_net_map, 1118646ea79cSHeinrich Kuhn .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1119646ea79cSHeinrich Kuhn .probe = nfp_pf_pci_probe, 1120646ea79cSHeinrich Kuhn .remove = eth_nfp_pci_remove, 1121646ea79cSHeinrich Kuhn }; 1122646ea79cSHeinrich Kuhn 1123646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd); 1124646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map); 1125646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); 1126646ea79cSHeinrich Kuhn /* 1127646ea79cSHeinrich Kuhn * Local variables: 1128646ea79cSHeinrich Kuhn * c-file-style: "Linux" 1129646ea79cSHeinrich Kuhn * indent-tabs-mode: t 1130646ea79cSHeinrich Kuhn * End: 1131646ea79cSHeinrich Kuhn */ 1132