/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "flower/nfp_flower.h"

static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			(uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) ==
				RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
			new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
		else
			new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
	update |= NFP_NET_CFG_UPDATE_VXLAN;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits
	 * and then the system releases all the memory allocated, even
	 * memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			(void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

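/*
 * Find the VXLAN port table entry matching @port or, failing that, the
 * first slot whose use count is zero. On success the slot index is
 * returned through @idx; -EINVAL means the table is full.
 */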
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

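/*
 * Drop one reference on a VXLAN UDP port previously added through
 * nfp_udp_tunnel_port_add() and clear the hardware entry once its use
 * count reaches zero.
 */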
static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_net_start,
	.dev_stop = nfp_net_stop,
	.dev_set_link_up = nfp_net_set_link_up,
	.dev_set_link_down = nfp_net_set_link_down,
	.dev_close = nfp_net_close,
	.promiscuous_enable = nfp_net_promisc_enable,
	.promiscuous_disable = nfp_net_promisc_disable,
	.link_update = nfp_net_link_update,
	.stats_get = nfp_net_stats_get,
	.stats_reset = nfp_net_stats_reset,
	.dev_infos_get = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set = nfp_net_dev_mtu_set,
	.mac_addr_set = nfp_net_set_mac_addr,
	.vlan_offload_set = nfp_net_vlan_offload_set,
	.reta_update = nfp_net_reta_update,
	.reta_query = nfp_net_reta_query,
	.rss_hash_update = nfp_net_rss_hash_update,
	.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
	.rx_queue_setup = nfp_net_rx_queue_setup,
	.rx_queue_release = nfp_net_rx_queue_release,
	.tx_queue_setup = nfp_net_tx_queue_setup,
	.tx_queue_release = nfp_net_tx_queue_release,
	.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del = nfp_udp_tunnel_port_del,
};

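/*
 * Select the Tx burst routine matching the firmware datapath (NFD3 or
 * NFDK, the latter requiring ABI 5 or newer) and hook up the common
 * dev_ops, Rx burst and rx_queue_count callbacks for this vNIC.
 */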
static inline int
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
					NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "The version of firmware is not correct.");
		return -EINVAL;
	}

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}

static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_bar_off = 0;
	uint64_t tx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s can not be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use PF array of physical ports to get pointer to
	 * this specific port
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		hw->ctrl_bar = pf_dev->ctrl_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use port offset in pf ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_net_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
			NFD_CFG_MAJOR_VERSION_of(hw->ver),
			NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
			hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
			hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
			hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
			hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
			hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
			hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
			hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
			hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
			hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
			cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
			cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
			cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;
	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->device.name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
			fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

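/*
 * Build the expected firmware image name from the card part number and
 * the port count/speed reported by the NSP ethernet table, then
 * soft-reset the device and upload that image via nfp_fw_upload().
 */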
static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
				nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

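/*
 * CoreNIC firmware: read the number of vNICs exposed by the firmware,
 * map the PF control BAR and create/initialise one eth_dev per
 * physical port.
 */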
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNIC's created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * 32768, &pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
					RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/* ctrl/tx/rx BAR mappings and remaining init happens in
		 * nfp_net_init
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

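/*
 * Primary process PF probe: acquire a CPP handle, upload the firmware,
 * map the hardware queue area and hand over to the app-specific init
 * routine (CoreNIC or Flower) selected by the firmware app ID.
 */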
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this by telling the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* configure access to tx/rx vNIC BARs */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET,
				0, NFP_PCIE_QUEUE_NFP3800_MASK);
		break;
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET,
				0, NFP_PCIE_QUEUE_NFP6000_MASK);
		break;
	default:
		PMD_INIT_LOG(ERR, "nfp_net: no device ID matching");
		ret = -ENODEV;
		goto pf_cleanup;
	}

	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
			addr, NFP_QCP_QUEUE_AREA_SZ,
			&pf_dev->hwqueues_area);
	if (pf_dev->hw_queues == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev->cpp);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

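/*
 * CoreNIC firmware, secondary process: attach to the eth_devs created
 * by the primary process and mount the datapath callbacks for each
 * vNIC port.
 */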
static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNIC's created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		if (nfp_net_ethdev_ops_mount(hw, eth_dev))
			return -EINVAL;

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this by telling the CPP init code to
	 * use a lock file if UIO is being used.
1162646ea79cSHeinrich Kuhn */ 1163646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 1164646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 0); 1165646ea79cSHeinrich Kuhn else 1166646ea79cSHeinrich Kuhn cpp = nfp_cpp_from_device_name(pci_dev, 1); 1167646ea79cSHeinrich Kuhn 1168a6189a67SJin Liu if (cpp == NULL) { 1169646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1170646ea79cSHeinrich Kuhn return -EIO; 1171646ea79cSHeinrich Kuhn } 1172646ea79cSHeinrich Kuhn 1173646ea79cSHeinrich Kuhn /* 1174646ea79cSHeinrich Kuhn * We don't have access to the PF created in the primary process 1175646ea79cSHeinrich Kuhn * here so we have to read the number of ports from firmware 1176646ea79cSHeinrich Kuhn */ 1177646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 1178a6189a67SJin Liu if (sym_tbl == NULL) { 1179646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Something is wrong with the firmware" 1180646ea79cSHeinrich Kuhn " symbol table"); 1181646ea79cSHeinrich Kuhn return -EIO; 1182646ea79cSHeinrich Kuhn } 1183646ea79cSHeinrich Kuhn 1184d5f39e07SChaoyong He /* Read the app ID of the firmware loaded */ 1185d5f39e07SChaoyong He app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err); 1186d5f39e07SChaoyong He if (err != 0) { 1187d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw"); 1188968ec1c3SChaoyong He goto sym_tbl_cleanup; 1189968ec1c3SChaoyong He } 1190646ea79cSHeinrich Kuhn 1191d5f39e07SChaoyong He switch (app_fw_id) { 1192d5f39e07SChaoyong He case NFP_APP_FW_CORE_NIC: 1193d5f39e07SChaoyong He PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1194d5f39e07SChaoyong He ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp); 1195d5f39e07SChaoyong He if (ret != 0) { 1196d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1197d5f39e07SChaoyong He goto sym_tbl_cleanup; 1198646ea79cSHeinrich Kuhn } 1199d5f39e07SChaoyong He break; 1200b1880421SChaoyong He case NFP_APP_FW_FLOWER_NIC: 1201b1880421SChaoyong He PMD_INIT_LOG(INFO, "Initializing Flower"); 1202b1880421SChaoyong He ret = nfp_secondary_init_app_fw_flower(cpp); 1203b1880421SChaoyong He if (ret != 0) { 1204b1880421SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1205b1880421SChaoyong He goto sym_tbl_cleanup; 1206b1880421SChaoyong He } 1207b1880421SChaoyong He break; 1208d5f39e07SChaoyong He default: 1209d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1210d5f39e07SChaoyong He ret = -EINVAL; 1211d5f39e07SChaoyong He goto sym_tbl_cleanup; 1212646ea79cSHeinrich Kuhn } 1213646ea79cSHeinrich Kuhn 1214968ec1c3SChaoyong He sym_tbl_cleanup: 1215968ec1c3SChaoyong He free(sym_tbl); 1216968ec1c3SChaoyong He 1217968ec1c3SChaoyong He return ret; 1218646ea79cSHeinrich Kuhn } 1219646ea79cSHeinrich Kuhn 1220a6189a67SJin Liu static int 1221a6189a67SJin Liu nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1222646ea79cSHeinrich Kuhn struct rte_pci_device *dev) 1223646ea79cSHeinrich Kuhn { 1224646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1225646ea79cSHeinrich Kuhn return nfp_pf_init(dev); 1226646ea79cSHeinrich Kuhn else 1227646ea79cSHeinrich Kuhn return nfp_pf_secondary_init(dev); 1228646ea79cSHeinrich Kuhn } 1229646ea79cSHeinrich Kuhn 1230646ea79cSHeinrich Kuhn static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 1231646ea79cSHeinrich Kuhn { 1232646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 12335c464d6aSJin Liu PCI_DEVICE_ID_NFP3800_PF_NIC) 12345c464d6aSJin Liu }, 
12355c464d6aSJin Liu { 12365c464d6aSJin Liu RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1237646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP4000_PF_NIC) 1238646ea79cSHeinrich Kuhn }, 1239646ea79cSHeinrich Kuhn { 1240646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1241646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP6000_PF_NIC) 1242646ea79cSHeinrich Kuhn }, 1243646ea79cSHeinrich Kuhn { 12445aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 12455aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP3800_PF_NIC) 12465aedd4c3SJames Hershaw }, 12475aedd4c3SJames Hershaw { 12485aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 12495aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP4000_PF_NIC) 12505aedd4c3SJames Hershaw }, 12515aedd4c3SJames Hershaw { 12525aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 12535aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP6000_PF_NIC) 12545aedd4c3SJames Hershaw }, 12555aedd4c3SJames Hershaw { 1256646ea79cSHeinrich Kuhn .vendor_id = 0, 1257646ea79cSHeinrich Kuhn }, 1258646ea79cSHeinrich Kuhn }; 1259646ea79cSHeinrich Kuhn 1260a6189a67SJin Liu static int 1261a6189a67SJin Liu nfp_pci_uninit(struct rte_eth_dev *eth_dev) 1262646ea79cSHeinrich Kuhn { 1263646ea79cSHeinrich Kuhn struct rte_pci_device *pci_dev; 1264646ea79cSHeinrich Kuhn uint16_t port_id; 1265646ea79cSHeinrich Kuhn 1266646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1267646ea79cSHeinrich Kuhn 1268646ea79cSHeinrich Kuhn /* Free up all physical ports under PF */ 1269646ea79cSHeinrich Kuhn RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 1270646ea79cSHeinrich Kuhn rte_eth_dev_close(port_id); 1271646ea79cSHeinrich Kuhn /* 1272646ea79cSHeinrich Kuhn * Ports can be closed and freed but hotplugging is not 1273646ea79cSHeinrich Kuhn * currently supported 1274646ea79cSHeinrich Kuhn */ 1275646ea79cSHeinrich Kuhn return -ENOTSUP; 1276646ea79cSHeinrich Kuhn } 1277646ea79cSHeinrich Kuhn 1278a6189a67SJin Liu static int 1279a6189a67SJin Liu eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 1280646ea79cSHeinrich Kuhn { 1281646ea79cSHeinrich Kuhn return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 1282646ea79cSHeinrich Kuhn } 1283646ea79cSHeinrich Kuhn 1284646ea79cSHeinrich Kuhn static struct rte_pci_driver rte_nfp_net_pf_pmd = { 1285646ea79cSHeinrich Kuhn .id_table = pci_id_nfp_pf_net_map, 1286646ea79cSHeinrich Kuhn .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1287646ea79cSHeinrich Kuhn .probe = nfp_pf_pci_probe, 1288646ea79cSHeinrich Kuhn .remove = eth_nfp_pci_remove, 1289646ea79cSHeinrich Kuhn }; 1290646ea79cSHeinrich Kuhn 1291646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd); 1292646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map); 1293646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); 1294646ea79cSHeinrich Kuhn /* 1295646ea79cSHeinrich Kuhn * Local variables: 1296646ea79cSHeinrich Kuhn * c-file-style: "Linux" 1297646ea79cSHeinrich Kuhn * indent-tabs-mode: t 1298646ea79cSHeinrich Kuhn * End: 1299646ea79cSHeinrich Kuhn */ 1300
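
/*
 * Editor's note (illustrative sketch, not part of the upstream driver):
 * the secondary-process path in nfp_secondary_init_app_fw_nic() attaches
 * the PF vNICs under names of the form "<PCI device name>_port<N>".  The
 * sketch below shows how a secondary DPDK application could resolve such
 * a port by name.  The guard macro NFP_ETHDEV_USAGE_SKETCH and the helper
 * name are hypothetical; only rte_eth_dev_get_port_by_name() and the
 * naming scheme come from the driver code above.
 */
#ifdef NFP_ETHDEV_USAGE_SKETCH
#include <stdio.h>
#include <rte_ethdev.h>

/* Resolve the ethdev port id of vNIC <index> of PCI device <pci_name>. */
static int
nfp_sketch_lookup_port(const char *pci_name, int index, uint16_t *port_id)
{
	char port_name[RTE_ETH_NAME_MAX_LEN];

	/* Same "%s_port%d" naming used by nfp_secondary_init_app_fw_nic() */
	snprintf(port_name, sizeof(port_name), "%s_port%d", pci_name, index);

	/* Returns 0 and fills *port_id when such a port exists */
	return rte_eth_dev_get_port_by_name(port_name, port_id);
}
#endif /* NFP_ETHDEV_USAGE_SKETCH */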
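
/*
 * Editor's note (illustrative sketch, not part of the upstream driver):
 * after nfp_pf_pci_probe() has completed in either process type, every
 * NFP vNIC is an ordinary ethdev port.  The sketch below enumerates the
 * visible ports and their names, e.g. to confirm which "<PCI name>_port<N>"
 * devices a secondary process has attached.  The guard macro and helper
 * name are hypothetical; RTE_ETH_FOREACH_DEV and
 * rte_eth_dev_get_name_by_port() are standard ethdev APIs.
 */
#ifdef NFP_ETHDEV_USAGE_SKETCH
#include <stdio.h>
#include <rte_ethdev.h>

/* Print every ethdev port id together with its registered name. */
static void
nfp_sketch_list_ports(void)
{
	uint16_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
			printf("port %u: %s\n", (unsigned int)port_id, name);
	}
}
#endif /* NFP_ETHDEV_USAGE_SKETCH */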