/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <eal_firmware.h>
#include <rte_alarm.h>

#include "flower/nfp_flower.h"
#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_cpp_bridge.h"
#include "nfp_ipsec.h"
#include "nfp_logs.h"

static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
		uint16_t port)
{
	struct nfp_net_hw *hw;
	struct nfp_eth_table *nfp_eth_table;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->mac_addr);

	free(nfp_eth_table);

	return 0;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case...
*/ 68646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 69646ea79cSHeinrich Kuhn 70646ea79cSHeinrich Kuhn /* Enabling the required queues in the device */ 71646ea79cSHeinrich Kuhn nfp_net_enable_queues(dev); 72646ea79cSHeinrich Kuhn 7340688372SChaoyong He /* Check and configure queue intr-vector mapping */ 74646ea79cSHeinrich Kuhn if (dev->data->dev_conf.intr_conf.rxq != 0) { 75968ec1c3SChaoyong He if (app_fw_nic->multiport) { 76646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 77646ea79cSHeinrich Kuhn "with NFP multiport PF"); 78646ea79cSHeinrich Kuhn return -EINVAL; 79646ea79cSHeinrich Kuhn } 80b0c496abSChaoyong He 81f4d24fe9SChaoyong He if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { 82646ea79cSHeinrich Kuhn /* 83646ea79cSHeinrich Kuhn * Better not to share LSC with RX interrupts. 8440688372SChaoyong He * Unregistering LSC interrupt handler. 85646ea79cSHeinrich Kuhn */ 86e7978635SChaoyong He rte_intr_callback_unregister(intr_handle, 87646ea79cSHeinrich Kuhn nfp_net_dev_interrupt_handler, (void *)dev); 88646ea79cSHeinrich Kuhn 89646ea79cSHeinrich Kuhn if (dev->data->nb_rx_queues > 1) { 90646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "PMD rx interrupt only " 91646ea79cSHeinrich Kuhn "supports 1 queue with UIO"); 92646ea79cSHeinrich Kuhn return -EIO; 93646ea79cSHeinrich Kuhn } 94646ea79cSHeinrich Kuhn } 95b0c496abSChaoyong He 96646ea79cSHeinrich Kuhn intr_vector = dev->data->nb_rx_queues; 97c01e5c0cSChaoyong He if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) 98646ea79cSHeinrich Kuhn return -1; 99646ea79cSHeinrich Kuhn 100646ea79cSHeinrich Kuhn nfp_configure_rx_interrupt(dev, intr_handle); 101646ea79cSHeinrich Kuhn update = NFP_NET_CFG_UPDATE_MSIX; 102646ea79cSHeinrich Kuhn } 103646ea79cSHeinrich Kuhn 104dbad6f64SPeng Zhang /* Checking MTU set */ 105*72d1dea6SChaoyong He if (dev->data->mtu > net_hw->flbufsz) { 106dbad6f64SPeng Zhang PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", 107*72d1dea6SChaoyong He dev->data->mtu, net_hw->flbufsz); 108dbad6f64SPeng Zhang return -ERANGE; 109dbad6f64SPeng Zhang } 110dbad6f64SPeng Zhang 111646ea79cSHeinrich Kuhn rte_intr_enable(intr_handle); 112646ea79cSHeinrich Kuhn 113646ea79cSHeinrich Kuhn new_ctrl = nfp_check_offloads(dev); 114646ea79cSHeinrich Kuhn 115646ea79cSHeinrich Kuhn /* Writing configuration parameters in the device */ 116*72d1dea6SChaoyong He nfp_net_params_setup(net_hw); 117646ea79cSHeinrich Kuhn 118646ea79cSHeinrich Kuhn dev_conf = &dev->data->dev_conf; 119646ea79cSHeinrich Kuhn rxmode = &dev_conf->rxmode; 120646ea79cSHeinrich Kuhn 121c01e5c0cSChaoyong He if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { 122646ea79cSHeinrich Kuhn nfp_net_rss_config_default(dev); 123646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_RSS; 124*72d1dea6SChaoyong He new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); 125646ea79cSHeinrich Kuhn } 126646ea79cSHeinrich Kuhn 127646ea79cSHeinrich Kuhn /* Enable device */ 128646ea79cSHeinrich Kuhn new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 129646ea79cSHeinrich Kuhn 130646ea79cSHeinrich Kuhn update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 131646ea79cSHeinrich Kuhn 132c55abf61SChaoyong He /* Enable vxlan */ 133*72d1dea6SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { 134c55abf61SChaoyong He new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; 135c55abf61SChaoyong He update |= NFP_NET_CFG_UPDATE_VXLAN; 136c925a157SFei Qin } 137c55abf61SChaoyong He 138*72d1dea6SChaoyong He if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) 139646ea79cSHeinrich Kuhn 
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 140646ea79cSHeinrich Kuhn 141*72d1dea6SChaoyong He if (nfp_reconfig(hw, new_ctrl, update) != 0) 142646ea79cSHeinrich Kuhn return -EIO; 143646ea79cSHeinrich Kuhn 1442e7c3612SQin Ke /* Enable packet type offload by extend ctrl word1. */ 145*72d1dea6SChaoyong He cap_extend = hw->cap_ext; 1462e7c3612SQin Ke if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) 1472e7c3612SQin Ke ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; 1482e7c3612SQin Ke 14954713740SChang Miao if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) 15054713740SChang Miao ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP 15154713740SChang Miao | NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; 15254713740SChang Miao 1532e7c3612SQin Ke update = NFP_NET_CFG_UPDATE_GEN; 154*72d1dea6SChaoyong He if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0) 1552e7c3612SQin Ke return -EIO; 1562e7c3612SQin Ke 157*72d1dea6SChaoyong He hw->ctrl_ext = ctrl_extend; 158b4b6988aSChaoyong He 159646ea79cSHeinrich Kuhn /* 160646ea79cSHeinrich Kuhn * Allocating rte mbufs for configured rx queues. 16140688372SChaoyong He * This requires queues being enabled before. 162646ea79cSHeinrich Kuhn */ 163c01e5c0cSChaoyong He if (nfp_net_rx_freelist_setup(dev) != 0) { 164646ea79cSHeinrich Kuhn ret = -ENOMEM; 165646ea79cSHeinrich Kuhn goto error; 166646ea79cSHeinrich Kuhn } 167646ea79cSHeinrich Kuhn 168646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 169646ea79cSHeinrich Kuhn /* Configure the physical port up */ 170*72d1dea6SChaoyong He nfp_eth_set_configured(net_hw->cpp, net_hw->nfp_idx, 1); 171646ea79cSHeinrich Kuhn else 172*72d1dea6SChaoyong He nfp_eth_set_configured(dev->process_private, net_hw->nfp_idx, 1); 173646ea79cSHeinrich Kuhn 174*72d1dea6SChaoyong He hw->ctrl = new_ctrl; 175646ea79cSHeinrich Kuhn 176c46216e7SJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 177c46216e7SJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 178c46216e7SJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 179c46216e7SJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 180c46216e7SJie Hai 181646ea79cSHeinrich Kuhn return 0; 182646ea79cSHeinrich Kuhn 183646ea79cSHeinrich Kuhn error: 184646ea79cSHeinrich Kuhn /* 185646ea79cSHeinrich Kuhn * An error returned by this function should mean the app 186646ea79cSHeinrich Kuhn * exiting and then the system releasing all the memory 187646ea79cSHeinrich Kuhn * allocated even memory coming from hugepages. 188646ea79cSHeinrich Kuhn * 189646ea79cSHeinrich Kuhn * The device could be enabled at this point with some queues 190646ea79cSHeinrich Kuhn * ready for getting packets. This is true if the call to 191646ea79cSHeinrich Kuhn * nfp_net_rx_freelist_setup() succeeds for some queues but 192646ea79cSHeinrich Kuhn * fails for subsequent queues. 193646ea79cSHeinrich Kuhn * 194646ea79cSHeinrich Kuhn * This should make the app exiting but better if we tell the 195646ea79cSHeinrich Kuhn * device first. 196646ea79cSHeinrich Kuhn */ 197646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 198646ea79cSHeinrich Kuhn 199646ea79cSHeinrich Kuhn return ret; 200646ea79cSHeinrich Kuhn } 201646ea79cSHeinrich Kuhn 202646ea79cSHeinrich Kuhn /* Stop device: disable rx and tx functions to allow for reconfiguring. 
 */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);
	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
}

/* Reset and stop device. The device cannot be restarted.
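 * Note: only this port's resources are released here; the shared PF
 * resources are freed once the last physical port has been closed
 * (see the port check loop below).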
*/ 256646ea79cSHeinrich Kuhn static int 257646ea79cSHeinrich Kuhn nfp_net_close(struct rte_eth_dev *dev) 258646ea79cSHeinrich Kuhn { 2598ceb85c3SChaoyong He uint8_t i; 26049952141SChaoyong He struct nfp_net_hw *hw; 26149952141SChaoyong He struct nfp_pf_dev *pf_dev; 26249952141SChaoyong He struct rte_pci_device *pci_dev; 26349952141SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 264646ea79cSHeinrich Kuhn 265646ea79cSHeinrich Kuhn if (rte_eal_process_type() != RTE_PROC_PRIMARY) 266646ea79cSHeinrich Kuhn return 0; 267646ea79cSHeinrich Kuhn 268646ea79cSHeinrich Kuhn pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); 269646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 270646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(dev); 271968ec1c3SChaoyong He app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 272646ea79cSHeinrich Kuhn 273646ea79cSHeinrich Kuhn /* 274646ea79cSHeinrich Kuhn * We assume that the DPDK application is stopping all the 275646ea79cSHeinrich Kuhn * threads/queues before calling the device close function. 276646ea79cSHeinrich Kuhn */ 277646ea79cSHeinrich Kuhn nfp_net_disable_queues(dev); 278646ea79cSHeinrich Kuhn 279646ea79cSHeinrich Kuhn /* Clear queues */ 2801c8d02bbSJin Liu nfp_net_close_tx_queue(dev); 2811c8d02bbSJin Liu nfp_net_close_rx_queue(dev); 282646ea79cSHeinrich Kuhn 28354713740SChang Miao /* Clear ipsec */ 28454713740SChang Miao nfp_ipsec_uninit(dev); 28554713740SChang Miao 286851f03e1SHeinrich Kuhn /* Cancel possible impending LSC work here before releasing the port */ 287f4d24fe9SChaoyong He rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); 288851f03e1SHeinrich Kuhn 289646ea79cSHeinrich Kuhn /* Only free PF resources after all physical ports have been closed */ 290646ea79cSHeinrich Kuhn /* Mark this port as unused and free device priv resources */ 291f58bde00SChaoyong He nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); 292968ec1c3SChaoyong He app_fw_nic->ports[hw->idx] = NULL; 293646ea79cSHeinrich Kuhn 294968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 295646ea79cSHeinrich Kuhn /* Check to see if ports are still in use */ 296c01e5c0cSChaoyong He if (app_fw_nic->ports[i] != NULL) 297646ea79cSHeinrich Kuhn return 0; 298646ea79cSHeinrich Kuhn } 299646ea79cSHeinrich Kuhn 300646ea79cSHeinrich Kuhn /* Now it is safe to free all PF resources */ 301646ea79cSHeinrich Kuhn PMD_INIT_LOG(INFO, "Freeing PF resources"); 302646ea79cSHeinrich Kuhn nfp_cpp_area_free(pf_dev->ctrl_area); 303711e4559SChaoyong He nfp_cpp_area_free(pf_dev->qc_area); 304646ea79cSHeinrich Kuhn free(pf_dev->hwinfo); 305646ea79cSHeinrich Kuhn free(pf_dev->sym_tbl); 306646ea79cSHeinrich Kuhn nfp_cpp_free(pf_dev->cpp); 307968ec1c3SChaoyong He rte_free(app_fw_nic); 308646ea79cSHeinrich Kuhn rte_free(pf_dev); 309646ea79cSHeinrich Kuhn 310d61138d4SHarman Kalra rte_intr_disable(pci_dev->intr_handle); 311646ea79cSHeinrich Kuhn 31240688372SChaoyong He /* Unregister callback func from eal lib */ 313d61138d4SHarman Kalra rte_intr_callback_unregister(pci_dev->intr_handle, 314a6189a67SJin Liu nfp_net_dev_interrupt_handler, (void *)dev); 315646ea79cSHeinrich Kuhn 316646ea79cSHeinrich Kuhn return 0; 317646ea79cSHeinrich Kuhn } 318646ea79cSHeinrich Kuhn 319c55abf61SChaoyong He static int 320c55abf61SChaoyong He nfp_net_find_vxlan_idx(struct nfp_net_hw *hw, 321c55abf61SChaoyong He uint16_t port, 322c55abf61SChaoyong He uint32_t *idx) 323c55abf61SChaoyong He { 324c55abf61SChaoyong He uint32_t i; 325c55abf61SChaoyong He int 
free_idx = -1; 326c55abf61SChaoyong He 327c55abf61SChaoyong He for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 328c55abf61SChaoyong He if (hw->vxlan_ports[i] == port) { 329c55abf61SChaoyong He free_idx = i; 330c55abf61SChaoyong He break; 331c55abf61SChaoyong He } 332c55abf61SChaoyong He 333c55abf61SChaoyong He if (hw->vxlan_usecnt[i] == 0) { 334c55abf61SChaoyong He free_idx = i; 335c55abf61SChaoyong He break; 336c55abf61SChaoyong He } 337c55abf61SChaoyong He } 338c55abf61SChaoyong He 339c55abf61SChaoyong He if (free_idx == -1) 340c55abf61SChaoyong He return -EINVAL; 341c55abf61SChaoyong He 342c55abf61SChaoyong He *idx = free_idx; 343c55abf61SChaoyong He 344c55abf61SChaoyong He return 0; 345c55abf61SChaoyong He } 346c55abf61SChaoyong He 347c55abf61SChaoyong He static int 348c55abf61SChaoyong He nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, 349c55abf61SChaoyong He struct rte_eth_udp_tunnel *tunnel_udp) 350c55abf61SChaoyong He { 351c55abf61SChaoyong He int ret; 352c55abf61SChaoyong He uint32_t idx; 353c55abf61SChaoyong He uint16_t vxlan_port; 354c55abf61SChaoyong He struct nfp_net_hw *hw; 355c55abf61SChaoyong He enum rte_eth_tunnel_type tnl_type; 356c55abf61SChaoyong He 357c55abf61SChaoyong He hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 358c55abf61SChaoyong He vxlan_port = tunnel_udp->udp_port; 359c55abf61SChaoyong He tnl_type = tunnel_udp->prot_type; 360c55abf61SChaoyong He 361c55abf61SChaoyong He if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 362c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 363c55abf61SChaoyong He return -ENOTSUP; 364c55abf61SChaoyong He } 365c55abf61SChaoyong He 366c55abf61SChaoyong He ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 367c55abf61SChaoyong He if (ret != 0) { 368c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 369c55abf61SChaoyong He return -EINVAL; 370c55abf61SChaoyong He } 371c55abf61SChaoyong He 372c55abf61SChaoyong He if (hw->vxlan_usecnt[idx] == 0) { 373c55abf61SChaoyong He ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port); 374c55abf61SChaoyong He if (ret != 0) { 375c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed set vxlan port"); 376c55abf61SChaoyong He return -EINVAL; 377c55abf61SChaoyong He } 378c55abf61SChaoyong He } 379c55abf61SChaoyong He 380c55abf61SChaoyong He hw->vxlan_usecnt[idx]++; 381c55abf61SChaoyong He 382c55abf61SChaoyong He return 0; 383c55abf61SChaoyong He } 384c55abf61SChaoyong He 385c55abf61SChaoyong He static int 386c55abf61SChaoyong He nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, 387c55abf61SChaoyong He struct rte_eth_udp_tunnel *tunnel_udp) 388c55abf61SChaoyong He { 389c55abf61SChaoyong He int ret; 390c55abf61SChaoyong He uint32_t idx; 391c55abf61SChaoyong He uint16_t vxlan_port; 392c55abf61SChaoyong He struct nfp_net_hw *hw; 393c55abf61SChaoyong He enum rte_eth_tunnel_type tnl_type; 394c55abf61SChaoyong He 395c55abf61SChaoyong He hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); 396c55abf61SChaoyong He vxlan_port = tunnel_udp->udp_port; 397c55abf61SChaoyong He tnl_type = tunnel_udp->prot_type; 398c55abf61SChaoyong He 399c55abf61SChaoyong He if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 400c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 401c55abf61SChaoyong He return -ENOTSUP; 402c55abf61SChaoyong He } 403c55abf61SChaoyong He 404c55abf61SChaoyong He ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 405c55abf61SChaoyong He if (ret != 0 || hw->vxlan_usecnt[idx] == 0) { 406c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 407c55abf61SChaoyong He 
return -EINVAL; 408c55abf61SChaoyong He } 409c55abf61SChaoyong He 410c55abf61SChaoyong He hw->vxlan_usecnt[idx]--; 411c55abf61SChaoyong He 412c55abf61SChaoyong He if (hw->vxlan_usecnt[idx] == 0) { 413c55abf61SChaoyong He ret = nfp_net_set_vxlan_port(hw, idx, 0); 414c55abf61SChaoyong He if (ret != 0) { 415c55abf61SChaoyong He PMD_DRV_LOG(ERR, "Failed set vxlan port"); 416c55abf61SChaoyong He return -EINVAL; 417c55abf61SChaoyong He } 418c55abf61SChaoyong He } 419c55abf61SChaoyong He 420c55abf61SChaoyong He return 0; 421c55abf61SChaoyong He } 422c55abf61SChaoyong He 423646ea79cSHeinrich Kuhn /* Initialise and register driver with DPDK Application */ 4248d961320SJin Liu static const struct eth_dev_ops nfp_net_eth_dev_ops = { 425646ea79cSHeinrich Kuhn .dev_configure = nfp_net_configure, 426646ea79cSHeinrich Kuhn .dev_start = nfp_net_start, 427646ea79cSHeinrich Kuhn .dev_stop = nfp_net_stop, 428646ea79cSHeinrich Kuhn .dev_set_link_up = nfp_net_set_link_up, 429646ea79cSHeinrich Kuhn .dev_set_link_down = nfp_net_set_link_down, 430646ea79cSHeinrich Kuhn .dev_close = nfp_net_close, 431646ea79cSHeinrich Kuhn .promiscuous_enable = nfp_net_promisc_enable, 432646ea79cSHeinrich Kuhn .promiscuous_disable = nfp_net_promisc_disable, 433646ea79cSHeinrich Kuhn .link_update = nfp_net_link_update, 434646ea79cSHeinrich Kuhn .stats_get = nfp_net_stats_get, 435646ea79cSHeinrich Kuhn .stats_reset = nfp_net_stats_reset, 436f26e8239SJames Hershaw .xstats_get = nfp_net_xstats_get, 437f26e8239SJames Hershaw .xstats_reset = nfp_net_xstats_reset, 438f26e8239SJames Hershaw .xstats_get_names = nfp_net_xstats_get_names, 439f26e8239SJames Hershaw .xstats_get_by_id = nfp_net_xstats_get_by_id, 440f26e8239SJames Hershaw .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, 441646ea79cSHeinrich Kuhn .dev_infos_get = nfp_net_infos_get, 442646ea79cSHeinrich Kuhn .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 443646ea79cSHeinrich Kuhn .mtu_set = nfp_net_dev_mtu_set, 4440a94d6bcSJin Liu .mac_addr_set = nfp_net_set_mac_addr, 445646ea79cSHeinrich Kuhn .vlan_offload_set = nfp_net_vlan_offload_set, 446646ea79cSHeinrich Kuhn .reta_update = nfp_net_reta_update, 447646ea79cSHeinrich Kuhn .reta_query = nfp_net_reta_query, 448646ea79cSHeinrich Kuhn .rss_hash_update = nfp_net_rss_hash_update, 449646ea79cSHeinrich Kuhn .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 450646ea79cSHeinrich Kuhn .rx_queue_setup = nfp_net_rx_queue_setup, 451646ea79cSHeinrich Kuhn .rx_queue_release = nfp_net_rx_queue_release, 4528d961320SJin Liu .tx_queue_setup = nfp_net_tx_queue_setup, 45352ddc4c2SJin Liu .tx_queue_release = nfp_net_tx_queue_release, 45452ddc4c2SJin Liu .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 45552ddc4c2SJin Liu .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 456c55abf61SChaoyong He .udp_tunnel_port_add = nfp_udp_tunnel_port_add, 457c55abf61SChaoyong He .udp_tunnel_port_del = nfp_udp_tunnel_port_del, 458128c8ad9SChaoyong He .fw_version_get = nfp_net_firmware_version_get, 45952ddc4c2SJin Liu }; 46052ddc4c2SJin Liu 461ee8ca64eSChaoyong He static inline void 462ee8ca64eSChaoyong He nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, 463ee8ca64eSChaoyong He struct rte_eth_dev *eth_dev) 464266470b2SJin Liu { 465ee8ca64eSChaoyong He if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3) 466ee8ca64eSChaoyong He eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts; 467ee8ca64eSChaoyong He else 468ee8ca64eSChaoyong He eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts; 469266470b2SJin Liu 4708d961320SJin Liu eth_dev->dev_ops = 
&nfp_net_eth_dev_ops; 471266470b2SJin Liu eth_dev->rx_queue_count = nfp_net_rx_queue_count; 472266470b2SJin Liu eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; 473266470b2SJin Liu } 474266470b2SJin Liu 475646ea79cSHeinrich Kuhn static int 476646ea79cSHeinrich Kuhn nfp_net_init(struct rte_eth_dev *eth_dev) 477646ea79cSHeinrich Kuhn { 47849952141SChaoyong He int err; 47949952141SChaoyong He uint16_t port; 4800314a8ffSChaoyong He uint64_t rx_base; 4810314a8ffSChaoyong He uint64_t tx_base; 48249952141SChaoyong He struct nfp_net_hw *hw; 48349952141SChaoyong He struct nfp_pf_dev *pf_dev; 48449952141SChaoyong He struct rte_pci_device *pci_dev; 48549952141SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 48649952141SChaoyong He struct rte_ether_addr *tmp_ether_addr; 487646ea79cSHeinrich Kuhn 488646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 489646ea79cSHeinrich Kuhn 490646ea79cSHeinrich Kuhn /* Use backpointer here to the PF of this eth_dev */ 491646ea79cSHeinrich Kuhn pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private); 492646ea79cSHeinrich Kuhn 493968ec1c3SChaoyong He /* Use backpointer to the CoreNIC app struct */ 494968ec1c3SChaoyong He app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 495968ec1c3SChaoyong He 496646ea79cSHeinrich Kuhn port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; 4978ceb85c3SChaoyong He if (port > 7) { 498646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "Port value is wrong"); 499646ea79cSHeinrich Kuhn return -ENODEV; 500646ea79cSHeinrich Kuhn } 501646ea79cSHeinrich Kuhn 502a6189a67SJin Liu /* 503a6189a67SJin Liu * Use PF array of physical ports to get pointer to 50440688372SChaoyong He * this specific port. 505646ea79cSHeinrich Kuhn */ 506968ec1c3SChaoyong He hw = app_fw_nic->ports[port]; 507646ea79cSHeinrich Kuhn 508030b2b19SChaoyong He PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " 509a6189a67SJin Liu "NFP internal port number: %d", port, hw->nfp_idx); 510646ea79cSHeinrich Kuhn 511646ea79cSHeinrich Kuhn rte_eth_copy_pci_info(eth_dev, pci_dev); 512646ea79cSHeinrich Kuhn 5138980792dSChaoyong He hw->super.ctrl_bar = pci_dev->mem_resource[0].addr; 5148980792dSChaoyong He if (hw->super.ctrl_bar == NULL) { 5158980792dSChaoyong He PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. 
BAR0 not configured"); 516646ea79cSHeinrich Kuhn return -ENODEV; 517646ea79cSHeinrich Kuhn } 518646ea79cSHeinrich Kuhn 519646ea79cSHeinrich Kuhn if (port == 0) { 520f26e8239SJames Hershaw uint32_t min_size; 521f26e8239SJames Hershaw 5228980792dSChaoyong He hw->super.ctrl_bar = pf_dev->ctrl_bar; 523f26e8239SJames Hershaw min_size = NFP_MAC_STATS_SIZE * hw->pf_dev->nfp_eth_table->max_index; 524f26e8239SJames Hershaw hw->mac_stats_bar = nfp_rtsym_map(hw->pf_dev->sym_tbl, "_mac_stats", 525f26e8239SJames Hershaw min_size, &hw->mac_stats_area); 526f26e8239SJames Hershaw if (hw->mac_stats_bar == NULL) { 527f26e8239SJames Hershaw PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); 528f26e8239SJames Hershaw return -EIO; 529f26e8239SJames Hershaw } 530b0c496abSChaoyong He 531f26e8239SJames Hershaw hw->mac_stats = hw->mac_stats_bar; 532646ea79cSHeinrich Kuhn } else { 533a6189a67SJin Liu if (pf_dev->ctrl_bar == NULL) 534646ea79cSHeinrich Kuhn return -ENODEV; 535b0c496abSChaoyong He 536a6189a67SJin Liu /* Use port offset in pf ctrl_bar for this ports control bar */ 5378980792dSChaoyong He hw->super.ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); 538fe681917SJames Hershaw hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + 539fe681917SJames Hershaw (hw->nfp_idx * NFP_MAC_STATS_SIZE); 540646ea79cSHeinrich Kuhn } 541646ea79cSHeinrich Kuhn 5428980792dSChaoyong He PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->super.ctrl_bar); 543f26e8239SJames Hershaw PMD_INIT_LOG(DEBUG, "MAC stats: %p", hw->mac_stats); 544646ea79cSHeinrich Kuhn 545cd4397ebSPeng Zhang err = nfp_net_common_init(pci_dev, hw); 546cd4397ebSPeng Zhang if (err != 0) 547cd4397ebSPeng Zhang return err; 548fd392f84SPeng Zhang 549eecdfcc1SShihong Wang err = nfp_net_tlv_caps_parse(eth_dev); 550eecdfcc1SShihong Wang if (err != 0) { 551eecdfcc1SShihong Wang PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); 552eecdfcc1SShihong Wang return err; 553eecdfcc1SShihong Wang } 554eecdfcc1SShihong Wang 55554713740SChang Miao err = nfp_ipsec_init(eth_dev); 55654713740SChang Miao if (err != 0) { 55754713740SChang Miao PMD_INIT_LOG(ERR, "Failed to init IPsec module"); 55854713740SChang Miao return err; 55954713740SChang Miao } 56054713740SChang Miao 561ee8ca64eSChaoyong He nfp_net_ethdev_ops_mount(hw, eth_dev); 562266470b2SJin Liu 563f26e8239SJames Hershaw hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * 564f26e8239SJames Hershaw nfp_net_xstats_size(eth_dev), 0); 565f26e8239SJames Hershaw if (hw->eth_xstats_base == NULL) { 566f26e8239SJames Hershaw PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", 567f26e8239SJames Hershaw pci_dev->device.name); 568f26e8239SJames Hershaw return -ENOMEM; 569f26e8239SJames Hershaw } 570f26e8239SJames Hershaw 571646ea79cSHeinrich Kuhn /* Work out where in the BAR the queues start. 
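	 * NFP_NET_CFG_START_TXQ/RXQ give the first queue indexes assigned to
	 * this vNIC; each queue controller occupies NFP_QCP_QUEUE_ADDR_SZ bytes
	 * in the queue BAR, so the per-port pointers computed below are simply
	 * qc_bar plus that base index times the stride.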
	 */
	tx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
	rx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);

	hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->super.ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->super.cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->super.reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);

	tmp_ether_addr = &hw->mac_addr;
	if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->super.cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
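	/*
	 * The reset below takes a snapshot of the current hardware counters,
	 * so that subsequent stats reads report values relative to this point.
	 */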
631646ea79cSHeinrich Kuhn /* Recording current stats counters values */ 632646ea79cSHeinrich Kuhn nfp_net_stats_reset(eth_dev); 633646ea79cSHeinrich Kuhn 634646ea79cSHeinrich Kuhn return 0; 635646ea79cSHeinrich Kuhn } 636646ea79cSHeinrich Kuhn 637646ea79cSHeinrich Kuhn #define DEFAULT_FW_PATH "/lib/firmware/netronome" 638646ea79cSHeinrich Kuhn 639646ea79cSHeinrich Kuhn static int 640f4d24fe9SChaoyong He nfp_fw_upload(struct rte_pci_device *dev, 641f4d24fe9SChaoyong He struct nfp_nsp *nsp, 642f4d24fe9SChaoyong He char *card) 643646ea79cSHeinrich Kuhn { 644646ea79cSHeinrich Kuhn void *fw_buf; 645646ea79cSHeinrich Kuhn size_t fsize; 64649952141SChaoyong He char serial[40]; 64749952141SChaoyong He char fw_name[125]; 648ff627b74SChaoyong He uint16_t interface; 649ff627b74SChaoyong He uint32_t cpp_serial_len; 650ff627b74SChaoyong He const uint8_t *cpp_serial; 65149952141SChaoyong He struct nfp_cpp *cpp = nfp_nsp_cpp(nsp); 652ff627b74SChaoyong He 653ff627b74SChaoyong He cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial); 654ff627b74SChaoyong He if (cpp_serial_len != NFP_SERIAL_LEN) 655ff627b74SChaoyong He return -ERANGE; 656ff627b74SChaoyong He 657ff627b74SChaoyong He interface = nfp_cpp_interface(cpp); 658646ea79cSHeinrich Kuhn 659646ea79cSHeinrich Kuhn /* Looking for firmware file in order of priority */ 660646ea79cSHeinrich Kuhn 661646ea79cSHeinrich Kuhn /* First try to find a firmware image specific for this device */ 662646ea79cSHeinrich Kuhn snprintf(serial, sizeof(serial), 663646ea79cSHeinrich Kuhn "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 664ff627b74SChaoyong He cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], 665ff627b74SChaoyong He cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); 666f4d24fe9SChaoyong He snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial); 667646ea79cSHeinrich Kuhn 668646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 669646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 670646ea79cSHeinrich Kuhn goto load_fw; 671b0c496abSChaoyong He 672646ea79cSHeinrich Kuhn /* Then try the PCI name */ 673646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, 6743ddb4cc0SPeng Zhang dev->name); 675646ea79cSHeinrich Kuhn 676646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 677646ea79cSHeinrich Kuhn if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 678646ea79cSHeinrich Kuhn goto load_fw; 679646ea79cSHeinrich Kuhn 680646ea79cSHeinrich Kuhn /* Finally try the card type and media */ 681646ea79cSHeinrich Kuhn snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); 682646ea79cSHeinrich Kuhn PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 683c01e5c0cSChaoyong He if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 684c01e5c0cSChaoyong He goto load_fw; 685c01e5c0cSChaoyong He 686c01e5c0cSChaoyong He PMD_DRV_LOG(ERR, "Can't find suitable firmware."); 687646ea79cSHeinrich Kuhn return -ENOENT; 688646ea79cSHeinrich Kuhn 689646ea79cSHeinrich Kuhn load_fw: 690646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", 691646ea79cSHeinrich Kuhn fw_name, fsize); 692646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 693646ea79cSHeinrich Kuhn nfp_nsp_load_fw(nsp, fw_buf, fsize); 694646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Done"); 695646ea79cSHeinrich Kuhn 696646ea79cSHeinrich Kuhn free(fw_buf); 697646ea79cSHeinrich Kuhn 698646ea79cSHeinrich Kuhn return 0; 
699646ea79cSHeinrich Kuhn } 700646ea79cSHeinrich Kuhn 701646ea79cSHeinrich Kuhn static int 702a6189a67SJin Liu nfp_fw_setup(struct rte_pci_device *dev, 703a6189a67SJin Liu struct nfp_cpp *cpp, 704a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table, 705a6189a67SJin Liu struct nfp_hwinfo *hwinfo) 706646ea79cSHeinrich Kuhn { 70749952141SChaoyong He int err; 70849952141SChaoyong He char card_desc[100]; 709646ea79cSHeinrich Kuhn struct nfp_nsp *nsp; 710646ea79cSHeinrich Kuhn const char *nfp_fw_model; 711646ea79cSHeinrich Kuhn 71206be30d4SPeng Zhang nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); 71306be30d4SPeng Zhang if (nfp_fw_model == NULL) 714646ea79cSHeinrich Kuhn nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); 715646ea79cSHeinrich Kuhn 716c01e5c0cSChaoyong He if (nfp_fw_model != NULL) { 717646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); 718646ea79cSHeinrich Kuhn } else { 719646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "firmware model NOT found"); 720646ea79cSHeinrich Kuhn return -EIO; 721646ea79cSHeinrich Kuhn } 722646ea79cSHeinrich Kuhn 723646ea79cSHeinrich Kuhn if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 724646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", 725646ea79cSHeinrich Kuhn nfp_eth_table->count); 726646ea79cSHeinrich Kuhn return -EIO; 727646ea79cSHeinrich Kuhn } 728646ea79cSHeinrich Kuhn 729646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", 730646ea79cSHeinrich Kuhn nfp_eth_table->count); 731646ea79cSHeinrich Kuhn 732646ea79cSHeinrich Kuhn PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); 733646ea79cSHeinrich Kuhn 734646ea79cSHeinrich Kuhn snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 735646ea79cSHeinrich Kuhn nfp_fw_model, nfp_eth_table->count, 736646ea79cSHeinrich Kuhn nfp_eth_table->ports[0].speed / 1000); 737646ea79cSHeinrich Kuhn 738646ea79cSHeinrich Kuhn nsp = nfp_nsp_open(cpp); 739a6189a67SJin Liu if (nsp == NULL) { 740646ea79cSHeinrich Kuhn PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 741646ea79cSHeinrich Kuhn return -EIO; 742646ea79cSHeinrich Kuhn } 743646ea79cSHeinrich Kuhn 744646ea79cSHeinrich Kuhn nfp_nsp_device_soft_reset(nsp); 745646ea79cSHeinrich Kuhn err = nfp_fw_upload(dev, nsp, card_desc); 746646ea79cSHeinrich Kuhn 747646ea79cSHeinrich Kuhn nfp_nsp_close(nsp); 748646ea79cSHeinrich Kuhn return err; 749646ea79cSHeinrich Kuhn } 750646ea79cSHeinrich Kuhn 751a6189a67SJin Liu static int 7520314a8ffSChaoyong He nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev, 7530314a8ffSChaoyong He const struct nfp_dev_info *dev_info) 754646ea79cSHeinrich Kuhn { 7558ceb85c3SChaoyong He uint8_t i; 756e7978635SChaoyong He int ret = 0; 7578ceb85c3SChaoyong He uint32_t total_vnics; 758646ea79cSHeinrich Kuhn struct nfp_net_hw *hw; 759968ec1c3SChaoyong He unsigned int numa_node; 760646ea79cSHeinrich Kuhn struct rte_eth_dev *eth_dev; 761968ec1c3SChaoyong He struct nfp_app_fw_nic *app_fw_nic; 762a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 763646ea79cSHeinrich Kuhn char port_name[RTE_ETH_NAME_MAX_LEN]; 764646ea79cSHeinrich Kuhn 765968ec1c3SChaoyong He nfp_eth_table = pf_dev->nfp_eth_table; 766968ec1c3SChaoyong He PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count); 767968ec1c3SChaoyong He 768968ec1c3SChaoyong He /* Allocate memory for the CoreNIC app */ 769968ec1c3SChaoyong He app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0); 770968ec1c3SChaoyong He if (app_fw_nic == 
NULL) 771968ec1c3SChaoyong He return -ENOMEM; 772968ec1c3SChaoyong He 773968ec1c3SChaoyong He /* Point the app_fw_priv pointer in the PF to the coreNIC app */ 774968ec1c3SChaoyong He pf_dev->app_fw_priv = app_fw_nic; 775968ec1c3SChaoyong He 776968ec1c3SChaoyong He /* Read the number of vNIC's created for the PF */ 777e7978635SChaoyong He total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &ret); 778e7978635SChaoyong He if (ret != 0 || total_vnics == 0 || total_vnics > 8) { 779968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); 780968ec1c3SChaoyong He ret = -ENODEV; 781968ec1c3SChaoyong He goto app_cleanup; 782968ec1c3SChaoyong He } 783968ec1c3SChaoyong He 784968ec1c3SChaoyong He /* 785968ec1c3SChaoyong He * For coreNIC the number of vNICs exposed should be the same as the 78640688372SChaoyong He * number of physical ports. 787968ec1c3SChaoyong He */ 7888ceb85c3SChaoyong He if (total_vnics != nfp_eth_table->count) { 789968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); 790968ec1c3SChaoyong He ret = -ENODEV; 791968ec1c3SChaoyong He goto app_cleanup; 792968ec1c3SChaoyong He } 793968ec1c3SChaoyong He 794968ec1c3SChaoyong He /* Populate coreNIC app properties */ 795968ec1c3SChaoyong He app_fw_nic->total_phyports = total_vnics; 796968ec1c3SChaoyong He app_fw_nic->pf_dev = pf_dev; 797968ec1c3SChaoyong He if (total_vnics > 1) 798968ec1c3SChaoyong He app_fw_nic->multiport = true; 799968ec1c3SChaoyong He 800968ec1c3SChaoyong He /* Map the symbol table */ 801968ec1c3SChaoyong He pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0", 802d5e9fc86SChaoyong He app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ, 803d5e9fc86SChaoyong He &pf_dev->ctrl_area); 804968ec1c3SChaoyong He if (pf_dev->ctrl_bar == NULL) { 805968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar"); 806968ec1c3SChaoyong He ret = -EIO; 807968ec1c3SChaoyong He goto app_cleanup; 808968ec1c3SChaoyong He } 809968ec1c3SChaoyong He 810968ec1c3SChaoyong He PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); 811968ec1c3SChaoyong He 812968ec1c3SChaoyong He /* Loop through all physical ports on PF */ 813968ec1c3SChaoyong He numa_node = rte_socket_id(); 814968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 815646ea79cSHeinrich Kuhn snprintf(port_name, sizeof(port_name), "%s_port%d", 816646ea79cSHeinrich Kuhn pf_dev->pci_dev->device.name, i); 817646ea79cSHeinrich Kuhn 818646ea79cSHeinrich Kuhn /* Allocate a eth_dev for this phyport */ 819646ea79cSHeinrich Kuhn eth_dev = rte_eth_dev_allocate(port_name); 820a6189a67SJin Liu if (eth_dev == NULL) { 821646ea79cSHeinrich Kuhn ret = -ENODEV; 822646ea79cSHeinrich Kuhn goto port_cleanup; 823646ea79cSHeinrich Kuhn } 824646ea79cSHeinrich Kuhn 825646ea79cSHeinrich Kuhn /* Allocate memory for this phyport */ 826f4d24fe9SChaoyong He eth_dev->data->dev_private = rte_zmalloc_socket(port_name, 827f4d24fe9SChaoyong He sizeof(struct nfp_net_hw), 828646ea79cSHeinrich Kuhn RTE_CACHE_LINE_SIZE, numa_node); 829a6189a67SJin Liu if (eth_dev->data->dev_private == NULL) { 830646ea79cSHeinrich Kuhn ret = -ENOMEM; 831646ea79cSHeinrich Kuhn rte_eth_dev_release_port(eth_dev); 832646ea79cSHeinrich Kuhn goto port_cleanup; 833646ea79cSHeinrich Kuhn } 834646ea79cSHeinrich Kuhn 835646ea79cSHeinrich Kuhn hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 836646ea79cSHeinrich Kuhn 837646ea79cSHeinrich Kuhn /* Add this device to the PF's array of physical ports */ 
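		/*
		 * Note: 'idx' is the PF-local port index, while 'nfp_idx' is the
		 * index reported by the NFP ethernet table and is what
		 * nfp_eth_set_configured() is given when bringing the physical
		 * port up or down.
		 */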
838968ec1c3SChaoyong He app_fw_nic->ports[i] = hw; 839646ea79cSHeinrich Kuhn 8400314a8ffSChaoyong He hw->dev_info = dev_info; 841646ea79cSHeinrich Kuhn hw->pf_dev = pf_dev; 842646ea79cSHeinrich Kuhn hw->cpp = pf_dev->cpp; 843646ea79cSHeinrich Kuhn hw->eth_dev = eth_dev; 844646ea79cSHeinrich Kuhn hw->idx = i; 845646ea79cSHeinrich Kuhn hw->nfp_idx = nfp_eth_table->ports[i].index; 846646ea79cSHeinrich Kuhn 847646ea79cSHeinrich Kuhn eth_dev->device = &pf_dev->pci_dev->device; 848646ea79cSHeinrich Kuhn 84940688372SChaoyong He /* 85040688372SChaoyong He * Ctrl/tx/rx BAR mappings and remaining init happens in 85140688372SChaoyong He * @nfp_net_init() 852646ea79cSHeinrich Kuhn */ 853646ea79cSHeinrich Kuhn ret = nfp_net_init(eth_dev); 854c01e5c0cSChaoyong He if (ret != 0) { 855646ea79cSHeinrich Kuhn ret = -ENODEV; 856646ea79cSHeinrich Kuhn goto port_cleanup; 857646ea79cSHeinrich Kuhn } 858646ea79cSHeinrich Kuhn 859646ea79cSHeinrich Kuhn rte_eth_dev_probing_finish(eth_dev); 860646ea79cSHeinrich Kuhn 861646ea79cSHeinrich Kuhn } /* End loop, all ports on this PF */ 862968ec1c3SChaoyong He 863968ec1c3SChaoyong He return 0; 864646ea79cSHeinrich Kuhn 865646ea79cSHeinrich Kuhn port_cleanup: 866968ec1c3SChaoyong He for (i = 0; i < app_fw_nic->total_phyports; i++) { 867c01e5c0cSChaoyong He if (app_fw_nic->ports[i] != NULL && 868c01e5c0cSChaoyong He app_fw_nic->ports[i]->eth_dev != NULL) { 869646ea79cSHeinrich Kuhn struct rte_eth_dev *tmp_dev; 870968ec1c3SChaoyong He tmp_dev = app_fw_nic->ports[i]->eth_dev; 87154713740SChang Miao nfp_ipsec_uninit(tmp_dev); 872646ea79cSHeinrich Kuhn rte_eth_dev_release_port(tmp_dev); 873968ec1c3SChaoyong He app_fw_nic->ports[i] = NULL; 874646ea79cSHeinrich Kuhn } 875646ea79cSHeinrich Kuhn } 876968ec1c3SChaoyong He nfp_cpp_area_free(pf_dev->ctrl_area); 877968ec1c3SChaoyong He app_cleanup: 878968ec1c3SChaoyong He rte_free(app_fw_nic); 879a6189a67SJin Liu 880646ea79cSHeinrich Kuhn return ret; 881646ea79cSHeinrich Kuhn } 882646ea79cSHeinrich Kuhn 883a6189a67SJin Liu static int 884a6189a67SJin Liu nfp_pf_init(struct rte_pci_device *pci_dev) 885646ea79cSHeinrich Kuhn { 886e7978635SChaoyong He int ret = 0; 8875c464d6aSJin Liu uint64_t addr; 888925c27ecSChaoyong He uint32_t cpp_id; 889a6189a67SJin Liu struct nfp_cpp *cpp; 890a6189a67SJin Liu struct nfp_pf_dev *pf_dev; 891a6189a67SJin Liu struct nfp_hwinfo *hwinfo; 89249952141SChaoyong He enum nfp_app_fw_id app_fw_id; 893a6189a67SJin Liu char name[RTE_ETH_NAME_MAX_LEN]; 894a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 895a6189a67SJin Liu struct nfp_eth_table *nfp_eth_table; 8960314a8ffSChaoyong He const struct nfp_dev_info *dev_info; 897646ea79cSHeinrich Kuhn 898a6189a67SJin Liu if (pci_dev == NULL) 899a6189a67SJin Liu return -ENODEV; 900646ea79cSHeinrich Kuhn 9010314a8ffSChaoyong He dev_info = nfp_dev_info_get(pci_dev->id.device_id); 9020314a8ffSChaoyong He if (dev_info == NULL) { 9030314a8ffSChaoyong He PMD_INIT_LOG(ERR, "Not supported device ID"); 9040314a8ffSChaoyong He return -ENODEV; 9050314a8ffSChaoyong He } 9060314a8ffSChaoyong He 907646ea79cSHeinrich Kuhn /* 908646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 909646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 910646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 911646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 912646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 
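	 * (VFIO gives the process exclusive ownership of the device, which is
	 * why the lock file is only requested for the non-VFIO case below.)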
913646ea79cSHeinrich Kuhn */ 914646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 9151fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 916646ea79cSHeinrich Kuhn else 9171fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 918646ea79cSHeinrich Kuhn 919a6189a67SJin Liu if (cpp == NULL) { 920646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 9217feb8909SChaoyong He return -EIO; 922646ea79cSHeinrich Kuhn } 923646ea79cSHeinrich Kuhn 924646ea79cSHeinrich Kuhn hwinfo = nfp_hwinfo_read(cpp); 925a6189a67SJin Liu if (hwinfo == NULL) { 926646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading hwinfo table"); 927646ea79cSHeinrich Kuhn ret = -EIO; 928968ec1c3SChaoyong He goto cpp_cleanup; 929646ea79cSHeinrich Kuhn } 930646ea79cSHeinrich Kuhn 931968ec1c3SChaoyong He /* Read the number of physical ports from hardware */ 932646ea79cSHeinrich Kuhn nfp_eth_table = nfp_eth_read_ports(cpp); 933a6189a67SJin Liu if (nfp_eth_table == NULL) { 934646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 935646ea79cSHeinrich Kuhn ret = -EIO; 936646ea79cSHeinrich Kuhn goto hwinfo_cleanup; 937646ea79cSHeinrich Kuhn } 938646ea79cSHeinrich Kuhn 939c01e5c0cSChaoyong He if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo) != 0) { 940646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "Error when uploading firmware"); 941646ea79cSHeinrich Kuhn ret = -EIO; 942646ea79cSHeinrich Kuhn goto eth_table_cleanup; 943646ea79cSHeinrich Kuhn } 944646ea79cSHeinrich Kuhn 945646ea79cSHeinrich Kuhn /* Now the symbol table should be there */ 946646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 947a6189a67SJin Liu if (sym_tbl == NULL) { 948f4d24fe9SChaoyong He PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 949646ea79cSHeinrich Kuhn ret = -EIO; 950646ea79cSHeinrich Kuhn goto eth_table_cleanup; 951646ea79cSHeinrich Kuhn } 952646ea79cSHeinrich Kuhn 953968ec1c3SChaoyong He /* Read the app ID of the firmware loaded */ 954e7978635SChaoyong He app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret); 955e7978635SChaoyong He if (ret != 0) { 956968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw"); 957646ea79cSHeinrich Kuhn ret = -EIO; 958646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 959646ea79cSHeinrich Kuhn } 960646ea79cSHeinrich Kuhn 961646ea79cSHeinrich Kuhn /* Allocate memory for the PF "device" */ 962646ea79cSHeinrich Kuhn snprintf(name, sizeof(name), "nfp_pf%d", 0); 963646ea79cSHeinrich Kuhn pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 964a6189a67SJin Liu if (pf_dev == NULL) { 965646ea79cSHeinrich Kuhn ret = -ENOMEM; 966646ea79cSHeinrich Kuhn goto sym_tbl_cleanup; 967646ea79cSHeinrich Kuhn } 968646ea79cSHeinrich Kuhn 969646ea79cSHeinrich Kuhn /* Populate the newly created PF device */ 970968ec1c3SChaoyong He pf_dev->app_fw_id = app_fw_id; 971646ea79cSHeinrich Kuhn pf_dev->cpp = cpp; 972646ea79cSHeinrich Kuhn pf_dev->hwinfo = hwinfo; 973646ea79cSHeinrich Kuhn pf_dev->sym_tbl = sym_tbl; 974646ea79cSHeinrich Kuhn pf_dev->pci_dev = pci_dev; 975968ec1c3SChaoyong He pf_dev->nfp_eth_table = nfp_eth_table; 976646ea79cSHeinrich Kuhn 97740688372SChaoyong He /* Configure access to tx/rx vNIC BARs */ 9780314a8ffSChaoyong He addr = nfp_qcp_queue_offset(dev_info, 0); 979925c27ecSChaoyong He cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); 9800314a8ffSChaoyong He 981711e4559SChaoyong He pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, 9820314a8ffSChaoyong He addr, 
dev_info->qc_area_sz, &pf_dev->qc_area); 983711e4559SChaoyong He if (pf_dev->qc_bar == NULL) { 984646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); 985646ea79cSHeinrich Kuhn ret = -EIO; 986968ec1c3SChaoyong He goto pf_cleanup; 987646ea79cSHeinrich Kuhn } 988646ea79cSHeinrich Kuhn 989030b2b19SChaoyong He PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); 990646ea79cSHeinrich Kuhn 991a6189a67SJin Liu /* 992968ec1c3SChaoyong He * PF initialization has been done at this point. Call app specific 99340688372SChaoyong He * init code now. 994646ea79cSHeinrich Kuhn */ 995968ec1c3SChaoyong He switch (pf_dev->app_fw_id) { 996968ec1c3SChaoyong He case NFP_APP_FW_CORE_NIC: 997968ec1c3SChaoyong He PMD_INIT_LOG(INFO, "Initializing coreNIC"); 9980314a8ffSChaoyong He ret = nfp_init_app_fw_nic(pf_dev, dev_info); 999968ec1c3SChaoyong He if (ret != 0) { 1000968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1001968ec1c3SChaoyong He goto hwqueues_cleanup; 1002968ec1c3SChaoyong He } 1003968ec1c3SChaoyong He break; 1004b1880421SChaoyong He case NFP_APP_FW_FLOWER_NIC: 1005b1880421SChaoyong He PMD_INIT_LOG(INFO, "Initializing Flower"); 10060314a8ffSChaoyong He ret = nfp_init_app_fw_flower(pf_dev, dev_info); 1007b1880421SChaoyong He if (ret != 0) { 1008b1880421SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1009b1880421SChaoyong He goto hwqueues_cleanup; 1010b1880421SChaoyong He } 1011b1880421SChaoyong He break; 1012968ec1c3SChaoyong He default: 1013968ec1c3SChaoyong He PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1014968ec1c3SChaoyong He ret = -EINVAL; 1015646ea79cSHeinrich Kuhn goto hwqueues_cleanup; 1016646ea79cSHeinrich Kuhn } 1017646ea79cSHeinrich Kuhn 101840688372SChaoyong He /* Register the CPP bridge service here for primary use */ 1019bab0e6f4SChaoyong He ret = nfp_enable_cpp_service(pf_dev); 1020dee23e6cSChaoyong He if (ret != 0) 1021dee23e6cSChaoyong He PMD_INIT_LOG(INFO, "Enable cpp service failed."); 1022646ea79cSHeinrich Kuhn 1023646ea79cSHeinrich Kuhn return 0; 1024646ea79cSHeinrich Kuhn 1025646ea79cSHeinrich Kuhn hwqueues_cleanup: 1026711e4559SChaoyong He nfp_cpp_area_free(pf_dev->qc_area); 1027646ea79cSHeinrich Kuhn pf_cleanup: 1028646ea79cSHeinrich Kuhn rte_free(pf_dev); 1029646ea79cSHeinrich Kuhn sym_tbl_cleanup: 1030646ea79cSHeinrich Kuhn free(sym_tbl); 1031646ea79cSHeinrich Kuhn eth_table_cleanup: 1032646ea79cSHeinrich Kuhn free(nfp_eth_table); 1033646ea79cSHeinrich Kuhn hwinfo_cleanup: 1034646ea79cSHeinrich Kuhn free(hwinfo); 1035968ec1c3SChaoyong He cpp_cleanup: 1036968ec1c3SChaoyong He nfp_cpp_free(cpp); 10377feb8909SChaoyong He 1038646ea79cSHeinrich Kuhn return ret; 1039646ea79cSHeinrich Kuhn } 1040646ea79cSHeinrich Kuhn 1041d5f39e07SChaoyong He static int 1042d5f39e07SChaoyong He nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev, 1043d5f39e07SChaoyong He struct nfp_rtsym_table *sym_tbl, 1044d5f39e07SChaoyong He struct nfp_cpp *cpp) 1045d5f39e07SChaoyong He { 10468ceb85c3SChaoyong He uint32_t i; 1047d5f39e07SChaoyong He int err = 0; 1048d5f39e07SChaoyong He int ret = 0; 10498ceb85c3SChaoyong He uint32_t total_vnics; 1050d5f39e07SChaoyong He struct nfp_net_hw *hw; 1051d5f39e07SChaoyong He 1052d5f39e07SChaoyong He /* Read the number of vNIC's created for the PF */ 1053d5f39e07SChaoyong He total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); 10548ceb85c3SChaoyong He if (err != 0 || total_vnics == 0 || total_vnics > 8) { 1055d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports 
symbol with wrong value"); 1056d5f39e07SChaoyong He return -ENODEV; 1057d5f39e07SChaoyong He } 1058d5f39e07SChaoyong He 1059d5f39e07SChaoyong He for (i = 0; i < total_vnics; i++) { 1060d5f39e07SChaoyong He struct rte_eth_dev *eth_dev; 1061d5f39e07SChaoyong He char port_name[RTE_ETH_NAME_MAX_LEN]; 10628ceb85c3SChaoyong He snprintf(port_name, sizeof(port_name), "%s_port%u", 1063d5f39e07SChaoyong He pci_dev->device.name, i); 1064d5f39e07SChaoyong He 1065d5f39e07SChaoyong He PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); 1066d5f39e07SChaoyong He eth_dev = rte_eth_dev_attach_secondary(port_name); 1067d5f39e07SChaoyong He if (eth_dev == NULL) { 1068d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); 1069d5f39e07SChaoyong He ret = -ENODEV; 1070d5f39e07SChaoyong He break; 1071d5f39e07SChaoyong He } 1072d5f39e07SChaoyong He 1073d5f39e07SChaoyong He eth_dev->process_private = cpp; 1074d5f39e07SChaoyong He hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1075ee8ca64eSChaoyong He nfp_net_ethdev_ops_mount(hw, eth_dev); 1076d5f39e07SChaoyong He 1077d5f39e07SChaoyong He rte_eth_dev_probing_finish(eth_dev); 1078d5f39e07SChaoyong He } 1079d5f39e07SChaoyong He 1080d5f39e07SChaoyong He return ret; 1081d5f39e07SChaoyong He } 1082d5f39e07SChaoyong He 1083646ea79cSHeinrich Kuhn /* 1084646ea79cSHeinrich Kuhn * When attaching to the NFP4000/6000 PF on a secondary process there 1085646ea79cSHeinrich Kuhn * is no need to initialise the PF again. Only minimal work is required 108640688372SChaoyong He * here. 1087646ea79cSHeinrich Kuhn */ 1088a6189a67SJin Liu static int 1089a6189a67SJin Liu nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 1090646ea79cSHeinrich Kuhn { 1091968ec1c3SChaoyong He int ret = 0; 1092a6189a67SJin Liu struct nfp_cpp *cpp; 1093d5f39e07SChaoyong He enum nfp_app_fw_id app_fw_id; 1094a6189a67SJin Liu struct nfp_rtsym_table *sym_tbl; 10950314a8ffSChaoyong He const struct nfp_dev_info *dev_info; 1096646ea79cSHeinrich Kuhn 1097a6189a67SJin Liu if (pci_dev == NULL) 1098646ea79cSHeinrich Kuhn return -ENODEV; 1099646ea79cSHeinrich Kuhn 11000314a8ffSChaoyong He dev_info = nfp_dev_info_get(pci_dev->id.device_id); 11010314a8ffSChaoyong He if (dev_info == NULL) { 11020314a8ffSChaoyong He PMD_INIT_LOG(ERR, "Not supported device ID"); 11030314a8ffSChaoyong He return -ENODEV; 11040314a8ffSChaoyong He } 11050314a8ffSChaoyong He 1106646ea79cSHeinrich Kuhn /* 1107646ea79cSHeinrich Kuhn * When device bound to UIO, the device could be used, by mistake, 1108646ea79cSHeinrich Kuhn * by two DPDK apps, and the UIO driver does not avoid it. This 1109646ea79cSHeinrich Kuhn * could lead to a serious problem when configuring the NFP CPP 1110646ea79cSHeinrich Kuhn * interface. Here we avoid this telling to the CPP init code to 1111646ea79cSHeinrich Kuhn * use a lock file if UIO is being used. 
1112646ea79cSHeinrich Kuhn */ 1113646ea79cSHeinrich Kuhn if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 11141fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1115646ea79cSHeinrich Kuhn else 11161fbe51cdSChaoyong He cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1117646ea79cSHeinrich Kuhn 1118a6189a67SJin Liu if (cpp == NULL) { 1119646ea79cSHeinrich Kuhn PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1120646ea79cSHeinrich Kuhn return -EIO; 1121646ea79cSHeinrich Kuhn } 1122646ea79cSHeinrich Kuhn 1123646ea79cSHeinrich Kuhn /* 1124646ea79cSHeinrich Kuhn * We don't have access to the PF created in the primary process 112540688372SChaoyong He * here so we have to read the number of ports from firmware. 1126646ea79cSHeinrich Kuhn */ 1127646ea79cSHeinrich Kuhn sym_tbl = nfp_rtsym_table_read(cpp); 1128a6189a67SJin Liu if (sym_tbl == NULL) { 1129f4d24fe9SChaoyong He PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1130646ea79cSHeinrich Kuhn return -EIO; 1131646ea79cSHeinrich Kuhn } 1132646ea79cSHeinrich Kuhn 1133d5f39e07SChaoyong He /* Read the app ID of the firmware loaded */ 1134e7978635SChaoyong He app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret); 1135e7978635SChaoyong He if (ret != 0) { 1136d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw"); 1137968ec1c3SChaoyong He goto sym_tbl_cleanup; 1138968ec1c3SChaoyong He } 1139646ea79cSHeinrich Kuhn 1140d5f39e07SChaoyong He switch (app_fw_id) { 1141d5f39e07SChaoyong He case NFP_APP_FW_CORE_NIC: 1142d5f39e07SChaoyong He PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1143d5f39e07SChaoyong He ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp); 1144d5f39e07SChaoyong He if (ret != 0) { 1145d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1146d5f39e07SChaoyong He goto sym_tbl_cleanup; 1147646ea79cSHeinrich Kuhn } 1148d5f39e07SChaoyong He break; 1149b1880421SChaoyong He case NFP_APP_FW_FLOWER_NIC: 1150b1880421SChaoyong He PMD_INIT_LOG(INFO, "Initializing Flower"); 1151b1880421SChaoyong He ret = nfp_secondary_init_app_fw_flower(cpp); 1152b1880421SChaoyong He if (ret != 0) { 1153b1880421SChaoyong He PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1154b1880421SChaoyong He goto sym_tbl_cleanup; 1155b1880421SChaoyong He } 1156b1880421SChaoyong He break; 1157d5f39e07SChaoyong He default: 1158d5f39e07SChaoyong He PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1159d5f39e07SChaoyong He ret = -EINVAL; 1160d5f39e07SChaoyong He goto sym_tbl_cleanup; 1161646ea79cSHeinrich Kuhn } 1162646ea79cSHeinrich Kuhn 1163968ec1c3SChaoyong He sym_tbl_cleanup: 1164968ec1c3SChaoyong He free(sym_tbl); 1165968ec1c3SChaoyong He 1166968ec1c3SChaoyong He return ret; 1167646ea79cSHeinrich Kuhn } 1168646ea79cSHeinrich Kuhn 1169a6189a67SJin Liu static int 1170a6189a67SJin Liu nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1171646ea79cSHeinrich Kuhn struct rte_pci_device *dev) 1172646ea79cSHeinrich Kuhn { 1173646ea79cSHeinrich Kuhn if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1174646ea79cSHeinrich Kuhn return nfp_pf_init(dev); 1175646ea79cSHeinrich Kuhn else 1176646ea79cSHeinrich Kuhn return nfp_pf_secondary_init(dev); 1177646ea79cSHeinrich Kuhn } 1178646ea79cSHeinrich Kuhn 1179646ea79cSHeinrich Kuhn static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 1180646ea79cSHeinrich Kuhn { 1181646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 11825c464d6aSJin Liu PCI_DEVICE_ID_NFP3800_PF_NIC) 11835c464d6aSJin Liu }, 11845c464d6aSJin 
Liu { 11855c464d6aSJin Liu RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1186646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP4000_PF_NIC) 1187646ea79cSHeinrich Kuhn }, 1188646ea79cSHeinrich Kuhn { 1189646ea79cSHeinrich Kuhn RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1190646ea79cSHeinrich Kuhn PCI_DEVICE_ID_NFP6000_PF_NIC) 1191646ea79cSHeinrich Kuhn }, 1192646ea79cSHeinrich Kuhn { 11935aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 11945aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP3800_PF_NIC) 11955aedd4c3SJames Hershaw }, 11965aedd4c3SJames Hershaw { 11975aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 11985aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP4000_PF_NIC) 11995aedd4c3SJames Hershaw }, 12005aedd4c3SJames Hershaw { 12015aedd4c3SJames Hershaw RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 12025aedd4c3SJames Hershaw PCI_DEVICE_ID_NFP6000_PF_NIC) 12035aedd4c3SJames Hershaw }, 12045aedd4c3SJames Hershaw { 1205646ea79cSHeinrich Kuhn .vendor_id = 0, 1206646ea79cSHeinrich Kuhn }, 1207646ea79cSHeinrich Kuhn }; 1208646ea79cSHeinrich Kuhn 1209a6189a67SJin Liu static int 1210a6189a67SJin Liu nfp_pci_uninit(struct rte_eth_dev *eth_dev) 1211646ea79cSHeinrich Kuhn { 1212646ea79cSHeinrich Kuhn uint16_t port_id; 121349952141SChaoyong He struct rte_pci_device *pci_dev; 1214646ea79cSHeinrich Kuhn 1215646ea79cSHeinrich Kuhn pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1216646ea79cSHeinrich Kuhn 1217646ea79cSHeinrich Kuhn /* Free up all physical ports under PF */ 1218646ea79cSHeinrich Kuhn RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 1219646ea79cSHeinrich Kuhn rte_eth_dev_close(port_id); 1220646ea79cSHeinrich Kuhn /* 1221646ea79cSHeinrich Kuhn * Ports can be closed and freed but hotplugging is not 122240688372SChaoyong He * currently supported. 1223646ea79cSHeinrich Kuhn */ 1224646ea79cSHeinrich Kuhn return -ENOTSUP; 1225646ea79cSHeinrich Kuhn } 1226646ea79cSHeinrich Kuhn 1227a6189a67SJin Liu static int 1228a6189a67SJin Liu eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 1229646ea79cSHeinrich Kuhn { 1230646ea79cSHeinrich Kuhn return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 1231646ea79cSHeinrich Kuhn } 1232646ea79cSHeinrich Kuhn 1233646ea79cSHeinrich Kuhn static struct rte_pci_driver rte_nfp_net_pf_pmd = { 1234646ea79cSHeinrich Kuhn .id_table = pci_id_nfp_pf_net_map, 1235646ea79cSHeinrich Kuhn .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1236646ea79cSHeinrich Kuhn .probe = nfp_pf_pci_probe, 1237646ea79cSHeinrich Kuhn .remove = eth_nfp_pci_remove, 1238646ea79cSHeinrich Kuhn }; 1239646ea79cSHeinrich Kuhn 1240646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd); 1241646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map); 1242646ea79cSHeinrich Kuhn RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); 1243
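
/*
 * Typical usage (the PCI address below is only an example placeholder):
 * bind the PF to one of the kernel modules listed in
 * RTE_PMD_REGISTER_KMOD_DEP above before launching the DPDK application,
 * e.g. with the standard DPDK tool:
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:03:00.0
 */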