xref: /dpdk/drivers/net/nfp/nfp_ethdev_vf.c (revision 72d1dea62c2c99cfcf524f4ddf57a7e5c27a1a1e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <rte_alarm.h>
#include <nfp_common_pci.h>

#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"

#include "nfp_logs.h"
#include "nfp_net_common.h"

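/*
 * Read the 6-byte MAC address that the firmware exposes at
 * NFP_NET_CFG_MACADDR in the control BAR: the first word holds
 * bytes 0-3, the following word holds bytes 4-5.
 */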
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(&hw->super, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr.addr_bytes[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(&hw->super, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr.addr_bytes[4], &tmp, 2);
}

static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t intr_vector;
	struct nfp_net_hw *net_hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hw = &net_hw->super;
	nfp_net_params_setup(net_hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	/*
	 * Allocate rte mbufs for the configured Rx queues.
	 * This requires the queues to have been enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exits and the system then releases all the memory it
	 * allocated, even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

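/* Stop the device: disable the queues and clear the Rx and Tx rings. */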
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	return 0;
}

static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	return 0;
}

/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_netvf_start,
	.dev_stop               = nfp_netvf_stop,
	.dev_set_link_up        = nfp_netvf_set_link_up,
	.dev_set_link_down      = nfp_netvf_set_link_down,
	.dev_close              = nfp_netvf_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.link_update            = nfp_net_link_update,
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};

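/*
 * Attach the ethdev callbacks: select the NFD3 or NFDK Tx burst routine
 * according to the firmware datapath version, then set the common VF
 * dev_ops, Rx queue count and Rx burst handlers.
 */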
static inline void
nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw,
		struct rte_eth_dev *eth_dev)
{
	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
}

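/*
 * Per-port initialisation for a VF: take the control BAR from PCI BAR0,
 * locate the Tx/Rx queue pointers in BAR2, read the MAC address from the
 * firmware (falling back to a random one) and register the link state
 * change interrupt handler.
 */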
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint16_t port;
	uint32_t start_q;
	struct nfp_net_hw *hw;
	uint64_t tx_bar_off = 0;
	uint64_t rx_bar_off = 0;
	struct rte_pci_device *pci_dev;
	const struct nfp_dev_info *dev_info;

	port = eth_dev->data->port_id;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	hw->dev_info = dev_info;

	hw->super.ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->super.ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->super.ctrl_bar);

	err = nfp_net_common_init(pci_dev, hw);
	if (err != 0)
		return err;

	nfp_netvf_ethdev_ops_mount(hw, eth_dev);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
			sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0);
	if (hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
				pci_dev->device.name);
		return -ENOMEM;
	}

	/* Work out where in the BAR the queues start. */
	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->super.ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->super.cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->super.reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_ctrl_map;
	}

	nfp_netvf_read_mac(hw);
	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->super.cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %hu VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			port, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}

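/* PCI device IDs of the NFP3800/NFP6000 VFs (Netronome and Corigine vendor IDs). */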
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

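/*
 * Probe callback: allocate an ethdev with nfp_net_adapter private data
 * and run nfp_netvf_init() on it.
 */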
static int
nfp_vf_pci_probe(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int
nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

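/* Ethernet class driver registered with the NFP common PCI layer at init time. */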
static struct nfp_class_driver rte_nfp_net_vf_pmd = {
	.drv_class = NFP_CLASS_ETH,
	.name = RTE_STR(net_nfp_vf),
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_vf_pci_probe,
	.remove = nfp_vf_pci_remove,
};

RTE_INIT(rte_nfp_vf_pmd_init)
{
	nfp_class_driver_register(&rte_nfp_net_vf_pmd);
}

RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");