xref: /dpdk/drivers/net/thunderx/nicvf_ethdev.c (revision b8d96c71ff56f21f15c3e446d74a68ef770e09fa)
1aaf4363eSJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
2aaf4363eSJerin Jacob  * Copyright(c) 2016 Cavium, Inc
3e4387966SJerin Jacob  */
4e4387966SJerin Jacob 
5e4387966SJerin Jacob #include <assert.h>
6e4387966SJerin Jacob #include <stdio.h>
7e4387966SJerin Jacob #include <stdbool.h>
8e4387966SJerin Jacob #include <errno.h>
9e4387966SJerin Jacob #include <stdint.h>
10e4387966SJerin Jacob #include <string.h>
11e4387966SJerin Jacob #include <unistd.h>
12e4387966SJerin Jacob #include <stdarg.h>
13e4387966SJerin Jacob #include <inttypes.h>
14e4387966SJerin Jacob #include <netinet/in.h>
15e4387966SJerin Jacob #include <sys/queue.h>
16e4387966SJerin Jacob 
17e4387966SJerin Jacob #include <rte_alarm.h>
18e4387966SJerin Jacob #include <rte_branch_prediction.h>
19e4387966SJerin Jacob #include <rte_byteorder.h>
20e4387966SJerin Jacob #include <rte_common.h>
21e4387966SJerin Jacob #include <rte_cycles.h>
22e4387966SJerin Jacob #include <rte_debug.h>
23e4387966SJerin Jacob #include <rte_dev.h>
24e4387966SJerin Jacob #include <rte_eal.h>
25e4387966SJerin Jacob #include <rte_ether.h>
26ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h>
27fdf91e0fSJan Blunck #include <rte_ethdev_pci.h>
28e4387966SJerin Jacob #include <rte_interrupts.h>
29e4387966SJerin Jacob #include <rte_log.h>
30e4387966SJerin Jacob #include <rte_memory.h>
31e4387966SJerin Jacob #include <rte_memzone.h>
32e4387966SJerin Jacob #include <rte_malloc.h>
33e4387966SJerin Jacob #include <rte_random.h>
34e4387966SJerin Jacob #include <rte_pci.h>
35c752998bSGaetan Rivet #include <rte_bus_pci.h>
36e4387966SJerin Jacob #include <rte_tailq.h>
37279d3319SRakesh Kudurumalla #include <rte_devargs.h>
38279d3319SRakesh Kudurumalla #include <rte_kvargs.h>
39e4387966SJerin Jacob 
40e4387966SJerin Jacob #include "base/nicvf_plat.h"
41e4387966SJerin Jacob 
42e4387966SJerin Jacob #include "nicvf_ethdev.h"
431c421f18SJerin Jacob #include "nicvf_rxtx.h"
44627d4ba2SKamil Rytarowski #include "nicvf_svf.h"
45e4387966SJerin Jacob #include "nicvf_logs.h"
46e4387966SJerin Jacob 
47c563443cSPavan Nikhilesh int nicvf_logtype_mbox;
48c563443cSPavan Nikhilesh int nicvf_logtype_init;
49c563443cSPavan Nikhilesh int nicvf_logtype_driver;
50c563443cSPavan Nikhilesh 
517413feeeSJerin Jacob static void nicvf_dev_stop(struct rte_eth_dev *dev);
52627d4ba2SKamil Rytarowski static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
53627d4ba2SKamil Rytarowski static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
54627d4ba2SKamil Rytarowski 			  bool cleanup);
55d3bf2564SRakesh Kudurumalla static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
56d3bf2564SRakesh Kudurumalla static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
577413feeeSJerin Jacob 
58f8e99896SThomas Monjalon RTE_INIT(nicvf_init_log)
59c563443cSPavan Nikhilesh {
60fd396066SHarry van Haaren 	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
61c563443cSPavan Nikhilesh 	if (nicvf_logtype_mbox >= 0)
62c563443cSPavan Nikhilesh 		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
63c563443cSPavan Nikhilesh 
64fd396066SHarry van Haaren 	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
65c563443cSPavan Nikhilesh 	if (nicvf_logtype_init >= 0)
66c563443cSPavan Nikhilesh 		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
67c563443cSPavan Nikhilesh 
68fd396066SHarry van Haaren 	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
69c563443cSPavan Nikhilesh 	if (nicvf_logtype_driver >= 0)
70c563443cSPavan Nikhilesh 		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
71c563443cSPavan Nikhilesh }
72c563443cSPavan Nikhilesh 
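/* Translate the VF's cached link state (status, duplex, speed) into *link. */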
738e14dc28SStephen Hemminger static void
748e14dc28SStephen Hemminger nicvf_link_status_update(struct nicvf *nic,
758fc70464SJerin Jacob 			 struct rte_eth_link *link)
768fc70464SJerin Jacob {
778e14dc28SStephen Hemminger 	memset(link, 0, sizeof(*link));
788fc70464SJerin Jacob 
798e14dc28SStephen Hemminger 	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
808fc70464SJerin Jacob 
818fc70464SJerin Jacob 	if (nic->duplex == NICVF_HALF_DUPLEX)
828fc70464SJerin Jacob 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
838fc70464SJerin Jacob 	else if (nic->duplex == NICVF_FULL_DUPLEX)
848fc70464SJerin Jacob 		link->link_duplex = ETH_LINK_FULL_DUPLEX;
858fc70464SJerin Jacob 	link->link_speed = nic->speed;
861e3a958fSThomas Monjalon 	link->link_autoneg = ETH_LINK_AUTONEG;
878fc70464SJerin Jacob }
888fc70464SJerin Jacob 
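/*
 * Alarm callback for the primary VF: poll mailbox interrupts and, on a BGX
 * link change, publish the new link state and raise an LSC event when the
 * application has enabled it. The alarm re-arms itself every poll interval.
 */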
89e4387966SJerin Jacob static void
90e4387966SJerin Jacob nicvf_interrupt(void *arg)
91e4387966SJerin Jacob {
92f141adcaSKamil Rytarowski 	struct rte_eth_dev *dev = arg;
93f141adcaSKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
948e14dc28SStephen Hemminger 	struct rte_eth_link link;
95e4387966SJerin Jacob 
968fc70464SJerin Jacob 	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
978e14dc28SStephen Hemminger 		if (dev->data->dev_conf.intr_conf.lsc) {
988e14dc28SStephen Hemminger 			nicvf_link_status_update(nic, &link);
998e14dc28SStephen Hemminger 			rte_eth_linkstatus_set(dev, &link);
1008e14dc28SStephen Hemminger 
1018e14dc28SStephen Hemminger 			_rte_eth_dev_callback_process(dev,
1028e14dc28SStephen Hemminger 						      RTE_ETH_EVENT_INTR_LSC,
103cebe3d7bSThomas Monjalon 						      NULL);
1048fc70464SJerin Jacob 		}
1058e14dc28SStephen Hemminger 	}
106e4387966SJerin Jacob 
107e4387966SJerin Jacob 	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
108f141adcaSKamil Rytarowski 				nicvf_interrupt, dev);
109f141adcaSKamil Rytarowski }
110f141adcaSKamil Rytarowski 
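/* Alarm callback for secondary queue-set VFs: only poll/ack mailbox interrupts. */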
11121e3fb00SKamil Rytarowski static void
112f141adcaSKamil Rytarowski nicvf_vf_interrupt(void *arg)
113f141adcaSKamil Rytarowski {
114f141adcaSKamil Rytarowski 	struct nicvf *nic = arg;
115f141adcaSKamil Rytarowski 
116f141adcaSKamil Rytarowski 	nicvf_reg_poll_interrupts(nic);
117f141adcaSKamil Rytarowski 
118f141adcaSKamil Rytarowski 	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
119f141adcaSKamil Rytarowski 				nicvf_vf_interrupt, nic);
120e4387966SJerin Jacob }
121e4387966SJerin Jacob 
122e4387966SJerin Jacob static int
123f141adcaSKamil Rytarowski nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
124e4387966SJerin Jacob {
125f141adcaSKamil Rytarowski 	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
126e4387966SJerin Jacob }
127e4387966SJerin Jacob 
128e4387966SJerin Jacob static int
129f141adcaSKamil Rytarowski nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
130e4387966SJerin Jacob {
131f141adcaSKamil Rytarowski 	return rte_eal_alarm_cancel(fn, arg);
132e4387966SJerin Jacob }
133e4387966SJerin Jacob 
1348fc70464SJerin Jacob /*
1358fc70464SJerin Jacob  * Return 0 means link status changed, -1 means not changed
1368fc70464SJerin Jacob  */
1378fc70464SJerin Jacob static int
1380cca5670SAndriy Berestovskyy nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1398fc70464SJerin Jacob {
1400cca5670SAndriy Berestovskyy #define CHECK_INTERVAL 100  /* 100ms */
1410cca5670SAndriy Berestovskyy #define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
1428fc70464SJerin Jacob 	struct rte_eth_link link;
1438fc70464SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1440cca5670SAndriy Berestovskyy 	int i;
1458fc70464SJerin Jacob 
1468fc70464SJerin Jacob 	PMD_INIT_FUNC_TRACE();
1478fc70464SJerin Jacob 
1480cca5670SAndriy Berestovskyy 	if (wait_to_complete) {
1490cca5670SAndriy Berestovskyy 		/* rte_eth_link_get() might need to wait up to 9 seconds */
1500cca5670SAndriy Berestovskyy 		for (i = 0; i < MAX_CHECK_TIME; i++) {
1518e14dc28SStephen Hemminger 			nicvf_link_status_update(nic, &link);
1528e14dc28SStephen Hemminger 			if (link.link_status == ETH_LINK_UP)
1530cca5670SAndriy Berestovskyy 				break;
1540cca5670SAndriy Berestovskyy 			rte_delay_ms(CHECK_INTERVAL);
1550cca5670SAndriy Berestovskyy 		}
1560cca5670SAndriy Berestovskyy 	} else {
1578e14dc28SStephen Hemminger 		nicvf_link_status_update(nic, &link);
1580cca5670SAndriy Berestovskyy 	}
1598e14dc28SStephen Hemminger 
1608e14dc28SStephen Hemminger 	return rte_eth_linkstatus_set(dev, &link);
1618fc70464SJerin Jacob }
1628fc70464SJerin Jacob 
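/*
 * Validate the requested MTU against hardware frame-size limits and the
 * Rx buffer/scatter configuration, program it through the mailbox, and
 * mirror the new value to max_rx_pkt_len and any secondary VFs.
 */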
163606ee746SJerin Jacob static int
16465d9804eSJerin Jacob nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
16565d9804eSJerin Jacob {
16665d9804eSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
167c77875fbSNitin Saxena 	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
168b7004ab2SKamil Rytarowski 	size_t i;
169c97da2cbSMaciej Czekaj 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
17065d9804eSJerin Jacob 
17165d9804eSJerin Jacob 	PMD_INIT_FUNC_TRACE();
17265d9804eSJerin Jacob 
17365d9804eSJerin Jacob 	if (frame_size > NIC_HW_MAX_FRS)
17465d9804eSJerin Jacob 		return -EINVAL;
17565d9804eSJerin Jacob 
17665d9804eSJerin Jacob 	if (frame_size < NIC_HW_MIN_FRS)
17765d9804eSJerin Jacob 		return -EINVAL;
17865d9804eSJerin Jacob 
17965d9804eSJerin Jacob 	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
18065d9804eSJerin Jacob 
18165d9804eSJerin Jacob 	/*
18265d9804eSJerin Jacob 	 * Refuse mtu that requires the support of scattered packets
18365d9804eSJerin Jacob 	 * when this feature has not been enabled before.
18465d9804eSJerin Jacob 	 */
185c77875fbSNitin Saxena 	if (dev->data->dev_started && !dev->data->scattered_rx &&
18665d9804eSJerin Jacob 		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
18765d9804eSJerin Jacob 		return -EINVAL;
18865d9804eSJerin Jacob 
18965d9804eSJerin Jacob 	/* Check <seg size> * <max_seg> >= max_frame */
19065d9804eSJerin Jacob 	if (dev->data->scattered_rx &&
19165d9804eSJerin Jacob 		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
19265d9804eSJerin Jacob 		return -EINVAL;
19365d9804eSJerin Jacob 
19435b2d13fSOlivier Matz 	if (frame_size > RTE_ETHER_MAX_LEN)
195c97da2cbSMaciej Czekaj 		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
19665d9804eSJerin Jacob 	else
197c97da2cbSMaciej Czekaj 		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
19865d9804eSJerin Jacob 
199c77875fbSNitin Saxena 	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
20065d9804eSJerin Jacob 		return -EINVAL;
20165d9804eSJerin Jacob 
202c77875fbSNitin Saxena 	/* Update max_rx_pkt_len */
20335b2d13fSOlivier Matz 	rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
20465d9804eSJerin Jacob 	nic->mtu = mtu;
205b7004ab2SKamil Rytarowski 
206b7004ab2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++)
207b7004ab2SKamil Rytarowski 		nic->snicvf[i]->mtu = mtu;
208b7004ab2SKamil Rytarowski 
20965d9804eSJerin Jacob 	return 0;
21065d9804eSJerin Jacob }
21165d9804eSJerin Jacob 
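/*
 * Register dump handler: report the register count/width when regs->data is
 * NULL; otherwise only a full dump of the register set is supported.
 */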
21265d9804eSJerin Jacob static int
213606ee746SJerin Jacob nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
214606ee746SJerin Jacob {
215606ee746SJerin Jacob 	uint64_t *data = regs->data;
216606ee746SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
217606ee746SJerin Jacob 
218001a1c0fSZyta Szpak 	if (data == NULL) {
219001a1c0fSZyta Szpak 		regs->length = nicvf_reg_get_count();
220001a1c0fSZyta Szpak 		regs->width = THUNDERX_REG_BYTES;
221001a1c0fSZyta Szpak 		return 0;
222001a1c0fSZyta Szpak 	}
223606ee746SJerin Jacob 
224606ee746SJerin Jacob 	/* Support only full register dump */
225606ee746SJerin Jacob 	if ((regs->length == 0) ||
226606ee746SJerin Jacob 		(regs->length == (uint32_t)nicvf_reg_get_count())) {
227606ee746SJerin Jacob 		regs->version = nic->vendor_id << 16 | nic->device_id;
228606ee746SJerin Jacob 		nicvf_reg_dump(nic, data);
229606ee746SJerin Jacob 		return 0;
230606ee746SJerin Jacob 	}
231606ee746SJerin Jacob 	return -ENOTSUP;
232606ee746SJerin Jacob }
233606ee746SJerin Jacob 
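/*
 * Gather per-queue Rx/Tx counters for the primary VF and each secondary VF
 * (queues beyond RTE_ETHDEV_QUEUE_STAT_CNTRS are skipped), plus the
 * port-level counters.
 */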
234d5b0924bSMatan Azrad static int
235684fa771SJerin Jacob nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
236684fa771SJerin Jacob {
237684fa771SJerin Jacob 	uint16_t qidx;
238684fa771SJerin Jacob 	struct nicvf_hw_rx_qstats rx_qstats;
239684fa771SJerin Jacob 	struct nicvf_hw_tx_qstats tx_qstats;
240684fa771SJerin Jacob 	struct nicvf_hw_stats port_stats;
241684fa771SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
24221e3fb00SKamil Rytarowski 	uint16_t rx_start, rx_end;
24321e3fb00SKamil Rytarowski 	uint16_t tx_start, tx_end;
24421e3fb00SKamil Rytarowski 	size_t i;
24521e3fb00SKamil Rytarowski 
24621e3fb00SKamil Rytarowski 	/* RX queue indices for the first VF */
24721e3fb00SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
248684fa771SJerin Jacob 
249684fa771SJerin Jacob 	/* Reading per RX ring stats */
25021e3fb00SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
251695cd416SMarcin Wilk 		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
252684fa771SJerin Jacob 			break;
253684fa771SJerin Jacob 
254684fa771SJerin Jacob 		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
255684fa771SJerin Jacob 		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
256684fa771SJerin Jacob 		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
257684fa771SJerin Jacob 	}
258684fa771SJerin Jacob 
25921e3fb00SKamil Rytarowski 	/* TX queue indices for the first VF */
26021e3fb00SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
26121e3fb00SKamil Rytarowski 
262684fa771SJerin Jacob 	/* Reading per TX ring stats */
26321e3fb00SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
264695cd416SMarcin Wilk 		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
265684fa771SJerin Jacob 			break;
266684fa771SJerin Jacob 
267684fa771SJerin Jacob 		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
268684fa771SJerin Jacob 		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
269684fa771SJerin Jacob 		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
270684fa771SJerin Jacob 	}
271684fa771SJerin Jacob 
27221e3fb00SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
27321e3fb00SKamil Rytarowski 		struct nicvf *snic = nic->snicvf[i];
27421e3fb00SKamil Rytarowski 
27521e3fb00SKamil Rytarowski 		if (snic == NULL)
27621e3fb00SKamil Rytarowski 			break;
27721e3fb00SKamil Rytarowski 
27821e3fb00SKamil Rytarowski 		/* RX queue indices for a secondary VF */
27921e3fb00SKamil Rytarowski 		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
28021e3fb00SKamil Rytarowski 
28121e3fb00SKamil Rytarowski 		/* Reading per RX ring stats */
28221e3fb00SKamil Rytarowski 		for (qidx = rx_start; qidx <= rx_end; qidx++) {
283695cd416SMarcin Wilk 			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
28421e3fb00SKamil Rytarowski 				break;
28521e3fb00SKamil Rytarowski 
28621e3fb00SKamil Rytarowski 			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
28721e3fb00SKamil Rytarowski 					       qidx % MAX_RCV_QUEUES_PER_QS);
28821e3fb00SKamil Rytarowski 			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
28921e3fb00SKamil Rytarowski 			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
29021e3fb00SKamil Rytarowski 		}
29121e3fb00SKamil Rytarowski 
29221e3fb00SKamil Rytarowski 		/* TX queue indices for a secondary VF */
29321e3fb00SKamil Rytarowski 		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
29421e3fb00SKamil Rytarowski 		/* Reading per TX ring stats */
29521e3fb00SKamil Rytarowski 		for (qidx = tx_start; qidx <= tx_end; qidx++) {
296695cd416SMarcin Wilk 			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
29721e3fb00SKamil Rytarowski 				break;
29821e3fb00SKamil Rytarowski 
29921e3fb00SKamil Rytarowski 			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
30021e3fb00SKamil Rytarowski 					       qidx % MAX_SND_QUEUES_PER_QS);
30121e3fb00SKamil Rytarowski 			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
30221e3fb00SKamil Rytarowski 			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
30321e3fb00SKamil Rytarowski 		}
30421e3fb00SKamil Rytarowski 	}
30521e3fb00SKamil Rytarowski 
306684fa771SJerin Jacob 	nicvf_hw_get_stats(nic, &port_stats);
307684fa771SJerin Jacob 	stats->ibytes = port_stats.rx_bytes;
308684fa771SJerin Jacob 	stats->ipackets = port_stats.rx_ucast_frames;
309684fa771SJerin Jacob 	stats->ipackets += port_stats.rx_bcast_frames;
310684fa771SJerin Jacob 	stats->ipackets += port_stats.rx_mcast_frames;
311684fa771SJerin Jacob 	stats->ierrors = port_stats.rx_l2_errors;
312684fa771SJerin Jacob 	stats->imissed = port_stats.rx_drop_red;
313684fa771SJerin Jacob 	stats->imissed += port_stats.rx_drop_overrun;
314684fa771SJerin Jacob 	stats->imissed += port_stats.rx_drop_bcast;
315684fa771SJerin Jacob 	stats->imissed += port_stats.rx_drop_mcast;
316684fa771SJerin Jacob 	stats->imissed += port_stats.rx_drop_l3_bcast;
317684fa771SJerin Jacob 	stats->imissed += port_stats.rx_drop_l3_mcast;
318684fa771SJerin Jacob 
319684fa771SJerin Jacob 	stats->obytes = port_stats.tx_bytes_ok;
320684fa771SJerin Jacob 	stats->opackets = port_stats.tx_ucast_frames_ok;
321684fa771SJerin Jacob 	stats->opackets += port_stats.tx_bcast_frames_ok;
322684fa771SJerin Jacob 	stats->opackets += port_stats.tx_mcast_frames_ok;
323684fa771SJerin Jacob 	stats->oerrors = port_stats.tx_drops;
324d5b0924bSMatan Azrad 
325d5b0924bSMatan Azrad 	return 0;
326684fa771SJerin Jacob }
327684fa771SJerin Jacob 
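/*
 * Report supported packet types: the common L3/L4 set, plus tunnel types
 * when the hardware supports tunnel parsing.
 */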
3281c80e4fdSJerin Jacob static const uint32_t *
3291c80e4fdSJerin Jacob nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3301c80e4fdSJerin Jacob {
3311c80e4fdSJerin Jacob 	size_t copied;
3321c80e4fdSJerin Jacob 	static uint32_t ptypes[32];
3331c80e4fdSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
334398a1be1SJerin Jacob 	static const uint32_t ptypes_common[] = {
3351c80e4fdSJerin Jacob 		RTE_PTYPE_L3_IPV4,
3361c80e4fdSJerin Jacob 		RTE_PTYPE_L3_IPV4_EXT,
3371c80e4fdSJerin Jacob 		RTE_PTYPE_L3_IPV6,
3381c80e4fdSJerin Jacob 		RTE_PTYPE_L3_IPV6_EXT,
3391c80e4fdSJerin Jacob 		RTE_PTYPE_L4_TCP,
3401c80e4fdSJerin Jacob 		RTE_PTYPE_L4_UDP,
3411c80e4fdSJerin Jacob 		RTE_PTYPE_L4_FRAG,
3421c80e4fdSJerin Jacob 	};
343398a1be1SJerin Jacob 	static const uint32_t ptypes_tunnel[] = {
3441c80e4fdSJerin Jacob 		RTE_PTYPE_TUNNEL_GRE,
3451c80e4fdSJerin Jacob 		RTE_PTYPE_TUNNEL_GENEVE,
3461c80e4fdSJerin Jacob 		RTE_PTYPE_TUNNEL_VXLAN,
3471c80e4fdSJerin Jacob 		RTE_PTYPE_TUNNEL_NVGRE,
3481c80e4fdSJerin Jacob 	};
3491c80e4fdSJerin Jacob 	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
3501c80e4fdSJerin Jacob 
351398a1be1SJerin Jacob 	copied = sizeof(ptypes_common);
352398a1be1SJerin Jacob 	memcpy(ptypes, ptypes_common, copied);
353398a1be1SJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
354398a1be1SJerin Jacob 		memcpy((char *)ptypes + copied, ptypes_tunnel,
355398a1be1SJerin Jacob 			sizeof(ptypes_tunnel));
356398a1be1SJerin Jacob 		copied += sizeof(ptypes_tunnel);
3571c80e4fdSJerin Jacob 	}
3581c80e4fdSJerin Jacob 
3591c80e4fdSJerin Jacob 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
3601c80e4fdSJerin Jacob 
3615e64c812SPavan Nikhilesh 	/* All Ptypes are supported in all Rx functions. */
3625e64c812SPavan Nikhilesh 	return ptypes;
3631c80e4fdSJerin Jacob }
3641c80e4fdSJerin Jacob 
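/*
 * Ask the firmware to clear the port counters and the per-queue counters
 * of the primary VF and every attached secondary VF.
 */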
3659970a9adSIgor Romanov static int
366684fa771SJerin Jacob nicvf_dev_stats_reset(struct rte_eth_dev *dev)
367684fa771SJerin Jacob {
368684fa771SJerin Jacob 	int i;
368684fa771SJerin Jacob 	int i, q;
370684fa771SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
37121e3fb00SKamil Rytarowski 	uint16_t rx_start, rx_end;
37221e3fb00SKamil Rytarowski 	uint16_t tx_start, tx_end;
3739970a9adSIgor Romanov 	int ret;
374684fa771SJerin Jacob 
37521e3fb00SKamil Rytarowski 	/* Reset all primary nic counters */
37621e3fb00SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
37721e3fb00SKamil Rytarowski 	for (i = rx_start; i <= rx_end; i++)
378684fa771SJerin Jacob 		rxqs |= (0x3 << (i * 2));
37921e3fb00SKamil Rytarowski 
38021e3fb00SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
38121e3fb00SKamil Rytarowski 	for (i = tx_start; i <= tx_end; i++)
382684fa771SJerin Jacob 		txqs |= (0x3 << (i * 2));
383684fa771SJerin Jacob 
3849970a9adSIgor Romanov 	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
3859970a9adSIgor Romanov 	if (ret != 0)
3869970a9adSIgor Romanov 		return ret;
38721e3fb00SKamil Rytarowski 
38821e3fb00SKamil Rytarowski 	/* Reset secondary nic queue counters */
38921e3fb00SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
39021e3fb00SKamil Rytarowski 		struct nicvf *snic = nic->snicvf[i];
39121e3fb00SKamil Rytarowski 		if (snic == NULL)
39221e3fb00SKamil Rytarowski 			break;
39321e3fb00SKamil Rytarowski 
39421e3fb00SKamil Rytarowski 		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
39521e3fb00SKamil Rytarowski 		for (q = rx_start; q <= rx_end; q++)
39621e3fb00SKamil Rytarowski 			rxqs |= (0x3 << ((q % MAX_CMP_QUEUES_PER_QS) * 2));
39721e3fb00SKamil Rytarowski 
39821e3fb00SKamil Rytarowski 		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
39921e3fb00SKamil Rytarowski 		for (q = tx_start; q <= tx_end; q++)
40021e3fb00SKamil Rytarowski 			txqs |= (0x3 << ((q % MAX_SND_QUEUES_PER_QS) * 2));
40121e3fb00SKamil Rytarowski 
4029970a9adSIgor Romanov 		ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
4039970a9adSIgor Romanov 		if (ret != 0)
4049970a9adSIgor Romanov 			return ret;
40521e3fb00SKamil Rytarowski 	}
4069970a9adSIgor Romanov 
4079970a9adSIgor Romanov 	return 0;
408684fa771SJerin Jacob }
409684fa771SJerin Jacob 
4106eae36eaSJerin Jacob /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
4119039c812SAndrew Rybchenko static int
4126eae36eaSJerin Jacob nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
4136eae36eaSJerin Jacob {
4149039c812SAndrew Rybchenko 	return 0;
4156eae36eaSJerin Jacob }
4166eae36eaSJerin Jacob 
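/* Translate ethdev ETH_RSS_* hash flags into the NIC's RSS configuration bits. */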
41743362c6aSJerin Jacob static inline uint64_t
41843362c6aSJerin Jacob nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
41943362c6aSJerin Jacob {
42043362c6aSJerin Jacob 	uint64_t nic_rss = 0;
42143362c6aSJerin Jacob 
42243362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_IPV4)
42343362c6aSJerin Jacob 		nic_rss |= RSS_IP_ENA;
42443362c6aSJerin Jacob 
42543362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_IPV6)
42643362c6aSJerin Jacob 		nic_rss |= RSS_IP_ENA;
42743362c6aSJerin Jacob 
42843362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
42943362c6aSJerin Jacob 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
43043362c6aSJerin Jacob 
43143362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
43243362c6aSJerin Jacob 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
43343362c6aSJerin Jacob 
43443362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
43543362c6aSJerin Jacob 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
43643362c6aSJerin Jacob 
43743362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
43843362c6aSJerin Jacob 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
43943362c6aSJerin Jacob 
44043362c6aSJerin Jacob 	if (ethdev_rss & ETH_RSS_PORT)
44143362c6aSJerin Jacob 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
44243362c6aSJerin Jacob 
44343362c6aSJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
44443362c6aSJerin Jacob 		if (ethdev_rss & ETH_RSS_VXLAN)
44543362c6aSJerin Jacob 			nic_rss |= RSS_TUN_VXLAN_ENA;
44643362c6aSJerin Jacob 
44743362c6aSJerin Jacob 		if (ethdev_rss & ETH_RSS_GENEVE)
44843362c6aSJerin Jacob 			nic_rss |= RSS_TUN_GENEVE_ENA;
44943362c6aSJerin Jacob 
45043362c6aSJerin Jacob 		if (ethdev_rss & ETH_RSS_NVGRE)
45143362c6aSJerin Jacob 			nic_rss |= RSS_TUN_NVGRE_ENA;
45243362c6aSJerin Jacob 	}
45343362c6aSJerin Jacob 
45443362c6aSJerin Jacob 	return nic_rss;
45543362c6aSJerin Jacob }
45643362c6aSJerin Jacob 
45743362c6aSJerin Jacob static inline uint64_t
45843362c6aSJerin Jacob nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
45943362c6aSJerin Jacob {
46043362c6aSJerin Jacob 	uint64_t ethdev_rss = 0;
46143362c6aSJerin Jacob 
46243362c6aSJerin Jacob 	if (nic_rss & RSS_IP_ENA)
46343362c6aSJerin Jacob 		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
46443362c6aSJerin Jacob 
46543362c6aSJerin Jacob 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
46643362c6aSJerin Jacob 		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
46743362c6aSJerin Jacob 				ETH_RSS_NONFRAG_IPV6_TCP);
46843362c6aSJerin Jacob 
46943362c6aSJerin Jacob 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
47043362c6aSJerin Jacob 		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
47143362c6aSJerin Jacob 				ETH_RSS_NONFRAG_IPV6_UDP);
47243362c6aSJerin Jacob 
47343362c6aSJerin Jacob 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
47443362c6aSJerin Jacob 		ethdev_rss |= ETH_RSS_PORT;
47543362c6aSJerin Jacob 
47643362c6aSJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
47743362c6aSJerin Jacob 		if (nic_rss & RSS_TUN_VXLAN_ENA)
47843362c6aSJerin Jacob 			ethdev_rss |= ETH_RSS_VXLAN;
47943362c6aSJerin Jacob 
48043362c6aSJerin Jacob 		if (nic_rss & RSS_TUN_GENEVE_ENA)
48143362c6aSJerin Jacob 			ethdev_rss |= ETH_RSS_GENEVE;
48243362c6aSJerin Jacob 
48343362c6aSJerin Jacob 		if (nic_rss & RSS_TUN_NVGRE_ENA)
48443362c6aSJerin Jacob 			ethdev_rss |= ETH_RSS_NVGRE;
48543362c6aSJerin Jacob 	}
48643362c6aSJerin Jacob 	return ethdev_rss;
48743362c6aSJerin Jacob }
48843362c6aSJerin Jacob 
48943362c6aSJerin Jacob static int
49043362c6aSJerin Jacob nicvf_dev_reta_query(struct rte_eth_dev *dev,
49143362c6aSJerin Jacob 		     struct rte_eth_rss_reta_entry64 *reta_conf,
49243362c6aSJerin Jacob 		     uint16_t reta_size)
49343362c6aSJerin Jacob {
49443362c6aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
49543362c6aSJerin Jacob 	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
49643362c6aSJerin Jacob 	int ret, i, j;
49743362c6aSJerin Jacob 
49843362c6aSJerin Jacob 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
49943362c6aSJerin Jacob 		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
50043362c6aSJerin Jacob 			"(%d) doesn't match the size supported by hardware "
50143362c6aSJerin Jacob 			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
50243362c6aSJerin Jacob 		return -EINVAL;
50343362c6aSJerin Jacob 	}
50443362c6aSJerin Jacob 
50543362c6aSJerin Jacob 	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
50643362c6aSJerin Jacob 	if (ret)
50743362c6aSJerin Jacob 		return ret;
50843362c6aSJerin Jacob 
50943362c6aSJerin Jacob 	/* Copy RETA table */
51043362c6aSJerin Jacob 	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
51143362c6aSJerin Jacob 		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
51243362c6aSJerin Jacob 			if ((reta_conf[i].mask >> j) & 0x01)
51343362c6aSJerin Jacob 				reta_conf[i].reta[j] = tbl[j];
51443362c6aSJerin Jacob 	}
51543362c6aSJerin Jacob 
51643362c6aSJerin Jacob 	return 0;
51743362c6aSJerin Jacob }
51843362c6aSJerin Jacob 
51943362c6aSJerin Jacob static int
52043362c6aSJerin Jacob nicvf_dev_reta_update(struct rte_eth_dev *dev,
52143362c6aSJerin Jacob 		      struct rte_eth_rss_reta_entry64 *reta_conf,
52243362c6aSJerin Jacob 		      uint16_t reta_size)
52343362c6aSJerin Jacob {
52443362c6aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
52543362c6aSJerin Jacob 	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
52643362c6aSJerin Jacob 	int ret, i, j;
52743362c6aSJerin Jacob 
52843362c6aSJerin Jacob 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
52943362c6aSJerin Jacob 		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
53043362c6aSJerin Jacob 			"(%d) doesn't match the size supported by hardware "
53143362c6aSJerin Jacob 			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
53243362c6aSJerin Jacob 		return -EINVAL;
53343362c6aSJerin Jacob 	}
53443362c6aSJerin Jacob 
53543362c6aSJerin Jacob 	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
53643362c6aSJerin Jacob 	if (ret)
53743362c6aSJerin Jacob 		return ret;
53843362c6aSJerin Jacob 
53943362c6aSJerin Jacob 	/* Copy RETA table */
54043362c6aSJerin Jacob 	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
54143362c6aSJerin Jacob 		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
54243362c6aSJerin Jacob 			if ((reta_conf[i].mask >> j) & 0x01)
54343362c6aSJerin Jacob 				tbl[j] = reta_conf[i].reta[j];
54443362c6aSJerin Jacob 	}
54543362c6aSJerin Jacob 
54643362c6aSJerin Jacob 	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
54743362c6aSJerin Jacob }
54843362c6aSJerin Jacob 
54943362c6aSJerin Jacob static int
55043362c6aSJerin Jacob nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
55143362c6aSJerin Jacob 			    struct rte_eth_rss_conf *rss_conf)
55243362c6aSJerin Jacob {
55343362c6aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
55443362c6aSJerin Jacob 
55543362c6aSJerin Jacob 	if (rss_conf->rss_key)
55643362c6aSJerin Jacob 		nicvf_rss_get_key(nic, rss_conf->rss_key);
55743362c6aSJerin Jacob 
55843362c6aSJerin Jacob 	rss_conf->rss_key_len =  RSS_HASH_KEY_BYTE_SIZE;
55943362c6aSJerin Jacob 	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
56043362c6aSJerin Jacob 	return 0;
56143362c6aSJerin Jacob }
56243362c6aSJerin Jacob 
56343362c6aSJerin Jacob static int
56443362c6aSJerin Jacob nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
56543362c6aSJerin Jacob 			  struct rte_eth_rss_conf *rss_conf)
56643362c6aSJerin Jacob {
56743362c6aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
56843362c6aSJerin Jacob 	uint64_t nic_rss;
56943362c6aSJerin Jacob 
57043362c6aSJerin Jacob 	if (rss_conf->rss_key &&
57143362c6aSJerin Jacob 		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
57243362c6aSJerin Jacob 		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
57343362c6aSJerin Jacob 				rss_conf->rss_key_len);
57443362c6aSJerin Jacob 		return -EINVAL;
57543362c6aSJerin Jacob 	}
57643362c6aSJerin Jacob 
57743362c6aSJerin Jacob 	if (rss_conf->rss_key)
57843362c6aSJerin Jacob 		nicvf_rss_set_key(nic, rss_conf->rss_key);
57943362c6aSJerin Jacob 
58043362c6aSJerin Jacob 	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
58143362c6aSJerin Jacob 	nicvf_rss_set_cfg(nic, nic_rss);
58243362c6aSJerin Jacob 	return 0;
58343362c6aSJerin Jacob }
58443362c6aSJerin Jacob 
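/*
 * Reserve and zero DMA memory for a completion queue ring. The memzone is
 * sized for the maximum ring (CMP_QUEUE_SZ_MAX) while qlen_mask reflects
 * the requested descriptor count.
 */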
585aa0d976eSJerin Jacob static int
5866d3cbd56SKamil Rytarowski nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
5876d3cbd56SKamil Rytarowski 		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
588aa0d976eSJerin Jacob {
589aa0d976eSJerin Jacob 	const struct rte_memzone *rz;
590d1d861efSKamil Rytarowski 	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
591aa0d976eSJerin Jacob 
592b7004ab2SKamil Rytarowski 	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
593b7004ab2SKamil Rytarowski 				      nicvf_netdev_qidx(nic, qidx), ring_size,
594aa0d976eSJerin Jacob 				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
595aa0d976eSJerin Jacob 	if (rz == NULL) {
596aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
597aa0d976eSJerin Jacob 		return -ENOMEM;
598aa0d976eSJerin Jacob 	}
599aa0d976eSJerin Jacob 
600aa0d976eSJerin Jacob 	memset(rz->addr, 0, ring_size);
601aa0d976eSJerin Jacob 
602f17ca787SThomas Monjalon 	rxq->phys = rz->iova;
603aa0d976eSJerin Jacob 	rxq->desc = rz->addr;
604aa0d976eSJerin Jacob 	rxq->qlen_mask = desc_cnt - 1;
605aa0d976eSJerin Jacob 
606aa0d976eSJerin Jacob 	return 0;
607aa0d976eSJerin Jacob }
608aa0d976eSJerin Jacob 
6093f3c6f97SJerin Jacob static int
6106d3cbd56SKamil Rytarowski nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
6116d3cbd56SKamil Rytarowski 		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
6123f3c6f97SJerin Jacob {
6133f3c6f97SJerin Jacob 	const struct rte_memzone *rz;
614d1d861efSKamil Rytarowski 	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
6153f3c6f97SJerin Jacob 
616b7004ab2SKamil Rytarowski 	rz = rte_eth_dma_zone_reserve(dev, "sq",
617b7004ab2SKamil Rytarowski 				      nicvf_netdev_qidx(nic, qidx), ring_size,
6183f3c6f97SJerin Jacob 				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
6193f3c6f97SJerin Jacob 	if (rz == NULL) {
6203f3c6f97SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
6213f3c6f97SJerin Jacob 		return -ENOMEM;
6223f3c6f97SJerin Jacob 	}
6233f3c6f97SJerin Jacob 
6243f3c6f97SJerin Jacob 	memset(rz->addr, 0, ring_size);
6253f3c6f97SJerin Jacob 
626f17ca787SThomas Monjalon 	sq->phys = rz->iova;
6273f3c6f97SJerin Jacob 	sq->desc = rz->addr;
6283f3c6f97SJerin Jacob 	sq->qlen_mask = desc_cnt - 1;
6293f3c6f97SJerin Jacob 
6303f3c6f97SJerin Jacob 	return 0;
6313f3c6f97SJerin Jacob }
6323f3c6f97SJerin Jacob 
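/*
 * Allocate the receive buffer descriptor ring (one per queue set) and
 * cache its status and doorbell register addresses.
 */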
6337413feeeSJerin Jacob static int
6346d3cbd56SKamil Rytarowski nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
6356d3cbd56SKamil Rytarowski 		      uint32_t desc_cnt, uint32_t buffsz)
6367413feeeSJerin Jacob {
6377413feeeSJerin Jacob 	struct nicvf_rbdr *rbdr;
6387413feeeSJerin Jacob 	const struct rte_memzone *rz;
6397413feeeSJerin Jacob 	uint32_t ring_size;
6407413feeeSJerin Jacob 
6417413feeeSJerin Jacob 	assert(nic->rbdr == NULL);
6427413feeeSJerin Jacob 	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
6437413feeeSJerin Jacob 				  RTE_CACHE_LINE_SIZE, nic->node);
6447413feeeSJerin Jacob 	if (rbdr == NULL) {
6457413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
6467413feeeSJerin Jacob 		return -ENOMEM;
6477413feeeSJerin Jacob 	}
6487413feeeSJerin Jacob 
649d1d861efSKamil Rytarowski 	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
650b7004ab2SKamil Rytarowski 	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
651b7004ab2SKamil Rytarowski 				      nicvf_netdev_qidx(nic, 0), ring_size,
6527413feeeSJerin Jacob 				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
6537413feeeSJerin Jacob 	if (rz == NULL) {
6547413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
6557413feeeSJerin Jacob 		return -ENOMEM;
6567413feeeSJerin Jacob 	}
6577413feeeSJerin Jacob 
6587413feeeSJerin Jacob 	memset(rz->addr, 0, ring_size);
6597413feeeSJerin Jacob 
660f17ca787SThomas Monjalon 	rbdr->phys = rz->iova;
6617413feeeSJerin Jacob 	rbdr->tail = 0;
6627413feeeSJerin Jacob 	rbdr->next_tail = 0;
6637413feeeSJerin Jacob 	rbdr->desc = rz->addr;
6647413feeeSJerin Jacob 	rbdr->buffsz = buffsz;
6657413feeeSJerin Jacob 	rbdr->qlen_mask = desc_cnt - 1;
6667413feeeSJerin Jacob 	rbdr->rbdr_status =
6677413feeeSJerin Jacob 		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
6687413feeeSJerin Jacob 	rbdr->rbdr_door =
6697413feeeSJerin Jacob 		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
6707413feeeSJerin Jacob 
6717413feeeSJerin Jacob 	nic->rbdr = rbdr;
6727413feeeSJerin Jacob 	return 0;
6737413feeeSJerin Jacob }
6747413feeeSJerin Jacob 
6757413feeeSJerin Jacob static void
67621e3fb00SKamil Rytarowski nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
677df6e0a06SSantosh Shukla 			nicvf_iova_addr_t phy)
6787413feeeSJerin Jacob {
6797413feeeSJerin Jacob 	uint16_t qidx;
6807413feeeSJerin Jacob 	void *obj;
6817413feeeSJerin Jacob 	struct nicvf_rxq *rxq;
68221e3fb00SKamil Rytarowski 	uint16_t rx_start, rx_end;
6837413feeeSJerin Jacob 
68421e3fb00SKamil Rytarowski 	/* Get queue ranges for this VF */
68521e3fb00SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
68621e3fb00SKamil Rytarowski 
68721e3fb00SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
6886d3cbd56SKamil Rytarowski 		rxq = dev->data->rx_queues[qidx];
6897413feeeSJerin Jacob 		if (rxq->precharge_cnt) {
6907413feeeSJerin Jacob 			obj = (void *)nicvf_mbuff_phy2virt(phy,
6917413feeeSJerin Jacob 							   rxq->mbuf_phys_off);
6927413feeeSJerin Jacob 			rte_mempool_put(rxq->pool, obj);
6937413feeeSJerin Jacob 			rxq->precharge_cnt--;
6947413feeeSJerin Jacob 			break;
6957413feeeSJerin Jacob 		}
6967413feeeSJerin Jacob 	}
6977413feeeSJerin Jacob }
6987413feeeSJerin Jacob 
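/*
 * Return every buffer still held by the RBDR (head..tail) to the mempool
 * of an Rx queue that pre-charged it.
 */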
6997413feeeSJerin Jacob static inline void
7006d3cbd56SKamil Rytarowski nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
7017413feeeSJerin Jacob {
7027413feeeSJerin Jacob 	uint32_t qlen_mask, head;
7037413feeeSJerin Jacob 	struct rbdr_entry_t *entry;
7047413feeeSJerin Jacob 	struct nicvf_rbdr *rbdr = nic->rbdr;
7057413feeeSJerin Jacob 
7067413feeeSJerin Jacob 	qlen_mask = rbdr->qlen_mask;
7077413feeeSJerin Jacob 	head = rbdr->head;
7087413feeeSJerin Jacob 	while (head != rbdr->tail) {
7097413feeeSJerin Jacob 		entry = rbdr->desc + head;
7106d3cbd56SKamil Rytarowski 		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
7117413feeeSJerin Jacob 		head++;
7127413feeeSJerin Jacob 		head = head & qlen_mask;
7137413feeeSJerin Jacob 	}
7147413feeeSJerin Jacob }
7157413feeeSJerin Jacob 
7163f3c6f97SJerin Jacob static inline void
7173f3c6f97SJerin Jacob nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
7183f3c6f97SJerin Jacob {
7193f3c6f97SJerin Jacob 	uint32_t head;
7203f3c6f97SJerin Jacob 
7213f3c6f97SJerin Jacob 	head = txq->head;
7223f3c6f97SJerin Jacob 	while (head != txq->tail) {
7233f3c6f97SJerin Jacob 		if (txq->txbuffs[head]) {
7243f3c6f97SJerin Jacob 			rte_pktmbuf_free_seg(txq->txbuffs[head]);
7253f3c6f97SJerin Jacob 			txq->txbuffs[head] = NULL;
7263f3c6f97SJerin Jacob 		}
7273f3c6f97SJerin Jacob 		head++;
7283f3c6f97SJerin Jacob 		head = head & txq->qlen_mask;
7293f3c6f97SJerin Jacob 	}
7303f3c6f97SJerin Jacob }
7313f3c6f97SJerin Jacob 
7323f3c6f97SJerin Jacob static void
7333f3c6f97SJerin Jacob nicvf_tx_queue_reset(struct nicvf_txq *txq)
7343f3c6f97SJerin Jacob {
7353f3c6f97SJerin Jacob 	uint32_t txq_desc_cnt = txq->qlen_mask + 1;
7363f3c6f97SJerin Jacob 
7373f3c6f97SJerin Jacob 	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
7383f3c6f97SJerin Jacob 	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
7393f3c6f97SJerin Jacob 	txq->tail = 0;
7403f3c6f97SJerin Jacob 	txq->head = 0;
7413f3c6f97SJerin Jacob 	txq->xmit_bufs = 0;
7423f3c6f97SJerin Jacob }
7433f3c6f97SJerin Jacob 
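/* Configure and start one send queue of the given VF; reclaim it on failure. */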
744fc1f6c62SJerin Jacob static inline int
74571e76186SKamil Rytarowski nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
74671e76186SKamil Rytarowski 			uint16_t qidx)
747fc1f6c62SJerin Jacob {
748fc1f6c62SJerin Jacob 	struct nicvf_txq *txq;
749fc1f6c62SJerin Jacob 	int ret;
750fc1f6c62SJerin Jacob 
75171e76186SKamil Rytarowski 	assert(qidx < MAX_SND_QUEUES_PER_QS);
75271e76186SKamil Rytarowski 
75371e76186SKamil Rytarowski 	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
75471e76186SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STARTED)
755fc1f6c62SJerin Jacob 		return 0;
756fc1f6c62SJerin Jacob 
75771e76186SKamil Rytarowski 	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
758fc1f6c62SJerin Jacob 	txq->pool = NULL;
75971e76186SKamil Rytarowski 	ret = nicvf_qset_sq_config(nic, qidx, txq);
760fc1f6c62SJerin Jacob 	if (ret) {
76171e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
76271e76186SKamil Rytarowski 			     nic->vf_id, qidx, ret);
763fc1f6c62SJerin Jacob 		goto config_sq_error;
764fc1f6c62SJerin Jacob 	}
765fc1f6c62SJerin Jacob 
76671e76186SKamil Rytarowski 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
76771e76186SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STARTED;
768fc1f6c62SJerin Jacob 	return ret;
769fc1f6c62SJerin Jacob 
770fc1f6c62SJerin Jacob config_sq_error:
77171e76186SKamil Rytarowski 	nicvf_qset_sq_reclaim(nic, qidx);
772fc1f6c62SJerin Jacob 	return ret;
773fc1f6c62SJerin Jacob }
774fc1f6c62SJerin Jacob 
775fc1f6c62SJerin Jacob static inline int
776627d4ba2SKamil Rytarowski nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
777627d4ba2SKamil Rytarowski 		       uint16_t qidx)
778fc1f6c62SJerin Jacob {
779fc1f6c62SJerin Jacob 	struct nicvf_txq *txq;
780fc1f6c62SJerin Jacob 	int ret;
781fc1f6c62SJerin Jacob 
782627d4ba2SKamil Rytarowski 	assert(qidx < MAX_SND_QUEUES_PER_QS);
783627d4ba2SKamil Rytarowski 
784627d4ba2SKamil Rytarowski 	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
785627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED)
786fc1f6c62SJerin Jacob 		return 0;
787fc1f6c62SJerin Jacob 
788627d4ba2SKamil Rytarowski 	ret = nicvf_qset_sq_reclaim(nic, qidx);
789fc1f6c62SJerin Jacob 	if (ret)
790627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
791627d4ba2SKamil Rytarowski 			     nic->vf_id, qidx, ret);
792fc1f6c62SJerin Jacob 
793627d4ba2SKamil Rytarowski 	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
794fc1f6c62SJerin Jacob 	nicvf_tx_queue_release_mbufs(txq);
795fc1f6c62SJerin Jacob 	nicvf_tx_queue_reset(txq);
796fc1f6c62SJerin Jacob 
797627d4ba2SKamil Rytarowski 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
798627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
799fc1f6c62SJerin Jacob 	return ret;
800fc1f6c62SJerin Jacob }
80186b4eb42SJerin Jacob 
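/* Count the started Rx queues and configure CPI mapping (CPI_ALG_NONE) via mailbox. */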
80286b4eb42SJerin Jacob static inline int
80386b4eb42SJerin Jacob nicvf_configure_cpi(struct rte_eth_dev *dev)
80486b4eb42SJerin Jacob {
80586b4eb42SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
80686b4eb42SJerin Jacob 	uint16_t qidx, qcnt;
80786b4eb42SJerin Jacob 	int ret;
80886b4eb42SJerin Jacob 
80986b4eb42SJerin Jacob 	/* Count started rx queues */
810394014bcSKamil Rytarowski 	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
81186b4eb42SJerin Jacob 		if (dev->data->rx_queue_state[qidx] ==
81286b4eb42SJerin Jacob 		    RTE_ETH_QUEUE_STATE_STARTED)
81386b4eb42SJerin Jacob 			qcnt++;
81486b4eb42SJerin Jacob 
81586b4eb42SJerin Jacob 	nic->cpi_alg = CPI_ALG_NONE;
81686b4eb42SJerin Jacob 	ret = nicvf_mbox_config_cpi(nic, qcnt);
81786b4eb42SJerin Jacob 	if (ret)
81886b4eb42SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
81986b4eb42SJerin Jacob 
82086b4eb42SJerin Jacob 	return ret;
82186b4eb42SJerin Jacob }
82286b4eb42SJerin Jacob 
8237413feeeSJerin Jacob static inline int
8247413feeeSJerin Jacob nicvf_configure_rss(struct rte_eth_dev *dev)
8257413feeeSJerin Jacob {
8267413feeeSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
8277413feeeSJerin Jacob 	uint64_t rsshf;
8287413feeeSJerin Jacob 	int ret = -EINVAL;
8297413feeeSJerin Jacob 
8307413feeeSJerin Jacob 	rsshf = nicvf_rss_ethdev_to_nic(nic,
8317413feeeSJerin Jacob 			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
8327413feeeSJerin Jacob 	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
8337413feeeSJerin Jacob 		    dev->data->dev_conf.rxmode.mq_mode,
8346d3cbd56SKamil Rytarowski 		    dev->data->nb_rx_queues,
8356d3cbd56SKamil Rytarowski 		    dev->data->dev_conf.lpbk_mode, rsshf);
8367413feeeSJerin Jacob 
8377413feeeSJerin Jacob 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
8387413feeeSJerin Jacob 		ret = nicvf_rss_term(nic);
8397413feeeSJerin Jacob 	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
8406d3cbd56SKamil Rytarowski 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
8417413feeeSJerin Jacob 	if (ret)
8427413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
8437413feeeSJerin Jacob 
8447413feeeSJerin Jacob 	return ret;
8457413feeeSJerin Jacob }
8467413feeeSJerin Jacob 
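/*
 * Build a default RETA that round-robins over the currently started Rx
 * queues and program it into hardware.
 */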
84786b4eb42SJerin Jacob static int
84886b4eb42SJerin Jacob nicvf_configure_rss_reta(struct rte_eth_dev *dev)
84986b4eb42SJerin Jacob {
85086b4eb42SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
85186b4eb42SJerin Jacob 	unsigned int idx, qmap_size;
85286b4eb42SJerin Jacob 	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
85386b4eb42SJerin Jacob 	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
85486b4eb42SJerin Jacob 
85586b4eb42SJerin Jacob 	if (nic->cpi_alg != CPI_ALG_NONE)
85686b4eb42SJerin Jacob 		return -EINVAL;
85786b4eb42SJerin Jacob 
85886b4eb42SJerin Jacob 	/* Prepare queue map */
85986b4eb42SJerin Jacob 	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
86086b4eb42SJerin Jacob 		if (dev->data->rx_queue_state[idx] ==
86186b4eb42SJerin Jacob 				RTE_ETH_QUEUE_STATE_STARTED)
86286b4eb42SJerin Jacob 			qmap[qmap_size++] = idx;
86386b4eb42SJerin Jacob 	}
86486b4eb42SJerin Jacob 
86586b4eb42SJerin Jacob 	/* Update default RSS RETA */
86686b4eb42SJerin Jacob 	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
86786b4eb42SJerin Jacob 		default_reta[idx] = qmap[idx % qmap_size];
86886b4eb42SJerin Jacob 
86986b4eb42SJerin Jacob 	return nicvf_rss_reta_update(nic, default_reta,
87086b4eb42SJerin Jacob 				     NIC_MAX_RSS_IDR_TBL_SIZE);
87186b4eb42SJerin Jacob }
87286b4eb42SJerin Jacob 
8733f3c6f97SJerin Jacob static void
8743f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(void *sq)
8753f3c6f97SJerin Jacob {
8763f3c6f97SJerin Jacob 	struct nicvf_txq *txq;
8773f3c6f97SJerin Jacob 
8783f3c6f97SJerin Jacob 	PMD_INIT_FUNC_TRACE();
8793f3c6f97SJerin Jacob 
8803f3c6f97SJerin Jacob 	txq = (struct nicvf_txq *)sq;
8813f3c6f97SJerin Jacob 	if (txq) {
8823f3c6f97SJerin Jacob 		if (txq->txbuffs != NULL) {
8833f3c6f97SJerin Jacob 			nicvf_tx_queue_release_mbufs(txq);
8843f3c6f97SJerin Jacob 			rte_free(txq->txbuffs);
8853f3c6f97SJerin Jacob 			txq->txbuffs = NULL;
8863f3c6f97SJerin Jacob 		}
8873f3c6f97SJerin Jacob 		rte_free(txq);
8883f3c6f97SJerin Jacob 	}
8893f3c6f97SJerin Jacob }
8903f3c6f97SJerin Jacob 
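/*
 * Pick the Tx burst callback: multi-segment when any queue enables
 * DEV_TX_OFFLOAD_MULTI_SEGS, single-segment otherwise.
 */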
8917413feeeSJerin Jacob static void
8927413feeeSJerin Jacob nicvf_set_tx_function(struct rte_eth_dev *dev)
8937413feeeSJerin Jacob {
894d9014196SFerruh Yigit 	struct nicvf_txq *txq = NULL;
8957413feeeSJerin Jacob 	size_t i;
8967413feeeSJerin Jacob 	bool multiseg = false;
8977413feeeSJerin Jacob 
8987413feeeSJerin Jacob 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
8997413feeeSJerin Jacob 		txq = dev->data->tx_queues[i];
900c97da2cbSMaciej Czekaj 		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
9017413feeeSJerin Jacob 			multiseg = true;
9027413feeeSJerin Jacob 			break;
9037413feeeSJerin Jacob 		}
9047413feeeSJerin Jacob 	}
9057413feeeSJerin Jacob 
9067413feeeSJerin Jacob 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
9077413feeeSJerin Jacob 	if (multiseg) {
9087413feeeSJerin Jacob 		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
9097413feeeSJerin Jacob 		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
9107413feeeSJerin Jacob 	} else {
9117413feeeSJerin Jacob 		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
9127413feeeSJerin Jacob 		dev->tx_pkt_burst = nicvf_xmit_pkts;
9137413feeeSJerin Jacob 	}
9147413feeeSJerin Jacob 
915d9014196SFerruh Yigit 	if (!txq)
916d9014196SFerruh Yigit 		return;
917d9014196SFerruh Yigit 
9187413feeeSJerin Jacob 	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
9197413feeeSJerin Jacob 		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
9207413feeeSJerin Jacob 	else
9217413feeeSJerin Jacob 		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
9227413feeeSJerin Jacob }
9237413feeeSJerin Jacob 
9247413feeeSJerin Jacob static void
9257413feeeSJerin Jacob nicvf_set_rx_function(struct rte_eth_dev *dev)
9267413feeeSJerin Jacob {
9275e64c812SPavan Nikhilesh 	struct nicvf *nic = nicvf_pmd_priv(dev);
9285e64c812SPavan Nikhilesh 
929d3bf2564SRakesh Kudurumalla 	const eth_rx_burst_t rx_burst_func[2][2][2] = {
930d3bf2564SRakesh Kudurumalla 	/* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
931d3bf2564SRakesh Kudurumalla 		[0][0][0] = nicvf_recv_pkts_no_offload,
932d3bf2564SRakesh Kudurumalla 		[0][0][1] = nicvf_recv_pkts_vlan_strip,
933d3bf2564SRakesh Kudurumalla 		[0][1][0] = nicvf_recv_pkts_cksum,
934d3bf2564SRakesh Kudurumalla 		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
935d3bf2564SRakesh Kudurumalla 		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
936d3bf2564SRakesh Kudurumalla 		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
937d3bf2564SRakesh Kudurumalla 		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
938d3bf2564SRakesh Kudurumalla 		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
9395e64c812SPavan Nikhilesh 	};
9405e64c812SPavan Nikhilesh 
9415e64c812SPavan Nikhilesh 	dev->rx_pkt_burst =
942d3bf2564SRakesh Kudurumalla 		rx_burst_func[dev->data->scattered_rx]
943d3bf2564SRakesh Kudurumalla 			[nic->offload_cksum][nic->vlan_strip];
9447413feeeSJerin Jacob }
9457413feeeSJerin Jacob 
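/*
 * ethdev tx_queue_setup handler: map the global queue index onto the owning
 * primary/secondary VF, validate parameters, allocate the txq structure,
 * software ring and SQ ring, and leave the queue in the STOPPED state.
 */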
9463f3c6f97SJerin Jacob static int
9473f3c6f97SJerin Jacob nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
9483f3c6f97SJerin Jacob 			 uint16_t nb_desc, unsigned int socket_id,
9493f3c6f97SJerin Jacob 			 const struct rte_eth_txconf *tx_conf)
9503f3c6f97SJerin Jacob {
9513f3c6f97SJerin Jacob 	uint16_t tx_free_thresh;
952c97da2cbSMaciej Czekaj 	bool is_single_pool;
9533f3c6f97SJerin Jacob 	struct nicvf_txq *txq;
9543f3c6f97SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
955a4996bd8SWei Dai 	uint64_t offloads;
9563f3c6f97SJerin Jacob 
9573f3c6f97SJerin Jacob 	PMD_INIT_FUNC_TRACE();
9583f3c6f97SJerin Jacob 
95921e3fb00SKamil Rytarowski 	if (qidx >= MAX_SND_QUEUES_PER_QS)
96021e3fb00SKamil Rytarowski 		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
96121e3fb00SKamil Rytarowski 
96221e3fb00SKamil Rytarowski 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
96321e3fb00SKamil Rytarowski 
9643f3c6f97SJerin Jacob 	/* Socket id check */
9653f3c6f97SJerin Jacob 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
9663f3c6f97SJerin Jacob 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
9673f3c6f97SJerin Jacob 		nic->node, socket_id);
9683f3c6f97SJerin Jacob 
9693f3c6f97SJerin Jacob 	/* Tx deferred start is not supported */
9703f3c6f97SJerin Jacob 	if (tx_conf->tx_deferred_start) {
9713f3c6f97SJerin Jacob 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
9723f3c6f97SJerin Jacob 		return -EINVAL;
9733f3c6f97SJerin Jacob 	}
9743f3c6f97SJerin Jacob 
9753f3c6f97SJerin Jacob 	/* Roundup nb_desc to available qsize and validate max number of desc */
9763f3c6f97SJerin Jacob 	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
9773f3c6f97SJerin Jacob 	if (nb_desc == 0) {
9783f3c6f97SJerin Jacob 		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
9793f3c6f97SJerin Jacob 		return -EINVAL;
9803f3c6f97SJerin Jacob 	}
9813f3c6f97SJerin Jacob 
9823f3c6f97SJerin Jacob 	/* Validate tx_free_thresh */
9833f3c6f97SJerin Jacob 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
9843f3c6f97SJerin Jacob 				tx_conf->tx_free_thresh :
9853f3c6f97SJerin Jacob 				NICVF_DEFAULT_TX_FREE_THRESH);
9863f3c6f97SJerin Jacob 
9873f3c6f97SJerin Jacob 	if (tx_free_thresh > (nb_desc) ||
9883f3c6f97SJerin Jacob 		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
9893f3c6f97SJerin Jacob 		PMD_INIT_LOG(ERR,
9903f3c6f97SJerin Jacob 			"tx_free_thresh must be less than the number of TX "
9913f3c6f97SJerin Jacob 			"descriptors. (tx_free_thresh=%u port=%d "
9923f3c6f97SJerin Jacob 			"queue=%d)", (unsigned int)tx_free_thresh,
9933f3c6f97SJerin Jacob 			(int)dev->data->port_id, (int)qidx);
9943f3c6f97SJerin Jacob 		return -EINVAL;
9953f3c6f97SJerin Jacob 	}
9963f3c6f97SJerin Jacob 
9973f3c6f97SJerin Jacob 	/* Free memory prior to re-allocation if needed. */
99821e3fb00SKamil Rytarowski 	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
9993f3c6f97SJerin Jacob 		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
100021e3fb00SKamil Rytarowski 				nicvf_netdev_qidx(nic, qidx));
100121e3fb00SKamil Rytarowski 		nicvf_dev_tx_queue_release(
100221e3fb00SKamil Rytarowski 			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
100321e3fb00SKamil Rytarowski 		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
10043f3c6f97SJerin Jacob 	}
10053f3c6f97SJerin Jacob 
10063f3c6f97SJerin Jacob 	/* Allocating tx queue data structure */
10073f3c6f97SJerin Jacob 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
10083f3c6f97SJerin Jacob 					RTE_CACHE_LINE_SIZE, nic->node);
10093f3c6f97SJerin Jacob 	if (txq == NULL) {
101021e3fb00SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
101121e3fb00SKamil Rytarowski 			     nicvf_netdev_qidx(nic, qidx));
10123f3c6f97SJerin Jacob 		return -ENOMEM;
10133f3c6f97SJerin Jacob 	}
10143f3c6f97SJerin Jacob 
10153f3c6f97SJerin Jacob 	txq->nic = nic;
10163f3c6f97SJerin Jacob 	txq->queue_id = qidx;
10173f3c6f97SJerin Jacob 	txq->tx_free_thresh = tx_free_thresh;
10183f3c6f97SJerin Jacob 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
10193f3c6f97SJerin Jacob 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
1020a4996bd8SWei Dai 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1021a4996bd8SWei Dai 	txq->offloads = offloads;
1022c97da2cbSMaciej Czekaj 
1023a4996bd8SWei Dai 	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
10243f3c6f97SJerin Jacob 
10253f3c6f97SJerin Jacob 	/* Choose optimum free threshold value for multipool case */
10263f3c6f97SJerin Jacob 	if (!is_single_pool) {
10273f3c6f97SJerin Jacob 		txq->tx_free_thresh = (uint16_t)
10283f3c6f97SJerin Jacob 		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
10293f3c6f97SJerin Jacob 				NICVF_TX_FREE_MPOOL_THRESH :
10303f3c6f97SJerin Jacob 				tx_conf->tx_free_thresh);
10311c421f18SJerin Jacob 		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
10321c421f18SJerin Jacob 	} else {
10331c421f18SJerin Jacob 		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
10343f3c6f97SJerin Jacob 	}
10353f3c6f97SJerin Jacob 
10363f3c6f97SJerin Jacob 	/* Allocate software ring */
10373f3c6f97SJerin Jacob 	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
10383f3c6f97SJerin Jacob 				nb_desc * sizeof(struct rte_mbuf *),
10393f3c6f97SJerin Jacob 				RTE_CACHE_LINE_SIZE, nic->node);
10403f3c6f97SJerin Jacob 
10413f3c6f97SJerin Jacob 	if (txq->txbuffs == NULL) {
10423f3c6f97SJerin Jacob 		nicvf_dev_tx_queue_release(txq);
10433f3c6f97SJerin Jacob 		return -ENOMEM;
10443f3c6f97SJerin Jacob 	}
10453f3c6f97SJerin Jacob 
10466d3cbd56SKamil Rytarowski 	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
10473f3c6f97SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
10483f3c6f97SJerin Jacob 		nicvf_dev_tx_queue_release(txq);
10493f3c6f97SJerin Jacob 		return -ENOMEM;
10503f3c6f97SJerin Jacob 	}
10513f3c6f97SJerin Jacob 
10523f3c6f97SJerin Jacob 	nicvf_tx_queue_reset(txq);
10533f3c6f97SJerin Jacob 
1054c97da2cbSMaciej Czekaj 	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
1055c97da2cbSMaciej Czekaj 			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
105621e3fb00SKamil Rytarowski 			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1057c97da2cbSMaciej Czekaj 			txq->phys, txq->offloads);
10583f3c6f97SJerin Jacob 
105921e3fb00SKamil Rytarowski 	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
106021e3fb00SKamil Rytarowski 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
106121e3fb00SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
10623f3c6f97SJerin Jacob 	return 0;
10633f3c6f97SJerin Jacob }
10643f3c6f97SJerin Jacob 
106586b4eb42SJerin Jacob static inline void
10666d3cbd56SKamil Rytarowski nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
106786b4eb42SJerin Jacob {
106886b4eb42SJerin Jacob 	uint32_t rxq_cnt;
106986b4eb42SJerin Jacob 	uint32_t nb_pkts, released_pkts = 0;
107086b4eb42SJerin Jacob 	uint32_t refill_cnt = 0;
107186b4eb42SJerin Jacob 	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
107286b4eb42SJerin Jacob 
107386b4eb42SJerin Jacob 	if (dev->rx_pkt_burst == NULL)
107486b4eb42SJerin Jacob 		return;
107586b4eb42SJerin Jacob 
107621e3fb00SKamil Rytarowski 	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
107721e3fb00SKamil Rytarowski 				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
107886b4eb42SJerin Jacob 		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
107986b4eb42SJerin Jacob 					NICVF_MAX_RX_FREE_THRESH);
108086b4eb42SJerin Jacob 		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
108186b4eb42SJerin Jacob 		while (nb_pkts) {
108286b4eb42SJerin Jacob 			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
108386b4eb42SJerin Jacob 			released_pkts++;
108486b4eb42SJerin Jacob 		}
108586b4eb42SJerin Jacob 	}
108686b4eb42SJerin Jacob 
108721e3fb00SKamil Rytarowski 
108821e3fb00SKamil Rytarowski 	refill_cnt += nicvf_dev_rbdr_refill(dev,
108921e3fb00SKamil Rytarowski 			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
109021e3fb00SKamil Rytarowski 
109186b4eb42SJerin Jacob 	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
109286b4eb42SJerin Jacob 		    released_pkts, refill_cnt);
109386b4eb42SJerin Jacob }
109486b4eb42SJerin Jacob 
1095aa0d976eSJerin Jacob static void
1096aa0d976eSJerin Jacob nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1097aa0d976eSJerin Jacob {
1098aa0d976eSJerin Jacob 	rxq->head = 0;
1099aa0d976eSJerin Jacob 	rxq->available_space = 0;
1100aa0d976eSJerin Jacob 	rxq->recv_buffers = 0;
1101aa0d976eSJerin Jacob }
1102aa0d976eSJerin Jacob 
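/*
 * Attach the Rx queue to the shared RBDR and configure its RQ and CQ;
 * roll back both on failure.
 */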
110386b4eb42SJerin Jacob static inline int
110471e76186SKamil Rytarowski nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
110571e76186SKamil Rytarowski 			uint16_t qidx)
110686b4eb42SJerin Jacob {
110786b4eb42SJerin Jacob 	struct nicvf_rxq *rxq;
110886b4eb42SJerin Jacob 	int ret;
110986b4eb42SJerin Jacob 
111071e76186SKamil Rytarowski 	assert(qidx < MAX_RCV_QUEUES_PER_QS);
111171e76186SKamil Rytarowski 
111271e76186SKamil Rytarowski 	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
111371e76186SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STARTED)
111486b4eb42SJerin Jacob 		return 0;
111586b4eb42SJerin Jacob 
111686b4eb42SJerin Jacob 	/* Point this rxq at the qset's shared RBDR */
111771e76186SKamil Rytarowski 	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
111886b4eb42SJerin Jacob 	rxq->shared_rbdr = nic->rbdr;
111986b4eb42SJerin Jacob 
112086b4eb42SJerin Jacob 	ret = nicvf_qset_rq_config(nic, qidx, rxq);
112186b4eb42SJerin Jacob 	if (ret) {
112271e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
112371e76186SKamil Rytarowski 			     nic->vf_id, qidx, ret);
112486b4eb42SJerin Jacob 		goto config_rq_error;
112586b4eb42SJerin Jacob 	}
112686b4eb42SJerin Jacob 	ret = nicvf_qset_cq_config(nic, qidx, rxq);
112786b4eb42SJerin Jacob 	if (ret) {
112871e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
112971e76186SKamil Rytarowski 			     nic->vf_id, qidx, ret);
113086b4eb42SJerin Jacob 		goto config_cq_error;
113186b4eb42SJerin Jacob 	}
113286b4eb42SJerin Jacob 
113371e76186SKamil Rytarowski 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
113471e76186SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STARTED;
113586b4eb42SJerin Jacob 	return 0;
113686b4eb42SJerin Jacob 
113786b4eb42SJerin Jacob config_cq_error:
113886b4eb42SJerin Jacob 	nicvf_qset_cq_reclaim(nic, qidx);
113986b4eb42SJerin Jacob config_rq_error:
114086b4eb42SJerin Jacob 	nicvf_qset_rq_reclaim(nic, qidx);
114186b4eb42SJerin Jacob 	return ret;
114286b4eb42SJerin Jacob }
114386b4eb42SJerin Jacob 
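/*
 * Stop one RX queue local to the given VF: reclaim the RQ and CQ,
 * release any mbufs still queued and reset the software state. The
 * first error is remembered so that both reclaims are always attempted.
 */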
114486b4eb42SJerin Jacob static inline int
1145627d4ba2SKamil Rytarowski nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1146627d4ba2SKamil Rytarowski 		       uint16_t qidx)
114786b4eb42SJerin Jacob {
114886b4eb42SJerin Jacob 	struct nicvf_rxq *rxq;
114986b4eb42SJerin Jacob 	int ret, other_error;
115086b4eb42SJerin Jacob 
1151627d4ba2SKamil Rytarowski 	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1152627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED)
115386b4eb42SJerin Jacob 		return 0;
115486b4eb42SJerin Jacob 
115586b4eb42SJerin Jacob 	ret = nicvf_qset_rq_reclaim(nic, qidx);
115686b4eb42SJerin Jacob 	if (ret)
1157627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1158627d4ba2SKamil Rytarowski 			     nic->vf_id, qidx, ret);
115986b4eb42SJerin Jacob 
116086b4eb42SJerin Jacob 	other_error = ret;
1161627d4ba2SKamil Rytarowski 	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
11626d3cbd56SKamil Rytarowski 	nicvf_rx_queue_release_mbufs(dev, rxq);
116386b4eb42SJerin Jacob 	nicvf_rx_queue_reset(rxq);
116486b4eb42SJerin Jacob 
116586b4eb42SJerin Jacob 	ret = nicvf_qset_cq_reclaim(nic, qidx);
116686b4eb42SJerin Jacob 	if (ret)
1167627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1168627d4ba2SKamil Rytarowski 			     nic->vf_id, qidx, ret);
116986b4eb42SJerin Jacob 
117086b4eb42SJerin Jacob 	other_error |= ret;
1171627d4ba2SKamil Rytarowski 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1172627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
117386b4eb42SJerin Jacob 	return other_error;
117486b4eb42SJerin Jacob }
117586b4eb42SJerin Jacob 
1176aa0d976eSJerin Jacob static void
1177aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(void *rx_queue)
1178aa0d976eSJerin Jacob {
1179aa0d976eSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1180aa0d976eSJerin Jacob 
1181394014bcSKamil Rytarowski 	rte_free(rx_queue);
1182aa0d976eSJerin Jacob }
1183aa0d976eSJerin Jacob 
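/*
 * Ethdev-level queue indices span the primary VF and its secondary
 * qsets; map the global index to the owning VF and its local queue id
 * before starting the queue, then refresh the CPI and RSS RETA setup.
 */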
1184aa0d976eSJerin Jacob static int
118586b4eb42SJerin Jacob nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
118686b4eb42SJerin Jacob {
118771e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
118886b4eb42SJerin Jacob 	int ret;
118986b4eb42SJerin Jacob 
119071e76186SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
119171e76186SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
119271e76186SKamil Rytarowski 
119371e76186SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
119471e76186SKamil Rytarowski 
119571e76186SKamil Rytarowski 	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
119686b4eb42SJerin Jacob 	if (ret)
119786b4eb42SJerin Jacob 		return ret;
119886b4eb42SJerin Jacob 
119986b4eb42SJerin Jacob 	ret = nicvf_configure_cpi(dev);
120086b4eb42SJerin Jacob 	if (ret)
120186b4eb42SJerin Jacob 		return ret;
120286b4eb42SJerin Jacob 
120386b4eb42SJerin Jacob 	return nicvf_configure_rss_reta(dev);
120486b4eb42SJerin Jacob }
120586b4eb42SJerin Jacob 
120686b4eb42SJerin Jacob static int
120786b4eb42SJerin Jacob nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
120886b4eb42SJerin Jacob {
120986b4eb42SJerin Jacob 	int ret;
1210627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
121186b4eb42SJerin Jacob 
1212627d4ba2SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1213627d4ba2SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1214627d4ba2SKamil Rytarowski 
1215627d4ba2SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1216627d4ba2SKamil Rytarowski 
1217627d4ba2SKamil Rytarowski 	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
121886b4eb42SJerin Jacob 	ret |= nicvf_configure_cpi(dev);
121986b4eb42SJerin Jacob 	ret |= nicvf_configure_rss_reta(dev);
122086b4eb42SJerin Jacob 	return ret;
122186b4eb42SJerin Jacob }
122286b4eb42SJerin Jacob 
122386b4eb42SJerin Jacob static int
1224fc1f6c62SJerin Jacob nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1225fc1f6c62SJerin Jacob {
122671e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
122771e76186SKamil Rytarowski 
122871e76186SKamil Rytarowski 	if (qidx >= MAX_SND_QUEUES_PER_QS)
122971e76186SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
123071e76186SKamil Rytarowski 
123171e76186SKamil Rytarowski 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
123271e76186SKamil Rytarowski 
123371e76186SKamil Rytarowski 	return nicvf_vf_start_tx_queue(dev, nic, qidx);
1234fc1f6c62SJerin Jacob }
1235fc1f6c62SJerin Jacob 
1236fc1f6c62SJerin Jacob static int
1237fc1f6c62SJerin Jacob nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1238fc1f6c62SJerin Jacob {
1239627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
1240627d4ba2SKamil Rytarowski 
1241627d4ba2SKamil Rytarowski 	if (qidx >= MAX_SND_QUEUES_PER_QS)
1242627d4ba2SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1243627d4ba2SKamil Rytarowski 
1244627d4ba2SKamil Rytarowski 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1245627d4ba2SKamil Rytarowski 
1246627d4ba2SKamil Rytarowski 	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1247fc1f6c62SJerin Jacob }
1248fc1f6c62SJerin Jacob 
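/*
 * Precompute the 8-byte "rearm" template for the RX fast path. The
 * build-time asserts pin the mbuf field layout so that data_off, refcnt,
 * nb_segs and port can all be initialized with a single 64-bit store,
 * roughly (a sketch of how the rx handlers consume the template):
 *
 *	*(uint64_t *)(&mbuf->rearm_data) = rxq->mbuf_initializer.value;
 */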
12495c7ccb26SJerin Jacob static inline void
12505c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
12515c7ccb26SJerin Jacob {
12525c7ccb26SJerin Jacob 	uintptr_t p;
12535c7ccb26SJerin Jacob 	struct rte_mbuf mb_def;
1254279d3319SRakesh Kudurumalla 	struct nicvf *nic = rxq->nic;
12555c7ccb26SJerin Jacob 
12565c7ccb26SJerin Jacob 	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
125795b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
125895b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
125995b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 2);
126095b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
126195b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 4);
126295b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
126395b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 6);
12645e64c812SPavan Nikhilesh 	RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
12655e64c812SPavan Nikhilesh 				offsetof(struct nicvf_rxq,
12665e64c812SPavan Nikhilesh 					rxq_fastpath_data_start) > 128);
12675c7ccb26SJerin Jacob 	mb_def.nb_segs = 1;
1268279d3319SRakesh Kudurumalla 	mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
12695c7ccb26SJerin Jacob 	mb_def.port = rxq->port_id;
12705c7ccb26SJerin Jacob 	rte_mbuf_refcnt_set(&mb_def, 1);
12715c7ccb26SJerin Jacob 
12725c7ccb26SJerin Jacob 	/* Prevent compiler reordering: rearm_data covers previous fields */
12735c7ccb26SJerin Jacob 	rte_compiler_barrier();
12745c7ccb26SJerin Jacob 	p = (uintptr_t)&mb_def.rearm_data;
12755c7ccb26SJerin Jacob 	rxq->mbuf_initializer.value = *(uint64_t *)p;
12765c7ccb26SJerin Jacob }
1277394014bcSKamil Rytarowski 
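/*
 * RX queue setup: validate the mempool (first-skip headroom, a single
 * IOVA-contiguous memory segment), the descriptor count and
 * rx_free_thresh, then allocate the rxq and its completion queue and
 * leave the queue in the STOPPED state.
 */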
1278fc1f6c62SJerin Jacob static int
1279aa0d976eSJerin Jacob nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1280aa0d976eSJerin Jacob 			 uint16_t nb_desc, unsigned int socket_id,
1281aa0d976eSJerin Jacob 			 const struct rte_eth_rxconf *rx_conf,
1282aa0d976eSJerin Jacob 			 struct rte_mempool *mp)
1283aa0d976eSJerin Jacob {
1284aa0d976eSJerin Jacob 	uint16_t rx_free_thresh;
1285aa0d976eSJerin Jacob 	struct nicvf_rxq *rxq;
1286aa0d976eSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1287a4996bd8SWei Dai 	uint64_t offloads;
1288279d3319SRakesh Kudurumalla 	uint32_t buffsz;
1289279d3319SRakesh Kudurumalla 	struct rte_pktmbuf_pool_private *mbp_priv;
1290aa0d976eSJerin Jacob 
1291aa0d976eSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1292aa0d976eSJerin Jacob 
1293279d3319SRakesh Kudurumalla 	/* Validate first skip against the pool's buffer size */
1294279d3319SRakesh Kudurumalla 	mbp_priv = rte_mempool_get_priv(mp);
1295279d3319SRakesh Kudurumalla 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1296279d3319SRakesh Kudurumalla 	if (buffsz < (uint32_t)(nic->skip_bytes)) {
1297279d3319SRakesh Kudurumalla 		PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
1298279d3319SRakesh Kudurumalla 		return -EINVAL;
1299279d3319SRakesh Kudurumalla 	}
1300279d3319SRakesh Kudurumalla 
130121e3fb00SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
130221e3fb00SKamil Rytarowski 		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
130321e3fb00SKamil Rytarowski 
130421e3fb00SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
130521e3fb00SKamil Rytarowski 
1306aa0d976eSJerin Jacob 	/* Socket id check */
1307aa0d976eSJerin Jacob 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1308aa0d976eSJerin Jacob 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1309aa0d976eSJerin Jacob 		socket_id, nic->node);
1310aa0d976eSJerin Jacob 
1311394014bcSKamil Rytarowski 	/* Mempool memory must be contiguous, so it must be one memory segment */
1312aa0d976eSJerin Jacob 	if (mp->nb_mem_chunks != 1) {
1313394014bcSKamil Rytarowski 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1314394014bcSKamil Rytarowski 		return -EINVAL;
1315394014bcSKamil Rytarowski 	}
1316394014bcSKamil Rytarowski 
1317394014bcSKamil Rytarowski 	/* Mempool memory must be physically contiguous */
13184143b122SAndrew Rybchenko 	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
1319394014bcSKamil Rytarowski 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1320aa0d976eSJerin Jacob 		return -EINVAL;
1321aa0d976eSJerin Jacob 	}
1322aa0d976eSJerin Jacob 
1323aa0d976eSJerin Jacob 	/* Rx deferred start is not supported */
1324aa0d976eSJerin Jacob 	if (rx_conf->rx_deferred_start) {
1325aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1326aa0d976eSJerin Jacob 		return -EINVAL;
1327aa0d976eSJerin Jacob 	}
1328aa0d976eSJerin Jacob 
1329aa0d976eSJerin Jacob 	/* Round up nb_desc to an available qsize and validate the descriptor limit */
1330aa0d976eSJerin Jacob 	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1331aa0d976eSJerin Jacob 	if (nb_desc == 0) {
1332aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
1333aa0d976eSJerin Jacob 		return -EINVAL;
1334aa0d976eSJerin Jacob 	}
1335aa0d976eSJerin Jacob 
1337aa0d976eSJerin Jacob 	/* Check rx_free_thresh upper bound */
1338aa0d976eSJerin Jacob 	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1339aa0d976eSJerin Jacob 				rx_conf->rx_free_thresh :
1340aa0d976eSJerin Jacob 				NICVF_DEFAULT_RX_FREE_THRESH);
1341aa0d976eSJerin Jacob 	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1342aa0d976eSJerin Jacob 		rx_free_thresh >= nb_desc * .75) {
1343aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1344aa0d976eSJerin Jacob 				rx_free_thresh);
1345aa0d976eSJerin Jacob 		return -EINVAL;
1346aa0d976eSJerin Jacob 	}
1347aa0d976eSJerin Jacob 
1348aa0d976eSJerin Jacob 	/* Free memory prior to re-allocation if needed */
134921e3fb00SKamil Rytarowski 	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1350aa0d976eSJerin Jacob 		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
135121e3fb00SKamil Rytarowski 				nicvf_netdev_qidx(nic, qidx));
135221e3fb00SKamil Rytarowski 		nicvf_dev_rx_queue_release(
135321e3fb00SKamil Rytarowski 			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
135421e3fb00SKamil Rytarowski 		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1355aa0d976eSJerin Jacob 	}
1356aa0d976eSJerin Jacob 
1357aa0d976eSJerin Jacob 	/* Allocate rxq memory */
1358aa0d976eSJerin Jacob 	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1359aa0d976eSJerin Jacob 					RTE_CACHE_LINE_SIZE, nic->node);
1360aa0d976eSJerin Jacob 	if (rxq == NULL) {
136121e3fb00SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
136221e3fb00SKamil Rytarowski 			     nicvf_netdev_qidx(nic, qidx));
1363aa0d976eSJerin Jacob 		return -ENOMEM;
1364aa0d976eSJerin Jacob 	}
1365aa0d976eSJerin Jacob 
1366aa0d976eSJerin Jacob 	rxq->nic = nic;
1367aa0d976eSJerin Jacob 	rxq->pool = mp;
1368aa0d976eSJerin Jacob 	rxq->queue_id = qidx;
1369aa0d976eSJerin Jacob 	rxq->port_id = dev->data->port_id;
1370aa0d976eSJerin Jacob 	rxq->rx_free_thresh = rx_free_thresh;
1371aa0d976eSJerin Jacob 	rxq->rx_drop_en = rx_conf->rx_drop_en;
1372aa0d976eSJerin Jacob 	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1373aa0d976eSJerin Jacob 	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1374aa0d976eSJerin Jacob 	rxq->precharge_cnt = 0;
1375e2c519b3SJerin Jacob 
1376e2c519b3SJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1377e2c519b3SJerin Jacob 		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1378e2c519b3SJerin Jacob 	else
1379aa0d976eSJerin Jacob 		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1380aa0d976eSJerin Jacob 
13815c7ccb26SJerin Jacob 	nicvf_rxq_mbuf_setup(rxq);
1382e2c519b3SJerin Jacob 
1383aa0d976eSJerin Jacob 	/* Alloc completion queue */
13846d3cbd56SKamil Rytarowski 	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1385aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate cq %u", rxq->queue_id);
1386aa0d976eSJerin Jacob 		nicvf_dev_rx_queue_release(rxq);
1387aa0d976eSJerin Jacob 		return -ENOMEM;
1388aa0d976eSJerin Jacob 	}
1389aa0d976eSJerin Jacob 
1390aa0d976eSJerin Jacob 	nicvf_rx_queue_reset(rxq);
1391aa0d976eSJerin Jacob 
1392a4996bd8SWei Dai 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1393c97da2cbSMaciej Czekaj 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1394c97da2cbSMaciej Czekaj 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
139521e3fb00SKamil Rytarowski 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1396a4996bd8SWei Dai 			rte_mempool_avail_count(mp), rxq->phys, offloads);
1397aa0d976eSJerin Jacob 
139821e3fb00SKamil Rytarowski 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
139921e3fb00SKamil Rytarowski 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
140021e3fb00SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
1401aa0d976eSJerin Jacob 	return 0;
1402aa0d976eSJerin Jacob }
1403aa0d976eSJerin Jacob 
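/* Report device capabilities: link speeds, queue and offload limits,
 * RSS parameters and the default RX/TX queue configuration.
 */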
1404bdad90d1SIvan Ilchenko static int
1405dcd7b1e1SJerin Jacob nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1406dcd7b1e1SJerin Jacob {
1407dcd7b1e1SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1408c0802544SFerruh Yigit 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1409dcd7b1e1SJerin Jacob 
1410dcd7b1e1SJerin Jacob 	PMD_INIT_FUNC_TRACE();
1411dcd7b1e1SJerin Jacob 
1412ba2d05abSJerin Jacob 	/* Autonegotiation may be disabled */
1413ba2d05abSJerin Jacob 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1414ba2d05abSJerin Jacob 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1415ba2d05abSJerin Jacob 				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1416ba2d05abSJerin Jacob 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1417ba2d05abSJerin Jacob 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1418ba2d05abSJerin Jacob 
141935b2d13fSOlivier Matz 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
142035b2d13fSOlivier Matz 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
142121e3fb00SKamil Rytarowski 	dev_info->max_rx_queues =
142221e3fb00SKamil Rytarowski 			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
142321e3fb00SKamil Rytarowski 	dev_info->max_tx_queues =
142421e3fb00SKamil Rytarowski 			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1425dcd7b1e1SJerin Jacob 	dev_info->max_mac_addrs = 1;
1426eac901ceSJan Blunck 	dev_info->max_vfs = pci_dev->max_vfs;
1427dcd7b1e1SJerin Jacob 
1428c97da2cbSMaciej Czekaj 	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1429c97da2cbSMaciej Czekaj 	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1430c97da2cbSMaciej Czekaj 	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1431c97da2cbSMaciej Czekaj 	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1432dcd7b1e1SJerin Jacob 
1433dcd7b1e1SJerin Jacob 	dev_info->reta_size = nic->rss_info.rss_size;
1434dcd7b1e1SJerin Jacob 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1435dcd7b1e1SJerin Jacob 	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1436dcd7b1e1SJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1437dcd7b1e1SJerin Jacob 		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1438dcd7b1e1SJerin Jacob 
1439dcd7b1e1SJerin Jacob 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1440dcd7b1e1SJerin Jacob 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1441dcd7b1e1SJerin Jacob 		.rx_drop_en = 0,
1442dcd7b1e1SJerin Jacob 	};
1443dcd7b1e1SJerin Jacob 
1444dcd7b1e1SJerin Jacob 	dev_info->default_txconf = (struct rte_eth_txconf) {
1445dcd7b1e1SJerin Jacob 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1446c97da2cbSMaciej Czekaj 		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1447c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
1448c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_UDP_CKSUM          |
1449c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_TCP_CKSUM,
1450dcd7b1e1SJerin Jacob 	};
1451bdad90d1SIvan Ilchenko 
1452bdad90d1SIvan Ilchenko 	return 0;
1453dcd7b1e1SJerin Jacob }
1454dcd7b1e1SJerin Jacob 
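/*
 * Buffer allocator callback used while precharging the RBDR: walk this
 * VF's RX queues, keep the precharge count balanced across their pools
 * and return the IOVA of a freshly allocated mbuf, or 0 when exhausted.
 */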
1455df6e0a06SSantosh Shukla static nicvf_iova_addr_t
1456394014bcSKamil Rytarowski rbdr_rte_mempool_get(void *dev, void *opaque)
14577413feeeSJerin Jacob {
14587413feeeSJerin Jacob 	uint16_t qidx;
14597413feeeSJerin Jacob 	uintptr_t mbuf;
14607413feeeSJerin Jacob 	struct nicvf_rxq *rxq;
1461394014bcSKamil Rytarowski 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
146221e3fb00SKamil Rytarowski 	struct nicvf *nic = (struct nicvf *)opaque;
146321e3fb00SKamil Rytarowski 	uint16_t rx_start, rx_end;
14647413feeeSJerin Jacob 
146521e3fb00SKamil Rytarowski 	/* Get queue ranges for this VF */
146621e3fb00SKamil Rytarowski 	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
146721e3fb00SKamil Rytarowski 
146821e3fb00SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1469394014bcSKamil Rytarowski 		rxq = eth_dev->data->rx_queues[qidx];
14707413feeeSJerin Jacob 		/* Maintain equal buffer count across all pools */
14717413feeeSJerin Jacob 		if (rxq->precharge_cnt >= rxq->qlen_mask)
14727413feeeSJerin Jacob 			continue;
14737413feeeSJerin Jacob 		rxq->precharge_cnt++;
14747413feeeSJerin Jacob 		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
14757413feeeSJerin Jacob 		if (mbuf)
14767413feeeSJerin Jacob 			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
14777413feeeSJerin Jacob 	}
14787413feeeSJerin Jacob 	return 0;
14797413feeeSJerin Jacob }
14807413feeeSJerin Jacob 
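/*
 * Per-VF start sequence: validate that all mempools share the same
 * physical offset and buffer size, size and precharge the RBDR, bring up
 * the qset with its TX and RX queues and, for the primary VF, configure
 * CPI and RSS before signalling the PF that configuration is done.
 */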
14817413feeeSJerin Jacob static int
148271e76186SKamil Rytarowski nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
14837413feeeSJerin Jacob {
14847413feeeSJerin Jacob 	int ret;
148534c2e702SJerin Jacob 	uint16_t qidx, data_off;
14867413feeeSJerin Jacob 	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
14877413feeeSJerin Jacob 	uint64_t mbuf_phys_off = 0;
14887413feeeSJerin Jacob 	struct nicvf_rxq *rxq;
14897413feeeSJerin Jacob 	struct rte_mbuf *mbuf;
149071e76186SKamil Rytarowski 	uint16_t rx_start, rx_end;
149171e76186SKamil Rytarowski 	uint16_t tx_start, tx_end;
1492d3bf2564SRakesh Kudurumalla 	int mask;
14937413feeeSJerin Jacob 
14947413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
14957413feeeSJerin Jacob 
14967413feeeSJerin Jacob 	/* Userspace process exited without proper shutdown in last run */
14977413feeeSJerin Jacob 	if (nicvf_qset_rbdr_active(nic, 0))
149871e76186SKamil Rytarowski 		nicvf_vf_stop(dev, nic, false);
149971e76186SKamil Rytarowski 
150071e76186SKamil Rytarowski 	/* Get queue ranges for this VF */
150171e76186SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
15027413feeeSJerin Jacob 
15037413feeeSJerin Jacob 	/*
15047413feeeSJerin Jacob 	 * The thunderx nicvf PMD can support more than one pool per port only when
15057413feeeSJerin Jacob 	 * 1) the data payload size is the same across all pools in a given port
15067413feeeSJerin Jacob 	 * AND
15077413feeeSJerin Jacob 	 * 2) all mbufs in the pools come from the same hugepage
15087413feeeSJerin Jacob 	 * AND
15097413feeeSJerin Jacob 	 * 3) the mbuf metadata size is the same across all pools in a given port.
15107413feeeSJerin Jacob 	 *
15117413feeeSJerin Jacob 	 * This is to support existing applications that use multiple pools per
15127413feeeSJerin Jacob 	 * port; the purpose of using multiple pools for QoS is not addressed.
15137413feeeSJerin Jacob 	 *
15147413feeeSJerin Jacob 	 */
15157413feeeSJerin Jacob 
15167413feeeSJerin Jacob 	/* Validate mempool attributes */
151771e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
15187413feeeSJerin Jacob 		rxq = dev->data->rx_queues[qidx];
15197413feeeSJerin Jacob 		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
15207413feeeSJerin Jacob 		mbuf = rte_pktmbuf_alloc(rxq->pool);
15217413feeeSJerin Jacob 		if (mbuf == NULL) {
152271e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
152371e76186SKamil Rytarowski 				     "pool=%s",
152471e76186SKamil Rytarowski 				     nic->vf_id, qidx, rxq->pool->name);
15257413feeeSJerin Jacob 			return -ENOMEM;
15267413feeeSJerin Jacob 		}
152734c2e702SJerin Jacob 		data_off = nicvf_mbuff_meta_length(mbuf);
152834c2e702SJerin Jacob 		data_off += RTE_PKTMBUF_HEADROOM;
15297413feeeSJerin Jacob 		rte_pktmbuf_free(mbuf);
15307413feeeSJerin Jacob 
153134c2e702SJerin Jacob 		if (data_off % RTE_CACHE_LINE_SIZE) {
153234c2e702SJerin Jacob 			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
153334c2e702SJerin Jacob 				rxq->pool->name, data_off,
153434c2e702SJerin Jacob 				data_off % RTE_CACHE_LINE_SIZE);
153534c2e702SJerin Jacob 			return -EINVAL;
153634c2e702SJerin Jacob 		}
153734c2e702SJerin Jacob 		rxq->mbuf_phys_off -= data_off;
1538279d3319SRakesh Kudurumalla 		rxq->mbuf_phys_off -= nic->skip_bytes;
153934c2e702SJerin Jacob 
15407413feeeSJerin Jacob 		if (mbuf_phys_off == 0)
15417413feeeSJerin Jacob 			mbuf_phys_off = rxq->mbuf_phys_off;
15427413feeeSJerin Jacob 		if (mbuf_phys_off != rxq->mbuf_phys_off) {
154371e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
154471e76186SKamil Rytarowski 				     PRIx64, rxq->pool->name, nic->vf_id,
154571e76186SKamil Rytarowski 				     mbuf_phys_off);
15467413feeeSJerin Jacob 			return -EINVAL;
15477413feeeSJerin Jacob 		}
15487413feeeSJerin Jacob 	}
15497413feeeSJerin Jacob 
15507413feeeSJerin Jacob 	/* Check the level of buffers in the pool */
15517413feeeSJerin Jacob 	total_rxq_desc = 0;
155271e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
15537413feeeSJerin Jacob 		rxq = dev->data->rx_queues[qidx];
15547413feeeSJerin Jacob 		/* Count total numbers of rxq descs */
15557413feeeSJerin Jacob 		total_rxq_desc += rxq->qlen_mask + 1;
15567413feeeSJerin Jacob 		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
15576d3cbd56SKamil Rytarowski 		exp_buffs *= dev->data->nb_rx_queues;
1558a0fd91ceSBruce Richardson 		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
15597413feeeSJerin Jacob 			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
15607413feeeSJerin Jacob 				     rxq->pool->name,
1561a0fd91ceSBruce Richardson 				     rte_mempool_avail_count(rxq->pool),
15627413feeeSJerin Jacob 				     exp_buffs);
15637413feeeSJerin Jacob 			return -ENOENT;
15647413feeeSJerin Jacob 		}
15657413feeeSJerin Jacob 	}
15667413feeeSJerin Jacob 
15677413feeeSJerin Jacob 	/* Check RBDR desc overflow */
15687413feeeSJerin Jacob 	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
15697413feeeSJerin Jacob 	if (ret == 0) {
157071e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
157171e76186SKamil Rytarowski 			     "VF%d", nic->vf_id);
15727413feeeSJerin Jacob 		return -ENOMEM;
15737413feeeSJerin Jacob 	}
15747413feeeSJerin Jacob 
15757413feeeSJerin Jacob 	/* Enable qset */
15767413feeeSJerin Jacob 	ret = nicvf_qset_config(nic);
15777413feeeSJerin Jacob 	if (ret) {
157871e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
157971e76186SKamil Rytarowski 			     nic->vf_id);
15807413feeeSJerin Jacob 		return ret;
15817413feeeSJerin Jacob 	}
15827413feeeSJerin Jacob 
15837413feeeSJerin Jacob 	/* Allocate RBDR and RBDR ring desc */
15847413feeeSJerin Jacob 	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
15856d3cbd56SKamil Rytarowski 	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
15867413feeeSJerin Jacob 	if (ret) {
158771e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
158871e76186SKamil Rytarowski 			     "VF%d", nic->vf_id);
15897413feeeSJerin Jacob 		goto qset_reclaim;
15907413feeeSJerin Jacob 	}
15917413feeeSJerin Jacob 
15927413feeeSJerin Jacob 	/* Enable and configure RBDR registers */
15937413feeeSJerin Jacob 	ret = nicvf_qset_rbdr_config(nic, 0);
15947413feeeSJerin Jacob 	if (ret) {
159571e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
159671e76186SKamil Rytarowski 			     nic->vf_id);
15977413feeeSJerin Jacob 		goto qset_rbdr_free;
15987413feeeSJerin Jacob 	}
15997413feeeSJerin Jacob 
16007413feeeSJerin Jacob 	/* Fill rte_mempool buffers in RBDR pool and precharge it */
1601394014bcSKamil Rytarowski 	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1602394014bcSKamil Rytarowski 					total_rxq_desc);
16037413feeeSJerin Jacob 	if (ret) {
160471e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
160571e76186SKamil Rytarowski 			     nic->vf_id);
16067413feeeSJerin Jacob 		goto qset_rbdr_reclaim;
16077413feeeSJerin Jacob 	}
16087413feeeSJerin Jacob 
160971e76186SKamil Rytarowski 	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
161071e76186SKamil Rytarowski 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
16117413feeeSJerin Jacob 
16127413feeeSJerin Jacob 	/* Configure VLAN Strip */
1613d3bf2564SRakesh Kudurumalla 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1614d3bf2564SRakesh Kudurumalla 		ETH_VLAN_EXTEND_MASK;
1615d3bf2564SRakesh Kudurumalla 	ret = nicvf_vlan_offload_config(dev, mask);
16167413feeeSJerin Jacob 
16178a946db3SJerin Jacob 	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
16188a946db3SJerin Jacob 	 * to a 64-bit memory address. The alignment creates a hole in the mbuf
16198a946db3SJerin Jacob 	 * (between the end of the headroom and the start of the packet data).
16208a946db3SJerin Jacob 	 * Newer revisions of the HW provide an option to disable the L3
16218a946db3SJerin Jacob 	 * alignment feature and make the mbuf layout look more like that of
16228a946db3SJerin Jacob 	 * other NICs. For better application compatibility, disable the L3
16238a946db3SJerin Jacob 	 * alignment feature on the hardware revisions that support it.
16248a946db3SJerin Jacob 	 */
16258a946db3SJerin Jacob 	nicvf_apad_config(nic, false);
16268a946db3SJerin Jacob 
162771e76186SKamil Rytarowski 	/* Get queue ranges for this VF */
162871e76186SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
162971e76186SKamil Rytarowski 
16307413feeeSJerin Jacob 	/* Configure TX queues */
163171e76186SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
163271e76186SKamil Rytarowski 		ret = nicvf_vf_start_tx_queue(dev, nic,
163371e76186SKamil Rytarowski 			qidx % MAX_SND_QUEUES_PER_QS);
16347413feeeSJerin Jacob 		if (ret)
16357413feeeSJerin Jacob 			goto start_txq_error;
16367413feeeSJerin Jacob 	}
16377413feeeSJerin Jacob 
163871e76186SKamil Rytarowski 	/* Configure RX queues */
163971e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
164071e76186SKamil Rytarowski 		ret = nicvf_vf_start_rx_queue(dev, nic,
164171e76186SKamil Rytarowski 			qidx % MAX_RCV_QUEUES_PER_QS);
164271e76186SKamil Rytarowski 		if (ret)
164371e76186SKamil Rytarowski 			goto start_rxq_error;
164471e76186SKamil Rytarowski 	}
164571e76186SKamil Rytarowski 
164671e76186SKamil Rytarowski 	if (!nic->sqs_mode) {
16477413feeeSJerin Jacob 		/* Configure CPI algorithm */
16487413feeeSJerin Jacob 		ret = nicvf_configure_cpi(dev);
16497413feeeSJerin Jacob 		if (ret)
16507413feeeSJerin Jacob 			goto start_txq_error;
16517413feeeSJerin Jacob 
165271e76186SKamil Rytarowski 		ret = nicvf_mbox_get_rss_size(nic);
165371e76186SKamil Rytarowski 		if (ret) {
165471e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "Failed to get rss table size");
165571e76186SKamil Rytarowski 			goto qset_rss_error;
165671e76186SKamil Rytarowski 		}
165771e76186SKamil Rytarowski 
16587413feeeSJerin Jacob 		/* Configure RSS */
16597413feeeSJerin Jacob 		ret = nicvf_configure_rss(dev);
16607413feeeSJerin Jacob 		if (ret)
16617413feeeSJerin Jacob 			goto qset_rss_error;
166271e76186SKamil Rytarowski 	}
166371e76186SKamil Rytarowski 
166471e76186SKamil Rytarowski 	/* Done; let the PF turn the BGX's RX and TX switches to the ON position */
166571e76186SKamil Rytarowski 	nicvf_mbox_cfg_done(nic);
166671e76186SKamil Rytarowski 	return 0;
166771e76186SKamil Rytarowski 
166871e76186SKamil Rytarowski qset_rss_error:
166971e76186SKamil Rytarowski 	nicvf_rss_term(nic);
167071e76186SKamil Rytarowski start_rxq_error:
167171e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++)
167271e76186SKamil Rytarowski 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
167371e76186SKamil Rytarowski start_txq_error:
167471e76186SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++)
167571e76186SKamil Rytarowski 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
167671e76186SKamil Rytarowski qset_rbdr_reclaim:
167771e76186SKamil Rytarowski 	nicvf_qset_rbdr_reclaim(nic, 0);
167871e76186SKamil Rytarowski 	nicvf_rbdr_release_mbufs(dev, nic);
167971e76186SKamil Rytarowski qset_rbdr_free:
168071e76186SKamil Rytarowski 	if (nic->rbdr) {
168171e76186SKamil Rytarowski 		rte_free(nic->rbdr);
168271e76186SKamil Rytarowski 		nic->rbdr = NULL;
168371e76186SKamil Rytarowski 	}
168471e76186SKamil Rytarowski qset_reclaim:
168571e76186SKamil Rytarowski 	nicvf_qset_reclaim(nic);
168671e76186SKamil Rytarowski 	return ret;
168771e76186SKamil Rytarowski }
168871e76186SKamil Rytarowski 
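/*
 * Port-level start: enforce a single RBDR buffer size across all RX
 * mempools, configure loopback, scatter mode and MTU, then run the
 * per-VF start sequence on the primary VF and every secondary qset VF.
 */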
168971e76186SKamil Rytarowski static int
169071e76186SKamil Rytarowski nicvf_dev_start(struct rte_eth_dev *dev)
169171e76186SKamil Rytarowski {
169271e76186SKamil Rytarowski 	uint16_t qidx;
169371e76186SKamil Rytarowski 	int ret;
169471e76186SKamil Rytarowski 	size_t i;
169571e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
169671e76186SKamil Rytarowski 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
169771e76186SKamil Rytarowski 	uint16_t mtu;
169871e76186SKamil Rytarowski 	uint32_t buffsz = 0, rbdrsz = 0;
169971e76186SKamil Rytarowski 	struct rte_pktmbuf_pool_private *mbp_priv;
170071e76186SKamil Rytarowski 	struct nicvf_rxq *rxq;
170171e76186SKamil Rytarowski 
170271e76186SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
170371e76186SKamil Rytarowski 
170471e76186SKamil Rytarowski 	/* This function must be called for a primary device */
170571e76186SKamil Rytarowski 	assert_primary(nic);
170671e76186SKamil Rytarowski 
170771e76186SKamil Rytarowski 	/* Validate RBDR buff size */
170871e76186SKamil Rytarowski 	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
170971e76186SKamil Rytarowski 		rxq = dev->data->rx_queues[qidx];
171071e76186SKamil Rytarowski 		mbp_priv = rte_mempool_get_priv(rxq->pool);
171171e76186SKamil Rytarowski 		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
171271e76186SKamil Rytarowski 		if (buffsz % 128) {
171371e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
171471e76186SKamil Rytarowski 			return -EINVAL;
171571e76186SKamil Rytarowski 		}
171671e76186SKamil Rytarowski 		if (rbdrsz == 0)
171771e76186SKamil Rytarowski 			rbdrsz = buffsz;
171871e76186SKamil Rytarowski 		if (rbdrsz != buffsz) {
171971e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
172071e76186SKamil Rytarowski 				     qidx, rbdrsz, buffsz);
172171e76186SKamil Rytarowski 			return -EINVAL;
172271e76186SKamil Rytarowski 		}
172371e76186SKamil Rytarowski 	}
17247413feeeSJerin Jacob 
17257413feeeSJerin Jacob 	/* Configure loopback */
17267413feeeSJerin Jacob 	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
17277413feeeSJerin Jacob 	if (ret) {
17287413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
172971e76186SKamil Rytarowski 		return ret;
17307413feeeSJerin Jacob 	}
17317413feeeSJerin Jacob 
17327413feeeSJerin Jacob 	/* Reset all statistics counters attached to this port */
17337413feeeSJerin Jacob 	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
17347413feeeSJerin Jacob 	if (ret) {
17357413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
173671e76186SKamil Rytarowski 		return ret;
17377413feeeSJerin Jacob 	}
17387413feeeSJerin Jacob 
17397413feeeSJerin Jacob 	/* Setup scatter mode if needed by jumbo */
17407413feeeSJerin Jacob 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
17417413feeeSJerin Jacob 					    2 * VLAN_TAG_SIZE > buffsz)
17427413feeeSJerin Jacob 		dev->data->scattered_rx = 1;
1743c97da2cbSMaciej Czekaj 	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
17447413feeeSJerin Jacob 		dev->data->scattered_rx = 1;
17457413feeeSJerin Jacob 
17467413feeeSJerin Jacob 	/* Setup MTU based on max_rx_pkt_len or default */
1747c97da2cbSMaciej Czekaj 	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
17487413feeeSJerin Jacob 		dev->data->dev_conf.rxmode.max_rx_pkt_len
174935b2d13fSOlivier Matz 			-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
17507413feeeSJerin Jacob 
17517413feeeSJerin Jacob 	if (nicvf_dev_set_mtu(dev, mtu)) {
17527413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
17537413feeeSJerin Jacob 		return -EBUSY;
17547413feeeSJerin Jacob 	}
17557413feeeSJerin Jacob 
175671e76186SKamil Rytarowski 	ret = nicvf_vf_start(dev, nic, rbdrsz);
175771e76186SKamil Rytarowski 	if (ret != 0)
175871e76186SKamil Rytarowski 		return ret;
175971e76186SKamil Rytarowski 
176071e76186SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
176171e76186SKamil Rytarowski 		assert(nic->snicvf[i]);
176271e76186SKamil Rytarowski 
176371e76186SKamil Rytarowski 		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
176471e76186SKamil Rytarowski 		if (ret != 0)
176571e76186SKamil Rytarowski 			return ret;
176671e76186SKamil Rytarowski 	}
176771e76186SKamil Rytarowski 
17685e64c812SPavan Nikhilesh 	/* Configure callbacks based on offloads */
17697413feeeSJerin Jacob 	nicvf_set_tx_function(dev);
17707413feeeSJerin Jacob 	nicvf_set_rx_function(dev);
17717413feeeSJerin Jacob 
17727413feeeSJerin Jacob 	return 0;
17737413feeeSJerin Jacob }
17747413feeeSJerin Jacob 
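/*
 * Common stop path: secondary qset VFs are torn down before the primary
 * VF, then loopback and the CPI configuration are reclaimed. With
 * cleanup == true the PF is also asked to shut the VFs down.
 */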
17757413feeeSJerin Jacob static void
1776627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
17777413feeeSJerin Jacob {
1778627d4ba2SKamil Rytarowski 	size_t i;
17797413feeeSJerin Jacob 	int ret;
17807413feeeSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
17817413feeeSJerin Jacob 
17827413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
17837413feeeSJerin Jacob 
1784627d4ba2SKamil Rytarowski 	/* Tear down the secondary qset VFs first */
1785627d4ba2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1786627d4ba2SKamil Rytarowski 		if (!nic->snicvf[i])
1787627d4ba2SKamil Rytarowski 			continue;
1788627d4ba2SKamil Rytarowski 
1789627d4ba2SKamil Rytarowski 		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1790627d4ba2SKamil Rytarowski 	}
1791627d4ba2SKamil Rytarowski 
1792627d4ba2SKamil Rytarowski 	/* Stop the primary VF now */
1793627d4ba2SKamil Rytarowski 	nicvf_vf_stop(dev, nic, cleanup);
17947413feeeSJerin Jacob 
17957413feeeSJerin Jacob 	/* Disable loopback */
17967413feeeSJerin Jacob 	ret = nicvf_loopback_config(nic, 0);
17977413feeeSJerin Jacob 	if (ret)
17987413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
17997413feeeSJerin Jacob 
1800627d4ba2SKamil Rytarowski 	/* Reclaim CPI configuration */
1801627d4ba2SKamil Rytarowski 	ret = nicvf_mbox_config_cpi(nic, 0);
1802627d4ba2SKamil Rytarowski 	if (ret)
1803627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1804627d4ba2SKamil Rytarowski }
1805627d4ba2SKamil Rytarowski 
1806627d4ba2SKamil Rytarowski static void
1807627d4ba2SKamil Rytarowski nicvf_dev_stop(struct rte_eth_dev *dev)
1808627d4ba2SKamil Rytarowski {
1809627d4ba2SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
1810627d4ba2SKamil Rytarowski 
1811627d4ba2SKamil Rytarowski 	nicvf_dev_stop_cleanup(dev, false);
1812627d4ba2SKamil Rytarowski }
1813627d4ba2SKamil Rytarowski 
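/*
 * Per-VF stop sequence: optionally notify the PF (cleanup == true),
 * disable VLAN stripping, stop all TX/RX queues owned by this VF,
 * reclaim the RBDR and the qset, disable interrupts and free the RBDR
 * software state.
 */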
1814627d4ba2SKamil Rytarowski static void
1815627d4ba2SKamil Rytarowski nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1816627d4ba2SKamil Rytarowski {
1817627d4ba2SKamil Rytarowski 	int ret;
1818627d4ba2SKamil Rytarowski 	uint16_t qidx;
1819627d4ba2SKamil Rytarowski 	uint16_t tx_start, tx_end;
1820627d4ba2SKamil Rytarowski 	uint16_t rx_start, rx_end;
1821627d4ba2SKamil Rytarowski 
1822627d4ba2SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
1823627d4ba2SKamil Rytarowski 
1824627d4ba2SKamil Rytarowski 	if (cleanup) {
1825627d4ba2SKamil Rytarowski 		/* Let the PF turn the BGX's RX and TX switches to the OFF position */
1826627d4ba2SKamil Rytarowski 		nicvf_mbox_shutdown(nic);
1827627d4ba2SKamil Rytarowski 	}
1828627d4ba2SKamil Rytarowski 
18297413feeeSJerin Jacob 	/* Disable VLAN Strip */
18307413feeeSJerin Jacob 	nicvf_vlan_hw_strip(nic, 0);
18317413feeeSJerin Jacob 
1832627d4ba2SKamil Rytarowski 	/* Get queue ranges for this VF */
1833627d4ba2SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1834627d4ba2SKamil Rytarowski 
1835627d4ba2SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++)
1836627d4ba2SKamil Rytarowski 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1837627d4ba2SKamil Rytarowski 
1838627d4ba2SKamil Rytarowski 	/* Get queue ranges for this VF */
1839627d4ba2SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
18407413feeeSJerin Jacob 
18417413feeeSJerin Jacob 	/* Reclaim rq */
1842627d4ba2SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++)
1843627d4ba2SKamil Rytarowski 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
18447413feeeSJerin Jacob 
18457413feeeSJerin Jacob 	/* Reclaim RBDR */
18467413feeeSJerin Jacob 	ret = nicvf_qset_rbdr_reclaim(nic, 0);
18477413feeeSJerin Jacob 	if (ret)
18487413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
18497413feeeSJerin Jacob 
18507413feeeSJerin Jacob 	/* Move all charged buffers in RBDR back to pool */
18517413feeeSJerin Jacob 	if (nic->rbdr != NULL)
18526d3cbd56SKamil Rytarowski 		nicvf_rbdr_release_mbufs(dev, nic);
18537413feeeSJerin Jacob 
18547413feeeSJerin Jacob 	/* Disable qset */
1855627d4ba2SKamil Rytarowski 	ret = nicvf_qset_reclaim(nic);
18567413feeeSJerin Jacob 	if (ret)
18577413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
18587413feeeSJerin Jacob 
18597413feeeSJerin Jacob 	/* Disable all interrupts */
18607413feeeSJerin Jacob 	nicvf_disable_all_interrupts(nic);
18617413feeeSJerin Jacob 
18627413feeeSJerin Jacob 	/* Free RBDR SW structure */
18637413feeeSJerin Jacob 	if (nic->rbdr) {
18647413feeeSJerin Jacob 		rte_free(nic->rbdr);
18657413feeeSJerin Jacob 		nic->rbdr = NULL;
18667413feeeSJerin Jacob 	}
18677413feeeSJerin Jacob }
18687413feeeSJerin Jacob 
18697413feeeSJerin Jacob static void
18707413feeeSJerin Jacob nicvf_dev_close(struct rte_eth_dev *dev)
18717413feeeSJerin Jacob {
1872627d4ba2SKamil Rytarowski 	size_t i;
1873627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
1874627d4ba2SKamil Rytarowski 
18757413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
18767413feeeSJerin Jacob 
1877627d4ba2SKamil Rytarowski 	nicvf_dev_stop_cleanup(dev, true);
1878f141adcaSKamil Rytarowski 	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1879627d4ba2SKamil Rytarowski 
1880627d4ba2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1881627d4ba2SKamil Rytarowski 		if (!nic->snicvf[i])
1882627d4ba2SKamil Rytarowski 			continue;
1883627d4ba2SKamil Rytarowski 
1884627d4ba2SKamil Rytarowski 		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1885627d4ba2SKamil Rytarowski 	}
18867413feeeSJerin Jacob }
18877413feeeSJerin Jacob 
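/*
 * Assign the previously probed secondary qset VFs to this primary VF
 * and ask the PF to attach them; panics if fewer secondary VFs were
 * probed than the configured queue count requires.
 */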
1888bc79615aSJerin Jacob static int
1889b7004ab2SKamil Rytarowski nicvf_request_sqs(struct nicvf *nic)
1890b7004ab2SKamil Rytarowski {
1891b7004ab2SKamil Rytarowski 	size_t i;
1892b7004ab2SKamil Rytarowski 
1893b7004ab2SKamil Rytarowski 	assert_primary(nic);
1894b7004ab2SKamil Rytarowski 	assert(nic->sqs_count > 0);
1895b7004ab2SKamil Rytarowski 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1896b7004ab2SKamil Rytarowski 
1897b7004ab2SKamil Rytarowski 	/* Set the number of Rx/Tx queues in each of the SQsets */
1898b7004ab2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1899b7004ab2SKamil Rytarowski 		if (nicvf_svf_empty())
1900b7004ab2SKamil Rytarowski 			rte_panic("Cannot assign sufficient number of "
1901b7004ab2SKamil Rytarowski 				  "secondary queues to primary VF%" PRIu8 "\n",
1902b7004ab2SKamil Rytarowski 				  nic->vf_id);
1903b7004ab2SKamil Rytarowski 
1904b7004ab2SKamil Rytarowski 		nic->snicvf[i] = nicvf_svf_pop();
1905b7004ab2SKamil Rytarowski 		nic->snicvf[i]->sqs_id = i;
1906b7004ab2SKamil Rytarowski 	}
1907b7004ab2SKamil Rytarowski 
1908b7004ab2SKamil Rytarowski 	return nicvf_mbox_request_sqs(nic);
1909b7004ab2SKamil Rytarowski }
1910b7004ab2SKamil Rytarowski 
1911b7004ab2SKamil Rytarowski static int
1912bc79615aSJerin Jacob nicvf_dev_configure(struct rte_eth_dev *dev)
1913bc79615aSJerin Jacob {
1914b7004ab2SKamil Rytarowski 	struct rte_eth_dev_data *data = dev->data;
1915b7004ab2SKamil Rytarowski 	struct rte_eth_conf *conf = &data->dev_conf;
1916bc79615aSJerin Jacob 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1917bc79615aSJerin Jacob 	struct rte_eth_txmode *txmode = &conf->txmode;
1918bc79615aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1919b7004ab2SKamil Rytarowski 	uint8_t cqcount;
1920bc79615aSJerin Jacob 
1921bc79615aSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1922bc79615aSJerin Jacob 
192373fb89ddSAndrew Rybchenko 	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
19248b945a7fSPavan Nikhilesh 		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
19258b945a7fSPavan Nikhilesh 
1926bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Hugepages are not configured");
1927bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Huge page is not configured");
1928bc79615aSJerin Jacob 		return -EINVAL;
1929bc79615aSJerin Jacob 	}
1930bc79615aSJerin Jacob 
1931bc79615aSJerin Jacob 	if (txmode->mq_mode) {
1932bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1933bc79615aSJerin Jacob 		return -EINVAL;
1934bc79615aSJerin Jacob 	}
1935bc79615aSJerin Jacob 
1936bc79615aSJerin Jacob 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1937bc79615aSJerin Jacob 		rxmode->mq_mode != ETH_MQ_RX_RSS) {
1938bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1939bc79615aSJerin Jacob 		return -EINVAL;
1940bc79615aSJerin Jacob 	}
1941bc79615aSJerin Jacob 
1942bc79615aSJerin Jacob 	if (rxmode->split_hdr_size) {
1943bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1944bc79615aSJerin Jacob 		return -EINVAL;
1945bc79615aSJerin Jacob 	}
1946bc79615aSJerin Jacob 
1947bc79615aSJerin Jacob 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1948bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1949bc79615aSJerin Jacob 		return -EINVAL;
1950bc79615aSJerin Jacob 	}
1951bc79615aSJerin Jacob 
1952bc79615aSJerin Jacob 	if (conf->dcb_capability_en) {
1953bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "DCB enable not supported");
1954bc79615aSJerin Jacob 		return -EINVAL;
1955bc79615aSJerin Jacob 	}
1956bc79615aSJerin Jacob 
1957bc79615aSJerin Jacob 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1958bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Flow director not supported");
1959bc79615aSJerin Jacob 		return -EINVAL;
1960bc79615aSJerin Jacob 	}
1961bc79615aSJerin Jacob 
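	/*
	 * One qset provides MAX_RCV_QUEUES_PER_QS queue pairs; queues beyond
	 * that are served by secondary qsets. For example, assuming 8 queues
	 * per qset, a request for 12 queues rounds up to 16 and therefore
	 * needs 16 / 8 - 1 = 1 secondary qset.
	 */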
1962b7004ab2SKamil Rytarowski 	assert_primary(nic);
1963b7004ab2SKamil Rytarowski 	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1964b7004ab2SKamil Rytarowski 	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1965b7004ab2SKamil Rytarowski 	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1966b7004ab2SKamil Rytarowski 		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1967b7004ab2SKamil Rytarowski 		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1968b7004ab2SKamil Rytarowski 	} else {
1969b7004ab2SKamil Rytarowski 		nic->sqs_count = 0;
1970b7004ab2SKamil Rytarowski 	}
1971b7004ab2SKamil Rytarowski 
1972b7004ab2SKamil Rytarowski 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1973b7004ab2SKamil Rytarowski 
1974b7004ab2SKamil Rytarowski 	if (nic->sqs_count > 0) {
1975b7004ab2SKamil Rytarowski 		if (nicvf_request_sqs(nic)) {
1976b7004ab2SKamil Rytarowski 			rte_panic("Cannot assign sufficient number of "
1977b7004ab2SKamil Rytarowski 				  "secondary queues to PORT%d VF%" PRIu8 "\n",
1978b7004ab2SKamil Rytarowski 				  dev->data->port_id, nic->vf_id);
1979b7004ab2SKamil Rytarowski 		}
1980b7004ab2SKamil Rytarowski 	}
1981b7004ab2SKamil Rytarowski 
19825e64c812SPavan Nikhilesh 	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
19835e64c812SPavan Nikhilesh 		nic->offload_cksum = 1;
19845e64c812SPavan Nikhilesh 
1985bc79615aSJerin Jacob 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1986bc79615aSJerin Jacob 		dev->data->port_id, nicvf_hw_cap(nic));
1987bc79615aSJerin Jacob 
1988bc79615aSJerin Jacob 	return 0;
1989bc79615aSJerin Jacob }
1990bc79615aSJerin Jacob 
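/*
 * Administrative link control: dev_set_link_up brings the link up via
 * the PF mailbox and then restarts the TX queues; dev_set_link_down
 * stops the TX queues first and then asks the PF to take the link down.
 */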
1991*b8d96c71SHarman Kalra static int
1992*b8d96c71SHarman Kalra nicvf_dev_set_link_up(struct rte_eth_dev *dev)
1993*b8d96c71SHarman Kalra {
1994*b8d96c71SHarman Kalra 	struct nicvf *nic = nicvf_pmd_priv(dev);
1995*b8d96c71SHarman Kalra 	int rc, i;
1996*b8d96c71SHarman Kalra 
1997*b8d96c71SHarman Kalra 	rc = nicvf_mbox_set_link_up_down(nic, true);
1998*b8d96c71SHarman Kalra 	if (rc)
1999*b8d96c71SHarman Kalra 		goto done;
2000*b8d96c71SHarman Kalra 
2001*b8d96c71SHarman Kalra 	/* Start tx queues */
2002*b8d96c71SHarman Kalra 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2003*b8d96c71SHarman Kalra 		nicvf_dev_tx_queue_start(dev, i);
2004*b8d96c71SHarman Kalra 
2005*b8d96c71SHarman Kalra done:
2006*b8d96c71SHarman Kalra 	return rc;
2007*b8d96c71SHarman Kalra }
2008*b8d96c71SHarman Kalra 
2009*b8d96c71SHarman Kalra static int
2010*b8d96c71SHarman Kalra nicvf_dev_set_link_down(struct rte_eth_dev *dev)
2011*b8d96c71SHarman Kalra {
2012*b8d96c71SHarman Kalra 	struct nicvf *nic = nicvf_pmd_priv(dev);
2013*b8d96c71SHarman Kalra 	int i;
2014*b8d96c71SHarman Kalra 
2015*b8d96c71SHarman Kalra 	/* Stop tx queues */
2016*b8d96c71SHarman Kalra 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2017*b8d96c71SHarman Kalra 		nicvf_dev_tx_queue_stop(dev, i);
2018*b8d96c71SHarman Kalra 
2019*b8d96c71SHarman Kalra 	return nicvf_mbox_set_link_up_down(nic, false);
2020*b8d96c71SHarman Kalra }
2021*b8d96c71SHarman Kalra 
2022e4387966SJerin Jacob /* Initialize and register driver with DPDK Application */
2023e4387966SJerin Jacob static const struct eth_dev_ops nicvf_eth_dev_ops = {
2024bc79615aSJerin Jacob 	.dev_configure            = nicvf_dev_configure,
20257413feeeSJerin Jacob 	.dev_start                = nicvf_dev_start,
20267413feeeSJerin Jacob 	.dev_stop                 = nicvf_dev_stop,
20278fc70464SJerin Jacob 	.link_update              = nicvf_dev_link_update,
20287413feeeSJerin Jacob 	.dev_close                = nicvf_dev_close,
2029684fa771SJerin Jacob 	.stats_get                = nicvf_dev_stats_get,
2030684fa771SJerin Jacob 	.stats_reset              = nicvf_dev_stats_reset,
20316eae36eaSJerin Jacob 	.promiscuous_enable       = nicvf_dev_promisc_enable,
2032dcd7b1e1SJerin Jacob 	.dev_infos_get            = nicvf_dev_info_get,
20331c80e4fdSJerin Jacob 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
203465d9804eSJerin Jacob 	.mtu_set                  = nicvf_dev_set_mtu,
2035d3bf2564SRakesh Kudurumalla 	.vlan_offload_set         = nicvf_vlan_offload_set,
203643362c6aSJerin Jacob 	.reta_update              = nicvf_dev_reta_update,
203743362c6aSJerin Jacob 	.reta_query               = nicvf_dev_reta_query,
203843362c6aSJerin Jacob 	.rss_hash_update          = nicvf_dev_rss_hash_update,
203943362c6aSJerin Jacob 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
204086b4eb42SJerin Jacob 	.rx_queue_start           = nicvf_dev_rx_queue_start,
204186b4eb42SJerin Jacob 	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
2042fc1f6c62SJerin Jacob 	.tx_queue_start           = nicvf_dev_tx_queue_start,
2043fc1f6c62SJerin Jacob 	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
2044aa0d976eSJerin Jacob 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
2045aa0d976eSJerin Jacob 	.rx_queue_release         = nicvf_dev_rx_queue_release,
2046da14e00cSJerin Jacob 	.rx_queue_count           = nicvf_dev_rx_queue_count,
20473f3c6f97SJerin Jacob 	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
20483f3c6f97SJerin Jacob 	.tx_queue_release         = nicvf_dev_tx_queue_release,
2049*b8d96c71SHarman Kalra 	.dev_set_link_up          = nicvf_dev_set_link_up,
2050*b8d96c71SHarman Kalra 	.dev_set_link_down        = nicvf_dev_set_link_down,
2051606ee746SJerin Jacob 	.get_reg                  = nicvf_dev_get_regs,
2052e4387966SJerin Jacob };
2053e4387966SJerin Jacob 
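/*
 * Only VLAN stripping is programmable in hardware here; the filter and
 * extend bits in the mask are accepted but ignored.
 */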
2054d3bf2564SRakesh Kudurumalla static int
2055d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2056d3bf2564SRakesh Kudurumalla {
2057d3bf2564SRakesh Kudurumalla 	struct rte_eth_rxmode *rxmode;
2058d3bf2564SRakesh Kudurumalla 	struct nicvf *nic = nicvf_pmd_priv(dev);
2059d3bf2564SRakesh Kudurumalla 	rxmode = &dev->data->dev_conf.rxmode;
2060d3bf2564SRakesh Kudurumalla 	if (mask & ETH_VLAN_STRIP_MASK) {
2061d3bf2564SRakesh Kudurumalla 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2062d3bf2564SRakesh Kudurumalla 			nicvf_vlan_hw_strip(nic, true);
2063d3bf2564SRakesh Kudurumalla 		else
2064d3bf2564SRakesh Kudurumalla 			nicvf_vlan_hw_strip(nic, false);
2065d3bf2564SRakesh Kudurumalla 	}
2066d3bf2564SRakesh Kudurumalla 
2067d3bf2564SRakesh Kudurumalla 	return 0;
2068d3bf2564SRakesh Kudurumalla }
2069d3bf2564SRakesh Kudurumalla 
2070d3bf2564SRakesh Kudurumalla static int
2071d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2072d3bf2564SRakesh Kudurumalla {
2073d3bf2564SRakesh Kudurumalla 	nicvf_vlan_offload_config(dev, mask);
2074d3bf2564SRakesh Kudurumalla 
2075d3bf2564SRakesh Kudurumalla 	return 0;
2076d3bf2564SRakesh Kudurumalla }
2077d3bf2564SRakesh Kudurumalla 
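/*
 * Parse the SKIP_DATA_BYTES devarg and program the hardware first-skip:
 * the value must be a multiple of 8 and below 128 (one cache line). As
 * an illustration only (the PCI address is hypothetical), the devarg is
 * typically passed on the EAL command line, e.g.
 * -w 0002:01:00.2,skip_data_bytes=8
 */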
2078279d3319SRakesh Kudurumalla static inline int
2079279d3319SRakesh Kudurumalla nicvf_set_first_skip(struct rte_eth_dev *dev)
2080279d3319SRakesh Kudurumalla {
2081279d3319SRakesh Kudurumalla 	int bytes_to_skip = 0;
2082279d3319SRakesh Kudurumalla 	int ret = 0;
2083279d3319SRakesh Kudurumalla 	unsigned int i;
2084279d3319SRakesh Kudurumalla 	struct rte_kvargs *kvlist;
2085279d3319SRakesh Kudurumalla 	static const char *const skip[] = {
2086279d3319SRakesh Kudurumalla 		SKIP_DATA_BYTES,
2087279d3319SRakesh Kudurumalla 		NULL};
2088279d3319SRakesh Kudurumalla 	struct nicvf *nic = nicvf_pmd_priv(dev);
2089279d3319SRakesh Kudurumalla 
2090279d3319SRakesh Kudurumalla 	if (!dev->device->devargs) {
2091279d3319SRakesh Kudurumalla 		nicvf_first_skip_config(nic, 0);
2092279d3319SRakesh Kudurumalla 		return ret;
2093279d3319SRakesh Kudurumalla 	}
2094279d3319SRakesh Kudurumalla 
2095279d3319SRakesh Kudurumalla 	kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
2096279d3319SRakesh Kudurumalla 	if (!kvlist)
2097279d3319SRakesh Kudurumalla 		return -EINVAL;
2098279d3319SRakesh Kudurumalla 
2099279d3319SRakesh Kudurumalla 	if (kvlist->count == 0)
2100279d3319SRakesh Kudurumalla 		goto exit;
2101279d3319SRakesh Kudurumalla 
2102279d3319SRakesh Kudurumalla 	for (i = 0; i != kvlist->count; ++i) {
2103279d3319SRakesh Kudurumalla 		const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
2104279d3319SRakesh Kudurumalla 
2105279d3319SRakesh Kudurumalla 		if (!strcmp(pair->key, SKIP_DATA_BYTES))
2106279d3319SRakesh Kudurumalla 			bytes_to_skip = atoi(pair->value);
2107279d3319SRakesh Kudurumalla 	}
2108279d3319SRakesh Kudurumalla 
2109279d3319SRakesh Kudurumalla 	/* 128 bytes amounts to one cache line */
2110279d3319SRakesh Kudurumalla 	if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
2111279d3319SRakesh Kudurumalla 		if (!(bytes_to_skip % 8)) {
2112279d3319SRakesh Kudurumalla 			nicvf_first_skip_config(nic, (bytes_to_skip / 8));
2113279d3319SRakesh Kudurumalla 			nic->skip_bytes = bytes_to_skip;
2114279d3319SRakesh Kudurumalla 			goto kvlist_free;
2115279d3319SRakesh Kudurumalla 		} else {
2116279d3319SRakesh Kudurumalla 			PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
2117279d3319SRakesh Kudurumalla 			ret = -EINVAL;
2118279d3319SRakesh Kudurumalla 			goto exit;
2119279d3319SRakesh Kudurumalla 		}
2120279d3319SRakesh Kudurumalla 	} else {
2121279d3319SRakesh Kudurumalla 		PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
2122279d3319SRakesh Kudurumalla 		ret = -EINVAL;
2123279d3319SRakesh Kudurumalla 		goto exit;
2124279d3319SRakesh Kudurumalla 	}
2125279d3319SRakesh Kudurumalla exit:
2126279d3319SRakesh Kudurumalla 	nicvf_first_skip_config(nic, 0);
2127279d3319SRakesh Kudurumalla kvlist_free:
2128279d3319SRakesh Kudurumalla 	rte_kvargs_free(kvlist);
2129279d3319SRakesh Kudurumalla 	return ret;
2130279d3319SRakesh Kudurumalla }

2131e4387966SJerin Jacob static int
2132230dce64SAmit Gupta nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
2133230dce64SAmit Gupta {
2134230dce64SAmit Gupta 	PMD_INIT_FUNC_TRACE();
2135230dce64SAmit Gupta 
2136230dce64SAmit Gupta 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2137230dce64SAmit Gupta 		nicvf_dev_close(dev);
2138230dce64SAmit Gupta 
2139230dce64SAmit Gupta 	return 0;
2140230dce64SAmit Gupta }
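
/*
 * Probe-time initialization: map BAR0, start the mailbox polling alarm
 * and handshake with the PF; secondary qset VFs are pushed onto the
 * svf stack instead of being exposed as regular ethdev ports.
 */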
2141230dce64SAmit Gupta static int
2142e4387966SJerin Jacob nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2143e4387966SJerin Jacob {
2144e4387966SJerin Jacob 	int ret;
2145e4387966SJerin Jacob 	struct rte_pci_device *pci_dev;
2146e4387966SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2147e4387966SJerin Jacob 
2148e4387966SJerin Jacob 	PMD_INIT_FUNC_TRACE();
2149e4387966SJerin Jacob 
2150e4387966SJerin Jacob 	eth_dev->dev_ops = &nicvf_eth_dev_ops;
2151e4387966SJerin Jacob 
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (nic) {
			/* Setup callbacks for secondary process */
			nicvf_set_tx_function(eth_dev);
			nicvf_set_rx_function(eth_dev);
			return 0;
		} else {
			/* If nic == NULL then it is a secondary VF, so the
			 * ethdev needs to be released by the caller.
			 */
			return ENOTSUP;
		}
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

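	/*
	 * Instead of a dedicated interrupt thread, the VF relies on a
	 * periodic rte_alarm callback (nicvf_interrupt) to service mailbox
	 * messages and link status changes.
	 */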
	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false"
			);
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

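	/*
	 * A secondary Qset (SQS) VF only contributes additional queues to a
	 * primary VF and is not exposed as a port of its own: park its nic
	 * state on the SVF stack and detach the ethdev below.
	 */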
	if (nic->sqs_mode) {
		/* Push nic to stack of secondary vfs */
		nicvf_svf_push(nic);

		/* Steal nic pointer from the device for further reuse */
		eth_dev->data->dev_private = NULL;

		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
			goto fail;
		}

		/* Detach port by returning positive error number */
		return ENOTSUP;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
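	/* Generate a random MAC address if none has been assigned yet */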
	if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
		rte_eth_random_addr(&nic->mac_addr[0]);

	rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_set_first_skip(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure first skip");
		goto malloc_fail;
	}
	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}

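/*
 * Match table covering the CN88xx (pass1 and pass2), CN81xx and CN83xx
 * NIC VF variants, distinguished by their PCI subsystem device IDs.
 */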
static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

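/*
 * rte_eth_dev_pci_generic_probe() allocates the ethdev along with a
 * private data area of sizeof(struct nicvf) and then calls
 * nicvf_eth_dev_init() on the new port.
 */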
static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
		nicvf_eth_dev_init);
}

static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
}

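/*
 * KEEP_MAPPED_RES keeps the BAR mappings alive when probe returns a
 * positive value (as done above for SQS VFs), presumably because the nic
 * state parked on the SVF stack still references the mapped BAR0.
 */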
static struct rte_pci_driver rte_nicvf_pmd = {
	.id_table = pci_id_nicvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nicvf_eth_pci_probe,
	.remove = nicvf_eth_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");