/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

int nicvf_logtype_mbox;
int nicvf_logtype_init;
int nicvf_logtype_driver;

static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);

RTE_INIT(nicvf_init_log);
static void
nicvf_init_log(void)
{
	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
	if (nicvf_logtype_mbox >= 0)
		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);

	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
	if (nicvf_logtype_init >= 0)
		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);

	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
	if (nicvf_logtype_driver >= 0)
		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}

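/*
 * Translate the link state cached in struct nicvf (updated from BGX
 * mailbox notifications) into the generic rte_eth_link representation.
 */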
static void
nicvf_link_status_update(struct nicvf *nic,
			 struct rte_eth_link *link)
{
	memset(link, 0, sizeof(*link));

	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_AUTONEG;
}

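/*
 * The VF has no real link interrupt; a periodic EAL alarm polls the
 * interrupt/mailbox status instead. On a BGX link change the handler
 * refreshes the cached link state and, if the application enabled
 * intr_conf.lsc, raises an RTE_ETH_EVENT_INTR_LSC callback.
 */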
static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_link link;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc) {
			nicvf_link_status_update(nic, &link);
			rte_eth_linkstatus_set(dev, &link);

			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
		}
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			nicvf_link_status_update(nic, &link);
			if (link.link_status == ETH_LINK_UP)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		nicvf_link_status_update(nic, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}

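/*
 * frame_size adds the L2 overhead on top of the MTU: a 1500 byte MTU,
 * for example, yields 1500 + 14 (Ethernet header) + 4 (CRC) = 1518
 * bytes on the wire.
 */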
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	size_t i;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg>  >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

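/*
 * A NULL data pointer is a size query: report the register count and
 * width so the caller can size a buffer for the full dump.
 */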
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

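/*
 * Per-queue stats are gathered from the primary VF and then from every
 * attached secondary VF (qset). Queue indices past
 * RTE_ETHDEV_QUEUE_STAT_CNTRS are skipped because the ethdev stats
 * arrays cannot hold them.
 */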
static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}

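/*
 * Tunnel ptypes are appended only when the hardware can parse inner
 * headers (NICVF_CAP_TUNNEL_PARSING), and the table is returned only
 * when one of the driver's standard receive handlers is active, since
 * those are the paths that actually fill in packet types.
 */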
static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
			sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}

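/*
 * The mailbox reset request carries a 2-bit field per queue, so every
 * started queue i contributes 0x3 << (i * 2); queues 0 and 2, for
 * example, yield a mask of 0x33.
 */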
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i, j;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);

	/* Reset secondary nic queue counters */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		if (snic == NULL)
			break;

		/* Rebuild the masks for this VF; a separate counter is
		 * used so the outer VF iteration is not clobbered.
		 */
		rxqs = 0;
		txqs = 0;
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (j = rx_start; j <= rx_end; j++)
			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (j = tx_start; j <= tx_end; j++)
			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));

		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
	}
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

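/*
 * The ETH_RSS_* flags and the hardware RSS_*_ENA bits are not 1:1:
 * IPv4 and IPv6 share a single RSS_IP_ENA bit, which is why the
 * reverse mapping below reports both address families for it.
 */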
static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

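/*
 * The hardware indirection table has a fixed NIC_MAX_RSS_IDR_TBL_SIZE
 * entries, so both the query and update handlers insist on exactly
 * that reta_size.
 */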
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the number the hardware "
			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] =
					tbl[(i * RTE_RETA_GROUP_SIZE) + j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the number the hardware "
			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[(i * RTE_RETA_GROUP_SIZE) + j] =
					reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
				rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

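/*
 * The CQ/SQ/RBDR ring memzones below are reserved at the maximum ring
 * size rather than desc_cnt, presumably so the same zone can be reused
 * if the queue is later re-created with a larger descriptor count;
 * qlen_mask alone tracks the depth actually requested.
 */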
static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

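/*
 * A single RBDR (receive buffer descriptor ring) is shared by all Rx
 * queues of the qset; buffsz records the mbuf data room used when
 * pre-charging buffers into it.
 */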
static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

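/*
 * In the per-VF queue start/stop helpers below, qidx is VF-relative
 * (0 .. MAX_SND_QUEUES_PER_QS - 1); nicvf_netdev_qidx() maps it back
 * to the ethdev-level queue index used by dev->data.
 */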
static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
				RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Guard against division by zero when no Rx queue is started */
	if (qmap_size == 0)
		return -EINVAL;

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	/* txq is NULL when no Tx queue has been set up yet */
	if (txq == NULL)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}

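/*
 * Ethdev queue indices beyond MAX_SND_QUEUES_PER_QS belong to a
 * secondary VF (qset): with 8 send queues per qset, ethdev queue 9
 * becomes queue 1 of snicvf[0].
 */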
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t conf_offloads, offload_capa, unsupported_offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
		socket_id, nic->node);

	conf_offloads = tx_conf->offloads;
	offload_capa = NICVF_TX_OFFLOAD_CAPA;

	unsupported_offloads = conf_offloads & ~offload_capa;
	if (unsupported_offloads) {
		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
		      " Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
		      unsupported_offloads, conf_offloads, offload_capa);
		return -ENOTSUP;
	}

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				tx_conf->tx_free_thresh :
				NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	txq->offloads = conf_offloads;

	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
				nb_desc * sizeof(struct rte_mbuf *),
				RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
			txq->phys, txq->offloads);

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

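/*
 * Drain whatever packets are still pending on the queue through the
 * regular burst routine, free those mbufs, then let
 * nicvf_dev_rbdr_refill() account for the buffers returned to the
 * RBDR before the queue is reset.
 */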
static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

113386b4eb42SJerin Jacob static inline int
1134627d4ba2SKamil Rytarowski nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1135627d4ba2SKamil Rytarowski 		       uint16_t qidx)
113686b4eb42SJerin Jacob {
113786b4eb42SJerin Jacob 	struct nicvf_rxq *rxq;
113886b4eb42SJerin Jacob 	int ret, other_error;
113986b4eb42SJerin Jacob 
1140627d4ba2SKamil Rytarowski 	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1141627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED)
114286b4eb42SJerin Jacob 		return 0;
114386b4eb42SJerin Jacob 
114486b4eb42SJerin Jacob 	ret = nicvf_qset_rq_reclaim(nic, qidx);
114586b4eb42SJerin Jacob 	if (ret)
1146627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1147627d4ba2SKamil Rytarowski 			     nic->vf_id, qidx, ret);
114886b4eb42SJerin Jacob 
114986b4eb42SJerin Jacob 	other_error = ret;
1150627d4ba2SKamil Rytarowski 	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
11516d3cbd56SKamil Rytarowski 	nicvf_rx_queue_release_mbufs(dev, rxq);
115286b4eb42SJerin Jacob 	nicvf_rx_queue_reset(rxq);
115386b4eb42SJerin Jacob 
115486b4eb42SJerin Jacob 	ret = nicvf_qset_cq_reclaim(nic, qidx);
115586b4eb42SJerin Jacob 	if (ret)
1156627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1157627d4ba2SKamil Rytarowski 			     nic->vf_id, qidx, ret);
115886b4eb42SJerin Jacob 
115986b4eb42SJerin Jacob 	other_error |= ret;
1160627d4ba2SKamil Rytarowski 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1161627d4ba2SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
116286b4eb42SJerin Jacob 	return other_error;
116386b4eb42SJerin Jacob }
116486b4eb42SJerin Jacob 
1165aa0d976eSJerin Jacob static void
1166aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(void *rx_queue)
1167aa0d976eSJerin Jacob {
1168aa0d976eSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1169aa0d976eSJerin Jacob 
1170394014bcSKamil Rytarowski 	rte_free(rx_queue);
1171aa0d976eSJerin Jacob }
1172aa0d976eSJerin Jacob 
1173aa0d976eSJerin Jacob static int
117486b4eb42SJerin Jacob nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
117586b4eb42SJerin Jacob {
117671e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
117786b4eb42SJerin Jacob 	int ret;
117886b4eb42SJerin Jacob 
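	/* Queue indices beyond the primary qset belong to secondary VFs;
	 * e.g. with 8 queues per qset, global qidx 10 becomes queue 2 of
	 * snicvf[0].
	 */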
117971e76186SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
118071e76186SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
118171e76186SKamil Rytarowski 
118271e76186SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
118371e76186SKamil Rytarowski 
118471e76186SKamil Rytarowski 	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
118586b4eb42SJerin Jacob 	if (ret)
118686b4eb42SJerin Jacob 		return ret;
118786b4eb42SJerin Jacob 
118886b4eb42SJerin Jacob 	ret = nicvf_configure_cpi(dev);
118986b4eb42SJerin Jacob 	if (ret)
119086b4eb42SJerin Jacob 		return ret;
119186b4eb42SJerin Jacob 
119286b4eb42SJerin Jacob 	return nicvf_configure_rss_reta(dev);
119386b4eb42SJerin Jacob }
119486b4eb42SJerin Jacob 
119586b4eb42SJerin Jacob static int
119686b4eb42SJerin Jacob nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
119786b4eb42SJerin Jacob {
119886b4eb42SJerin Jacob 	int ret;
1199627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
120086b4eb42SJerin Jacob 
1201627d4ba2SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1202627d4ba2SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1203627d4ba2SKamil Rytarowski 
1204627d4ba2SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1205627d4ba2SKamil Rytarowski 
1206627d4ba2SKamil Rytarowski 	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
120786b4eb42SJerin Jacob 	ret |= nicvf_configure_cpi(dev);
120886b4eb42SJerin Jacob 	ret |= nicvf_configure_rss_reta(dev);
120986b4eb42SJerin Jacob 	return ret;
121086b4eb42SJerin Jacob }
121186b4eb42SJerin Jacob 
121286b4eb42SJerin Jacob static int
1213fc1f6c62SJerin Jacob nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1214fc1f6c62SJerin Jacob {
121571e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
121671e76186SKamil Rytarowski 
121771e76186SKamil Rytarowski 	if (qidx >= MAX_SND_QUEUES_PER_QS)
121871e76186SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
121971e76186SKamil Rytarowski 
122071e76186SKamil Rytarowski 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
122171e76186SKamil Rytarowski 
122271e76186SKamil Rytarowski 	return nicvf_vf_start_tx_queue(dev, nic, qidx);
1223fc1f6c62SJerin Jacob }
1224fc1f6c62SJerin Jacob 
1225fc1f6c62SJerin Jacob static int
1226fc1f6c62SJerin Jacob nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1227fc1f6c62SJerin Jacob {
1228627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
1229627d4ba2SKamil Rytarowski 
1230627d4ba2SKamil Rytarowski 	if (qidx >= MAX_SND_QUEUES_PER_QS)
1231627d4ba2SKamil Rytarowski 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1232627d4ba2SKamil Rytarowski 
1233627d4ba2SKamil Rytarowski 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1234627d4ba2SKamil Rytarowski 
1235627d4ba2SKamil Rytarowski 	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1236fc1f6c62SJerin Jacob }
1237fc1f6c62SJerin Jacob 
12385c7ccb26SJerin Jacob static inline void
12395c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
12405c7ccb26SJerin Jacob {
12415c7ccb26SJerin Jacob 	uintptr_t p;
12425c7ccb26SJerin Jacob 	struct rte_mbuf mb_def;
12435c7ccb26SJerin Jacob 
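	/* The checks below verify at build time that data_off, refcnt,
	 * nb_segs and port all live inside the 8-byte rearm_data word, so
	 * the Rx path can re-arm an mbuf with a single 64-bit store of
	 * mbuf_initializer.
	 */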
12445c7ccb26SJerin Jacob 	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
124595b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
124695b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
124795b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 2);
124895b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
124995b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 4);
125095b097c8SJerin Jacob 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
125195b097c8SJerin Jacob 				offsetof(struct rte_mbuf, data_off) != 6);
12525c7ccb26SJerin Jacob 	mb_def.nb_segs = 1;
12535c7ccb26SJerin Jacob 	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
12545c7ccb26SJerin Jacob 	mb_def.port = rxq->port_id;
12555c7ccb26SJerin Jacob 	rte_mbuf_refcnt_set(&mb_def, 1);
12565c7ccb26SJerin Jacob 
12575c7ccb26SJerin Jacob 	/* Prevent compiler reordering: rearm_data covers previous fields */
12585c7ccb26SJerin Jacob 	rte_compiler_barrier();
12595c7ccb26SJerin Jacob 	p = (uintptr_t)&mb_def.rearm_data;
12605c7ccb26SJerin Jacob 	rxq->mbuf_initializer.value = *(uint64_t *)p;
12615c7ccb26SJerin Jacob }
1262394014bcSKamil Rytarowski 
1263fc1f6c62SJerin Jacob static int
1264aa0d976eSJerin Jacob nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1265aa0d976eSJerin Jacob 			 uint16_t nb_desc, unsigned int socket_id,
1266aa0d976eSJerin Jacob 			 const struct rte_eth_rxconf *rx_conf,
1267aa0d976eSJerin Jacob 			 struct rte_mempool *mp)
1268aa0d976eSJerin Jacob {
1269aa0d976eSJerin Jacob 	uint16_t rx_free_thresh;
1270aa0d976eSJerin Jacob 	struct nicvf_rxq *rxq;
1271aa0d976eSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1272c97da2cbSMaciej Czekaj 	uint64_t conf_offloads, offload_capa, unsupported_offloads;
1273aa0d976eSJerin Jacob 
1274aa0d976eSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1275aa0d976eSJerin Jacob 
127621e3fb00SKamil Rytarowski 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
127721e3fb00SKamil Rytarowski 		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
127821e3fb00SKamil Rytarowski 
127921e3fb00SKamil Rytarowski 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
128021e3fb00SKamil Rytarowski 
1281aa0d976eSJerin Jacob 	/* Socket id check */
1282aa0d976eSJerin Jacob 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1283aa0d976eSJerin Jacob 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1284aa0d976eSJerin Jacob 		socket_id, nic->node);
1285aa0d976eSJerin Jacob 
1287c97da2cbSMaciej Czekaj 	conf_offloads = rx_conf->offloads;
1288c97da2cbSMaciej Czekaj 
1289c97da2cbSMaciej Czekaj 	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1290c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1291c97da2cbSMaciej Czekaj 		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1292c97da2cbSMaciej Czekaj 	}
1293c97da2cbSMaciej Czekaj 
1294c97da2cbSMaciej Czekaj 	offload_capa = NICVF_RX_OFFLOAD_CAPA;
1295c97da2cbSMaciej Czekaj 	unsupported_offloads = conf_offloads & ~offload_capa;
1296c97da2cbSMaciej Czekaj 
1297c97da2cbSMaciej Czekaj 	if (unsupported_offloads) {
1298c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
1299c97da2cbSMaciej Czekaj 		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1300c97da2cbSMaciej Czekaj 		      unsupported_offloads, conf_offloads, offload_capa);
1301c97da2cbSMaciej Czekaj 		return -ENOTSUP;
1302c97da2cbSMaciej Czekaj 	}
1303c97da2cbSMaciej Czekaj 
1304394014bcSKamil Rytarowski 	/* Mempool memory must be contiguous, so it must be a single memory segment */
1305aa0d976eSJerin Jacob 	if (mp->nb_mem_chunks != 1) {
1306394014bcSKamil Rytarowski 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1307394014bcSKamil Rytarowski 		return -EINVAL;
1308394014bcSKamil Rytarowski 	}
1309394014bcSKamil Rytarowski 
1310394014bcSKamil Rytarowski 	/* Mempool memory must be physically contiguous */
1311*4143b122SAndrew Rybchenko 	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
1312394014bcSKamil Rytarowski 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1313aa0d976eSJerin Jacob 		return -EINVAL;
1314aa0d976eSJerin Jacob 	}
1315aa0d976eSJerin Jacob 
1316aa0d976eSJerin Jacob 	/* Rx deferred start is not supported */
1317aa0d976eSJerin Jacob 	if (rx_conf->rx_deferred_start) {
1318aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1319aa0d976eSJerin Jacob 		return -EINVAL;
1320aa0d976eSJerin Jacob 	}
1321aa0d976eSJerin Jacob 
1322aa0d976eSJerin Jacob 	/* Round up nb_desc to an available qsize and validate the max desc count */
1323aa0d976eSJerin Jacob 	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1324aa0d976eSJerin Jacob 	if (nb_desc == 0) {
1325aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
1326aa0d976eSJerin Jacob 		return -EINVAL;
1327aa0d976eSJerin Jacob 	}
1328aa0d976eSJerin Jacob 
1329aa0d976eSJerin Jacob 	/* Check rx_free_thresh upper bound */
1330aa0d976eSJerin Jacob 	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1331aa0d976eSJerin Jacob 				rx_conf->rx_free_thresh :
1332aa0d976eSJerin Jacob 				NICVF_DEFAULT_RX_FREE_THRESH);
1333aa0d976eSJerin Jacob 	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1334aa0d976eSJerin Jacob 		rx_free_thresh >= nb_desc * .75) {
1335aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1336aa0d976eSJerin Jacob 				rx_free_thresh);
1337aa0d976eSJerin Jacob 		return -EINVAL;
1338aa0d976eSJerin Jacob 	}
1339aa0d976eSJerin Jacob 
1340aa0d976eSJerin Jacob 	/* Free memory prior to re-allocation if needed */
134121e3fb00SKamil Rytarowski 	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1342aa0d976eSJerin Jacob 		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
134321e3fb00SKamil Rytarowski 				nicvf_netdev_qidx(nic, qidx));
134421e3fb00SKamil Rytarowski 		nicvf_dev_rx_queue_release(
134521e3fb00SKamil Rytarowski 			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
134621e3fb00SKamil Rytarowski 		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1347aa0d976eSJerin Jacob 	}
1348aa0d976eSJerin Jacob 
1349aa0d976eSJerin Jacob 	/* Allocate rxq memory */
1350aa0d976eSJerin Jacob 	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1351aa0d976eSJerin Jacob 					RTE_CACHE_LINE_SIZE, nic->node);
1352aa0d976eSJerin Jacob 	if (rxq == NULL) {
135321e3fb00SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
135421e3fb00SKamil Rytarowski 			     nicvf_netdev_qidx(nic, qidx));
1355aa0d976eSJerin Jacob 		return -ENOMEM;
1356aa0d976eSJerin Jacob 	}
1357aa0d976eSJerin Jacob 
1358aa0d976eSJerin Jacob 	rxq->nic = nic;
1359aa0d976eSJerin Jacob 	rxq->pool = mp;
1360aa0d976eSJerin Jacob 	rxq->queue_id = qidx;
1361aa0d976eSJerin Jacob 	rxq->port_id = dev->data->port_id;
1362aa0d976eSJerin Jacob 	rxq->rx_free_thresh = rx_free_thresh;
1363aa0d976eSJerin Jacob 	rxq->rx_drop_en = rx_conf->rx_drop_en;
1364aa0d976eSJerin Jacob 	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1365aa0d976eSJerin Jacob 	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1366aa0d976eSJerin Jacob 	rxq->precharge_cnt = 0;
1367e2c519b3SJerin Jacob 
1368e2c519b3SJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1369e2c519b3SJerin Jacob 		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1370e2c519b3SJerin Jacob 	else
1371aa0d976eSJerin Jacob 		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1372aa0d976eSJerin Jacob 
13735c7ccb26SJerin Jacob 	nicvf_rxq_mbuf_setup(rxq);
1374e2c519b3SJerin Jacob 
1375aa0d976eSJerin Jacob 	/* Alloc completion queue */
13766d3cbd56SKamil Rytarowski 	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1377aa0d976eSJerin Jacob 		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1378aa0d976eSJerin Jacob 		nicvf_dev_rx_queue_release(rxq);
1379aa0d976eSJerin Jacob 		return -ENOMEM;
1380aa0d976eSJerin Jacob 	}
1381aa0d976eSJerin Jacob 
1382aa0d976eSJerin Jacob 	nicvf_rx_queue_reset(rxq);
1383aa0d976eSJerin Jacob 
1384c97da2cbSMaciej Czekaj 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1385c97da2cbSMaciej Czekaj 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
138621e3fb00SKamil Rytarowski 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1387c97da2cbSMaciej Czekaj 			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
1388aa0d976eSJerin Jacob 
138921e3fb00SKamil Rytarowski 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
139021e3fb00SKamil Rytarowski 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
139121e3fb00SKamil Rytarowski 		RTE_ETH_QUEUE_STATE_STOPPED;
1392aa0d976eSJerin Jacob 	return 0;
1393aa0d976eSJerin Jacob }
1394aa0d976eSJerin Jacob 
1395dcd7b1e1SJerin Jacob static void
1396dcd7b1e1SJerin Jacob nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1397dcd7b1e1SJerin Jacob {
1398dcd7b1e1SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1399c0802544SFerruh Yigit 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1400dcd7b1e1SJerin Jacob 
1401dcd7b1e1SJerin Jacob 	PMD_INIT_FUNC_TRACE();
1402dcd7b1e1SJerin Jacob 
1403ba2d05abSJerin Jacob 	/* Autonegotiation may be disabled */
1404ba2d05abSJerin Jacob 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1405ba2d05abSJerin Jacob 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1406ba2d05abSJerin Jacob 				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1407ba2d05abSJerin Jacob 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1408ba2d05abSJerin Jacob 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1409ba2d05abSJerin Jacob 
1410dcd7b1e1SJerin Jacob 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1411dcd7b1e1SJerin Jacob 	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
141221e3fb00SKamil Rytarowski 	dev_info->max_rx_queues =
141321e3fb00SKamil Rytarowski 			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
141421e3fb00SKamil Rytarowski 	dev_info->max_tx_queues =
141521e3fb00SKamil Rytarowski 			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1416dcd7b1e1SJerin Jacob 	dev_info->max_mac_addrs = 1;
1417eac901ceSJan Blunck 	dev_info->max_vfs = pci_dev->max_vfs;
1418dcd7b1e1SJerin Jacob 
1419c97da2cbSMaciej Czekaj 	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1420c97da2cbSMaciej Czekaj 	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1421c97da2cbSMaciej Czekaj 	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1422c97da2cbSMaciej Czekaj 	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1423dcd7b1e1SJerin Jacob 
1424dcd7b1e1SJerin Jacob 	dev_info->reta_size = nic->rss_info.rss_size;
1425dcd7b1e1SJerin Jacob 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1426dcd7b1e1SJerin Jacob 	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1427dcd7b1e1SJerin Jacob 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1428dcd7b1e1SJerin Jacob 		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1429dcd7b1e1SJerin Jacob 
1430dcd7b1e1SJerin Jacob 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1431dcd7b1e1SJerin Jacob 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1432dcd7b1e1SJerin Jacob 		.rx_drop_en = 0,
1433c97da2cbSMaciej Czekaj 		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
1434dcd7b1e1SJerin Jacob 	};
1435dcd7b1e1SJerin Jacob 
1436dcd7b1e1SJerin Jacob 	dev_info->default_txconf = (struct rte_eth_txconf) {
1437dcd7b1e1SJerin Jacob 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1438dcd7b1e1SJerin Jacob 		.txq_flags =
1439dcd7b1e1SJerin Jacob 			ETH_TXQ_FLAGS_NOMULTSEGS  |
1440dcd7b1e1SJerin Jacob 			ETH_TXQ_FLAGS_NOREFCOUNT  |
1441dcd7b1e1SJerin Jacob 			ETH_TXQ_FLAGS_NOMULTMEMP  |
1442dcd7b1e1SJerin Jacob 			ETH_TXQ_FLAGS_NOVLANOFFL  |
1443dcd7b1e1SJerin Jacob 			ETH_TXQ_FLAGS_NOXSUMSCTP,
1444c97da2cbSMaciej Czekaj 		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1445c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
1446c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_UDP_CKSUM          |
1447c97da2cbSMaciej Czekaj 			DEV_TX_OFFLOAD_TCP_CKSUM,
1448dcd7b1e1SJerin Jacob 	};
1449dcd7b1e1SJerin Jacob }
1450dcd7b1e1SJerin Jacob 
1451df6e0a06SSantosh Shukla static nicvf_iova_addr_t
1452394014bcSKamil Rytarowski rbdr_rte_mempool_get(void *dev, void *opaque)
14537413feeeSJerin Jacob {
14547413feeeSJerin Jacob 	uint16_t qidx;
14557413feeeSJerin Jacob 	uintptr_t mbuf;
14567413feeeSJerin Jacob 	struct nicvf_rxq *rxq;
1457394014bcSKamil Rytarowski 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
145821e3fb00SKamil Rytarowski 	struct nicvf *nic = (struct nicvf *)opaque;
145921e3fb00SKamil Rytarowski 	uint16_t rx_start, rx_end;
14607413feeeSJerin Jacob 
146121e3fb00SKamil Rytarowski 	/* Get queue ranges for this VF */
146221e3fb00SKamil Rytarowski 	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
146321e3fb00SKamil Rytarowski 
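	/* Pick the first rxq whose pool is below its precharge quota; the
	 * per-queue counter keeps the buffer count roughly balanced across
	 * all pools feeding the shared RBDR.
	 */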
146421e3fb00SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1465394014bcSKamil Rytarowski 		rxq = eth_dev->data->rx_queues[qidx];
14667413feeeSJerin Jacob 		/* Maintain equal buffer count across all pools */
14677413feeeSJerin Jacob 		if (rxq->precharge_cnt >= rxq->qlen_mask)
14687413feeeSJerin Jacob 			continue;
14697413feeeSJerin Jacob 		rxq->precharge_cnt++;
14707413feeeSJerin Jacob 		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
14717413feeeSJerin Jacob 		if (mbuf)
14727413feeeSJerin Jacob 			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
14737413feeeSJerin Jacob 	}
14747413feeeSJerin Jacob 	return 0;
14757413feeeSJerin Jacob }
14767413feeeSJerin Jacob 
14777413feeeSJerin Jacob static int
147871e76186SKamil Rytarowski nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
14797413feeeSJerin Jacob {
14807413feeeSJerin Jacob 	int ret;
148134c2e702SJerin Jacob 	uint16_t qidx, data_off;
14827413feeeSJerin Jacob 	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
14837413feeeSJerin Jacob 	uint64_t mbuf_phys_off = 0;
14847413feeeSJerin Jacob 	struct nicvf_rxq *rxq;
14857413feeeSJerin Jacob 	struct rte_mbuf *mbuf;
148671e76186SKamil Rytarowski 	uint16_t rx_start, rx_end;
148771e76186SKamil Rytarowski 	uint16_t tx_start, tx_end;
1488c97da2cbSMaciej Czekaj 	bool vlan_strip;
14897413feeeSJerin Jacob 
14907413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
14917413feeeSJerin Jacob 
14927413feeeSJerin Jacob 	/* Recover if the userspace process exited without a proper shutdown last run */
14937413feeeSJerin Jacob 	if (nicvf_qset_rbdr_active(nic, 0))
149471e76186SKamil Rytarowski 		nicvf_vf_stop(dev, nic, false);
149571e76186SKamil Rytarowski 
149671e76186SKamil Rytarowski 	/* Get queue ranges for this VF */
149771e76186SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
14987413feeeSJerin Jacob 
14997413feeeSJerin Jacob 	/*
15007413feeeSJerin Jacob 	 * The ThunderX nicvf PMD can support more than one pool per port only when
15017413feeeSJerin Jacob 	 * 1) The data payload size is the same across all pools on a given port
15027413feeeSJerin Jacob 	 * AND
15037413feeeSJerin Jacob 	 * 2) All mbufs in the pools come from the same hugepage
15047413feeeSJerin Jacob 	 * AND
15057413feeeSJerin Jacob 	 * 3) The mbuf metadata size is the same across all pools on a given port.
15067413feeeSJerin Jacob 	 *
15077413feeeSJerin Jacob 	 * This supports existing applications that use multiple pools per port.
15087413feeeSJerin Jacob 	 * However, using multiple pools for QoS purposes is not addressed here.
15097413feeeSJerin Jacob 	 *
15107413feeeSJerin Jacob 	 */
15117413feeeSJerin Jacob 
15127413feeeSJerin Jacob 	/* Validate mempool attributes */
151371e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
15147413feeeSJerin Jacob 		rxq = dev->data->rx_queues[qidx];
15157413feeeSJerin Jacob 		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
15167413feeeSJerin Jacob 		mbuf = rte_pktmbuf_alloc(rxq->pool);
15177413feeeSJerin Jacob 		if (mbuf == NULL) {
151871e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d "
151971e76186SKamil Rytarowski 				     "pool=%s",
152071e76186SKamil Rytarowski 				     nic->vf_id, qidx, rxq->pool->name);
15217413feeeSJerin Jacob 			return -ENOMEM;
15227413feeeSJerin Jacob 		}
152334c2e702SJerin Jacob 		data_off = nicvf_mbuff_meta_length(mbuf);
152434c2e702SJerin Jacob 		data_off += RTE_PKTMBUF_HEADROOM;
15257413feeeSJerin Jacob 		rte_pktmbuf_free(mbuf);
15267413feeeSJerin Jacob 
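		/* The Rx path recovers the mbuf address from the buffer IOVA
		 * via mbuf_phys_off, so metadata + headroom must stay
		 * cache-line aligned (checked below).
		 */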
152734c2e702SJerin Jacob 		if (data_off % RTE_CACHE_LINE_SIZE) {
152834c2e702SJerin Jacob 			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
152934c2e702SJerin Jacob 				rxq->pool->name, data_off,
153034c2e702SJerin Jacob 				data_off % RTE_CACHE_LINE_SIZE);
153134c2e702SJerin Jacob 			return -EINVAL;
153234c2e702SJerin Jacob 		}
153334c2e702SJerin Jacob 		rxq->mbuf_phys_off -= data_off;
153434c2e702SJerin Jacob 
15357413feeeSJerin Jacob 		if (mbuf_phys_off == 0)
15367413feeeSJerin Jacob 			mbuf_phys_off = rxq->mbuf_phys_off;
15377413feeeSJerin Jacob 		if (mbuf_phys_off != rxq->mbuf_phys_off) {
153871e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
153971e76186SKamil Rytarowski 				     PRIx64, rxq->pool->name, nic->vf_id,
154071e76186SKamil Rytarowski 				     mbuf_phys_off);
15417413feeeSJerin Jacob 			return -EINVAL;
15427413feeeSJerin Jacob 		}
15437413feeeSJerin Jacob 	}
15447413feeeSJerin Jacob 
15457413feeeSJerin Jacob 	/* Check the level of buffers in the pool */
15467413feeeSJerin Jacob 	total_rxq_desc = 0;
154771e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
15487413feeeSJerin Jacob 		rxq = dev->data->rx_queues[qidx];
15497413feeeSJerin Jacob 		/* Count total numbers of rxq descs */
15507413feeeSJerin Jacob 		total_rxq_desc += rxq->qlen_mask + 1;
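		/* Each pool must also hold enough free buffers to cover the
		 * worst-case mempool cache plus the Rx free threshold for
		 * every queue.
		 */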
15517413feeeSJerin Jacob 		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
15526d3cbd56SKamil Rytarowski 		exp_buffs *= dev->data->nb_rx_queues;
1553a0fd91ceSBruce Richardson 		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
15547413feeeSJerin Jacob 			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
15557413feeeSJerin Jacob 				     rxq->pool->name,
1556a0fd91ceSBruce Richardson 				     rte_mempool_avail_count(rxq->pool),
15577413feeeSJerin Jacob 				     exp_buffs);
15587413feeeSJerin Jacob 			return -ENOENT;
15597413feeeSJerin Jacob 		}
15607413feeeSJerin Jacob 	}
15617413feeeSJerin Jacob 
15627413feeeSJerin Jacob 	/* Check RBDR desc overflow */
15637413feeeSJerin Jacob 	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
15647413feeeSJerin Jacob 	if (ret == 0) {
156571e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
156671e76186SKamil Rytarowski 			     "VF%d", nic->vf_id);
15677413feeeSJerin Jacob 		return -ENOMEM;
15687413feeeSJerin Jacob 	}
15697413feeeSJerin Jacob 
15707413feeeSJerin Jacob 	/* Enable qset */
15717413feeeSJerin Jacob 	ret = nicvf_qset_config(nic);
15727413feeeSJerin Jacob 	if (ret) {
157371e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
157471e76186SKamil Rytarowski 			     nic->vf_id);
15757413feeeSJerin Jacob 		return ret;
15767413feeeSJerin Jacob 	}
15777413feeeSJerin Jacob 
15787413feeeSJerin Jacob 	/* Allocate RBDR and RBDR ring desc */
15797413feeeSJerin Jacob 	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
15806d3cbd56SKamil Rytarowski 	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
15817413feeeSJerin Jacob 	if (ret) {
158271e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to allocate memory for RBDR "
158371e76186SKamil Rytarowski 			     "VF%d", nic->vf_id);
15847413feeeSJerin Jacob 		goto qset_reclaim;
15857413feeeSJerin Jacob 	}
15867413feeeSJerin Jacob 
15877413feeeSJerin Jacob 	/* Enable and configure RBDR registers */
15887413feeeSJerin Jacob 	ret = nicvf_qset_rbdr_config(nic, 0);
15897413feeeSJerin Jacob 	if (ret) {
159071e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
159171e76186SKamil Rytarowski 			     nic->vf_id);
15927413feeeSJerin Jacob 		goto qset_rbdr_free;
15937413feeeSJerin Jacob 	}
15947413feeeSJerin Jacob 
15957413feeeSJerin Jacob 	/* Fill rte_mempool buffers in RBDR pool and precharge it */
1596394014bcSKamil Rytarowski 	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1597394014bcSKamil Rytarowski 					total_rxq_desc);
15987413feeeSJerin Jacob 	if (ret) {
159971e76186SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
160071e76186SKamil Rytarowski 			     nic->vf_id);
16017413feeeSJerin Jacob 		goto qset_rbdr_reclaim;
16027413feeeSJerin Jacob 	}
16037413feeeSJerin Jacob 
160471e76186SKamil Rytarowski 	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
160571e76186SKamil Rytarowski 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
16067413feeeSJerin Jacob 
16077413feeeSJerin Jacob 	/* Configure VLAN Strip */
1608c97da2cbSMaciej Czekaj 	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
1609c97da2cbSMaciej Czekaj 			DEV_RX_OFFLOAD_VLAN_STRIP);
1610c97da2cbSMaciej Czekaj 	nicvf_vlan_hw_strip(nic, vlan_strip);
16117413feeeSJerin Jacob 
16128a946db3SJerin Jacob 	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
16138a946db3SJerin Jacob 	 * to a 64-bit memory address.
16148a946db3SJerin Jacob 	 * The alignment creates a hole in the mbuf (between the end of the
16158a946db3SJerin Jacob 	 * headroom and the start of the packet data). Newer HW revisions can
16168a946db3SJerin Jacob 	 * disable the L3 alignment feature and make the mbuf layout look more
16178a946db3SJerin Jacob 	 * like that of other NICs. For better application compatibility,
16188a946db3SJerin Jacob 	 * disable L3 alignment on the hardware revisions that support it.
16198a946db3SJerin Jacob 	 */
16208a946db3SJerin Jacob 	nicvf_apad_config(nic, false);
16218a946db3SJerin Jacob 
162271e76186SKamil Rytarowski 	/* Get queue ranges for this VF */
162371e76186SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
162471e76186SKamil Rytarowski 
16257413feeeSJerin Jacob 	/* Configure TX queues */
162671e76186SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
162771e76186SKamil Rytarowski 		ret = nicvf_vf_start_tx_queue(dev, nic,
162871e76186SKamil Rytarowski 			qidx % MAX_SND_QUEUES_PER_QS);
16297413feeeSJerin Jacob 		if (ret)
16307413feeeSJerin Jacob 			goto start_txq_error;
16317413feeeSJerin Jacob 	}
16327413feeeSJerin Jacob 
163371e76186SKamil Rytarowski 	/* Configure RX queues */
163471e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
163571e76186SKamil Rytarowski 		ret = nicvf_vf_start_rx_queue(dev, nic,
163671e76186SKamil Rytarowski 			qidx % MAX_RCV_QUEUES_PER_QS);
163771e76186SKamil Rytarowski 		if (ret)
163871e76186SKamil Rytarowski 			goto start_rxq_error;
163971e76186SKamil Rytarowski 	}
164071e76186SKamil Rytarowski 
164171e76186SKamil Rytarowski 	if (!nic->sqs_mode) {
16427413feeeSJerin Jacob 		/* Configure CPI algorithm */
16437413feeeSJerin Jacob 		ret = nicvf_configure_cpi(dev);
16447413feeeSJerin Jacob 		if (ret)
16457413feeeSJerin Jacob 			goto start_txq_error;
16467413feeeSJerin Jacob 
164771e76186SKamil Rytarowski 		ret = nicvf_mbox_get_rss_size(nic);
164871e76186SKamil Rytarowski 		if (ret) {
164971e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "Failed to get rss table size");
165071e76186SKamil Rytarowski 			goto qset_rss_error;
165171e76186SKamil Rytarowski 		}
165271e76186SKamil Rytarowski 
16537413feeeSJerin Jacob 		/* Configure RSS */
16547413feeeSJerin Jacob 		ret = nicvf_configure_rss(dev);
16557413feeeSJerin Jacob 		if (ret)
16567413feeeSJerin Jacob 			goto qset_rss_error;
165771e76186SKamil Rytarowski 	}
165871e76186SKamil Rytarowski 
165971e76186SKamil Rytarowski 	/* Done; let the PF turn the BGX's RX and TX switches to the ON position */
166071e76186SKamil Rytarowski 	nicvf_mbox_cfg_done(nic);
166171e76186SKamil Rytarowski 	return 0;
166271e76186SKamil Rytarowski 
166371e76186SKamil Rytarowski qset_rss_error:
166471e76186SKamil Rytarowski 	nicvf_rss_term(nic);
166571e76186SKamil Rytarowski start_rxq_error:
166671e76186SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++)
166771e76186SKamil Rytarowski 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
166871e76186SKamil Rytarowski start_txq_error:
166971e76186SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++)
167071e76186SKamil Rytarowski 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
167171e76186SKamil Rytarowski qset_rbdr_reclaim:
167271e76186SKamil Rytarowski 	nicvf_qset_rbdr_reclaim(nic, 0);
167371e76186SKamil Rytarowski 	nicvf_rbdr_release_mbufs(dev, nic);
167471e76186SKamil Rytarowski qset_rbdr_free:
167571e76186SKamil Rytarowski 	if (nic->rbdr) {
167671e76186SKamil Rytarowski 		rte_free(nic->rbdr);
167771e76186SKamil Rytarowski 		nic->rbdr = NULL;
167871e76186SKamil Rytarowski 	}
167971e76186SKamil Rytarowski qset_reclaim:
168071e76186SKamil Rytarowski 	nicvf_qset_reclaim(nic);
168171e76186SKamil Rytarowski 	return ret;
168271e76186SKamil Rytarowski }
168371e76186SKamil Rytarowski 
168471e76186SKamil Rytarowski static int
168571e76186SKamil Rytarowski nicvf_dev_start(struct rte_eth_dev *dev)
168671e76186SKamil Rytarowski {
168771e76186SKamil Rytarowski 	uint16_t qidx;
168871e76186SKamil Rytarowski 	int ret;
168971e76186SKamil Rytarowski 	size_t i;
169071e76186SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
169171e76186SKamil Rytarowski 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
169271e76186SKamil Rytarowski 	uint16_t mtu;
169371e76186SKamil Rytarowski 	uint32_t buffsz = 0, rbdrsz = 0;
169471e76186SKamil Rytarowski 	struct rte_pktmbuf_pool_private *mbp_priv;
169571e76186SKamil Rytarowski 	struct nicvf_rxq *rxq;
169671e76186SKamil Rytarowski 
169771e76186SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
169871e76186SKamil Rytarowski 
169971e76186SKamil Rytarowski 	/* This function must be called for a primary device */
170071e76186SKamil Rytarowski 	assert_primary(nic);
170171e76186SKamil Rytarowski 
170271e76186SKamil Rytarowski 	/* Validate RBDR buff size */
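	/* A single RBDR feeds every Rx queue of the qset, so all pools must
	 * supply buffers of one common size, in multiples of 128 bytes.
	 */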
170371e76186SKamil Rytarowski 	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
170471e76186SKamil Rytarowski 		rxq = dev->data->rx_queues[qidx];
170571e76186SKamil Rytarowski 		mbp_priv = rte_mempool_get_priv(rxq->pool);
170671e76186SKamil Rytarowski 		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
170771e76186SKamil Rytarowski 		if (buffsz % 128) {
170871e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
170971e76186SKamil Rytarowski 			return -EINVAL;
171071e76186SKamil Rytarowski 		}
171171e76186SKamil Rytarowski 		if (rbdrsz == 0)
171271e76186SKamil Rytarowski 			rbdrsz = buffsz;
171371e76186SKamil Rytarowski 		if (rbdrsz != buffsz) {
171471e76186SKamil Rytarowski 			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
171571e76186SKamil Rytarowski 				     qidx, rbdrsz, buffsz);
171671e76186SKamil Rytarowski 			return -EINVAL;
171771e76186SKamil Rytarowski 		}
171871e76186SKamil Rytarowski 	}
17197413feeeSJerin Jacob 
17207413feeeSJerin Jacob 	/* Configure loopback */
17217413feeeSJerin Jacob 	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
17227413feeeSJerin Jacob 	if (ret) {
17237413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
172471e76186SKamil Rytarowski 		return ret;
17257413feeeSJerin Jacob 	}
17267413feeeSJerin Jacob 
17277413feeeSJerin Jacob 	/* Reset all statistics counters attached to this port */
17287413feeeSJerin Jacob 	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
17297413feeeSJerin Jacob 	if (ret) {
17307413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
173171e76186SKamil Rytarowski 		return ret;
17327413feeeSJerin Jacob 	}
17337413feeeSJerin Jacob 
17347413feeeSJerin Jacob 	/* Set up scatter mode if required by jumbo frames */
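	/* (the 2 * VLAN_TAG_SIZE slack leaves room for stacked VLAN tags) */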
17357413feeeSJerin Jacob 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
17367413feeeSJerin Jacob 					    2 * VLAN_TAG_SIZE > buffsz)
17377413feeeSJerin Jacob 		dev->data->scattered_rx = 1;
1738c97da2cbSMaciej Czekaj 	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
17397413feeeSJerin Jacob 		dev->data->scattered_rx = 1;
17407413feeeSJerin Jacob 
17417413feeeSJerin Jacob 	/* Set up MTU based on max_rx_pkt_len or the default */
1742c97da2cbSMaciej Czekaj 	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
17437413feeeSJerin Jacob 		dev->data->dev_conf.rxmode.max_rx_pkt_len
17447413feeeSJerin Jacob 			- ETHER_HDR_LEN - ETHER_CRC_LEN
17457413feeeSJerin Jacob 		: ETHER_MTU;
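	/* e.g. max_rx_pkt_len = 1518 yields the standard 1500-byte MTU */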
17467413feeeSJerin Jacob 
17477413feeeSJerin Jacob 	if (nicvf_dev_set_mtu(dev, mtu)) {
17487413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
17497413feeeSJerin Jacob 		return -EBUSY;
17507413feeeSJerin Jacob 	}
17517413feeeSJerin Jacob 
175271e76186SKamil Rytarowski 	ret = nicvf_vf_start(dev, nic, rbdrsz);
175371e76186SKamil Rytarowski 	if (ret != 0)
175471e76186SKamil Rytarowski 		return ret;
175571e76186SKamil Rytarowski 
175671e76186SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
175771e76186SKamil Rytarowski 		assert(nic->snicvf[i]);
175871e76186SKamil Rytarowski 
175971e76186SKamil Rytarowski 		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
176071e76186SKamil Rytarowski 		if (ret != 0)
176171e76186SKamil Rytarowski 			return ret;
176271e76186SKamil Rytarowski 	}
176371e76186SKamil Rytarowski 
17647413feeeSJerin Jacob 	/* Configure callbacks based on scatter mode */
17657413feeeSJerin Jacob 	nicvf_set_tx_function(dev);
17667413feeeSJerin Jacob 	nicvf_set_rx_function(dev);
17677413feeeSJerin Jacob 
17687413feeeSJerin Jacob 	return 0;
17697413feeeSJerin Jacob }
17707413feeeSJerin Jacob 
17717413feeeSJerin Jacob static void
1772627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
17737413feeeSJerin Jacob {
1774627d4ba2SKamil Rytarowski 	size_t i;
17757413feeeSJerin Jacob 	int ret;
17767413feeeSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
17777413feeeSJerin Jacob 
17787413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
17797413feeeSJerin Jacob 
1780627d4ba2SKamil Rytarowski 	/* Tear down secondary VFs first */
1781627d4ba2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1782627d4ba2SKamil Rytarowski 		if (!nic->snicvf[i])
1783627d4ba2SKamil Rytarowski 			continue;
1784627d4ba2SKamil Rytarowski 
1785627d4ba2SKamil Rytarowski 		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1786627d4ba2SKamil Rytarowski 	}
1787627d4ba2SKamil Rytarowski 
1788627d4ba2SKamil Rytarowski 	/* Stop the primary VF now */
1789627d4ba2SKamil Rytarowski 	nicvf_vf_stop(dev, nic, cleanup);
17907413feeeSJerin Jacob 
17917413feeeSJerin Jacob 	/* Disable loopback */
17927413feeeSJerin Jacob 	ret = nicvf_loopback_config(nic, 0);
17937413feeeSJerin Jacob 	if (ret)
17947413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
17957413feeeSJerin Jacob 
1796627d4ba2SKamil Rytarowski 	/* Reclaim CPI configuration */
1797627d4ba2SKamil Rytarowski 	ret = nicvf_mbox_config_cpi(nic, 0);
1798627d4ba2SKamil Rytarowski 	if (ret)
1799627d4ba2SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1800627d4ba2SKamil Rytarowski }
1801627d4ba2SKamil Rytarowski 
1802627d4ba2SKamil Rytarowski static void
1803627d4ba2SKamil Rytarowski nicvf_dev_stop(struct rte_eth_dev *dev)
1804627d4ba2SKamil Rytarowski {
1805627d4ba2SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
1806627d4ba2SKamil Rytarowski 
1807627d4ba2SKamil Rytarowski 	nicvf_dev_stop_cleanup(dev, false);
1808627d4ba2SKamil Rytarowski }
1809627d4ba2SKamil Rytarowski 
1810627d4ba2SKamil Rytarowski static void
1811627d4ba2SKamil Rytarowski nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1812627d4ba2SKamil Rytarowski {
1813627d4ba2SKamil Rytarowski 	int ret;
1814627d4ba2SKamil Rytarowski 	uint16_t qidx;
1815627d4ba2SKamil Rytarowski 	uint16_t tx_start, tx_end;
1816627d4ba2SKamil Rytarowski 	uint16_t rx_start, rx_end;
1817627d4ba2SKamil Rytarowski 
1818627d4ba2SKamil Rytarowski 	PMD_INIT_FUNC_TRACE();
1819627d4ba2SKamil Rytarowski 
1820627d4ba2SKamil Rytarowski 	if (cleanup) {
1821627d4ba2SKamil Rytarowski 		/* Let the PF turn the BGX's RX and TX switches to the OFF position */
1822627d4ba2SKamil Rytarowski 		nicvf_mbox_shutdown(nic);
1823627d4ba2SKamil Rytarowski 	}
1824627d4ba2SKamil Rytarowski 
18257413feeeSJerin Jacob 	/* Disable VLAN Strip */
18267413feeeSJerin Jacob 	nicvf_vlan_hw_strip(nic, 0);
18277413feeeSJerin Jacob 
1828627d4ba2SKamil Rytarowski 	/* Get queue ranges for this VF */
1829627d4ba2SKamil Rytarowski 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1830627d4ba2SKamil Rytarowski 
1831627d4ba2SKamil Rytarowski 	for (qidx = tx_start; qidx <= tx_end; qidx++)
1832627d4ba2SKamil Rytarowski 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1833627d4ba2SKamil Rytarowski 
1834627d4ba2SKamil Rytarowski 	/* Get queue ranges for this VF */
1835627d4ba2SKamil Rytarowski 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
18367413feeeSJerin Jacob 
18377413feeeSJerin Jacob 	/* Reclaim rq */
1838627d4ba2SKamil Rytarowski 	for (qidx = rx_start; qidx <= rx_end; qidx++)
1839627d4ba2SKamil Rytarowski 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
18407413feeeSJerin Jacob 
18417413feeeSJerin Jacob 	/* Reclaim RBDR */
18427413feeeSJerin Jacob 	ret = nicvf_qset_rbdr_reclaim(nic, 0);
18437413feeeSJerin Jacob 	if (ret)
18447413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
18457413feeeSJerin Jacob 
18467413feeeSJerin Jacob 	/* Move all charged buffers in RBDR back to pool */
18477413feeeSJerin Jacob 	if (nic->rbdr != NULL)
18486d3cbd56SKamil Rytarowski 		nicvf_rbdr_release_mbufs(dev, nic);
18497413feeeSJerin Jacob 
18507413feeeSJerin Jacob 	/* Disable qset */
1851627d4ba2SKamil Rytarowski 	ret = nicvf_qset_reclaim(nic);
18527413feeeSJerin Jacob 	if (ret)
18537413feeeSJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
18547413feeeSJerin Jacob 
18557413feeeSJerin Jacob 	/* Disable all interrupts */
18567413feeeSJerin Jacob 	nicvf_disable_all_interrupts(nic);
18577413feeeSJerin Jacob 
18587413feeeSJerin Jacob 	/* Free RBDR SW structure */
18597413feeeSJerin Jacob 	if (nic->rbdr) {
18607413feeeSJerin Jacob 		rte_free(nic->rbdr);
18617413feeeSJerin Jacob 		nic->rbdr = NULL;
18627413feeeSJerin Jacob 	}
18637413feeeSJerin Jacob }
18647413feeeSJerin Jacob 
18657413feeeSJerin Jacob static void
18667413feeeSJerin Jacob nicvf_dev_close(struct rte_eth_dev *dev)
18677413feeeSJerin Jacob {
1868627d4ba2SKamil Rytarowski 	size_t i;
1869627d4ba2SKamil Rytarowski 	struct nicvf *nic = nicvf_pmd_priv(dev);
1870627d4ba2SKamil Rytarowski 
18717413feeeSJerin Jacob 	PMD_INIT_FUNC_TRACE();
18727413feeeSJerin Jacob 
1873627d4ba2SKamil Rytarowski 	nicvf_dev_stop_cleanup(dev, true);
1874f141adcaSKamil Rytarowski 	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1875627d4ba2SKamil Rytarowski 
1876627d4ba2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1877627d4ba2SKamil Rytarowski 		if (!nic->snicvf[i])
1878627d4ba2SKamil Rytarowski 			continue;
1879627d4ba2SKamil Rytarowski 
1880627d4ba2SKamil Rytarowski 		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1881627d4ba2SKamil Rytarowski 	}
18827413feeeSJerin Jacob }
18837413feeeSJerin Jacob 
1884bc79615aSJerin Jacob static int
1885b7004ab2SKamil Rytarowski nicvf_request_sqs(struct nicvf *nic)
1886b7004ab2SKamil Rytarowski {
1887b7004ab2SKamil Rytarowski 	size_t i;
1888b7004ab2SKamil Rytarowski 
1889b7004ab2SKamil Rytarowski 	assert_primary(nic);
1890b7004ab2SKamil Rytarowski 	assert(nic->sqs_count > 0);
1891b7004ab2SKamil Rytarowski 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1892b7004ab2SKamil Rytarowski 
1893b7004ab2SKamil Rytarowski 	/* Set the number of Rx/Tx queues in each of the SQ sets */
1894b7004ab2SKamil Rytarowski 	for (i = 0; i < nic->sqs_count; i++) {
1895b7004ab2SKamil Rytarowski 		if (nicvf_svf_empty())
1896b7004ab2SKamil Rytarowski 			rte_panic("Cannot assign sufficient number of "
1897b7004ab2SKamil Rytarowski 				  "secondary queues to primary VF%" PRIu8 "\n",
1898b7004ab2SKamil Rytarowski 				  nic->vf_id);
1899b7004ab2SKamil Rytarowski 
1900b7004ab2SKamil Rytarowski 		nic->snicvf[i] = nicvf_svf_pop();
1901b7004ab2SKamil Rytarowski 		nic->snicvf[i]->sqs_id = i;
1902b7004ab2SKamil Rytarowski 	}
1903b7004ab2SKamil Rytarowski 
1904b7004ab2SKamil Rytarowski 	return nicvf_mbox_request_sqs(nic);
1905b7004ab2SKamil Rytarowski }
1906b7004ab2SKamil Rytarowski 
1907b7004ab2SKamil Rytarowski static int
1908bc79615aSJerin Jacob nicvf_dev_configure(struct rte_eth_dev *dev)
1909bc79615aSJerin Jacob {
1910b7004ab2SKamil Rytarowski 	struct rte_eth_dev_data *data = dev->data;
1911b7004ab2SKamil Rytarowski 	struct rte_eth_conf *conf = &data->dev_conf;
1912bc79615aSJerin Jacob 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1913bc79615aSJerin Jacob 	struct rte_eth_txmode *txmode = &conf->txmode;
1914bc79615aSJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(dev);
1915b7004ab2SKamil Rytarowski 	uint8_t cqcount;
1916c97da2cbSMaciej Czekaj 	uint64_t conf_rx_offloads, rx_offload_capa;
1917c97da2cbSMaciej Czekaj 	uint64_t conf_tx_offloads, tx_offload_capa;
1918bc79615aSJerin Jacob 
1919bc79615aSJerin Jacob 	PMD_INIT_FUNC_TRACE();
1920bc79615aSJerin Jacob 
1921bc79615aSJerin Jacob 	if (!rte_eal_has_hugepages()) {
1922bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Huge page is not configured");
1923bc79615aSJerin Jacob 		return -EINVAL;
1924bc79615aSJerin Jacob 	}
1925bc79615aSJerin Jacob 
1926c97da2cbSMaciej Czekaj 	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
1927c97da2cbSMaciej Czekaj 	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1928c97da2cbSMaciej Czekaj 
1929c97da2cbSMaciej Czekaj 	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
1930c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
1931c97da2cbSMaciej Czekaj 		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1932c97da2cbSMaciej Czekaj 		      conf_tx_offloads, tx_offload_capa);
1933c97da2cbSMaciej Czekaj 		return -ENOTSUP;
1934c97da2cbSMaciej Czekaj 	}
1935c97da2cbSMaciej Czekaj 
1936c97da2cbSMaciej Czekaj 	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1937c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1938c97da2cbSMaciej Czekaj 		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1939c97da2cbSMaciej Czekaj 	}
1940c97da2cbSMaciej Czekaj 
1941c97da2cbSMaciej Czekaj 	conf_rx_offloads = rxmode->offloads;
1942c97da2cbSMaciej Czekaj 	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1943c97da2cbSMaciej Czekaj 
1944c97da2cbSMaciej Czekaj 	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
1945c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
1946c97da2cbSMaciej Czekaj 		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1947c97da2cbSMaciej Czekaj 		      conf_rx_offloads, rx_offload_capa);
1948c97da2cbSMaciej Czekaj 		return -ENOTSUP;
1949c97da2cbSMaciej Czekaj 	}
1950c97da2cbSMaciej Czekaj 
1951c97da2cbSMaciej Czekaj 	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
1952c97da2cbSMaciej Czekaj 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1953c97da2cbSMaciej Czekaj 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
1954c97da2cbSMaciej Czekaj 	}
1955c97da2cbSMaciej Czekaj 
1956bc79615aSJerin Jacob 	if (txmode->mq_mode) {
1957bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1958bc79615aSJerin Jacob 		return -EINVAL;
1959bc79615aSJerin Jacob 	}
1960bc79615aSJerin Jacob 
1961bc79615aSJerin Jacob 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1962bc79615aSJerin Jacob 		rxmode->mq_mode != ETH_MQ_RX_RSS) {
1963bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1964bc79615aSJerin Jacob 		return -EINVAL;
1965bc79615aSJerin Jacob 	}
1966bc79615aSJerin Jacob 
1967bc79615aSJerin Jacob 	if (rxmode->split_hdr_size) {
1968bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1969bc79615aSJerin Jacob 		return -EINVAL;
1970bc79615aSJerin Jacob 	}
1971bc79615aSJerin Jacob 
1972bc79615aSJerin Jacob 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1973bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1974bc79615aSJerin Jacob 		return -EINVAL;
1975bc79615aSJerin Jacob 	}
1976bc79615aSJerin Jacob 
1977bc79615aSJerin Jacob 	if (conf->dcb_capability_en) {
1978bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "DCB enable not supported");
1979bc79615aSJerin Jacob 		return -EINVAL;
1980bc79615aSJerin Jacob 	}
1981bc79615aSJerin Jacob 
1982bc79615aSJerin Jacob 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1983bc79615aSJerin Jacob 		PMD_INIT_LOG(INFO, "Flow director not supported");
1984bc79615aSJerin Jacob 		return -EINVAL;
1985bc79615aSJerin Jacob 	}
1986bc79615aSJerin Jacob 
1987b7004ab2SKamil Rytarowski 	assert_primary(nic);
1988b7004ab2SKamil Rytarowski 	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1989b7004ab2SKamil Rytarowski 	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
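	/* Queues beyond the primary qset need extra qsets: round cqcount up
	 * to a qset multiple and subtract the primary one; e.g. 20 queues at
	 * 8 per qset yield 2 secondary VFs.
	 */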
1990b7004ab2SKamil Rytarowski 	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1991b7004ab2SKamil Rytarowski 		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1992b7004ab2SKamil Rytarowski 		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1993b7004ab2SKamil Rytarowski 	} else {
1994b7004ab2SKamil Rytarowski 		nic->sqs_count = 0;
1995b7004ab2SKamil Rytarowski 	}
1996b7004ab2SKamil Rytarowski 
1997b7004ab2SKamil Rytarowski 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1998b7004ab2SKamil Rytarowski 
1999b7004ab2SKamil Rytarowski 	if (nic->sqs_count > 0) {
2000b7004ab2SKamil Rytarowski 		if (nicvf_request_sqs(nic)) {
2001b7004ab2SKamil Rytarowski 			rte_panic("Cannot assign sufficient number of "
2002b7004ab2SKamil Rytarowski 				  "secondary queues to PORT%d VF%" PRIu8 "\n",
2003b7004ab2SKamil Rytarowski 				  dev->data->port_id, nic->vf_id);
2004b7004ab2SKamil Rytarowski 		}
2005b7004ab2SKamil Rytarowski 	}
2006b7004ab2SKamil Rytarowski 
2007bc79615aSJerin Jacob 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
2008bc79615aSJerin Jacob 		dev->data->port_id, nicvf_hw_cap(nic));
2009bc79615aSJerin Jacob 
2010bc79615aSJerin Jacob 	return 0;
2011bc79615aSJerin Jacob }
2012bc79615aSJerin Jacob 
2013e4387966SJerin Jacob /* Initialize and register the driver with the DPDK application */
2014e4387966SJerin Jacob static const struct eth_dev_ops nicvf_eth_dev_ops = {
2015bc79615aSJerin Jacob 	.dev_configure            = nicvf_dev_configure,
20167413feeeSJerin Jacob 	.dev_start                = nicvf_dev_start,
20177413feeeSJerin Jacob 	.dev_stop                 = nicvf_dev_stop,
20188fc70464SJerin Jacob 	.link_update              = nicvf_dev_link_update,
20197413feeeSJerin Jacob 	.dev_close                = nicvf_dev_close,
2020684fa771SJerin Jacob 	.stats_get                = nicvf_dev_stats_get,
2021684fa771SJerin Jacob 	.stats_reset              = nicvf_dev_stats_reset,
20226eae36eaSJerin Jacob 	.promiscuous_enable       = nicvf_dev_promisc_enable,
2023dcd7b1e1SJerin Jacob 	.dev_infos_get            = nicvf_dev_info_get,
20241c80e4fdSJerin Jacob 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
202565d9804eSJerin Jacob 	.mtu_set                  = nicvf_dev_set_mtu,
202643362c6aSJerin Jacob 	.reta_update              = nicvf_dev_reta_update,
202743362c6aSJerin Jacob 	.reta_query               = nicvf_dev_reta_query,
202843362c6aSJerin Jacob 	.rss_hash_update          = nicvf_dev_rss_hash_update,
202943362c6aSJerin Jacob 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
203086b4eb42SJerin Jacob 	.rx_queue_start           = nicvf_dev_rx_queue_start,
203186b4eb42SJerin Jacob 	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
2032fc1f6c62SJerin Jacob 	.tx_queue_start           = nicvf_dev_tx_queue_start,
2033fc1f6c62SJerin Jacob 	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
2034aa0d976eSJerin Jacob 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
2035aa0d976eSJerin Jacob 	.rx_queue_release         = nicvf_dev_rx_queue_release,
2036da14e00cSJerin Jacob 	.rx_queue_count           = nicvf_dev_rx_queue_count,
20373f3c6f97SJerin Jacob 	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
20383f3c6f97SJerin Jacob 	.tx_queue_release         = nicvf_dev_tx_queue_release,
2039606ee746SJerin Jacob 	.get_reg                  = nicvf_dev_get_regs,
2040e4387966SJerin Jacob };
2041e4387966SJerin Jacob 
2042e4387966SJerin Jacob static int
2043e4387966SJerin Jacob nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2044e4387966SJerin Jacob {
2045e4387966SJerin Jacob 	int ret;
2046e4387966SJerin Jacob 	struct rte_pci_device *pci_dev;
2047e4387966SJerin Jacob 	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2048e4387966SJerin Jacob 
2049e4387966SJerin Jacob 	PMD_INIT_FUNC_TRACE();
2050e4387966SJerin Jacob 
2051e4387966SJerin Jacob 	eth_dev->dev_ops = &nicvf_eth_dev_ops;
2052e4387966SJerin Jacob 
20537413feeeSJerin Jacob 	/* For secondary processes, the primary has done all the work */
20547413feeeSJerin Jacob 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
205521e3fb00SKamil Rytarowski 		if (nic) {
20567413feeeSJerin Jacob 			/* Setup callbacks for secondary process */
20577413feeeSJerin Jacob 			nicvf_set_tx_function(eth_dev);
20587413feeeSJerin Jacob 			nicvf_set_rx_function(eth_dev);
20597413feeeSJerin Jacob 			return 0;
206021e3fb00SKamil Rytarowski 		} else {
206121e3fb00SKamil Rytarowski 			/* If nic == NULL than it is secondary function
206221e3fb00SKamil Rytarowski 			/* If nic == NULL then this is a secondary queue set
206321e3fb00SKamil Rytarowski 			 * VF, so the ethdev must be released by the caller. */
206421e3fb00SKamil Rytarowski 		}
20657413feeeSJerin Jacob 	}
20667413feeeSJerin Jacob 
2067c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2068e4387966SJerin Jacob 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2069e4387966SJerin Jacob 
2070e4387966SJerin Jacob 	nic->device_id = pci_dev->id.device_id;
2071e4387966SJerin Jacob 	nic->vendor_id = pci_dev->id.vendor_id;
2072e4387966SJerin Jacob 	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2073e4387966SJerin Jacob 	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2074e4387966SJerin Jacob 
2075e4387966SJerin Jacob 	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2076e4387966SJerin Jacob 			pci_dev->id.vendor_id, pci_dev->id.device_id,
2077e4387966SJerin Jacob 			pci_dev->addr.domain, pci_dev->addr.bus,
2078e4387966SJerin Jacob 			pci_dev->addr.devid, pci_dev->addr.function);
2079e4387966SJerin Jacob 
2080e4387966SJerin Jacob 	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2081e4387966SJerin Jacob 	if (!nic->reg_base) {
2082e4387966SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to map BAR0");
2083e4387966SJerin Jacob 		ret = -ENODEV;
2084e4387966SJerin Jacob 		goto fail;
2085e4387966SJerin Jacob 	}
2086e4387966SJerin Jacob 
2087e4387966SJerin Jacob 	nicvf_disable_all_interrupts(nic);
2088e4387966SJerin Jacob 
2089f141adcaSKamil Rytarowski 	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2090e4387966SJerin Jacob 	if (ret) {
2091e4387966SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2092e4387966SJerin Jacob 		goto fail;
2093e4387966SJerin Jacob 	}
2094e4387966SJerin Jacob 
2095e4387966SJerin Jacob 	ret = nicvf_mbox_check_pf_ready(nic);
2096e4387966SJerin Jacob 	if (ret) {
2097e4387966SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2098e4387966SJerin Jacob 		goto alarm_fail;
2099e4387966SJerin Jacob 	} else {
2100e4387966SJerin Jacob 		PMD_INIT_LOG(INFO,
2101e4387966SJerin Jacob 			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2102e4387966SJerin Jacob 			nic->node, nic->vf_id,
2103e4387966SJerin Jacob 			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2104e4387966SJerin Jacob 			nic->sqs_mode ? "true" : "false",
2105e4387966SJerin Jacob 			nic->loopback_supported ? "true" : "false"
2106e4387966SJerin Jacob 			);
2107e4387966SJerin Jacob 	}
2108e4387966SJerin Jacob 
210921e3fb00SKamil Rytarowski 	ret = nicvf_base_init(nic);
211021e3fb00SKamil Rytarowski 	if (ret) {
211121e3fb00SKamil Rytarowski 		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
211221e3fb00SKamil Rytarowski 		goto malloc_fail;
211321e3fb00SKamil Rytarowski 	}
211421e3fb00SKamil Rytarowski 
2115e4387966SJerin Jacob 	if (nic->sqs_mode) {
211621e3fb00SKamil Rytarowski 		/* Push nic to stack of secondary vfs */
211721e3fb00SKamil Rytarowski 		nicvf_svf_push(nic);
211821e3fb00SKamil Rytarowski 
211921e3fb00SKamil Rytarowski 		/* Steal nic pointer from the device for further reuse */
212021e3fb00SKamil Rytarowski 		eth_dev->data->dev_private = NULL;
212121e3fb00SKamil Rytarowski 
212221e3fb00SKamil Rytarowski 		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
212321e3fb00SKamil Rytarowski 		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
212421e3fb00SKamil Rytarowski 		if (ret) {
212521e3fb00SKamil Rytarowski 			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
212621e3fb00SKamil Rytarowski 			goto fail;
212721e3fb00SKamil Rytarowski 		}
212821e3fb00SKamil Rytarowski 
212998a7ea33SJerin Jacob 		/* Detach port by returning positive error number */
213021e3fb00SKamil Rytarowski 		return ENOTSUP;
2131e4387966SJerin Jacob 	}
2132e4387966SJerin Jacob 
2133e4387966SJerin Jacob 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2134e4387966SJerin Jacob 	if (eth_dev->data->mac_addrs == NULL) {
2135e4387966SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2136e4387966SJerin Jacob 		ret = -ENOMEM;
2137e4387966SJerin Jacob 		goto alarm_fail;
2138e4387966SJerin Jacob 	}
2139e4387966SJerin Jacob 	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2140e4387966SJerin Jacob 		eth_random_addr(&nic->mac_addr[0]);
2141e4387966SJerin Jacob 
2142e4387966SJerin Jacob 	ether_addr_copy((struct ether_addr *)nic->mac_addr,
2143e4387966SJerin Jacob 			&eth_dev->data->mac_addrs[0]);
2144e4387966SJerin Jacob 
2145e4387966SJerin Jacob 	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2146e4387966SJerin Jacob 	if (ret) {
2147e4387966SJerin Jacob 		PMD_INIT_LOG(ERR, "Failed to set mac addr");
2148e4387966SJerin Jacob 		goto malloc_fail;
2149e4387966SJerin Jacob 	}
2150e4387966SJerin Jacob 
2151e4387966SJerin Jacob 	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2152e4387966SJerin Jacob 		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2153e4387966SJerin Jacob 		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2154e4387966SJerin Jacob 		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2155e4387966SJerin Jacob 
2156e4387966SJerin Jacob 	return 0;
2157e4387966SJerin Jacob 
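	/* Unwind in reverse order of setup; rte_free() accepts NULL, so
	 * reaching malloc_fail before the MAC array is allocated is
	 * harmless.
	 */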
2158e4387966SJerin Jacob malloc_fail:
2159e4387966SJerin Jacob 	rte_free(eth_dev->data->mac_addrs);
2160e4387966SJerin Jacob alarm_fail:
2161f141adcaSKamil Rytarowski 	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2162e4387966SJerin Jacob fail:
2163e4387966SJerin Jacob 	return ret;
2164e4387966SJerin Jacob }
2165e4387966SJerin Jacob 
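/* NICVF variants served by this PMD: CN88xx pass 1 and pass 2, CN81xx
 * and CN83xx. The zeroed entry terminates the table.
 */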
2166e4387966SJerin Jacob static const struct rte_pci_id pci_id_nicvf_map[] = {
2167e4387966SJerin Jacob 	{
2168e4387966SJerin Jacob 		.class_id = RTE_CLASS_ANY_ID,
2169e4387966SJerin Jacob 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2170398a1be1SJerin Jacob 		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2171e4387966SJerin Jacob 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2172398a1be1SJerin Jacob 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2173e4387966SJerin Jacob 	},
2174e4387966SJerin Jacob 	{
2175e4387966SJerin Jacob 		.class_id = RTE_CLASS_ANY_ID,
2176e4387966SJerin Jacob 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2177398a1be1SJerin Jacob 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2178e4387966SJerin Jacob 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2179398a1be1SJerin Jacob 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2180e4387966SJerin Jacob 	},
2181e4387966SJerin Jacob 	{
2182b72a7768SJerin Jacob 		.class_id = RTE_CLASS_ANY_ID,
2183b72a7768SJerin Jacob 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2184b72a7768SJerin Jacob 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2185b72a7768SJerin Jacob 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2186b72a7768SJerin Jacob 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2187b72a7768SJerin Jacob 	},
2188b72a7768SJerin Jacob 	{
2189174dd78eSJerin Jacob 		.class_id = RTE_CLASS_ANY_ID,
2190174dd78eSJerin Jacob 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2191174dd78eSJerin Jacob 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2192174dd78eSJerin Jacob 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2193174dd78eSJerin Jacob 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2194174dd78eSJerin Jacob 	},
2195174dd78eSJerin Jacob 	{
2196e4387966SJerin Jacob 		.vendor_id = 0,
2197e4387966SJerin Jacob 	},
2198e4387966SJerin Jacob };
2199e4387966SJerin Jacob 
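/* Thin wrappers over the generic ethdev PCI helpers: probe allocates
 * struct nicvf as per-port private data and invokes nicvf_eth_dev_init.
 */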
2200fdf91e0fSJan Blunck static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2201fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2202fdf91e0fSJan Blunck {
2203fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2204fdf91e0fSJan Blunck 		nicvf_eth_dev_init);
2205fdf91e0fSJan Blunck }
2206fdf91e0fSJan Blunck 
2207fdf91e0fSJan Blunck static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2208fdf91e0fSJan Blunck {
2209fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2210fdf91e0fSJan Blunck }
2211fdf91e0fSJan Blunck 
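/* The device needs its BARs mapped (NEED_MAPPING), keeps those
 * mappings in place (KEEP_MAPPED_RES) and supports link state change
 * interrupts (INTR_LSC).
 */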
2212fdf91e0fSJan Blunck static struct rte_pci_driver rte_nicvf_pmd = {
2213e4387966SJerin Jacob 	.id_table = pci_id_nicvf_map,
22146110b1c6SJerin Jacob 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
22156110b1c6SJerin Jacob 			RTE_PCI_DRV_INTR_LSC,
2216fdf91e0fSJan Blunck 	.probe = nicvf_eth_pci_probe,
2217fdf91e0fSJan Blunck 	.remove = nicvf_eth_pci_remove,
2218e4387966SJerin Jacob };
2219e4387966SJerin Jacob 
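/* Register the PMD as net_thunderx, export its PCI ID table and
 * declare the UIO/VFIO kernel modules its devices can be bound to.
 */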
2220fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
222101f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
222206e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
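/*
 * Example: a VF must be bound to one of the kernel modules above before
 * EAL can probe it. With a hypothetical BDF of 0002:01:00.1:
 *
 *   usertools/dpdk-devbind.py --bind=vfio-pci 0002:01:00.1
 */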
2223