xref: /dpdk/drivers/net/enic/enic_ethdev.c (revision a41f593f1bce27cd94eae0e85a8085c592b14b30)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_geneve.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
	{.vendor_id = 0, /* sentinel */},
};

/* Supported link speeds of production VIC models */
static const struct vic_speed_capa {
	uint16_t sub_devid;
	uint32_t capa;
} vic_speed_capa_map[] = {
	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
	{ 0, 0 }, /* End marker */
};

#define ENIC_DEVARG_CQ64 "cq64"
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
#define ENIC_DEVARG_REPRESENTOR "representor"
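
/*
 * Usage note (added, illustrative): these devargs are passed on the EAL
 * device allow-list, e.g. "-a 12:00.0,disable-overlay=1,enable-avx2-rx=1".
 * The PCI address here is only an example.
 */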

RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);

static int
enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
			 const struct rte_flow_ops **ops)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	if (enic->flow_filter_mode == FILTER_FLOWMAN)
		*ops = &enic_fm_flow_ops;
	else
		*ops = &enic_flow_ops;
	return 0;
}

static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* Check only the start of packet (SOP) RQs; the data RQs are not
	 * initialized when Rx scatter is disabled.
	 */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}
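
/*
 * Note (added, illustrative): rte_eth_rx/tx_queue_setup() may be called in
 * any order, so enicpmd_dev_setup_intr() runs after every queue setup and
 * the checks above make it a no-op until the last queue is configured, at
 * which point interrupt resources are allocated and vNIC resources
 * initialized.
 */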

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
{
	struct enic *enic;
	struct vnic_rq *sop_rq;
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;

	sop_rq = rx_queue;
	enic = vnic_dev_priv(sop_rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
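
/*
 * Worked example of the wrap-around math above (added note): with
 * desc_count = 512, to_clean = 500 and a hardware tail of 10, the tail
 * has wrapped past the clean index, so 512 is added first and
 * (10 + 512) - 500 = 22 descriptors are reported as pending.
 */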

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	return enic_set_vlan_strip(enic);
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_RSS_HASH;

	enic->mc_count = 0;
	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
	/* Apply the current settings using all VLAN offload masks */
	mask = RTE_ETH_VLAN_STRIP_MASK |
		RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If the user key is
	 * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}

/*
 * Close device.
 */
static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic_remove(enic);

	return 0;
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	ENICPMD_FUNC_TRACE();
	return enic_link_update(eth_dev);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_clear(enic);
}

static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
{
	const struct vic_speed_capa *m;
	struct rte_pci_device *pdev;
	uint16_t id;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	id = pdev->id.subsystem_device_id;
	for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
		if (m->sub_devid == id)
			return m->capa;
	}
	/* 1300 and later models are at least 40G */
	if (id >= 0x0100)
		return RTE_ETH_LINK_SPEED_40G;
	/* VFs have subsystem id 0, check device id */
	if (id == 0) {
		/* Newer VF implies at least 40G model */
		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
			return RTE_ETH_LINK_SPEED_40G;
	}
	return RTE_ETH_LINK_SPEED_10G;
}
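
/*
 * Illustrative usage sketch (added; not part of the upstream driver, hence
 * disabled): how an application observes the bitmask computed above.
 * rte_eth_dev_info_get() calls enicpmd_dev_info_get(), which stores the
 * result in dev_info.speed_capa. Port id 0 is an assumption.
 */
#if 0
static void example_print_speed_capa(void)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(0, &dev_info) != 0)
		return;
	if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_40G)
		printf("port 0 is 40G-capable\n");
}
#endif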

static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per Rx queue exposed to DPDK */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.mtu, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->min_mtu = ENIC_MIN_MTU;
	device_info->max_mtu = enic->max_mtu;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
	device_info->speed_capa = speed_capa_from_pci_id(eth_dev);

	return 0;
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != rte_eth_pkt_burst_dummy &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}

static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 0;

	return ret;
}

static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 1;

	return ret;
}

static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 0;

	return ret;
}

static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 1;

	return ret;
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
	ENICPMD_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr) ||
		    rte_is_broadcast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str,
					RTE_ETHER_ADDR_FMT_SIZE, addr);
			ENICPMD_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		ENICPMD_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply it later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct rte_ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}
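
/*
 * Worked example of the diff logic above (added note): if the NIC holds
 * {A, B} and the application requests {B, C}, pass 1 deletes A (on the NIC
 * but not in the new set) and pass 2 adds C (in the new set but not on the
 * NIC). B is left alone, so two devcmds are issued instead of the four a
 * flush-and-reload would need.
 */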

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}
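
/*
 * Indexing note (added): reta_conf packs RTE_ETH_RETA_GROUP_SIZE (64)
 * entries per element, so entry i = 70 lives at idx = 70 / 64 = 1,
 * shift = 70 % 64 = 6, and is applied only when bit 6 of
 * reta_conf[1].mask is set. The vNIC side stores the table in 4-byte
 * groups, hence rss_cpu.cpu[i / 4].b[i % 4].
 */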

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}
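
/*
 * Key layout note (added): the loop above flattens the vNIC's RSS hash
 * key, which is stored in 10-byte substructures, so flat byte i of the
 * key is rss_key.key[i / 10].b[i % 10]; e.g. byte 25 is key[2].b[5].
 */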

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct enic *enic = pmd_priv(dev);
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (enic->use_noscatter_vec_rx_handler)
		info_str = "Vector AVX2 No Scatter";
	else if (pkt_burst == enic_noscatter_recv_pkts)
		info_str = "Scalar No Scatter";
	else if (pkt_burst == enic_recv_pkts)
		info_str = "Scalar";
	else if (pkt_burst == enic_recv_pkts_64)
		info_str = "Scalar 64B Completion";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}

static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (pkt_burst == enic_simple_xmit_pkts)
		info_str = "Scalar Simplified";
	else if (pkt_burst == enic_xmit_pkts)
		info_str = "Scalar";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
{
	uint8_t cfg;

	cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
		OVERLAY_CFG_GENEVE_PORT_UPDATE;
	if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
		ENICPMD_LOG(DEBUG, " failed to update tunnel port\n");
		return -EINVAL;
	}
	ENICPMD_LOG(DEBUG, " updated %s port to %u\n",
		    vxlan ? "vxlan" : "geneve", port);
	if (vxlan)
		enic->vxlan_port = port;
	else
		enic->geneve_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint16_t port;
	bool vxlan;
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
	if (vxlan)
		port = enic->vxlan_port;
	else
		port = enic->geneve_port;
	/*
	 * The NIC has 1 configurable port number per tunnel type.
	 * "Adding" a new port number replaces it.
	 */
	if (tnl->udp_port == port || tnl->udp_port == 0) {
		ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_tunnel_port(enic, tnl->udp_port, vxlan);
}
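
/*
 * Illustrative usage sketch (added; not part of the upstream driver, hence
 * disabled): replacing the NIC's single VXLAN port through the ethdev API,
 * which lands in enicpmd_dev_udp_tunnel_port_add() above. Port id 0 and
 * UDP port 4800 are assumptions for the example.
 */
#if 0
static void example_set_vxlan_port(void)
{
	struct rte_eth_udp_tunnel tnl = {
		.udp_port = 4800,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	if (rte_eth_dev_udp_tunnel_port_add(0, &tnl) != 0)
		printf("failed to set the VXLAN port\n");
}
#endif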
10308a4efd17SHyong Youb Kim 
10318a4efd17SHyong Youb Kim static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
10328a4efd17SHyong Youb Kim 					   struct rte_eth_udp_tunnel *tnl)
10338a4efd17SHyong Youb Kim {
10348a4efd17SHyong Youb Kim 	struct enic *enic = pmd_priv(eth_dev);
103561c7b522SJohn Daley 	uint16_t port;
103661c7b522SJohn Daley 	bool vxlan;
10378a4efd17SHyong Youb Kim 	int ret;
10388a4efd17SHyong Youb Kim 
10398a4efd17SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
10408a4efd17SHyong Youb Kim 	ret = udp_tunnel_common_check(enic, tnl);
10418a4efd17SHyong Youb Kim 	if (ret)
10428a4efd17SHyong Youb Kim 		return ret;
1043295968d1SFerruh Yigit 	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
104461c7b522SJohn Daley 	if (vxlan)
104561c7b522SJohn Daley 		port = enic->vxlan_port;
104661c7b522SJohn Daley 	else
104761c7b522SJohn Daley 		port = enic->geneve_port;
10488a4efd17SHyong Youb Kim 	/*
10498a4efd17SHyong Youb Kim 	 * Clear the previously set port number and restore the
10508a4efd17SHyong Youb Kim 	 * hardware default port number. Some drivers disable VXLAN
10518a4efd17SHyong Youb Kim 	 * offloads when there are no configured port numbers. But
10528a4efd17SHyong Youb Kim 	 * enic does not do that as VXLAN is part of overlay offload,
10538a4efd17SHyong Youb Kim 	 * which is tied to inner RSS and TSO.
10548a4efd17SHyong Youb Kim 	 */
105561c7b522SJohn Daley 	if (tnl->udp_port != port) {
105661c7b522SJohn Daley 		ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n",
10578a4efd17SHyong Youb Kim 			     tnl->udp_port);
10588a4efd17SHyong Youb Kim 		return -EINVAL;
10598a4efd17SHyong Youb Kim 	}
106061c7b522SJohn Daley 	port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
106161c7b522SJohn Daley 	return update_tunnel_port(enic, port, vxlan);
10628a4efd17SHyong Youb Kim }
10638a4efd17SHyong Youb Kim 
106429343067SHyong Youb Kim static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
106529343067SHyong Youb Kim 				      char *fw_version, size_t fw_size)
106629343067SHyong Youb Kim {
106729343067SHyong Youb Kim 	struct vnic_devcmd_fw_info *info;
106829343067SHyong Youb Kim 	struct enic *enic;
106929343067SHyong Youb Kim 	int ret;
107029343067SHyong Youb Kim 
107129343067SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
1072d345d6c9SFerruh Yigit 
107329343067SHyong Youb Kim 	enic = pmd_priv(eth_dev);
107429343067SHyong Youb Kim 	ret = vnic_dev_fw_info(enic->vdev, &info);
107529343067SHyong Youb Kim 	if (ret)
107629343067SHyong Youb Kim 		return ret;
1077d345d6c9SFerruh Yigit 	ret = snprintf(fw_version, fw_size, "%s %s",
107829343067SHyong Youb Kim 		 info->fw_version, info->fw_build);
1079d345d6c9SFerruh Yigit 	if (ret < 0)
1080d345d6c9SFerruh Yigit 		return -EINVAL;
1081d345d6c9SFerruh Yigit 
1082d345d6c9SFerruh Yigit 	ret += 1; /* account for the terminating '\0' */
1083d345d6c9SFerruh Yigit 	if (fw_size < (size_t)ret)
1084d345d6c9SFerruh Yigit 		return ret;
1085d345d6c9SFerruh Yigit 	else
108629343067SHyong Youb Kim 		return 0;
108729343067SHyong Youb Kim }
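/*
 * Example (editor's sketch): the handler above follows the ethdev
 * fw_version contract -- 0 on success, or the required buffer size
 * (including the '\0') when fw_size is too small -- so a caller can
 * probe and retry. port_id is assumed valid:
 *
 *	char fw[8];
 *	int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (need > (int)sizeof(fw)) {
 *		char *buf = malloc(need);
 *		if (buf != NULL)
 *			rte_eth_dev_fw_version_get(port_id, buf, need);
 *	}
 */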
108829343067SHyong Youb Kim 
108972f3de30SBruce Richardson static const struct eth_dev_ops enicpmd_eth_dev_ops = {
109072f3de30SBruce Richardson 	.dev_configure        = enicpmd_dev_configure,
109172f3de30SBruce Richardson 	.dev_start            = enicpmd_dev_start,
109272f3de30SBruce Richardson 	.dev_stop             = enicpmd_dev_stop,
109372f3de30SBruce Richardson 	.dev_set_link_up      = NULL,
109472f3de30SBruce Richardson 	.dev_set_link_down    = NULL,
109572f3de30SBruce Richardson 	.dev_close            = enicpmd_dev_close,
109672f3de30SBruce Richardson 	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
109772f3de30SBruce Richardson 	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
109872f3de30SBruce Richardson 	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
109972f3de30SBruce Richardson 	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
110072f3de30SBruce Richardson 	.link_update          = enicpmd_dev_link_update,
110172f3de30SBruce Richardson 	.stats_get            = enicpmd_dev_stats_get,
110272f3de30SBruce Richardson 	.stats_reset          = enicpmd_dev_stats_reset,
110372f3de30SBruce Richardson 	.queue_stats_mapping_set = NULL,
110472f3de30SBruce Richardson 	.dev_infos_get        = enicpmd_dev_info_get,
110578a38edfSJianfeng Tan 	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
1106396a6d71SJohn Daley 	.mtu_set              = enicpmd_mtu_set,
1107f9416bbaSHyong Youb Kim 	.vlan_filter_set      = NULL,
110872f3de30SBruce Richardson 	.vlan_tpid_set        = NULL,
110972f3de30SBruce Richardson 	.vlan_offload_set     = enicpmd_vlan_offload_set,
111072f3de30SBruce Richardson 	.vlan_strip_queue_set = NULL,
111172f3de30SBruce Richardson 	.rx_queue_start       = enicpmd_dev_rx_queue_start,
111272f3de30SBruce Richardson 	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
111372f3de30SBruce Richardson 	.tx_queue_start       = enicpmd_dev_tx_queue_start,
111472f3de30SBruce Richardson 	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
111572f3de30SBruce Richardson 	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
111672f3de30SBruce Richardson 	.rx_queue_release     = enicpmd_dev_rx_queue_release,
111772f3de30SBruce Richardson 	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
111872f3de30SBruce Richardson 	.tx_queue_release     = enicpmd_dev_tx_queue_release,
11190f872d31SHyong Youb Kim 	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
11200f872d31SHyong Youb Kim 	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
112192ca7ea4SHyong Youb Kim 	.rxq_info_get         = enicpmd_dev_rxq_info_get,
112292ca7ea4SHyong Youb Kim 	.txq_info_get         = enicpmd_dev_txq_info_get,
1123f011fa0aSHyong Youb Kim 	.rx_burst_mode_get    = enicpmd_dev_rx_burst_mode_get,
1124f011fa0aSHyong Youb Kim 	.tx_burst_mode_get    = enicpmd_dev_tx_burst_mode_get,
112572f3de30SBruce Richardson 	.dev_led_on           = NULL,
112672f3de30SBruce Richardson 	.dev_led_off          = NULL,
112772f3de30SBruce Richardson 	.flow_ctrl_get        = NULL,
112872f3de30SBruce Richardson 	.flow_ctrl_set        = NULL,
112972f3de30SBruce Richardson 	.priority_flow_ctrl_set = NULL,
113072f3de30SBruce Richardson 	.mac_addr_add         = enicpmd_add_mac_addr,
113172f3de30SBruce Richardson 	.mac_addr_remove      = enicpmd_remove_mac_addr,
1132740f5bf1SDavid Marchand 	.mac_addr_set         = enicpmd_set_mac_addr,
11338d496995SHyong Youb Kim 	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
1134fb7ad441SThomas Monjalon 	.flow_ops_get         = enicpmd_dev_flow_ops_get,
1135c2fec27bSHyong Youb Kim 	.reta_query           = enicpmd_dev_rss_reta_query,
1136c2fec27bSHyong Youb Kim 	.reta_update          = enicpmd_dev_rss_reta_update,
1137c2fec27bSHyong Youb Kim 	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
1138c2fec27bSHyong Youb Kim 	.rss_hash_update      = enicpmd_dev_rss_hash_update,
11398a4efd17SHyong Youb Kim 	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
11408a4efd17SHyong Youb Kim 	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
114129343067SHyong Youb Kim 	.fw_version_get       = enicpmd_dev_fw_version_get,
114272f3de30SBruce Richardson };
114372f3de30SBruce Richardson 
11448a6ff33dSHyong Youb Kim static int enic_parse_zero_one(const char *key,
114593fb21fdSHyong Youb Kim 			       const char *value,
114693fb21fdSHyong Youb Kim 			       void *opaque)
114793fb21fdSHyong Youb Kim {
114893fb21fdSHyong Youb Kim 	struct enic *enic;
11498a6ff33dSHyong Youb Kim 	bool b;
115093fb21fdSHyong Youb Kim 
115193fb21fdSHyong Youb Kim 	enic = (struct enic *)opaque;
115293fb21fdSHyong Youb Kim 	if (strcmp(value, "0") == 0) {
11538a6ff33dSHyong Youb Kim 		b = false;
115493fb21fdSHyong Youb Kim 	} else if (strcmp(value, "1") == 0) {
11558a6ff33dSHyong Youb Kim 		b = true;
115693fb21fdSHyong Youb Kim 	} else {
11578a6ff33dSHyong Youb Kim 		dev_err(enic, "Invalid value for %s"
11588a6ff33dSHyong Youb Kim 			": expected=0|1 given=%s\n", key, value);
115993fb21fdSHyong Youb Kim 		return -EINVAL;
116093fb21fdSHyong Youb Kim 	}
11618b428cb5SHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_CQ64) == 0)
11628b428cb5SHyong Youb Kim 		enic->cq64_request = b;
11638a6ff33dSHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
11648a6ff33dSHyong Youb Kim 		enic->disable_overlay = b;
11658a6ff33dSHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
11668a6ff33dSHyong Youb Kim 		enic->enable_avx2_rx = b;
116793fb21fdSHyong Youb Kim 	return 0;
116893fb21fdSHyong Youb Kim }
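/*
 * Example (editor's sketch): the boolean devargs handled above are
 * passed on the EAL allow list, assuming the ENIC_DEVARG_* macros map
 * to the key names shown (PCI address is illustrative):
 *
 *	dpdk-testpmd -a 0000:af:00.0,disable-overlay=1,enable-avx2-rx=1
 *
 * Any value other than "0" or "1" is rejected with -EINVAL.
 */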
116993fb21fdSHyong Youb Kim 
1170e39c2756SHyong Youb Kim static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
1171e39c2756SHyong Youb Kim 				      const char *value,
1172e39c2756SHyong Youb Kim 				      void *opaque)
1173e39c2756SHyong Youb Kim {
1174e39c2756SHyong Youb Kim 	struct enic *enic;
1175e39c2756SHyong Youb Kim 
1176e39c2756SHyong Youb Kim 	enic = (struct enic *)opaque;
1177e39c2756SHyong Youb Kim 	if (strcmp(value, "trunk") == 0) {
1178e39c2756SHyong Youb Kim 		/* Trunk mode: always tag */
1179e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
1180e39c2756SHyong Youb Kim 	} else if (strcmp(value, "untag") == 0) {
1181e39c2756SHyong Youb Kim 		/* Untag default VLAN mode: untag if VLAN = default VLAN */
1182e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode =
1183e39c2756SHyong Youb Kim 			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
1184e39c2756SHyong Youb Kim 	} else if (strcmp(value, "priority") == 0) {
1185e39c2756SHyong Youb Kim 		/*
1186e39c2756SHyong Youb Kim 		 * Priority-tag default VLAN mode: priority tag (VLAN header
1187e39c2756SHyong Youb Kim 		 * with ID=0) if VLAN = default
1188e39c2756SHyong Youb Kim 		 */
1189e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode =
1190e39c2756SHyong Youb Kim 			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
1191e39c2756SHyong Youb Kim 	} else if (strcmp(value, "pass") == 0) {
1192e39c2756SHyong Youb Kim 		/* Pass through mode: do not touch tags */
1193e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
1194e39c2756SHyong Youb Kim 	} else {
1195e39c2756SHyong Youb Kim 		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
1196e39c2756SHyong Youb Kim 			": expected=trunk|untag|priority|pass given=%s\n",
1197e39c2756SHyong Youb Kim 			value);
1198e39c2756SHyong Youb Kim 		return -EINVAL;
1199e39c2756SHyong Youb Kim 	}
1200e39c2756SHyong Youb Kim 	return 0;
1201e39c2756SHyong Youb Kim }
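/*
 * Example (editor's sketch): selecting the untag-default-VLAN rewrite
 * mode at startup, assuming ENIC_DEVARG_IG_VLAN_REWRITE is the key
 * "ig-vlan-rewrite" (PCI address is illustrative):
 *
 *	dpdk-testpmd -a 0000:af:00.0,ig-vlan-rewrite=untag
 */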
1202e39c2756SHyong Youb Kim 
120393fb21fdSHyong Youb Kim static int enic_check_devargs(struct rte_eth_dev *dev)
120493fb21fdSHyong Youb Kim {
120593fb21fdSHyong Youb Kim 	static const char *const valid_keys[] = {
12068b428cb5SHyong Youb Kim 		ENIC_DEVARG_CQ64,
1207e39c2756SHyong Youb Kim 		ENIC_DEVARG_DISABLE_OVERLAY,
12088a6ff33dSHyong Youb Kim 		ENIC_DEVARG_ENABLE_AVX2_RX,
1209e39c2756SHyong Youb Kim 		ENIC_DEVARG_IG_VLAN_REWRITE,
121039cf83f1SHyong Youb Kim 		ENIC_DEVARG_REPRESENTOR,
1211e39c2756SHyong Youb Kim 		NULL};
121293fb21fdSHyong Youb Kim 	struct enic *enic = pmd_priv(dev);
121393fb21fdSHyong Youb Kim 	struct rte_kvargs *kvlist;
121493fb21fdSHyong Youb Kim 
121593fb21fdSHyong Youb Kim 	ENICPMD_FUNC_TRACE();
121693fb21fdSHyong Youb Kim 
12178b428cb5SHyong Youb Kim 	enic->cq64_request = true; /* Use 64B entry if available */
121893fb21fdSHyong Youb Kim 	enic->disable_overlay = false;
12198a6ff33dSHyong Youb Kim 	enic->enable_avx2_rx = false;
1220e39c2756SHyong Youb Kim 	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
122193fb21fdSHyong Youb Kim 	if (!dev->device->devargs)
122293fb21fdSHyong Youb Kim 		return 0;
122393fb21fdSHyong Youb Kim 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
122493fb21fdSHyong Youb Kim 	if (!kvlist)
122593fb21fdSHyong Youb Kim 		return -EINVAL;
12268b428cb5SHyong Youb Kim 	if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64,
12278b428cb5SHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
12288b428cb5SHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
12298a6ff33dSHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
12308a6ff33dSHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
12318a6ff33dSHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
1232e39c2756SHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
1233e39c2756SHyong Youb Kim 			       enic_parse_ig_vlan_rewrite, enic) < 0) {
123493fb21fdSHyong Youb Kim 		rte_kvargs_free(kvlist);
123593fb21fdSHyong Youb Kim 		return -EINVAL;
123693fb21fdSHyong Youb Kim 	}
123793fb21fdSHyong Youb Kim 	rte_kvargs_free(kvlist);
123893fb21fdSHyong Youb Kim 	return 0;
123993fb21fdSHyong Youb Kim }
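/*
 * Example (editor's sketch): the kvargs flow above in miniature.
 * rte_kvargs_process() invokes the handler once per occurrence of the
 * key, so with "cq64=1,cq64=0" the last value wins. enic is assumed
 * to point at a valid adapter:
 *
 *	struct rte_kvargs *kv = rte_kvargs_parse("cq64=1,cq64=0", NULL);
 *	if (kv != NULL) {
 *		rte_kvargs_process(kv, ENIC_DEVARG_CQ64,
 *				   enic_parse_zero_one, enic);
 *		rte_kvargs_free(kv);
 *	}
 */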
124093fb21fdSHyong Youb Kim 
124139cf83f1SHyong Youb Kim /* Initialize the driver for PF */
124239cf83f1SHyong Youb Kim static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
124339cf83f1SHyong Youb Kim 			     void *init_params __rte_unused)
124472f3de30SBruce Richardson {
124572f3de30SBruce Richardson 	struct rte_pci_device *pdev;
124672f3de30SBruce Richardson 	struct rte_pci_addr *addr;
124772f3de30SBruce Richardson 	struct enic *enic = pmd_priv(eth_dev);
124893fb21fdSHyong Youb Kim 	int err;
124972f3de30SBruce Richardson 
125072f3de30SBruce Richardson 	ENICPMD_FUNC_TRACE();
125172f3de30SBruce Richardson 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
1252cbfc6111SFerruh Yigit 	eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
1253947d860cSJohn Daley 	eth_dev->rx_pkt_burst = &enic_recv_pkts;
1254d309bdc2SJohn Daley 	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
12551e81dbb5SHyong Youb Kim 	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
1256e92a4b41SHyong Youb Kim 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1257e92a4b41SHyong Youb Kim 		enic_pick_tx_handler(eth_dev);
1258e92a4b41SHyong Youb Kim 		enic_pick_rx_handler(eth_dev);
1259e92a4b41SHyong Youb Kim 		return 0;
1260e92a4b41SHyong Youb Kim 	}
1261e92a4b41SHyong Youb Kim 	/* Only the primary process sets up the adapter and other data in shared memory */
1262e92a4b41SHyong Youb Kim 	enic->port_id = eth_dev->data->port_id;
1263e92a4b41SHyong Youb Kim 	enic->rte_dev = eth_dev;
1264c655c547SHyong Youb Kim 	enic->dev_data = eth_dev->data;
126572f3de30SBruce Richardson 
1266c0802544SFerruh Yigit 	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
1267eeefe73fSBernard Iremonger 	rte_eth_copy_pci_info(eth_dev, pdev);
126872f3de30SBruce Richardson 	enic->pdev = pdev;
126972f3de30SBruce Richardson 	addr = &pdev->addr;
127072f3de30SBruce Richardson 
127172f3de30SBruce Richardson 	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
127272f3de30SBruce Richardson 		addr->domain, addr->bus, addr->devid, addr->function);
127372f3de30SBruce Richardson 
127493fb21fdSHyong Youb Kim 	err = enic_check_devargs(eth_dev);
127593fb21fdSHyong Youb Kim 	if (err)
127693fb21fdSHyong Youb Kim 		return err;
127739cf83f1SHyong Youb Kim 	err = enic_probe(enic);
127839cf83f1SHyong Youb Kim 	if (!err && enic->fm) {
127939cf83f1SHyong Youb Kim 		err = enic_fm_allocate_switch_domain(enic);
128039cf83f1SHyong Youb Kim 		if (err)
128139cf83f1SHyong Youb Kim 			ENICPMD_LOG(ERR, "failed to allocate switch domain id");
128239cf83f1SHyong Youb Kim 	}
128339cf83f1SHyong Youb Kim 	return err;
128439cf83f1SHyong Youb Kim }
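/*
 * Example (editor's sketch): note the early return above for
 * non-primary processes -- a secondary only attaches the rx/tx burst
 * handlers and reuses the adapter state the primary set up, so it can
 * immediately poll a configured queue. port_id and pkts[] are assumed
 * to come from the application:
 *
 *	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 *		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 */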
128539cf83f1SHyong Youb Kim 
128639cf83f1SHyong Youb Kim static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
128739cf83f1SHyong Youb Kim {
128839cf83f1SHyong Youb Kim 	struct enic *enic = pmd_priv(eth_dev);
128939cf83f1SHyong Youb Kim 	int err;
129039cf83f1SHyong Youb Kim 
129139cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
129239cf83f1SHyong Youb Kim 	eth_dev->device = NULL;
129339cf83f1SHyong Youb Kim 	eth_dev->intr_handle = NULL;
129439cf83f1SHyong Youb Kim 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
129539cf83f1SHyong Youb Kim 		return 0;
129639cf83f1SHyong Youb Kim 	err = rte_eth_switch_domain_free(enic->switch_domain_id);
129739cf83f1SHyong Youb Kim 	if (err)
129839cf83f1SHyong Youb Kim 		ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
129939cf83f1SHyong Youb Kim 	return 0;
130072f3de30SBruce Richardson }
130172f3de30SBruce Richardson 
1302fdf91e0fSJan Blunck static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1303fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
1304fdf91e0fSJan Blunck {
130539cf83f1SHyong Youb Kim 	char name[RTE_ETH_NAME_MAX_LEN];
130639cf83f1SHyong Youb Kim 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
130739cf83f1SHyong Youb Kim 	struct rte_eth_dev *pf_ethdev;
130839cf83f1SHyong Youb Kim 	struct enic *pf_enic;
130939cf83f1SHyong Youb Kim 	int i, retval;
131039cf83f1SHyong Youb Kim 
131139cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
131239cf83f1SHyong Youb Kim 	if (pci_dev->device.devargs) {
131339cf83f1SHyong Youb Kim 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
131439cf83f1SHyong Youb Kim 				&eth_da);
131539cf83f1SHyong Youb Kim 		if (retval)
131639cf83f1SHyong Youb Kim 			return retval;
131739cf83f1SHyong Youb Kim 	}
1318d6541676SXueming Li 	if (eth_da.nb_representor_ports > 0 &&
1319d6541676SXueming Li 	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
1320d6541676SXueming Li 		ENICPMD_LOG(ERR, "unsupported representor type: %s\n",
1321d6541676SXueming Li 			    pci_dev->device.devargs->args);
1322d6541676SXueming Li 		return -ENOTSUP;
1323d6541676SXueming Li 	}
132439cf83f1SHyong Youb Kim 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
132539cf83f1SHyong Youb Kim 		sizeof(struct enic),
132639cf83f1SHyong Youb Kim 		eth_dev_pci_specific_init, pci_dev,
132739cf83f1SHyong Youb Kim 		eth_enic_dev_init, NULL);
132839cf83f1SHyong Youb Kim 	if (retval || eth_da.nb_representor_ports < 1)
132939cf83f1SHyong Youb Kim 		return retval;
133039cf83f1SHyong Youb Kim 
133139cf83f1SHyong Youb Kim 	/* Probe VF representor */
133239cf83f1SHyong Youb Kim 	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
133339cf83f1SHyong Youb Kim 	if (pf_ethdev == NULL)
133439cf83f1SHyong Youb Kim 		return -ENODEV;
133539cf83f1SHyong Youb Kim 	/* Representors require flowman */
133639cf83f1SHyong Youb Kim 	pf_enic = pmd_priv(pf_ethdev);
133739cf83f1SHyong Youb Kim 	if (pf_enic->fm == NULL) {
133839cf83f1SHyong Youb Kim 		ENICPMD_LOG(ERR, "VF representors require flowman");
133939cf83f1SHyong Youb Kim 		return -ENOTSUP;
134039cf83f1SHyong Youb Kim 	}
134139cf83f1SHyong Youb Kim 	/*
134239cf83f1SHyong Youb Kim 	 * For now representors imply switchdev, as firmware does not support
134339cf83f1SHyong Youb Kim 	 * legacy mode SR-IOV
134439cf83f1SHyong Youb Kim 	 */
134539cf83f1SHyong Youb Kim 	pf_enic->switchdev_mode = 1;
134639cf83f1SHyong Youb Kim 	/* Calculate max VF ID before initializing representors */
134739cf83f1SHyong Youb Kim 	pf_enic->max_vf_id = 0;
134839cf83f1SHyong Youb Kim 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
134939cf83f1SHyong Youb Kim 		pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
135039cf83f1SHyong Youb Kim 					     eth_da.representor_ports[i]);
135139cf83f1SHyong Youb Kim 	}
135239cf83f1SHyong Youb Kim 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
135339cf83f1SHyong Youb Kim 		struct enic_vf_representor representor;
135439cf83f1SHyong Youb Kim 
135539cf83f1SHyong Youb Kim 		representor.vf_id = eth_da.representor_ports[i];
135639cf83f1SHyong Youb Kim 				representor.switch_domain_id =
135739cf83f1SHyong Youb Kim 		representor.switch_domain_id =
135839cf83f1SHyong Youb Kim 		representor.pf = pmd_priv(pf_ethdev);
135939cf83f1SHyong Youb Kim 		snprintf(name, sizeof(name), "net_%s_representor_%d",
136039cf83f1SHyong Youb Kim 			pci_dev->device.name, eth_da.representor_ports[i]);
136139cf83f1SHyong Youb Kim 		retval = rte_eth_dev_create(&pci_dev->device, name,
136239cf83f1SHyong Youb Kim 			sizeof(struct enic_vf_representor), NULL, NULL,
136339cf83f1SHyong Youb Kim 			enic_vf_representor_init, &representor);
136439cf83f1SHyong Youb Kim 		if (retval) {
136539cf83f1SHyong Youb Kim 			ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
136639cf83f1SHyong Youb Kim 				    name);
136739cf83f1SHyong Youb Kim 			return retval;
136839cf83f1SHyong Youb Kim 		}
136939cf83f1SHyong Youb Kim 	}
137039cf83f1SHyong Youb Kim 	return 0;
1371fdf91e0fSJan Blunck }
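/*
 * Example (editor's sketch): probing the PF plus two VF representors;
 * the "representor" devarg feeds the loop above and implicitly enables
 * switchdev mode (PCI address is illustrative):
 *
 *	dpdk-testpmd -a 0000:af:00.0,representor=[0-1] -- -i
 */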
1372fdf91e0fSJan Blunck 
1373fdf91e0fSJan Blunck static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
1374fdf91e0fSJan Blunck {
137539cf83f1SHyong Youb Kim 	struct rte_eth_dev *ethdev;
137639cf83f1SHyong Youb Kim 
137739cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
137839cf83f1SHyong Youb Kim 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
137939cf83f1SHyong Youb Kim 	if (!ethdev)
138039cf83f1SHyong Youb Kim 		return -ENODEV;
138139cf83f1SHyong Youb Kim 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
138239cf83f1SHyong Youb Kim 		return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
138339cf83f1SHyong Youb Kim 	else
138439cf83f1SHyong Youb Kim 		return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
1385fdf91e0fSJan Blunck }
1386fdf91e0fSJan Blunck 
1387fdf91e0fSJan Blunck static struct rte_pci_driver rte_enic_pmd = {
138872f3de30SBruce Richardson 	.id_table = pci_id_enic_map,
1389b76fafb1SDavid Marchand 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1390fdf91e0fSJan Blunck 	.probe = eth_enic_pci_probe,
1391fdf91e0fSJan Blunck 	.remove = eth_enic_pci_remove,
139272f3de30SBruce Richardson };
139372f3de30SBruce Richardson 
1394ea7768b5SHyong Youb Kim int dev_is_enic(struct rte_eth_dev *dev)
1395ea7768b5SHyong Youb Kim {
1396ea7768b5SHyong Youb Kim 	return dev->device->driver == &rte_enic_pmd.driver;
1397ea7768b5SHyong Youb Kim }
1398ea7768b5SHyong Youb Kim 
1399fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
140001f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
140106e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
140293fb21fdSHyong Youb Kim RTE_PMD_REGISTER_PARAM_STRING(net_enic,
14038b428cb5SHyong Youb Kim 	ENIC_DEVARG_CQ64 "=0|1 "
1404e39c2756SHyong Youb Kim 	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
14058a6ff33dSHyong Youb Kim 	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
1406e39c2756SHyong Youb Kim 	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
1407