xref: /dpdk/drivers/net/enic/enic_ethdev.c (revision 543617f44eec3e348ea8cd04924ef80389610d46)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <dev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_geneve.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"
#include "enic_sriov.h"

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
	{.vendor_id = 0, /* sentinel */},
};

/* Supported link speeds of production VIC models */
static const struct vic_speed_capa {
	uint16_t sub_devid;
	uint32_t capa;
} vic_speed_capa_map[] = {
	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
	{ 0x02af, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1467 MLOM */
	{ 0x02b0, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1477 MLOM */
	{ 0x02cf, RTE_ETH_LINK_SPEED_25G }, /* 14425 MLOM */
	{ 0x02d0, RTE_ETH_LINK_SPEED_25G }, /* 14825 Mezz */
	{ 0x02db, RTE_ETH_LINK_SPEED_100G }, /* 15231 MLOM */
	{ 0x02dc, RTE_ETH_LINK_SPEED_10G }, /* 15411 MLOM */
	{ 0x02dd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_50G }, /* 15428 MLOM */
	{ 0x02de, RTE_ETH_LINK_SPEED_25G }, /* 15420 MLOM */
	{ 0x02e8, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G |
		  RTE_ETH_LINK_SPEED_200G }, /* 15238 MLOM */
	{ 0x02e0, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_50G }, /* 15427 MLOM */
	{ 0x02df, RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G }, /* 15230 MLOM */
	{ 0x02e1, RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_50G }, /* 15422 Mezz */
	{ 0x02e4, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G |
		  RTE_ETH_LINK_SPEED_200G }, /* 15235 PCIe */
	{ 0x02f2, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_50G }, /* 15425 PCIe */
	{ 0x02f3, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G |
		  RTE_ETH_LINK_SPEED_200G }, /* 15237 MLOM */
	{ 0, 0 }, /* End marker */
};

#define ENIC_DEVARG_CQ64 "cq64"
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
#define ENIC_DEVARG_REPRESENTOR "representor"
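
/*
 * Illustrative sketch of how these devargs reach the PMD: they are
 * appended to the device's EAL allowlist entry as key=value pairs.
 * The PCI address and values below are hypothetical; see the enic
 * guide (doc/guides/nics/enic.rst) for the authoritative option list.
 *
 *   dpdk-testpmd -a 0000:12:00.0,disable-overlay=1,cq64=1 -- -i
 */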

RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);

static int
enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
			 const struct rte_flow_ops **ops)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	if (enic->flow_filter_mode == FILTER_FLOWMAN)
		*ops = &enic_fm_flow_ops;
	else
		*ops = &enic_flow_ops;
	return 0;
}

static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

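/*
 * Interrupt resources are allocated only once all queues have been set
 * up: every RX/TX queue setup path funnels into this helper, which
 * returns early (0) until the last configured queue has a control
 * block, then allocates the interrupt resources and finalizes vNIC
 * setup.
 */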
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* Check only the start-of-packet (SOP) RQs; the data RQs are not
	 * set up when Rx scatter is disabled.
	 */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

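/*
 * A worked example of the wraparound arithmetic below (illustrative
 * values only): with desc_count = 64, to_clean = 60 and a hardware
 * cq_tail of 4, cq_tail < cq_idx, so cq_tail becomes 4 + 64 = 68 and
 * the function reports 68 - 60 = 8 completed, not-yet-cleaned entries.
 */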
static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
{
	struct enic *enic;
	struct vnic_rq *sop_rq;
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;

	sop_rq = rx_queue;
	enic = vnic_dev_priv(sop_rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	return enic_set_vlan_strip(enic);
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_RSS_HASH;

	enic->mc_count = 0;
	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
	/* Apply the current settings for all VLAN offload masks */
	mask = RTE_ETH_VLAN_STRIP_MASK |
		RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If a user key is
	 * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);
	uint16_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/*
 * Close device: release the resources held by the port.
 */
static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic_remove(enic);

	return 0;
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	ENICPMD_FUNC_TRACE();
	return enic_link_update(eth_dev);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_clear(enic);
}

static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
{
	const struct vic_speed_capa *m;
	struct rte_pci_device *pdev;
	uint16_t id;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	id = pdev->id.subsystem_device_id;
	for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
		if (m->sub_devid == id)
			return m->capa;
	}
	/* 1300 and later models are at least 40G */
	if (id >= 0x0100)
		return RTE_ETH_LINK_SPEED_40G;
	/* VFs have subsystem id 0, check device id */
	if (id == 0) {
		/* Newer VF implies at least 40G model */
		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
			return RTE_ETH_LINK_SPEED_40G;
	}
	return RTE_ETH_LINK_SPEED_10G;
}

static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to DPDK */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.mtu, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->min_mtu = ENIC_MIN_MTU;
	device_info->max_mtu = enic->max_mtu;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
	device_info->speed_capa = speed_capa_from_pci_id(eth_dev);

	return 0;
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev,
							size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
	};

	if (dev->rx_pkt_burst != rte_eth_pkt_burst_dummy &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload) {
			*no_of_elements = RTE_DIM(ptypes_overlay);
			return ptypes_overlay;
		} else {
			*no_of_elements = RTE_DIM(ptypes);
			return ptypes;
		}
	}
	return NULL;
}

static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 0;

	return ret;
}

static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 1;

	return ret;
}

static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 0;

	return ret;
}

static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 1;

	return ret;
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
	ENICPMD_LOG(DEBUG, " %s address %s",
		     add ? "add" : "remove", mac_str);
}

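/*
 * Illustrative example of the difference-based update implemented
 * below: if the NIC currently has {A, B} and the new set is {B, C},
 * step 1 deletes A (on the NIC but not in the new set) and step 2 adds
 * C (in the new set but not on the NIC); B is left untouched.
 */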
static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr) ||
		    rte_is_broadcast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str,
					RTE_ETHER_ADDR_FMT_SIZE, addr);
			ENICPMD_LOG(ERR, " invalid multicast address %s",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		ENICPMD_LOG(DEBUG, " flush multicast addresses");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = enic_dev_del_addr(enic, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		ENICPMD_LOG(ERR, " too many multicast addresses: max=%d",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = enic_dev_del_addr(enic, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = enic_dev_add_addr(enic, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct rte_ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

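/*
 * A worked example of the reta indexing used above and below
 * (illustrative): with RTE_ETH_RETA_GROUP_SIZE = 64, entry i = 75
 * lives in reta_conf[1] at shift 11 (75 / 64 = 1, 75 % 64 = 11), while
 * the 4-byte packing of the vNIC table places it in
 * rss_cpu.cpu[18].b[3] (75 / 4 = 18, 75 % 4 = 3).
 */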
static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		/* The vNIC stores the hash key in 10-byte groups */
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct enic *enic = pmd_priv(dev);
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (enic->use_noscatter_vec_rx_handler)
		info_str = "Vector AVX2 No Scatter";
	else if (pkt_burst == enic_noscatter_recv_pkts)
		info_str = "Scalar No Scatter";
	else if (pkt_burst == enic_recv_pkts)
		info_str = "Scalar";
	else if (pkt_burst == enic_recv_pkts_64)
		info_str = "Scalar 64B Completion";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}

static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (pkt_burst == enic_simple_xmit_pkts)
		info_str = "Scalar Simplified";
	else if (pkt_burst == enic_xmit_pkts)
		info_str = "Scalar";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

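/*
 * A minimal usage sketch of the UDP tunnel port update handled below,
 * from an application's point of view (port_id and the port number
 * are hypothetical):
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.udp_port = 4790,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 *
 * On this NIC there is a single configurable port per tunnel type, so
 * "adding" a port replaces the previous one.
 */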
9998a4efd17SHyong Youb Kim static int udp_tunnel_common_check(struct enic *enic,
10008a4efd17SHyong Youb Kim 				   struct rte_eth_udp_tunnel *tnl)
10018a4efd17SHyong Youb Kim {
1002295968d1SFerruh Yigit 	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
1003295968d1SFerruh Yigit 	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
10048a4efd17SHyong Youb Kim 		return -ENOTSUP;
10058a4efd17SHyong Youb Kim 	if (!enic->overlay_offload) {
1006f665790aSDavid Marchand 		ENICPMD_LOG(DEBUG, " overlay offload is not supported");
10078a4efd17SHyong Youb Kim 		return -ENOTSUP;
10088a4efd17SHyong Youb Kim 	}
10098a4efd17SHyong Youb Kim 	return 0;
10108a4efd17SHyong Youb Kim }
10118a4efd17SHyong Youb Kim 
101261c7b522SJohn Daley static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
10138a4efd17SHyong Youb Kim {
101461c7b522SJohn Daley 	uint8_t cfg;
101561c7b522SJohn Daley 
101661c7b522SJohn Daley 	cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
101761c7b522SJohn Daley 		OVERLAY_CFG_GENEVE_PORT_UPDATE;
101861c7b522SJohn Daley 	if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
1019f665790aSDavid Marchand 		ENICPMD_LOG(DEBUG, " failed to update tunnel port");
10208a4efd17SHyong Youb Kim 		return -EINVAL;
10218a4efd17SHyong Youb Kim 	}
1022f665790aSDavid Marchand 	ENICPMD_LOG(DEBUG, " updated %s port to %u",
102361c7b522SJohn Daley 		    vxlan ? "vxlan" : "geneve", port);
102461c7b522SJohn Daley 	if (vxlan)
10258a4efd17SHyong Youb Kim 		enic->vxlan_port = port;
102661c7b522SJohn Daley 	else
102761c7b522SJohn Daley 		enic->geneve_port = port;
10288a4efd17SHyong Youb Kim 	return 0;
10298a4efd17SHyong Youb Kim }
10308a4efd17SHyong Youb Kim 
10318a4efd17SHyong Youb Kim static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
10328a4efd17SHyong Youb Kim 					   struct rte_eth_udp_tunnel *tnl)
10338a4efd17SHyong Youb Kim {
10348a4efd17SHyong Youb Kim 	struct enic *enic = pmd_priv(eth_dev);
103561c7b522SJohn Daley 	uint16_t port;
103661c7b522SJohn Daley 	bool vxlan;
10378a4efd17SHyong Youb Kim 	int ret;
10388a4efd17SHyong Youb Kim 
10398a4efd17SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
10408a4efd17SHyong Youb Kim 	ret = udp_tunnel_common_check(enic, tnl);
10418a4efd17SHyong Youb Kim 	if (ret)
10428a4efd17SHyong Youb Kim 		return ret;
1043295968d1SFerruh Yigit 	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
104461c7b522SJohn Daley 	if (vxlan)
104561c7b522SJohn Daley 		port = enic->vxlan_port;
104661c7b522SJohn Daley 	else
104761c7b522SJohn Daley 		port = enic->geneve_port;
10488a4efd17SHyong Youb Kim 	/*
104961c7b522SJohn Daley 	 * The NIC has 1 configurable port number per tunnel type.
105061c7b522SJohn Daley 	 * "Adding" a new port number replaces it.
10518a4efd17SHyong Youb Kim 	 */
105261c7b522SJohn Daley 	if (tnl->udp_port == port || tnl->udp_port == 0) {
1053f665790aSDavid Marchand 		ENICPMD_LOG(DEBUG, " %u is already configured or invalid",
10548a4efd17SHyong Youb Kim 			     tnl->udp_port);
10558a4efd17SHyong Youb Kim 		return -EINVAL;
10568a4efd17SHyong Youb Kim 	}
105761c7b522SJohn Daley 	return update_tunnel_port(enic, tnl->udp_port, vxlan);
10588a4efd17SHyong Youb Kim }
10598a4efd17SHyong Youb Kim 
10608a4efd17SHyong Youb Kim static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
10618a4efd17SHyong Youb Kim 					   struct rte_eth_udp_tunnel *tnl)
10628a4efd17SHyong Youb Kim {
10638a4efd17SHyong Youb Kim 	struct enic *enic = pmd_priv(eth_dev);
106461c7b522SJohn Daley 	uint16_t port;
106561c7b522SJohn Daley 	bool vxlan;
10668a4efd17SHyong Youb Kim 	int ret;
10678a4efd17SHyong Youb Kim 
10688a4efd17SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
10698a4efd17SHyong Youb Kim 	ret = udp_tunnel_common_check(enic, tnl);
10708a4efd17SHyong Youb Kim 	if (ret)
10718a4efd17SHyong Youb Kim 		return ret;
1072295968d1SFerruh Yigit 	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
107361c7b522SJohn Daley 	if (vxlan)
107461c7b522SJohn Daley 		port = enic->vxlan_port;
107561c7b522SJohn Daley 	else
107661c7b522SJohn Daley 		port = enic->geneve_port;
10778a4efd17SHyong Youb Kim 	/*
10788a4efd17SHyong Youb Kim 	 * Clear the previously set port number and restore the
10798a4efd17SHyong Youb Kim 	 * hardware default port number. Some drivers disable VXLAN
10808a4efd17SHyong Youb Kim 	 * offloads when there are no configured port numbers. But
10818a4efd17SHyong Youb Kim 	 * enic does not do that as VXLAN is part of overlay offload,
10828a4efd17SHyong Youb Kim 	 * which is tied to inner RSS and TSO.
10838a4efd17SHyong Youb Kim 	 */
108461c7b522SJohn Daley 	if (tnl->udp_port != port) {
1085f665790aSDavid Marchand 		ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port",
10868a4efd17SHyong Youb Kim 			     tnl->udp_port);
10878a4efd17SHyong Youb Kim 		return -EINVAL;
10888a4efd17SHyong Youb Kim 	}
108961c7b522SJohn Daley 	port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
109061c7b522SJohn Daley 	return update_tunnel_port(enic, port, vxlan);
10918a4efd17SHyong Youb Kim }
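
/*
 * Illustrative application-side usage (a sketch, not driver code): move the
 * NIC's VXLAN port away from the default on ethdev port 0.
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.udp_port = 4790,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	int rc = rte_eth_dev_udp_tunnel_port_add(0, &tnl);
 */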
10928a4efd17SHyong Youb Kim 
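/*
 * Report the firmware version as "<version> <build>". Per ethdev convention,
 * if the caller's buffer is too small, return the size (including the
 * terminating '\0') needed to hold the full string.
 */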
109329343067SHyong Youb Kim static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
109429343067SHyong Youb Kim 				      char *fw_version, size_t fw_size)
109529343067SHyong Youb Kim {
109629343067SHyong Youb Kim 	struct vnic_devcmd_fw_info *info;
109729343067SHyong Youb Kim 	struct enic *enic;
109829343067SHyong Youb Kim 	int ret;
109929343067SHyong Youb Kim 
110029343067SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
1101d345d6c9SFerruh Yigit 
110229343067SHyong Youb Kim 	enic = pmd_priv(eth_dev);
110329343067SHyong Youb Kim 	ret = vnic_dev_fw_info(enic->vdev, &info);
110429343067SHyong Youb Kim 	if (ret)
110529343067SHyong Youb Kim 		return ret;
1106d345d6c9SFerruh Yigit 	ret = snprintf(fw_version, fw_size, "%s %s",
110729343067SHyong Youb Kim 		 info->fw_version, info->fw_build);
1108d345d6c9SFerruh Yigit 	if (ret < 0)
1109d345d6c9SFerruh Yigit 		return -EINVAL;
1110d345d6c9SFerruh Yigit 
1111d345d6c9SFerruh Yigit 	ret += 1; /* account for the terminating '\0' */
1112d345d6c9SFerruh Yigit 	if (fw_size < (size_t)ret)
1113d345d6c9SFerruh Yigit 		return ret;
1114d345d6c9SFerruh Yigit 	else
111529343067SHyong Youb Kim 		return 0;
111629343067SHyong Youb Kim }
111729343067SHyong Youb Kim 
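/*
 * Ops left NULL are not implemented by this PMD; the ethdev layer rejects
 * the corresponding API calls, typically with -ENOTSUP.
 */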
111872f3de30SBruce Richardson static const struct eth_dev_ops enicpmd_eth_dev_ops = {
111972f3de30SBruce Richardson 	.dev_configure        = enicpmd_dev_configure,
112072f3de30SBruce Richardson 	.dev_start            = enicpmd_dev_start,
112172f3de30SBruce Richardson 	.dev_stop             = enicpmd_dev_stop,
112272f3de30SBruce Richardson 	.dev_set_link_up      = NULL,
112372f3de30SBruce Richardson 	.dev_set_link_down    = NULL,
112472f3de30SBruce Richardson 	.dev_close            = enicpmd_dev_close,
112572f3de30SBruce Richardson 	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
112672f3de30SBruce Richardson 	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
112772f3de30SBruce Richardson 	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
112872f3de30SBruce Richardson 	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
112972f3de30SBruce Richardson 	.link_update          = enicpmd_dev_link_update,
113072f3de30SBruce Richardson 	.stats_get            = enicpmd_dev_stats_get,
113172f3de30SBruce Richardson 	.stats_reset          = enicpmd_dev_stats_reset,
113272f3de30SBruce Richardson 	.queue_stats_mapping_set = NULL,
113372f3de30SBruce Richardson 	.dev_infos_get        = enicpmd_dev_info_get,
113478a38edfSJianfeng Tan 	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
1135396a6d71SJohn Daley 	.mtu_set              = enicpmd_mtu_set,
1136f9416bbaSHyong Youb Kim 	.vlan_filter_set      = NULL,
113772f3de30SBruce Richardson 	.vlan_tpid_set        = NULL,
113872f3de30SBruce Richardson 	.vlan_offload_set     = enicpmd_vlan_offload_set,
113972f3de30SBruce Richardson 	.vlan_strip_queue_set = NULL,
114072f3de30SBruce Richardson 	.rx_queue_start       = enicpmd_dev_rx_queue_start,
114172f3de30SBruce Richardson 	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
114272f3de30SBruce Richardson 	.tx_queue_start       = enicpmd_dev_tx_queue_start,
114372f3de30SBruce Richardson 	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
114472f3de30SBruce Richardson 	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
114572f3de30SBruce Richardson 	.rx_queue_release     = enicpmd_dev_rx_queue_release,
114672f3de30SBruce Richardson 	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
114772f3de30SBruce Richardson 	.tx_queue_release     = enicpmd_dev_tx_queue_release,
11480f872d31SHyong Youb Kim 	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
11490f872d31SHyong Youb Kim 	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
115092ca7ea4SHyong Youb Kim 	.rxq_info_get         = enicpmd_dev_rxq_info_get,
115192ca7ea4SHyong Youb Kim 	.txq_info_get         = enicpmd_dev_txq_info_get,
1152f011fa0aSHyong Youb Kim 	.rx_burst_mode_get    = enicpmd_dev_rx_burst_mode_get,
1153f011fa0aSHyong Youb Kim 	.tx_burst_mode_get    = enicpmd_dev_tx_burst_mode_get,
115472f3de30SBruce Richardson 	.dev_led_on           = NULL,
115572f3de30SBruce Richardson 	.dev_led_off          = NULL,
115672f3de30SBruce Richardson 	.flow_ctrl_get        = NULL,
115772f3de30SBruce Richardson 	.flow_ctrl_set        = NULL,
115872f3de30SBruce Richardson 	.priority_flow_ctrl_set = NULL,
115972f3de30SBruce Richardson 	.mac_addr_add         = enicpmd_add_mac_addr,
116072f3de30SBruce Richardson 	.mac_addr_remove      = enicpmd_remove_mac_addr,
1161740f5bf1SDavid Marchand 	.mac_addr_set         = enicpmd_set_mac_addr,
11628d496995SHyong Youb Kim 	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
1163fb7ad441SThomas Monjalon 	.flow_ops_get         = enicpmd_dev_flow_ops_get,
1164c2fec27bSHyong Youb Kim 	.reta_query           = enicpmd_dev_rss_reta_query,
1165c2fec27bSHyong Youb Kim 	.reta_update          = enicpmd_dev_rss_reta_update,
1166c2fec27bSHyong Youb Kim 	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
1167c2fec27bSHyong Youb Kim 	.rss_hash_update      = enicpmd_dev_rss_hash_update,
11688a4efd17SHyong Youb Kim 	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
11698a4efd17SHyong Youb Kim 	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
117029343067SHyong Youb Kim 	.fw_version_get       = enicpmd_dev_fw_version_get,
117172f3de30SBruce Richardson };
117272f3de30SBruce Richardson 
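/*
 * kvargs handler shared by the boolean devargs (cq64, disable-overlay,
 * enable-avx2-rx): the only accepted values are "0" and "1".
 */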
11738a6ff33dSHyong Youb Kim static int enic_parse_zero_one(const char *key,
117493fb21fdSHyong Youb Kim 			       const char *value,
117593fb21fdSHyong Youb Kim 			       void *opaque)
117693fb21fdSHyong Youb Kim {
117793fb21fdSHyong Youb Kim 	struct enic *enic;
11788a6ff33dSHyong Youb Kim 	bool b;
117993fb21fdSHyong Youb Kim 
118093fb21fdSHyong Youb Kim 	enic = (struct enic *)opaque;
118193fb21fdSHyong Youb Kim 	if (strcmp(value, "0") == 0) {
11828a6ff33dSHyong Youb Kim 		b = false;
118393fb21fdSHyong Youb Kim 	} else if (strcmp(value, "1") == 0) {
11848a6ff33dSHyong Youb Kim 		b = true;
118593fb21fdSHyong Youb Kim 	} else {
11868a6ff33dSHyong Youb Kim 		dev_err(enic, "Invalid value for %s"
11878a6ff33dSHyong Youb Kim 			": expected=0|1 given=%s\n", key, value);
118893fb21fdSHyong Youb Kim 		return -EINVAL;
118993fb21fdSHyong Youb Kim 	}
11908b428cb5SHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_CQ64) == 0)
11918b428cb5SHyong Youb Kim 		enic->cq64_request = b;
11928a6ff33dSHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
11938a6ff33dSHyong Youb Kim 		enic->disable_overlay = b;
11948a6ff33dSHyong Youb Kim 	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
11958a6ff33dSHyong Youb Kim 		enic->enable_avx2_rx = b;
119693fb21fdSHyong Youb Kim 	return 0;
119793fb21fdSHyong Youb Kim }
119893fb21fdSHyong Youb Kim 
1199e39c2756SHyong Youb Kim static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
1200e39c2756SHyong Youb Kim 				      const char *value,
1201e39c2756SHyong Youb Kim 				      void *opaque)
1202e39c2756SHyong Youb Kim {
1203e39c2756SHyong Youb Kim 	struct enic *enic;
1204e39c2756SHyong Youb Kim 
1205e39c2756SHyong Youb Kim 	enic = (struct enic *)opaque;
1206e39c2756SHyong Youb Kim 	if (strcmp(value, "trunk") == 0) {
1207e39c2756SHyong Youb Kim 		/* Trunk mode: always tag */
1208e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
1209e39c2756SHyong Youb Kim 	} else if (strcmp(value, "untag") == 0) {
1210e39c2756SHyong Youb Kim 		/* Untag default VLAN mode: untag if VLAN = default VLAN */
1211e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode =
1212e39c2756SHyong Youb Kim 			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
1213e39c2756SHyong Youb Kim 	} else if (strcmp(value, "priority") == 0) {
1214e39c2756SHyong Youb Kim 		/*
1215e39c2756SHyong Youb Kim 		 * Priority-tag default VLAN mode: priority tag (VLAN header
1216e39c2756SHyong Youb Kim 		 * with ID=0) if VLAN = default
1217e39c2756SHyong Youb Kim 		 */
1218e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode =
1219e39c2756SHyong Youb Kim 			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
1220e39c2756SHyong Youb Kim 	} else if (strcmp(value, "pass") == 0) {
1221e39c2756SHyong Youb Kim 		/* Pass through mode: do not touch tags */
1222e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
1223e39c2756SHyong Youb Kim 	} else {
1224e39c2756SHyong Youb Kim 		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
1225e39c2756SHyong Youb Kim 			": expected=trunk|untag|priority|pass given=%s\n",
1226e39c2756SHyong Youb Kim 			value);
1227e39c2756SHyong Youb Kim 		return -EINVAL;
1228e39c2756SHyong Youb Kim 	}
1229e39c2756SHyong Youb Kim 	return 0;
1230e39c2756SHyong Youb Kim }
1231e39c2756SHyong Youb Kim 
123293fb21fdSHyong Youb Kim static int enic_check_devargs(struct rte_eth_dev *dev)
123393fb21fdSHyong Youb Kim {
123493fb21fdSHyong Youb Kim 	static const char *const valid_keys[] = {
12358b428cb5SHyong Youb Kim 		ENIC_DEVARG_CQ64,
1236e39c2756SHyong Youb Kim 		ENIC_DEVARG_DISABLE_OVERLAY,
12378a6ff33dSHyong Youb Kim 		ENIC_DEVARG_ENABLE_AVX2_RX,
1238e39c2756SHyong Youb Kim 		ENIC_DEVARG_IG_VLAN_REWRITE,
123939cf83f1SHyong Youb Kim 		ENIC_DEVARG_REPRESENTOR,
1240e39c2756SHyong Youb Kim 		NULL};
124193fb21fdSHyong Youb Kim 	struct enic *enic = pmd_priv(dev);
124293fb21fdSHyong Youb Kim 	struct rte_kvargs *kvlist;
124393fb21fdSHyong Youb Kim 
124493fb21fdSHyong Youb Kim 	ENICPMD_FUNC_TRACE();
124593fb21fdSHyong Youb Kim 
12468b428cb5SHyong Youb Kim 	enic->cq64_request = true; /* Use 64B entry if available */
124793fb21fdSHyong Youb Kim 	enic->disable_overlay = false;
12488a6ff33dSHyong Youb Kim 	enic->enable_avx2_rx = false;
1249e39c2756SHyong Youb Kim 	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
125093fb21fdSHyong Youb Kim 	if (!dev->device->devargs)
125193fb21fdSHyong Youb Kim 		return 0;
125293fb21fdSHyong Youb Kim 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
125393fb21fdSHyong Youb Kim 	if (!kvlist)
125493fb21fdSHyong Youb Kim 		return -EINVAL;
12558b428cb5SHyong Youb Kim 	if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64,
12568b428cb5SHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
12578b428cb5SHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
12588a6ff33dSHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
12598a6ff33dSHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
12608a6ff33dSHyong Youb Kim 			       enic_parse_zero_one, enic) < 0 ||
1261e39c2756SHyong Youb Kim 	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
1262e39c2756SHyong Youb Kim 			       enic_parse_ig_vlan_rewrite, enic) < 0) {
126393fb21fdSHyong Youb Kim 		rte_kvargs_free(kvlist);
126493fb21fdSHyong Youb Kim 		return -EINVAL;
126593fb21fdSHyong Youb Kim 	}
126693fb21fdSHyong Youb Kim 	rte_kvargs_free(kvlist);
126793fb21fdSHyong Youb Kim 	return 0;
126893fb21fdSHyong Youb Kim }
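
/*
 * Illustrative devargs usage (sketch): keys are passed per device on the
 * EAL command line and validated against valid_keys above, e.g.
 *
 *	dpdk-testpmd -a 0000:02:00.0,disable-overlay=1,ig-vlan-rewrite=untag
 */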
126993fb21fdSHyong Youb Kim 
127039cf83f1SHyong Youb Kim /* Initialize the driver for PF */
127139cf83f1SHyong Youb Kim static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
127239cf83f1SHyong Youb Kim 			     void *init_params __rte_unused)
127372f3de30SBruce Richardson {
127472f3de30SBruce Richardson 	struct rte_pci_device *pdev;
127572f3de30SBruce Richardson 	struct rte_pci_addr *addr;
127672f3de30SBruce Richardson 	struct enic *enic = pmd_priv(eth_dev);
127793fb21fdSHyong Youb Kim 	int err;
127872f3de30SBruce Richardson 
127972f3de30SBruce Richardson 	ENICPMD_FUNC_TRACE();
128072f3de30SBruce Richardson 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
1281cbfc6111SFerruh Yigit 	eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
1282947d860cSJohn Daley 	eth_dev->rx_pkt_burst = &enic_recv_pkts;
1283d309bdc2SJohn Daley 	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
12841e81dbb5SHyong Youb Kim 	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
1285e92a4b41SHyong Youb Kim 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1286e92a4b41SHyong Youb Kim 		enic_pick_tx_handler(eth_dev);
1287e92a4b41SHyong Youb Kim 		enic_pick_rx_handler(eth_dev);
1288e92a4b41SHyong Youb Kim 		return 0;
1289e92a4b41SHyong Youb Kim 	}
1290e92a4b41SHyong Youb Kim 	/* Only the primary sets up adapter and other data in shared memory */
1291e92a4b41SHyong Youb Kim 	enic->port_id = eth_dev->data->port_id;
1292e92a4b41SHyong Youb Kim 	enic->rte_dev = eth_dev;
1293c655c547SHyong Youb Kim 	enic->dev_data = eth_dev->data;
129472f3de30SBruce Richardson 
1295c0802544SFerruh Yigit 	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
1296eeefe73fSBernard Iremonger 	rte_eth_copy_pci_info(eth_dev, pdev);
129772f3de30SBruce Richardson 	enic->pdev = pdev;
129872f3de30SBruce Richardson 	addr = &pdev->addr;
129972f3de30SBruce Richardson 
13002fc03b23SThomas Monjalon 	snprintf(enic->bdf_name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
130172f3de30SBruce Richardson 		addr->domain, addr->bus, addr->devid, addr->function);
130272f3de30SBruce Richardson 
130393fb21fdSHyong Youb Kim 	err = enic_check_devargs(eth_dev);
130493fb21fdSHyong Youb Kim 	if (err)
130593fb21fdSHyong Youb Kim 		return err;
130639cf83f1SHyong Youb Kim 	err = enic_probe(enic);
130739cf83f1SHyong Youb Kim 	if (!err && enic->fm) {
130839cf83f1SHyong Youb Kim 		err = enic_fm_allocate_switch_domain(enic);
130939cf83f1SHyong Youb Kim 		if (err)
131039cf83f1SHyong Youb Kim 			ENICPMD_LOG(ERR, "failed to allocate switch domain id");
131139cf83f1SHyong Youb Kim 	}
131239cf83f1SHyong Youb Kim 	return err;
131339cf83f1SHyong Youb Kim }
131439cf83f1SHyong Youb Kim 
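/*
 * Inverse of eth_enic_dev_init; only the primary process releases the
 * switch domain allocated for representors.
 */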
131539cf83f1SHyong Youb Kim static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
131639cf83f1SHyong Youb Kim {
131739cf83f1SHyong Youb Kim 	struct enic *enic = pmd_priv(eth_dev);
131839cf83f1SHyong Youb Kim 	int err;
131939cf83f1SHyong Youb Kim 
132039cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
132139cf83f1SHyong Youb Kim 	eth_dev->device = NULL;
132239cf83f1SHyong Youb Kim 	eth_dev->intr_handle = NULL;
132339cf83f1SHyong Youb Kim 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
132439cf83f1SHyong Youb Kim 		return 0;
132539cf83f1SHyong Youb Kim 	err = rte_eth_switch_domain_free(enic->switch_domain_id);
132639cf83f1SHyong Youb Kim 	if (err)
132739cf83f1SHyong Youb Kim 		ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
132839cf83f1SHyong Youb Kim 	return 0;
132972f3de30SBruce Richardson }
133072f3de30SBruce Richardson 
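/*
 * Create the PF ethdev first, then one representor ethdev per VF listed
 * in the "representor" devarg (for example, representor=[0-2]).
 */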
1331fdf91e0fSJan Blunck static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1332fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
1333fdf91e0fSJan Blunck {
133439cf83f1SHyong Youb Kim 	char name[RTE_ETH_NAME_MAX_LEN];
133539cf83f1SHyong Youb Kim 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
133639cf83f1SHyong Youb Kim 	struct rte_eth_dev *pf_ethdev;
133739cf83f1SHyong Youb Kim 	struct enic *pf_enic;
133839cf83f1SHyong Youb Kim 	int i, retval;
133939cf83f1SHyong Youb Kim 
134039cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
134139cf83f1SHyong Youb Kim 	if (pci_dev->device.devargs) {
134239cf83f1SHyong Youb Kim 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
13439a9eb104SHarman Kalra 				&eth_da, 1);
13449a9eb104SHarman Kalra 		if (retval < 0)
134539cf83f1SHyong Youb Kim 			return retval;
134639cf83f1SHyong Youb Kim 	}
1347d6541676SXueming Li 	if (eth_da.nb_representor_ports > 0 &&
1348d6541676SXueming Li 	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
1349f665790aSDavid Marchand 		ENICPMD_LOG(ERR, "unsupported representor type: %s",
1350d6541676SXueming Li 			    pci_dev->device.devargs->args);
1351d6541676SXueming Li 		return -ENOTSUP;
1352d6541676SXueming Li 	}
135339cf83f1SHyong Youb Kim 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
135439cf83f1SHyong Youb Kim 		sizeof(struct enic),
135539cf83f1SHyong Youb Kim 		eth_dev_pci_specific_init, pci_dev,
135639cf83f1SHyong Youb Kim 		eth_enic_dev_init, NULL);
135739cf83f1SHyong Youb Kim 	if (retval || eth_da.nb_representor_ports < 1)
135839cf83f1SHyong Youb Kim 		return retval;
135939cf83f1SHyong Youb Kim 
136039cf83f1SHyong Youb Kim 	/* Probe VF representor */
136139cf83f1SHyong Youb Kim 	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
136239cf83f1SHyong Youb Kim 	if (pf_ethdev == NULL)
136339cf83f1SHyong Youb Kim 		return -ENODEV;
136439cf83f1SHyong Youb Kim 	/* Representors require flowman */
136539cf83f1SHyong Youb Kim 	pf_enic = pmd_priv(pf_ethdev);
136639cf83f1SHyong Youb Kim 	if (pf_enic->fm == NULL) {
136739cf83f1SHyong Youb Kim 		ENICPMD_LOG(ERR, "VF representors require flowman");
136839cf83f1SHyong Youb Kim 		return -ENOTSUP;
136939cf83f1SHyong Youb Kim 	}
137039cf83f1SHyong Youb Kim 	/*
137139cf83f1SHyong Youb Kim 	 * For now representors imply switchdev, as firmware does not support
137239cf83f1SHyong Youb Kim 	 * legacy mode SR-IOV
137339cf83f1SHyong Youb Kim 	 */
137439cf83f1SHyong Youb Kim 	pf_enic->switchdev_mode = 1;
137539cf83f1SHyong Youb Kim 	/* Calculate max VF ID before initializing representors */
137639cf83f1SHyong Youb Kim 	pf_enic->max_vf_id = 0;
137739cf83f1SHyong Youb Kim 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
137839cf83f1SHyong Youb Kim 		pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
137939cf83f1SHyong Youb Kim 					     eth_da.representor_ports[i]);
138039cf83f1SHyong Youb Kim 	}
138139cf83f1SHyong Youb Kim 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
138239cf83f1SHyong Youb Kim 		struct enic_vf_representor representor;
138339cf83f1SHyong Youb Kim 
138439cf83f1SHyong Youb Kim 		representor.vf_id = eth_da.representor_ports[i];
138539cf83f1SHyong Youb Kim 				representor.switch_domain_id =
138639cf83f1SHyong Youb Kim 		representor.switch_domain_id =
138739cf83f1SHyong Youb Kim 		representor.pf = pmd_priv(pf_ethdev);
138839cf83f1SHyong Youb Kim 		snprintf(name, sizeof(name), "net_%s_representor_%d",
138939cf83f1SHyong Youb Kim 			pci_dev->device.name, eth_da.representor_ports[i]);
139039cf83f1SHyong Youb Kim 		retval = rte_eth_dev_create(&pci_dev->device, name,
139139cf83f1SHyong Youb Kim 			sizeof(struct enic_vf_representor), NULL, NULL,
139239cf83f1SHyong Youb Kim 			enic_vf_representor_init, &representor);
139339cf83f1SHyong Youb Kim 		if (retval) {
139439cf83f1SHyong Youb Kim 			ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
139539cf83f1SHyong Youb Kim 				    name);
139639cf83f1SHyong Youb Kim 			return retval;
139739cf83f1SHyong Youb Kim 		}
139839cf83f1SHyong Youb Kim 	}
139939cf83f1SHyong Youb Kim 	return 0;
1400fdf91e0fSJan Blunck }
1401fdf91e0fSJan Blunck 
1402fdf91e0fSJan Blunck static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
1403fdf91e0fSJan Blunck {
140439cf83f1SHyong Youb Kim 	struct rte_eth_dev *ethdev;
140539cf83f1SHyong Youb Kim 
140639cf83f1SHyong Youb Kim 	ENICPMD_FUNC_TRACE();
140739cf83f1SHyong Youb Kim 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
140839cf83f1SHyong Youb Kim 	if (!ethdev)
140939cf83f1SHyong Youb Kim 		return -ENODEV;
1410c99e1db8SLong Wu 	if (rte_eth_dev_is_repr(ethdev))
141139cf83f1SHyong Youb Kim 		return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
141239cf83f1SHyong Youb Kim 	else
141339cf83f1SHyong Youb Kim 		return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
1414fdf91e0fSJan Blunck }
1415fdf91e0fSJan Blunck 
1416fdf91e0fSJan Blunck static struct rte_pci_driver rte_enic_pmd = {
141772f3de30SBruce Richardson 	.id_table = pci_id_enic_map,
1418b76fafb1SDavid Marchand 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1419fdf91e0fSJan Blunck 	.probe = eth_enic_pci_probe,
1420fdf91e0fSJan Blunck 	.remove = eth_enic_pci_remove,
142172f3de30SBruce Richardson };
142272f3de30SBruce Richardson 
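/* Let other parts of the driver test whether an ethdev belongs to this PMD */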
1423ea7768b5SHyong Youb Kim int dev_is_enic(struct rte_eth_dev *dev)
1424ea7768b5SHyong Youb Kim {
1425ea7768b5SHyong Youb Kim 	return dev->device->driver == &rte_enic_pmd.driver;
1426ea7768b5SHyong Youb Kim }
1427ea7768b5SHyong Youb Kim 
1428fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
142901f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
143006e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
143193fb21fdSHyong Youb Kim RTE_PMD_REGISTER_PARAM_STRING(net_enic,
14328b428cb5SHyong Youb Kim 	ENIC_DEVARG_CQ64 "=0|1 "
1433e39c2756SHyong Youb Kim 	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
14348a6ff33dSHyong Youb Kim 	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
1435e39c2756SHyong Youb Kim 	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
1436