/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <dev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_geneve.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
	{.vendor_id = 0, /* sentinel */},
};

/* Supported link speeds of production VIC models */
static const struct vic_speed_capa {
	uint16_t sub_devid;
	uint32_t capa;
} vic_speed_capa_map[] = {
	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
	{ 0, 0 }, /* End marker */
};

#define ENIC_DEVARG_CQ64 "cq64"
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
#define ENIC_DEVARG_REPRESENTOR "representor"

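/*
 * A minimal sketch (hypothetical usage, not part of this driver) of how
 * these devargs are passed on the EAL command line. The PCI address is an
 * example; substitute the VIC's actual BDF:
 *
 *   dpdk-testpmd -a 0000:af:00.0,disable-overlay=1,ig-vlan-rewrite=untag -- -i
 */
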
RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);

static int
enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
			 const struct rte_flow_ops **ops)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	if (enic->flow_filter_mode == FILTER_FLOWMAN)
		*ops = &enic_fm_flow_ops;
	else
		*ops = &enic_flow_ops;
	return 0;
}
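
/*
 * Applications do not call the returned ops table directly; they use the
 * rte_flow API, which fetches it through this callback. A minimal sketch
 * (hypothetical application code, error handling omitted) that installs a
 * drop rule on all ingress Ethernet traffic:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */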

static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
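
/*
 * The application-side call that lands here is rte_eth_tx_queue_setup(),
 * invoked after rte_eth_dev_configure(). A minimal sketch (hypothetical
 * application code; port_id and the ring size are assumptions):
 *
 *   struct rte_eth_dev_info info;
 *   struct rte_eth_txconf txconf;
 *
 *   rte_eth_dev_info_get(port_id, &info);
 *   txconf = info.default_txconf;
 *   ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 */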

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
{
	struct enic *enic;
	struct vnic_rq *sop_rq;
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;

	sop_rq = rx_queue;
	enic = vnic_dev_priv(sop_rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
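
/*
 * Worked example of the wrap-around arithmetic above (illustrative values):
 * with desc_count = 256, to_clean (cq_idx) = 250 and cq_tail = 5, the tail
 * has wrapped past the end of the ring, so cq_tail becomes 5 + 256 = 261
 * and the count of completed but not yet processed descriptors is
 * 261 - 250 = 11.
 */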

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
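
/*
 * The application-side mirror of this callback is rte_eth_rx_queue_setup(),
 * which supplies the mempool used to refill the ring. A minimal sketch
 * (hypothetical application code; the pool name and sizes are assumptions):
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 */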

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	return enic_set_vlan_strip(enic);
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_RSS_HASH;

	enic->mc_count = 0;
	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
	/* Apply all VLAN offload masks so the current settings take effect */
	mask = RTE_ETH_VLAN_STRIP_MASK |
		RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If the user provides
	 * a key (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}
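
/*
 * A minimal sketch of the application-side configure step that exercises
 * the RSS path above (hypothetical application code). A NULL rss_key keeps
 * the driver default key, matching the comment in enicpmd_dev_configure():
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *           .rx_adv_conf.rss_conf = {
 *                   .rss_key = NULL,
 *                   .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *           },
 *   };
 *   ret = rte_eth_dev_configure(port_id, 2, 2, &conf);
 */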

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);
	uint16_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/*
 * Close device: release all device resources.
 */
static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic_remove(enic);

	return 0;
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	ENICPMD_FUNC_TRACE();
	return enic_link_update(eth_dev);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_clear(enic);
}

static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
{
	const struct vic_speed_capa *m;
	struct rte_pci_device *pdev;
	uint16_t id;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	id = pdev->id.subsystem_device_id;
	for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
		if (m->sub_devid == id)
			return m->capa;
	}
	/* 1300 and later models are at least 40G */
	if (id >= 0x0100)
		return RTE_ETH_LINK_SPEED_40G;
	/* VFs have subsystem id 0, check device id */
	if (id == 0) {
		/* Newer VF implies at least 40G model */
		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
			return RTE_ETH_LINK_SPEED_40G;
	}
	return RTE_ETH_LINK_SPEED_10G;
}

static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per Rx queue exposed to DPDK */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.mtu, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->min_mtu = ENIC_MIN_MTU;
	device_info->max_mtu = enic->max_mtu;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
	device_info->speed_capa = speed_capa_from_pci_id(eth_dev);

	return 0;
}
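
/*
 * A minimal sketch of reading these limits from an application
 * (hypothetical application code):
 *
 *   struct rte_eth_dev_info info;
 *
 *   ret = rte_eth_dev_info_get(port_id, &info);
 *   if (ret == 0)
 *           printf("max rxq=%u max txq=%u\n",
 *                  info.max_rx_queues, info.max_tx_queues);
 */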

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev,
							size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
	};

	if (dev->rx_pkt_burst != rte_eth_pkt_burst_dummy &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload) {
			*no_of_elements = RTE_DIM(ptypes_overlay);
			return ptypes_overlay;
		} else {
			*no_of_elements = RTE_DIM(ptypes);
			return ptypes;
		}
	}
	return NULL;
}

static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 0;

	return ret;
}

static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 1;

	return ret;
}

static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 0;

	return ret;
}

static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 1;

	return ret;
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
	ENICPMD_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr) ||
		    rte_is_broadcast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str,
					RTE_ETHER_ADDR_FMT_SIZE, addr);
			ENICPMD_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		ENICPMD_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct rte_ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}
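
/*
 * A minimal sketch of the matching application call (hypothetical
 * application code; the addresses are placeholders):
 *
 *   struct rte_ether_addr mc[2] = {
 *           { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *           { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } },
 *   };
 *   ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */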

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}
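
/*
 * A minimal sketch (hypothetical application code) that repoints RETA
 * entry 5 at Rx queue 1 and leaves all other entries unchanged. Per the
 * size check above, enic requires reta_size == ENIC_RSS_RETA_SIZE, so one
 * rte_eth_rss_reta_entry64 group per RTE_ETH_RETA_GROUP_SIZE entries:
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[ENIC_RSS_RETA_SIZE /
 *                                             RTE_ETH_RETA_GROUP_SIZE];
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   reta_conf[0].mask = 1ULL << 5;
 *   reta_conf[0].reta[5] = 1;
 *   ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *                                     ENIC_RSS_RETA_SIZE);
 */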

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct enic *enic = pmd_priv(dev);
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (enic->use_noscatter_vec_rx_handler)
		info_str = "Vector AVX2 No Scatter";
	else if (pkt_burst == enic_noscatter_recv_pkts)
		info_str = "Scalar No Scatter";
	else if (pkt_burst == enic_recv_pkts)
		info_str = "Scalar";
	else if (pkt_burst == enic_recv_pkts_64)
		info_str = "Scalar 64B Completion";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}

static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
					 __rte_unused uint16_t queue_id,
					 struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (pkt_burst == enic_simple_xmit_pkts)
		info_str = "Scalar Simplified";
	else if (pkt_burst == enic_xmit_pkts)
		info_str = "Scalar";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}
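
/*
 * A minimal sketch of querying the selected burst mode (hypothetical
 * application code):
 *
 *   struct rte_eth_burst_mode mode;
 *
 *   if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *           printf("rx burst mode: %s\n", mode.info);
 */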

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
{
	uint8_t cfg;

	cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
		OVERLAY_CFG_GENEVE_PORT_UPDATE;
	if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
		ENICPMD_LOG(DEBUG, " failed to update tunnel port\n");
		return -EINVAL;
	}
	ENICPMD_LOG(DEBUG, " updated %s port to %u\n",
		    vxlan ? "vxlan" : "geneve", port);
	if (vxlan)
		enic->vxlan_port = port;
	else
		enic->geneve_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint16_t port;
	bool vxlan;
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
	if (vxlan)
		port = enic->vxlan_port;
	else
		port = enic->geneve_port;
	/*
	 * The NIC has 1 configurable port number per tunnel type.
	 * "Adding" a new port number replaces it.
	 */
	if (tnl->udp_port == port || tnl->udp_port == 0) {
		ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_tunnel_port(enic, tnl->udp_port, vxlan);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint16_t port;
	bool vxlan;
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
	if (vxlan)
		port = enic->vxlan_port;
	else
		port = enic->geneve_port;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != port) {
		ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
	return update_tunnel_port(enic, port, vxlan);
}
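
/*
 * A minimal sketch of replacing the VXLAN port from an application
 * (hypothetical application code; 4790 is an example port number):
 *
 *   struct rte_eth_udp_tunnel tnl = {
 *           .udp_port = 4790,
 *           .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *   };
 *   ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 */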

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
				      char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();

	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	ret = snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
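
/*
 * A minimal sketch of the caller side, including the retry-on-short-buffer
 * contract implemented above (hypothetical application code):
 *
 *   char fw[64];
 *   int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *   if (ret == 0)
 *           printf("fw: %s\n", fw);
 *   else if (ret > 0)
 *           printf("need a %d-byte buffer\n", ret);
 */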

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = NULL,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get         = enicpmd_dev_rxq_info_get,
	.txq_info_get         = enicpmd_dev_txq_info_get,
	.rx_burst_mode_get    = enicpmd_dev_rx_burst_mode_get,
	.tx_burst_mode_get    = enicpmd_dev_tx_burst_mode_get,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.mac_addr_set         = enicpmd_set_mac_addr,
	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
	.flow_ops_get         = enicpmd_dev_flow_ops_get,
	.reta_query           = enicpmd_dev_rss_reta_query,
	.reta_update          = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update      = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get       = enicpmd_dev_fw_version_get,
};

static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_CQ64) == 0)
		enic->cq64_request = b;
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_CQ64,
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		ENIC_DEVARG_REPRESENTOR,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->cq64_request = true; /* Use 64B entry if available */
	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/* Initialize the driver for PF */
static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
			     void *init_params __rte_unused)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		enic_pick_tx_handler(eth_dev);
		enic_pick_rx_handler(eth_dev);
		return 0;
	}
	/* Only the primary sets up adapter and other data in shared memory */
	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	enic->dev_data = eth_dev->data;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	err = enic_probe(enic);
	if (!err && enic->fm) {
		err = enic_fm_allocate_switch_domain(enic);
		if (err)
			ENICPMD_LOG(ERR, "failed to allocate switch domain id");
	}
	return err;
}

static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	err = rte_eth_switch_domain_free(enic->switch_domain_id);
	if (err)
		ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
	return 0;
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *pf_ethdev;
	struct enic *pf_enic;
	int i, retval;

	ENICPMD_FUNC_TRACE();
	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da, 1);
		if (retval < 0)
			return retval;
	}
	if (eth_da.nb_representor_ports > 0 &&
	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
		ENICPMD_LOG(ERR, "unsupported representor type: %s\n",
			    pci_dev->device.devargs->args);
		return -ENOTSUP;
	}
	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct enic),
		eth_dev_pci_specific_init, pci_dev,
		eth_enic_dev_init, NULL);
	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* Probe VF representor */
	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;
	/* Representors require flowman */
	pf_enic = pmd_priv(pf_ethdev);
	if (pf_enic->fm == NULL) {
		ENICPMD_LOG(ERR, "VF representors require flowman");
		return -ENOTSUP;
	}
	/*
	 * For now representors imply switchdev, as firmware does not support
	 * legacy mode SR-IOV
	 */
	pf_enic->switchdev_mode = 1;
	/* Calculate max VF ID before initializing representor */
	pf_enic->max_vf_id = 0;
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
					     eth_da.representor_ports[i]);
	}
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct enic_vf_representor representor;

		representor.vf_id = eth_da.representor_ports[i];
		representor.switch_domain_id =
			pmd_priv(pf_ethdev)->switch_domain_id;
		representor.pf = pmd_priv(pf_ethdev);
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);
		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct enic_vf_representor), NULL, NULL,
			enic_vf_representor_init, &representor);
		if (retval) {
			ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
				    name);
			return retval;
		}
	}
	return 0;
}
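
/*
 * A minimal sketch of probing the PF together with two VF representors via
 * devargs (hypothetical usage; the PCI address is an example):
 *
 *   dpdk-testpmd -a 0000:af:00.0,representor=[0-1] -- -i
 */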

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ENICPMD_FUNC_TRACE();
	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;
	if (rte_eth_dev_is_repr(ethdev))
		return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
	else
		return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

int dev_is_enic(struct rte_eth_dev *dev)
{
	return dev->device->driver == &rte_enic_pmd.driver;
}

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_CQ64 "=0|1 "
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");