xref: /dpdk/drivers/net/enic/enic_ethdev.c (revision 945acb4a0d644d194f1823084a234f9c286dcf8c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
		"%s" fmt "\n", __func__, ##args)

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};

/* RTE_INIT marks this function as a constructor, so the log types are
 * registered at load time, before any other PMD code runs.
 */
RTE_INIT(enicpmd_init_log);
static void
enicpmd_init_log(void)
{
	enicpmd_logtype_init = rte_log_register("pmd.enic.init");
	if (enicpmd_logtype_init >= 0)
		rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
	enicpmd_logtype_flow = rte_log_register("pmd.enic.flow");
	if (enicpmd_logtype_flow >= 0)
		rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}
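
/*
 * Both log types default to NOTICE. They can be raised without rebuilding,
 * either programmatically with rte_log_set_level(enicpmd_logtype_flow,
 * RTE_LOG_DEBUG), or from the EAL command line. The exact --log-level
 * syntax for named log types varies across DPDK releases; an illustrative
 * invocation (application name hypothetical):
 *
 *	./app -l 0-1 --log-level=pmd.enic.init,8 -- ...
 */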

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		/* No local 'enic' is declared in this function; this
		 * compiles only because dev_warning() (enic_compat.h)
		 * discards its first argument before expanding to RTE_LOG().
		 */
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
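
/*
 * A note on the RTE_ETH_FILTER_GENERIC branch above: this is how the
 * rte_flow layer obtains the driver's flow ops. A minimal sketch of the
 * caller side, via the public wrapper and with error handling elided
 * (illustrative, simplified from the ethdev library):
 *
 *	const struct rte_flow_ops *ops;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 *	// ops now points at enic_flow_ops for enic ports
 */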

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

/*
 * Allocate and program interrupt resources, but only once every CQ, WQ,
 * and SOP RQ has been set up. Queue setup runs once per queue, so this
 * returns early (successfully) until the last queue has been configured.
 */
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* Check the start of packet (SOP) RQs only; the companion data RQs
	 * are not set up unless Rx scatter is enabled.
	 */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	if (queue_idx >= ENIC_WQ_MAX) {
		dev_err(enic,
			"Max number of TX queues exceeded.  Max is %d\n",
			ENIC_WQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
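
/*
 * For reference, an application reaches the setup callback above through
 * the generic ethdev API; a minimal sketch (port_id and nb_txd are the
 * caller's values; NULL selects the default tx_conf, which this PMD
 * ignores anyway):
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *				     rte_eth_dev_socket_id(port_id), NULL);
 */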

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	/* The hardware tail may have wrapped around the ring while the
	 * software clean index has not; unwrap before subtracting.
	 */
	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
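
/*
 * The count above is the number of completions the hardware has posted
 * but the PMD has not yet cleaned. Applications reach it through the
 * ethdev wrapper, e.g. (illustrative):
 *
 *	int pending = rte_eth_rx_queue_count(port_id, 0);
 */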

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	/* With Rx scatter support, two RQs are now used on VIC per RQ used
	 * by the application.
	 */
	if (queue_idx * 2 >= ENIC_RQ_MAX) {
		dev_err(enic,
			"Max number of RX queues exceeded.  Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
			ENIC_RQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
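
/*
 * The matching application-side call; unlike Tx, Rx setup requires a
 * mempool to supply receive buffers. A minimal sketch (all names and
 * sizes are the caller's own; NULL rx_conf selects the defaults this
 * driver reports in dev_info):
 *
 *	struct rte_mempool *mb_pool = rte_pktmbuf_pool_create("rx_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				     rte_eth_dev_socket_id(port_id),
 *				     NULL, mb_pool);
 */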

static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
	uint16_t vlan_id, int on)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	if (on)
		err = enic_add_vlan(enic, vlan_id);
	else
		err = enic_del_vlan(enic, vlan_id);
	return err;
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return 0;
}
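
/*
 * At runtime an application toggles stripping through the ethdev API,
 * which updates rxmode.offloads before invoking the callback above.
 * A sketch (illustrative):
 *
 *	// enable VLAN stripping
 *	rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
 *	// disable it again by passing a mask without the strip bit
 *	rte_eth_dev_set_vlan_offload(port_id, 0);
 */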

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);

	return ret;
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	/* Publish an all-zero (down) link status as a single atomic
	 * 64-bit store, so concurrent readers never see a torn value.
	 */
	memset(&link, 0, sizeof(link));
	rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&link);
}

/*
 * Close device: release the resources allocated at probe time.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Scattered Rx uses two receive queues per Rx queue exposed to DPDK */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max frame: MTU plus Ethernet header plus a 4-byte VLAN tag */
	device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_TSO;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
}
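
/*
 * Note the halved max_rx_queues above: each DPDK Rx queue consumes an SOP
 * RQ and a data RQ on the VIC, so an application sizing its queues from
 * dev_info sees only half the hardware RQ count. Illustrative caller side:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	// nb_rx_queues passed to rte_eth_dev_configure() must not
 *	// exceed dev_info.max_rx_queues
 */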

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic, index);
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}
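
/*
 * MTU changes funnel through rte_eth_dev_set_mtu(), which invokes the op
 * above and caches the new value on success. A sketch (illustrative;
 * enic_set_mtu rejects values outside the adapter's supported range):
 *
 *	if (rte_eth_dev_set_mtu(port_id, 1500) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set MTU\n");
 */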

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = enicpmd_vlan_filter_set,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
};
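
/*
 * Entries left NULL above are simply unimplemented: the ethdev layer
 * checks each function pointer before dispatching and returns -ENOTSUP
 * to the application when an op is absent.
 */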

struct enic *enicpmd_list_head = NULL;

/* Initialize an ethdev instance for one VIC.
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
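
/*
 * The KMOD_DEP string above lists the kernel modules the VIC must be bound
 * to before EAL can claim it. A typical (illustrative) deployment step,
 * with the BDF taken from lspci output:
 *
 *	modprobe vfio-pci
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:0b:00.0
 */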