/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

/***********************/

/*
 * High level utility functions
 */

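/*
 * Release driver-private memory in roughly the reverse order of
 * bnxt_alloc_mem(): filters first, then VNIC attributes and VNIC
 * bookkeeping, and finally the stats contexts and ring structures.
 */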
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

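/*
 * Allocate driver-private memory: the default completion ring plus the
 * VNIC and filter tracking structures.  Any failure unwinds through
 * bnxt_free_mem(), so partial allocations do not leak.
 */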
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

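/*
 * Program the chip through HWRM: allocate stats contexts, rings and
 * ring groups, configure each VNIC (including its RSS redirection
 * table), set the RX mask on the default VNIC, and finally bring the
 * link up if it is not already.  On any error all HWRM resources are
 * released again.
 */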
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	int rc;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
				rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic ctx alloc failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
				rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic set RSS failure rc: %x\n",
					rc);
				goto err_out;
			}
		}
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp)) {
		dev_info->max_rx_queues = bp->pf.max_rx_rings;
		dev_info->max_tx_queues = bp->pf.max_tx_rings;
		dev_info->max_vfs = bp->pf.active_vfs;
		dev_info->reta_size = bp->pf.max_rsscos_ctx;
		max_vnics = bp->pf.max_vnics;
	} else {
		dev_info->max_rx_queues = bp->vf.max_rx_rings;
		dev_info->max_tx_queues = bp->vf.max_tx_rings;
		dev_info->reta_size = bp->vf.max_rsscos_ctx;
		max_vnics = bp->vf.max_vnics;
	}

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
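	/*
	 * Pick the largest pool count (64 down to 8) that fits within the
	 * available VNICs, then the largest total VMDq queue count (128
	 * down to 8) that fits within the available RX queues; if nothing
	 * fits, report VMDq as unsupported (0 pools, 0 queues).
	 */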
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return 1;

	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
			(uint8_t)(eth_dev->data->port_id),
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
			(uint8_t)(eth_dev->data->port_id));
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}

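/*
 * Start the port: reset the function via HWRM, set up and request
 * interrupts, allocate driver memory, program the chip, and read back
 * the initial link state.  On failure everything is torn down again.
 */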
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc;

	bp->dev_stopped = 0;
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error;
	}

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_enable_int(bp);

	bnxt_link_update_op(eth_dev, 0);
	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 1;
	bnxt_set_hwrm_link_config(bp, true);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}

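/*
 * Add a unicast MAC filter to the first VNIC of the given filter-flow
 * pool.  Adding MAC addresses on a VF is rejected; duplicate indices
 * are reported and ignored.
 */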
static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				 struct ether_addr *mac_addr,
				 uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already exists for pool %d\n", pool);
			return;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	bnxt_hwrm_set_filter(bp, vnic, filter);
}

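/*
 * Query the link state from firmware, optionally polling (up to
 * BNXT_LINK_WAIT_CNT times, BNXT_LINK_WAIT_INTERVAL ms apart) until the
 * link comes up, and publish any change atomically to dev_link.
 */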
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

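/*
 * Replace the RSS redirection table.  The caller must supply exactly
 * HW_HASH_INDEX_SIZE entries; the table is pushed to every RSS-capable
 * VNIC in the filter-flow pools.
 */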
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

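/*
 * Translate the rte_eth RSS hash flags into HWRM hash types, optionally
 * install a new hash key, and reconfigure RSS on every VNIC.
 */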
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If the RSS enablement requested here disagrees with the mode
	 * chosen at dev_configure time, return -EINVAL.
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			return -EINVAL;
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}
	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled\n",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

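/*
 * Report the current flow-control state.  The link is re-queried first
 * (bnxt_get_hwrm_link_config() is presumed to refresh bp->link_info as
 * a side effect, since the pause fields below are read from there).
 */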
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/*
 * Initialization
 */

static struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
};
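
/*
 * These callbacks are reached through the generic rte_ethdev API; a
 * typical application drives them roughly as follows (a sketch using
 * the standard DPDK calls, not bnxt-specific code):
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *					// -> bnxt_dev_configure_op()
 *	rte_eth_rx_queue_setup(port_id, q, nb_desc, socket, NULL, mp);
 *					// -> bnxt_rx_queue_setup_op()
 *	rte_eth_dev_start(port_id);	// -> bnxt_dev_start_op()
 */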

static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id == BROADCOM_DEV_ID_5741X_VF)
		return true;
	return false;
}

static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	int rc;
	struct bnxt *bp = eth_dev->data->dev_private;

	/* The PCI bus layer maps BAR 0; verify the mapping exists */
	if (!eth_dev->pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = eth_dev->pci_dev;

	bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}
	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}

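/*
 * Per-device init: map the BAR, allocate the HWRM channel, query
 * firmware version and capabilities, set up the MAC address and ring
 * group tables, and register the driver with the firmware.
 */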
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	static int version_printed;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
	bp = eth_dev->data->dev_private;

	if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl\n",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC address reported in the qcaps response. */
	if (BNXT_PF(bp))
		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
	else
		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	rc = bnxt_hwrm_func_driver_register(bp, 0,
					    bp->pf.vf_req_fwd);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver\n");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
		eth_dev->pci_dev->mem_resource[0].phys_addr,
		eth_dev->pci_dev->mem_resource[0].addr);

	bp->dev_stopped = 0;

	return 0;

error_free:
	eth_dev->driver->eth_dev_uninit(eth_dev);
error:
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}

static struct eth_driver bnxt_rte_pmd = {
	.pci_drv = {
		    .id_table = bnxt_pci_id_map,
		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
			    RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC,
		    .probe = rte_eth_dev_pci_probe,
		    .remove = rte_eth_dev_pci_remove
		    },
	.eth_dev_init = bnxt_dev_init,
	.eth_dev_uninit = bnxt_dev_uninit,
	.dev_private_size = sizeof(struct bnxt),
};

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio");