/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

static struct rte_pci_id bnxt_pci_id_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{.device_id = 0},
};

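/* dev_close() hook: release the MAC address table and HWRM resources. */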
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	rte_free(eth_dev->data->mac_addrs);
	bnxt_free_hwrm_resources(bp);
}

/***********************/

/*
 * High level utility functions
 */

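/* Free driver-private memory in roughly the reverse order of bnxt_alloc_mem(). */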
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

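/*
 * Allocate the driver-private structures needed before the port can be
 * started: the default completion ring, VNIC memory/attributes and filter
 * memory.  On any failure, everything allocated so far is released again.
 */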
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

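/*
 * Program the chip through HWRM: allocate stat contexts, rings and ring
 * groups, then configure each VNIC (including its filters and RSS table)
 * and finally the L2 RX mask.  Any failure unwinds all HWRM resources.
 */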
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	int rc;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
				rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic ctx alloc failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
				rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				/*
				 * Wrap back to the first ring group
				 * once an invalid group id is reached.
				 */
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic set RSS failure rc: %x\n",
					rc);
				goto err_out;
			}
		}
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}

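/* Tear down everything bnxt_init_nic()/bnxt_init_chip() set up in hardware. */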
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status functions
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp)) {
		dev_info->max_rx_queues = bp->pf.max_rx_rings;
		dev_info->max_tx_queues = bp->pf.max_tx_rings;
		dev_info->max_vfs = bp->pf.active_vfs;
		dev_info->reta_size = bp->pf.max_rsscos_ctx;
		max_vnics = bp->pf.max_vnics;
	} else {
		dev_info->max_rx_queues = bp->vf.max_rx_rings;
		dev_info->max_tx_queues = bp->vf.max_tx_rings;
		dev_info->reta_size = bp->vf.max_rsscos_ctx;
		max_vnics = bp->vf.max_vnics;
	}

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/*
	 * VMDq resources: find the largest pool count (64, 32, 16 or 8)
	 * and queue count (128 down to 8) that the available VNICs and
	 * RX queues can support; otherwise report no VMDq capability.
	 */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	/* Derive the MTU from the configured max RX packet length */
	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	rc = bnxt_set_hwrm_link_config(bp, true);
	return rc;
}

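/*
 * dev_start() hook: reset the function in firmware, then allocate and
 * initialize all rings, VNICs and filters.  On error, the partially
 * initialized state is torn down again.
 */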
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error;
	}

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	return rc;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_shutdown_nic(bp);
}

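/*
 * link_update() hook: query the link state through HWRM.  When
 * wait_to_complete is set, poll up to BNXT_LINK_WAIT_CNT times with
 * BNXT_LINK_WAIT_INTERVAL ms between attempts until the link comes up.
 */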
static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
			       int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		if (!wait_to_complete)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

	} while (!new.link_status && cnt--);

	/* Timed out or success */
	if (new.link_status) {
		/* Update only if success */
		eth_dev->data->dev_link.link_duplex = new.link_duplex;
		eth_dev->data->dev_link.link_speed = new.link_speed;
	}
	eth_dev->data->dev_link.link_status = new.link_status;
out:
	return rc;
}

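/*
 * The four RX-mode hooks below all work the same way: set or clear a flag
 * on the default VNIC (vnic 0) and push the new RX mask to the firmware.
 */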
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

/*
 * Initialization
 */

static struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
};

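/* Return true for the PCI device IDs that identify virtual functions. */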
static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF)
		return true;
	return false;
}

static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	int rc;
	struct bnxt *bp = eth_dev->data->dev_private;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!eth_dev->pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = eth_dev->pci_dev;

	bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}
	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}

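/*
 * Per-port init: map the BAR, query firmware version and capabilities,
 * set up the MAC address table and ring group table, and register the
 * driver with the firmware.
 */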
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	static int version_printed;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s", bnxt_version);

	if (eth_dev->pci_dev->addr.function >= 2 &&
			eth_dev->pci_dev->addr.function < 4) {
		RTE_LOG(ERR, PMD, "Function not enabled %x:\n",
			eth_dev->pci_dev->addr.function);
		rc = -ENOMEM;
		goto error;
	}

	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
	bp = eth_dev->data->dev_private;

	if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl\n",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC address obtained from the qcaps response */
	if (BNXT_PF(bp))
		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
	else
		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	rc = bnxt_hwrm_func_driver_register(bp, 0,
					    bp->pf.vf_req_fwd);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver\n");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		eth_dev->pci_dev->mem_resource[0].phys_addr,
		eth_dev->pci_dev->mem_resource[0].addr);

	return 0;

error_free:
	eth_dev->driver->eth_dev_uninit(eth_dev);
error:
	return rc;
}

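/* Per-port teardown: release everything bnxt_dev_init() allocated. */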
static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);
	if (bp->grp_info)
		rte_free(bp->grp_info);
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	return rc;
}

static struct eth_driver bnxt_rte_pmd = {
	.pci_drv = {
		    .name = "rte_" DRV_MODULE_NAME "_pmd",
		    .id_table = bnxt_pci_id_map,
		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		    },
	.eth_dev_init = bnxt_dev_init,
	.eth_dev_uninit = bnxt_dev_uninit,
	.dev_private_size = sizeof(struct bnxt),
};

static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
{
	RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
	rte_eth_driver_register(&bnxt_rte_pmd);
	return 0;
}

static struct rte_driver bnxt_pmd_drv = {
	.name = "eth_bnxt",
	.type = PMD_PDEV,
	.init = bnxt_rte_pmd_init,
};

PMD_REGISTER_DRIVER(bnxt_pmd_drv);