xref: /dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision f69ed1044230c218c9afd8f1b47b6fe6aa1eeec5)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 #include <stdbool.h>
8 
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 #include <rte_alarm.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_irq.h"
20 #include "bnxt_ring.h"
21 #include "bnxt_rxq.h"
22 #include "bnxt_rxr.h"
23 #include "bnxt_stats.h"
24 #include "bnxt_txq.h"
25 #include "bnxt_txr.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29 
30 #define DRV_MODULE_NAME		"bnxt"
31 static const char bnxt_version[] =
32 	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
33 int bnxt_logtype_driver;
34 
35 /*
36  * The set of PCI devices this driver supports
37  */
38 static const struct rte_pci_id bnxt_pci_id_map[] = {
39 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
40 			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
41 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
42 			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
43 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
44 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
45 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
46 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
47 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
48 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
49 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
50 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
51 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
52 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
53 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
54 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
55 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
56 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
57 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
58 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
59 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
60 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
61 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
62 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
63 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
64 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
65 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
66 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
67 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
68 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
69 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
70 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
71 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
72 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
73 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
74 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
75 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
76 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
77 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
78 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
79 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
80 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
81 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
82 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
83 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
84 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
85 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
86 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
87 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
88 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
89 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
90 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
91 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
92 	{ .vendor_id = 0, /* sentinel */ },
93 };
94 
95 #define BNXT_ETH_RSS_SUPPORT (	\
96 	ETH_RSS_IPV4 |		\
97 	ETH_RSS_NONFRAG_IPV4_TCP |	\
98 	ETH_RSS_NONFRAG_IPV4_UDP |	\
99 	ETH_RSS_IPV6 |		\
100 	ETH_RSS_NONFRAG_IPV6_TCP |	\
101 	ETH_RSS_NONFRAG_IPV6_UDP)
102 
103 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
104 				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
105 				     DEV_TX_OFFLOAD_TCP_CKSUM | \
106 				     DEV_TX_OFFLOAD_UDP_CKSUM | \
107 				     DEV_TX_OFFLOAD_TCP_TSO | \
108 				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
109 				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
110 				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
111 				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
112 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
113 				     DEV_TX_OFFLOAD_QINQ_INSERT | \
114 				     DEV_TX_OFFLOAD_MULTI_SEGS)
115 
116 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
117 				     DEV_RX_OFFLOAD_VLAN_STRIP | \
118 				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
119 				     DEV_RX_OFFLOAD_UDP_CKSUM | \
120 				     DEV_RX_OFFLOAD_TCP_CKSUM | \
121 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
122 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
123 				     DEV_RX_OFFLOAD_KEEP_CRC | \
124 				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
125 				     DEV_RX_OFFLOAD_TCP_LRO | \
126 				     DEV_RX_OFFLOAD_SCATTER | \
127 				     DEV_RX_OFFLOAD_RSS_HASH)
128 
129 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
130 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
131 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
132 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
133 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
134 static void bnxt_cancel_fw_health_check(struct bnxt *bp);
135 static int bnxt_restore_vlan_filters(struct bnxt *bp);
136 
137 int is_bnxt_in_error(struct bnxt *bp)
138 {
139 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
140 		return -EIO;
141 	if (bp->flags & BNXT_FLAG_FW_RESET)
142 		return -EBUSY;
143 
144 	return 0;
145 }
146 
147 /***********************/
148 
149 /*
150  * High level utility functions
151  */
152 
153 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
154 {
155 	if (!BNXT_CHIP_THOR(bp))
156 		return 1;
157 
158 	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
159 				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
160 				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
161 }
162 
163 static uint16_t  bnxt_rss_hash_tbl_size(const struct bnxt *bp)
164 {
165 	if (!BNXT_CHIP_THOR(bp))
166 		return HW_HASH_INDEX_SIZE;
167 
168 	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
169 }
170 
171 static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
172 {
173 	bnxt_free_filter_mem(bp);
174 	bnxt_free_vnic_attributes(bp);
175 	bnxt_free_vnic_mem(bp);
176 
177 	/* tx/rx rings are configured as part of *_queue_setup callbacks.
178 	 * If the number of rings change across fw update,
179 	 * we don't have much choice except to warn the user.
180 	 */
181 	if (!reconfig) {
182 		bnxt_free_stats(bp);
183 		bnxt_free_tx_rings(bp);
184 		bnxt_free_rx_rings(bp);
185 	}
186 	bnxt_free_async_cp_ring(bp);
187 	bnxt_free_rxtx_nq_ring(bp);
188 
189 	rte_free(bp->grp_info);
190 	bp->grp_info = NULL;
191 }
192 
193 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
194 {
195 	int rc;
196 
197 	rc = bnxt_alloc_ring_grps(bp);
198 	if (rc)
199 		goto alloc_mem_err;
200 
201 	rc = bnxt_alloc_async_ring_struct(bp);
202 	if (rc)
203 		goto alloc_mem_err;
204 
205 	rc = bnxt_alloc_vnic_mem(bp);
206 	if (rc)
207 		goto alloc_mem_err;
208 
209 	rc = bnxt_alloc_vnic_attributes(bp);
210 	if (rc)
211 		goto alloc_mem_err;
212 
213 	rc = bnxt_alloc_filter_mem(bp);
214 	if (rc)
215 		goto alloc_mem_err;
216 
217 	rc = bnxt_alloc_async_cp_ring(bp);
218 	if (rc)
219 		goto alloc_mem_err;
220 
221 	rc = bnxt_alloc_rxtx_nq_ring(bp);
222 	if (rc)
223 		goto alloc_mem_err;
224 
225 	return 0;
226 
227 alloc_mem_err:
228 	bnxt_free_mem(bp, reconfig);
229 	return rc;
230 }
231 
232 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
233 {
234 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
235 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
236 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
237 	struct bnxt_rx_queue *rxq;
238 	unsigned int j;
239 	int rc;
240 
241 	rc = bnxt_vnic_grp_alloc(bp, vnic);
242 	if (rc)
243 		goto err_out;
244 
245 	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
246 		    vnic_id, vnic, vnic->fw_grp_ids);
247 
248 	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
249 	if (rc)
250 		goto err_out;
251 
252 	/* Alloc RSS context only if RSS mode is enabled */
253 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
254 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
255 
256 		rc = 0;
257 		for (j = 0; j < nr_ctxs; j++) {
258 			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
259 			if (rc)
260 				break;
261 		}
262 		if (rc) {
263 			PMD_DRV_LOG(ERR,
264 				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
265 				    vnic_id, j, rc);
266 			goto err_out;
267 		}
268 		vnic->num_lb_ctxts = nr_ctxs;
269 	}
270 
271 	/*
272 	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
273 	 * setting is not available at this time, it will not be
274 	 * configured correctly in the CFA.
275 	 */
276 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
277 		vnic->vlan_strip = true;
278 	else
279 		vnic->vlan_strip = false;
280 
281 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
282 	if (rc)
283 		goto err_out;
284 
285 	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
286 	if (rc)
287 		goto err_out;
288 
289 	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
290 		rxq = bp->eth_dev->data->rx_queues[j];
291 
292 		PMD_DRV_LOG(DEBUG,
293 			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
294 			    j, rxq->vnic, rxq->vnic->fw_grp_ids);
295 
296 		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
297 			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
298 		else
299 			vnic->rx_queue_cnt++;
300 	}
301 
302 	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
303 
304 	rc = bnxt_vnic_rss_configure(bp, vnic);
305 	if (rc)
306 		goto err_out;
307 
308 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
309 
310 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
311 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
312 	else
313 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
314 
315 	return 0;
316 err_out:
317 	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
318 		    vnic_id, rc);
319 	return rc;
320 }
321 
322 static int bnxt_init_chip(struct bnxt *bp)
323 {
324 	struct rte_eth_link new;
325 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
326 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
327 	uint32_t intr_vector = 0;
328 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
329 	uint32_t vec = BNXT_MISC_VEC_ID;
330 	unsigned int i, j;
331 	int rc;
332 
333 	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
334 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
335 			DEV_RX_OFFLOAD_JUMBO_FRAME;
336 		bp->flags |= BNXT_FLAG_JUMBO;
337 	} else {
338 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
339 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
340 		bp->flags &= ~BNXT_FLAG_JUMBO;
341 	}
342 
343 	/* THOR does not support ring groups.
344 	 * But we will use the array to save RSS context IDs.
345 	 */
346 	if (BNXT_CHIP_THOR(bp))
347 		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
348 
349 	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
350 	if (rc) {
351 		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
352 		goto err_out;
353 	}
354 
355 	rc = bnxt_alloc_hwrm_rings(bp);
356 	if (rc) {
357 		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
358 		goto err_out;
359 	}
360 
361 	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
362 	if (rc) {
363 		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
364 		goto err_out;
365 	}
366 
367 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
368 		goto skip_cosq_cfg;
369 
370 	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
371 		if (bp->rx_cos_queue[i].id != 0xff) {
372 			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
373 
374 			if (!vnic) {
375 				PMD_DRV_LOG(ERR,
376 					    "Num pools more than FW profile\n");
377 				rc = -EINVAL;
378 				goto err_out;
379 			}
380 			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
381 			bp->rx_cosq_cnt++;
382 		}
383 	}
384 
385 skip_cosq_cfg:
386 	rc = bnxt_mq_rx_configure(bp);
387 	if (rc) {
388 		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
389 		goto err_out;
390 	}
391 
392 	/* VNIC configuration */
393 	for (i = 0; i < bp->nr_vnics; i++) {
394 		rc = bnxt_setup_one_vnic(bp, i);
395 		if (rc)
396 			goto err_out;
397 	}
398 
399 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
400 	if (rc) {
401 		PMD_DRV_LOG(ERR,
402 			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
403 		goto err_out;
404 	}
405 
406 	/* check and configure queue intr-vector mapping */
407 	if ((rte_intr_cap_multiple(intr_handle) ||
408 	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
409 	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
410 		intr_vector = bp->eth_dev->data->nb_rx_queues;
411 		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
412 		if (intr_vector > bp->rx_cp_nr_rings) {
413 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
414 					bp->rx_cp_nr_rings);
415 			return -ENOTSUP;
416 		}
417 		rc = rte_intr_efd_enable(intr_handle, intr_vector);
418 		if (rc)
419 			return rc;
420 	}
421 
422 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
423 		intr_handle->intr_vec =
424 			rte_zmalloc("intr_vec",
425 				    bp->eth_dev->data->nb_rx_queues *
426 				    sizeof(int), 0);
427 		if (intr_handle->intr_vec == NULL) {
428 			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
429 				" intr_vec", bp->eth_dev->data->nb_rx_queues);
430 			rc = -ENOMEM;
431 			goto err_disable;
432 		}
433 		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
434 			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
435 			 intr_handle->intr_vec, intr_handle->nb_efd,
436 			intr_handle->max_intr);
437 		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
438 		     queue_id++) {
439 			intr_handle->intr_vec[queue_id] =
440 							vec + BNXT_RX_VEC_START;
441 			if (vec < base + intr_handle->nb_efd - 1)
442 				vec++;
443 		}
444 	}
445 
446 	/* enable uio/vfio intr/eventfd mapping */
447 	rc = rte_intr_enable(intr_handle);
448 #ifndef RTE_EXEC_ENV_FREEBSD
449 	/* In FreeBSD OS, nic_uio driver does not support interrupts */
450 	if (rc)
451 		goto err_free;
452 #endif
453 
454 	rc = bnxt_get_hwrm_link_config(bp, &new);
455 	if (rc) {
456 		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
457 		goto err_free;
458 	}
459 
460 	if (!bp->link_info.link_up) {
461 		rc = bnxt_set_hwrm_link_config(bp, true);
462 		if (rc) {
463 			PMD_DRV_LOG(ERR,
464 				"HWRM link config failure rc: %x\n", rc);
465 			goto err_free;
466 		}
467 	}
468 	bnxt_print_link_info(bp->eth_dev);
469 
470 	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
471 	if (!bp->mark_table)
472 		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");
473 
474 	return 0;
475 
476 err_free:
477 	rte_free(intr_handle->intr_vec);
478 err_disable:
479 	rte_intr_efd_disable(intr_handle);
480 err_out:
481 	/* Some of the error status returned by FW may not be from errno.h */
482 	if (rc > 0)
483 		rc = -EIO;
484 
485 	return rc;
486 }
487 
488 static int bnxt_shutdown_nic(struct bnxt *bp)
489 {
490 	bnxt_free_all_hwrm_resources(bp);
491 	bnxt_free_all_filters(bp);
492 	bnxt_free_all_vnics(bp);
493 	return 0;
494 }
495 
496 /*
497  * Device configuration and status function
498  */
499 
500 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
501 				struct rte_eth_dev_info *dev_info)
502 {
503 	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
504 	struct bnxt *bp = eth_dev->data->dev_private;
505 	uint16_t max_vnics, i, j, vpool, vrxq;
506 	unsigned int max_rx_rings;
507 	int rc;
508 
509 	rc = is_bnxt_in_error(bp);
510 	if (rc)
511 		return rc;
512 
513 	/* MAC Specifics */
514 	dev_info->max_mac_addrs = bp->max_l2_ctx;
515 	dev_info->max_hash_mac_addrs = 0;
516 
517 	/* PF/VF specifics */
518 	if (BNXT_PF(bp))
519 		dev_info->max_vfs = pdev->max_vfs;
520 
521 	max_rx_rings = BNXT_MAX_RINGS(bp);
522 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
523 	dev_info->max_rx_queues = max_rx_rings;
524 	dev_info->max_tx_queues = max_rx_rings;
525 	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
526 	dev_info->hash_key_size = 40;
527 	max_vnics = bp->max_vnics;
528 
529 	/* MTU specifics */
530 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
531 	dev_info->max_mtu = BNXT_MAX_MTU;
532 
533 	/* Fast path specifics */
534 	dev_info->min_rx_bufsize = 1;
535 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
536 
537 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
538 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
539 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
540 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
541 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
542 
543 	/* *INDENT-OFF* */
544 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
545 		.rx_thresh = {
546 			.pthresh = 8,
547 			.hthresh = 8,
548 			.wthresh = 0,
549 		},
550 		.rx_free_thresh = 32,
551 		/* If no descriptors available, pkts are dropped by default */
552 		.rx_drop_en = 1,
553 	};
554 
555 	dev_info->default_txconf = (struct rte_eth_txconf) {
556 		.tx_thresh = {
557 			.pthresh = 32,
558 			.hthresh = 0,
559 			.wthresh = 0,
560 		},
561 		.tx_free_thresh = 32,
562 		.tx_rs_thresh = 32,
563 	};
564 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
565 
566 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
567 	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
568 	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
569 	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
570 	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
571 
572 	/* *INDENT-ON* */
573 
574 	/*
575 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
576 	 *       need further investigation.
577 	 */
578 
579 	/* VMDq resources */
580 	vpool = 64; /* ETH_64_POOLS */
581 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
582 	for (i = 0; i < 4; vpool >>= 1, i++) {
583 		if (max_vnics > vpool) {
584 			for (j = 0; j < 5; vrxq >>= 1, j++) {
585 				if (dev_info->max_rx_queues > vrxq) {
586 					if (vpool > vrxq)
587 						vpool = vrxq;
588 					goto found;
589 				}
590 			}
591 			/* Not enough resources to support VMDq */
592 			break;
593 		}
594 	}
595 	/* Not enough resources to support VMDq */
596 	vpool = 0;
597 	vrxq = 0;
598 found:
599 	dev_info->max_vmdq_pools = vpool;
600 	dev_info->vmdq_queue_num = vrxq;
601 
602 	dev_info->vmdq_pool_base = 0;
603 	dev_info->vmdq_queue_base = 0;
604 
605 	return 0;
606 }
607 
608 /* Configure the device based on the configuration provided */
609 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
610 {
611 	struct bnxt *bp = eth_dev->data->dev_private;
612 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
613 	int rc;
614 
615 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
616 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
617 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
618 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
619 
620 	rc = is_bnxt_in_error(bp);
621 	if (rc)
622 		return rc;
623 
624 	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
625 		rc = bnxt_hwrm_check_vf_rings(bp);
626 		if (rc) {
627 			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
628 			return -ENOSPC;
629 		}
630 
631 		/* If a resource has already been allocated - in this case
632 		 * it is the async completion ring, free it. Reallocate it after
633 		 * resource reservation. This will ensure the resource counts
634 		 * are calculated correctly.
635 		 */
636 
637 		pthread_mutex_lock(&bp->def_cp_lock);
638 
639 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
640 			bnxt_disable_int(bp);
641 			bnxt_free_cp_ring(bp, bp->async_cp_ring);
642 		}
643 
644 		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
645 		if (rc) {
646 			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
647 			pthread_mutex_unlock(&bp->def_cp_lock);
648 			return -ENOSPC;
649 		}
650 
651 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
652 			rc = bnxt_alloc_async_cp_ring(bp);
653 			if (rc) {
654 				pthread_mutex_unlock(&bp->def_cp_lock);
655 				return rc;
656 			}
657 			bnxt_enable_int(bp);
658 		}
659 
660 		pthread_mutex_unlock(&bp->def_cp_lock);
661 	} else {
662 		/* legacy driver needs to get updated values */
663 		rc = bnxt_hwrm_func_qcaps(bp);
664 		if (rc) {
665 			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
666 			return rc;
667 		}
668 	}
669 
670 	/* Inherit new configurations */
671 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
672 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
673 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
674 		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
675 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
676 	    bp->max_stat_ctx)
677 		goto resource_error;
678 
679 	if (BNXT_HAS_RING_GRPS(bp) &&
680 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
681 		goto resource_error;
682 
683 	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
684 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
685 		goto resource_error;
686 
687 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
688 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
689 
690 	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
691 		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
692 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
693 
694 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
695 		eth_dev->data->mtu =
696 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
697 			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
698 			BNXT_NUM_VLANS;
699 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
700 	}
701 	return 0;
702 
703 resource_error:
704 	PMD_DRV_LOG(ERR,
705 		    "Insufficient resources to support requested config\n");
706 	PMD_DRV_LOG(ERR,
707 		    "Num Queues Requested: Tx %d, Rx %d\n",
708 		    eth_dev->data->nb_tx_queues,
709 		    eth_dev->data->nb_rx_queues);
710 	PMD_DRV_LOG(ERR,
711 		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
712 		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
713 		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
714 	return -ENOSPC;
715 }
716 
717 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
718 {
719 	struct rte_eth_link *link = &eth_dev->data->dev_link;
720 
721 	if (link->link_status)
722 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
723 			eth_dev->data->port_id,
724 			(uint32_t)link->link_speed,
725 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
726 			("full-duplex") : ("half-duplex\n"));
727 	else
728 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
729 			eth_dev->data->port_id);
730 }
731 
732 /*
733  * Determine whether the current configuration requires support for scattered
734  * receive; return 1 if scattered receive is required and 0 if not.
735  */
736 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
737 {
738 	uint16_t buf_size;
739 	int i;
740 
741 	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
742 		return 1;
743 
744 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
745 		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
746 
747 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
748 				      RTE_PKTMBUF_HEADROOM);
749 		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
750 			return 1;
751 	}
752 	return 0;
753 }
754 
755 static eth_rx_burst_t
756 bnxt_receive_function(struct rte_eth_dev *eth_dev)
757 {
758 	struct bnxt *bp = eth_dev->data->dev_private;
759 
760 #ifdef RTE_ARCH_X86
761 #ifndef RTE_LIBRTE_IEEE1588
762 	/*
763 	 * Vector mode receive can be enabled only if scatter rx is not
764 	 * in use and rx offloads are limited to VLAN stripping and
765 	 * CRC stripping.
766 	 */
767 	if (!eth_dev->data->scattered_rx &&
768 	    !(eth_dev->data->dev_conf.rxmode.offloads &
769 	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
770 		DEV_RX_OFFLOAD_KEEP_CRC |
771 		DEV_RX_OFFLOAD_JUMBO_FRAME |
772 		DEV_RX_OFFLOAD_IPV4_CKSUM |
773 		DEV_RX_OFFLOAD_UDP_CKSUM |
774 		DEV_RX_OFFLOAD_TCP_CKSUM |
775 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
776 		DEV_RX_OFFLOAD_RSS_HASH |
777 		DEV_RX_OFFLOAD_VLAN_FILTER))) {
778 		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
779 			    eth_dev->data->port_id);
780 		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
781 		return bnxt_recv_pkts_vec;
782 	}
783 	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
784 		    eth_dev->data->port_id);
785 	PMD_DRV_LOG(INFO,
786 		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
787 		    eth_dev->data->port_id,
788 		    eth_dev->data->scattered_rx,
789 		    eth_dev->data->dev_conf.rxmode.offloads);
790 #endif
791 #endif
792 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
793 	return bnxt_recv_pkts;
794 }
795 
796 static eth_tx_burst_t
797 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
798 {
799 #ifdef RTE_ARCH_X86
800 #ifndef RTE_LIBRTE_IEEE1588
801 	/*
802 	 * Vector mode transmit can be enabled only if not using scatter rx
803 	 * or tx offloads.
804 	 */
805 	if (!eth_dev->data->scattered_rx &&
806 	    !eth_dev->data->dev_conf.txmode.offloads) {
807 		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
808 			    eth_dev->data->port_id);
809 		return bnxt_xmit_pkts_vec;
810 	}
811 	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
812 		    eth_dev->data->port_id);
813 	PMD_DRV_LOG(INFO,
814 		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
815 		    eth_dev->data->port_id,
816 		    eth_dev->data->scattered_rx,
817 		    eth_dev->data->dev_conf.txmode.offloads);
818 #endif
819 #endif
820 	return bnxt_xmit_pkts;
821 }
822 
823 static int bnxt_handle_if_change_status(struct bnxt *bp)
824 {
825 	int rc;
826 
827 	/* Since fw has undergone a reset and lost all contexts,
828 	 * set fatal flag to not issue hwrm during cleanup
829 	 */
830 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
831 	bnxt_uninit_resources(bp, true);
832 
833 	/* clear fatal flag so that re-init happens */
834 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
835 	rc = bnxt_init_resources(bp, true);
836 
837 	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
838 
839 	return rc;
840 }
841 
842 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
843 {
844 	struct bnxt *bp = eth_dev->data->dev_private;
845 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
846 	int vlan_mask = 0;
847 	int rc;
848 
849 	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
850 		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
851 		return -EINVAL;
852 	}
853 
854 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
855 		PMD_DRV_LOG(ERR,
856 			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
857 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
858 	}
859 
860 	rc = bnxt_hwrm_if_change(bp, 1);
861 	if (!rc) {
862 		if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
863 			rc = bnxt_handle_if_change_status(bp);
864 			if (rc)
865 				return rc;
866 		}
867 	}
868 	bnxt_enable_int(bp);
869 
870 	rc = bnxt_init_chip(bp);
871 	if (rc)
872 		goto error;
873 
874 	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
875 	eth_dev->data->dev_started = 1;
876 
877 	bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
878 
879 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
880 		vlan_mask |= ETH_VLAN_FILTER_MASK;
881 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
882 		vlan_mask |= ETH_VLAN_STRIP_MASK;
883 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
884 	if (rc)
885 		goto error;
886 
887 	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
888 	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
889 
890 	pthread_mutex_lock(&bp->def_cp_lock);
891 	bnxt_schedule_fw_health_check(bp);
892 	pthread_mutex_unlock(&bp->def_cp_lock);
893 	return 0;
894 
895 error:
896 	bnxt_hwrm_if_change(bp, 0);
897 	bnxt_shutdown_nic(bp);
898 	bnxt_free_tx_mbufs(bp);
899 	bnxt_free_rx_mbufs(bp);
900 	eth_dev->data->dev_started = 0;
901 	return rc;
902 }
903 
904 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
905 {
906 	struct bnxt *bp = eth_dev->data->dev_private;
907 	int rc = 0;
908 
909 	if (!bp->link_info.link_up)
910 		rc = bnxt_set_hwrm_link_config(bp, true);
911 	if (!rc)
912 		eth_dev->data->dev_link.link_status = 1;
913 
914 	bnxt_print_link_info(eth_dev);
915 	return rc;
916 }
917 
918 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
919 {
920 	struct bnxt *bp = eth_dev->data->dev_private;
921 
922 	eth_dev->data->dev_link.link_status = 0;
923 	bnxt_set_hwrm_link_config(bp, false);
924 	bp->link_info.link_up = 0;
925 
926 	return 0;
927 }
928 
929 /* Unload the driver, release resources */
930 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
931 {
932 	struct bnxt *bp = eth_dev->data->dev_private;
933 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
934 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
935 
936 	eth_dev->data->dev_started = 0;
937 	/* Prevent crashes when queues are still in use */
938 	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
939 	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
940 
941 	bnxt_disable_int(bp);
942 
943 	/* disable uio/vfio intr/eventfd mapping */
944 	rte_intr_disable(intr_handle);
945 
946 	bnxt_cancel_fw_health_check(bp);
947 
948 	bnxt_dev_set_link_down_op(eth_dev);
949 
950 	/* Wait for link to be reset and the async notification to process.
951 	 * During reset recovery, there is no need to wait and
952 	 * VF/NPAR functions do not have privilege to change PHY config.
953 	 */
954 	if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
955 		bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
956 
957 	/* Clean queue intr-vector mapping */
958 	rte_intr_efd_disable(intr_handle);
959 	if (intr_handle->intr_vec != NULL) {
960 		rte_free(intr_handle->intr_vec);
961 		intr_handle->intr_vec = NULL;
962 	}
963 
964 	bnxt_hwrm_port_clr_stats(bp);
965 	bnxt_free_tx_mbufs(bp);
966 	bnxt_free_rx_mbufs(bp);
967 	/* Process any remaining notifications in default completion queue */
968 	bnxt_int_handler(eth_dev);
969 	bnxt_shutdown_nic(bp);
970 	bnxt_hwrm_if_change(bp, 0);
971 
972 	rte_free(bp->mark_table);
973 	bp->mark_table = NULL;
974 
975 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
976 	bp->rx_cosq_cnt = 0;
977 }
978 
979 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
980 {
981 	struct bnxt *bp = eth_dev->data->dev_private;
982 
983 	if (eth_dev->data->dev_started)
984 		bnxt_dev_stop_op(eth_dev);
985 
986 	bnxt_uninit_resources(bp, false);
987 
988 	eth_dev->dev_ops = NULL;
989 	eth_dev->rx_pkt_burst = NULL;
990 	eth_dev->tx_pkt_burst = NULL;
991 
992 	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
993 	bp->tx_mem_zone = NULL;
994 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
995 	bp->rx_mem_zone = NULL;
996 
997 	rte_free(bp->pf.vf_info);
998 	bp->pf.vf_info = NULL;
999 
1000 	rte_free(bp->grp_info);
1001 	bp->grp_info = NULL;
1002 }
1003 
1004 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
1005 				    uint32_t index)
1006 {
1007 	struct bnxt *bp = eth_dev->data->dev_private;
1008 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
1009 	struct bnxt_vnic_info *vnic;
1010 	struct bnxt_filter_info *filter, *temp_filter;
1011 	uint32_t i;
1012 
1013 	if (is_bnxt_in_error(bp))
1014 		return;
1015 
1016 	/*
1017 	 * Loop through all VNICs from the specified filter flow pools to
1018 	 * remove the corresponding MAC addr filter
1019 	 */
1020 	for (i = 0; i < bp->nr_vnics; i++) {
1021 		if (!(pool_mask & (1ULL << i)))
1022 			continue;
1023 
1024 		vnic = &bp->vnic_info[i];
1025 		filter = STAILQ_FIRST(&vnic->filter);
1026 		while (filter) {
1027 			temp_filter = STAILQ_NEXT(filter, next);
1028 			if (filter->mac_index == index) {
1029 				STAILQ_REMOVE(&vnic->filter, filter,
1030 						bnxt_filter_info, next);
1031 				bnxt_hwrm_clear_l2_filter(bp, filter);
1032 				bnxt_free_filter(bp, filter);
1033 			}
1034 			filter = temp_filter;
1035 		}
1036 	}
1037 }
1038 
1039 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1040 			       struct rte_ether_addr *mac_addr, uint32_t index,
1041 			       uint32_t pool)
1042 {
1043 	struct bnxt_filter_info *filter;
1044 	int rc = 0;
1045 
1046 	/* Attach requested MAC address to the new l2_filter */
1047 	STAILQ_FOREACH(filter, &vnic->filter, next) {
1048 		if (filter->mac_index == index) {
1049 			PMD_DRV_LOG(DEBUG,
1050 				    "MAC addr already existed for pool %d\n",
1051 				    pool);
1052 			return 0;
1053 		}
1054 	}
1055 
1056 	filter = bnxt_alloc_filter(bp);
1057 	if (!filter) {
1058 		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
1059 		return -ENODEV;
1060 	}
1061 
1062 	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
1063 	 * if the MAC that's been programmed now is a different one, then,
1064 	 * copy that addr to filter->l2_addr
1065 	 */
1066 	if (mac_addr)
1067 		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1068 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1069 
1070 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1071 	if (!rc) {
1072 		filter->mac_index = index;
1073 		if (filter->mac_index == 0)
1074 			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1075 		else
1076 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1077 	} else {
1078 		bnxt_free_filter(bp, filter);
1079 	}
1080 
1081 	return rc;
1082 }
1083 
1084 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1085 				struct rte_ether_addr *mac_addr,
1086 				uint32_t index, uint32_t pool)
1087 {
1088 	struct bnxt *bp = eth_dev->data->dev_private;
1089 	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
1090 	int rc = 0;
1091 
1092 	rc = is_bnxt_in_error(bp);
1093 	if (rc)
1094 		return rc;
1095 
1096 	if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
1097 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1098 		return -ENOTSUP;
1099 	}
1100 
1101 	if (!vnic) {
1102 		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1103 		return -EINVAL;
1104 	}
1105 
1106 	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1107 
1108 	return rc;
1109 }
1110 
1111 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1112 		     bool exp_link_status)
1113 {
1114 	int rc = 0;
1115 	struct bnxt *bp = eth_dev->data->dev_private;
1116 	struct rte_eth_link new;
1117 	int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1118 		  BNXT_LINK_DOWN_WAIT_CNT;
1119 
1120 	rc = is_bnxt_in_error(bp);
1121 	if (rc)
1122 		return rc;
1123 
1124 	memset(&new, 0, sizeof(new));
1125 	do {
1126 		/* Retrieve link info from hardware */
1127 		rc = bnxt_get_hwrm_link_config(bp, &new);
1128 		if (rc) {
1129 			new.link_speed = ETH_LINK_SPEED_100M;
1130 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
1131 			PMD_DRV_LOG(ERR,
1132 				"Failed to retrieve link rc = 0x%x!\n", rc);
1133 			goto out;
1134 		}
1135 
1136 		if (!wait_to_complete || new.link_status == exp_link_status)
1137 			break;
1138 
1139 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1140 	} while (cnt--);
1141 
1142 out:
1143 	/* Timed out or success */
1144 	if (new.link_status != eth_dev->data->dev_link.link_status ||
1145 	new.link_speed != eth_dev->data->dev_link.link_speed) {
1146 		rte_eth_linkstatus_set(eth_dev, &new);
1147 
1148 		_rte_eth_dev_callback_process(eth_dev,
1149 					      RTE_ETH_EVENT_INTR_LSC,
1150 					      NULL);
1151 
1152 		bnxt_print_link_info(eth_dev);
1153 	}
1154 
1155 	return rc;
1156 }
1157 
1158 static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1159 			       int wait_to_complete)
1160 {
1161 	return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1162 }
1163 
1164 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1165 {
1166 	struct bnxt *bp = eth_dev->data->dev_private;
1167 	struct bnxt_vnic_info *vnic;
1168 	uint32_t old_flags;
1169 	int rc;
1170 
1171 	rc = is_bnxt_in_error(bp);
1172 	if (rc)
1173 		return rc;
1174 
1175 	/* Filter settings will get applied when port is started */
1176 	if (!eth_dev->data->dev_started)
1177 		return 0;
1178 
1179 	if (bp->vnic_info == NULL)
1180 		return 0;
1181 
1182 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1183 
1184 	old_flags = vnic->flags;
1185 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1186 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1187 	if (rc != 0)
1188 		vnic->flags = old_flags;
1189 
1190 	return rc;
1191 }
1192 
1193 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1194 {
1195 	struct bnxt *bp = eth_dev->data->dev_private;
1196 	struct bnxt_vnic_info *vnic;
1197 	uint32_t old_flags;
1198 	int rc;
1199 
1200 	rc = is_bnxt_in_error(bp);
1201 	if (rc)
1202 		return rc;
1203 
1204 	/* Filter settings will get applied when port is started */
1205 	if (!eth_dev->data->dev_started)
1206 		return 0;
1207 
1208 	if (bp->vnic_info == NULL)
1209 		return 0;
1210 
1211 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1212 
1213 	old_flags = vnic->flags;
1214 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1215 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1216 	if (rc != 0)
1217 		vnic->flags = old_flags;
1218 
1219 	return rc;
1220 }
1221 
1222 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1223 {
1224 	struct bnxt *bp = eth_dev->data->dev_private;
1225 	struct bnxt_vnic_info *vnic;
1226 	uint32_t old_flags;
1227 	int rc;
1228 
1229 	rc = is_bnxt_in_error(bp);
1230 	if (rc)
1231 		return rc;
1232 
1233 	/* Filter settings will get applied when port is started */
1234 	if (!eth_dev->data->dev_started)
1235 		return 0;
1236 
1237 	if (bp->vnic_info == NULL)
1238 		return 0;
1239 
1240 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1241 
1242 	old_flags = vnic->flags;
1243 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1244 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1245 	if (rc != 0)
1246 		vnic->flags = old_flags;
1247 
1248 	return rc;
1249 }
1250 
1251 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1252 {
1253 	struct bnxt *bp = eth_dev->data->dev_private;
1254 	struct bnxt_vnic_info *vnic;
1255 	uint32_t old_flags;
1256 	int rc;
1257 
1258 	rc = is_bnxt_in_error(bp);
1259 	if (rc)
1260 		return rc;
1261 
1262 	/* Filter settings will get applied when port is started */
1263 	if (!eth_dev->data->dev_started)
1264 		return 0;
1265 
1266 	if (bp->vnic_info == NULL)
1267 		return 0;
1268 
1269 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1270 
1271 	old_flags = vnic->flags;
1272 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1273 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1274 	if (rc != 0)
1275 		vnic->flags = old_flags;
1276 
1277 	return rc;
1278 }
1279 
1280 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
1281 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1282 {
1283 	if (qid >= bp->rx_nr_rings)
1284 		return NULL;
1285 
1286 	return bp->eth_dev->data->rx_queues[qid];
1287 }
1288 
1289 /* Return rxq corresponding to a given rss table ring/group ID. */
1290 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1291 {
1292 	struct bnxt_rx_queue *rxq;
1293 	unsigned int i;
1294 
1295 	if (!BNXT_HAS_RING_GRPS(bp)) {
1296 		for (i = 0; i < bp->rx_nr_rings; i++) {
1297 			rxq = bp->eth_dev->data->rx_queues[i];
1298 			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1299 				return rxq->index;
1300 		}
1301 	} else {
1302 		for (i = 0; i < bp->rx_nr_rings; i++) {
1303 			if (bp->grp_info[i].fw_grp_id == fwr)
1304 				return i;
1305 		}
1306 	}
1307 
1308 	return INVALID_HW_RING_ID;
1309 }
1310 
1311 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1312 			    struct rte_eth_rss_reta_entry64 *reta_conf,
1313 			    uint16_t reta_size)
1314 {
1315 	struct bnxt *bp = eth_dev->data->dev_private;
1316 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1317 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1318 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1319 	uint16_t idx, sft;
1320 	int i, rc;
1321 
1322 	rc = is_bnxt_in_error(bp);
1323 	if (rc)
1324 		return rc;
1325 
1326 	if (!vnic->rss_table)
1327 		return -EINVAL;
1328 
1329 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1330 		return -EINVAL;
1331 
1332 	if (reta_size != tbl_size) {
1333 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1334 			"(%d) must equal the size supported by the hardware "
1335 			"(%d)\n", reta_size, tbl_size);
1336 		return -EINVAL;
1337 	}
1338 
1339 	for (i = 0; i < reta_size; i++) {
1340 		struct bnxt_rx_queue *rxq;
1341 
1342 		idx = i / RTE_RETA_GROUP_SIZE;
1343 		sft = i % RTE_RETA_GROUP_SIZE;
1344 
1345 		if (!(reta_conf[idx].mask & (1ULL << sft)))
1346 			continue;
1347 
1348 		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1349 		if (!rxq) {
1350 			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1351 			return -EINVAL;
1352 		}
1353 
1354 		if (BNXT_CHIP_THOR(bp)) {
1355 			vnic->rss_table[i * 2] =
1356 				rxq->rx_ring->rx_ring_struct->fw_ring_id;
1357 			vnic->rss_table[i * 2 + 1] =
1358 				rxq->cp_ring->cp_ring_struct->fw_ring_id;
1359 		} else {
1360 			vnic->rss_table[i] =
1361 			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1362 		}
1363 	}
1364 
1365 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1366 	return 0;
1367 }
1368 
1369 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1370 			      struct rte_eth_rss_reta_entry64 *reta_conf,
1371 			      uint16_t reta_size)
1372 {
1373 	struct bnxt *bp = eth_dev->data->dev_private;
1374 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1375 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1376 	uint16_t idx, sft, i;
1377 	int rc;
1378 
1379 	rc = is_bnxt_in_error(bp);
1380 	if (rc)
1381 		return rc;
1382 
1383 	/* Retrieve from the default VNIC */
1384 	if (!vnic)
1385 		return -EINVAL;
1386 	if (!vnic->rss_table)
1387 		return -EINVAL;
1388 
1389 	if (reta_size != tbl_size) {
1390 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1391 			"(%d) must equal the size supported by the hardware "
1392 			"(%d)\n", reta_size, tbl_size);
1393 		return -EINVAL;
1394 	}
1395 
1396 	for (idx = 0, i = 0; i < reta_size; i++) {
1397 		idx = i / RTE_RETA_GROUP_SIZE;
1398 		sft = i % RTE_RETA_GROUP_SIZE;
1399 
1400 		if (reta_conf[idx].mask & (1ULL << sft)) {
1401 			uint16_t qid;
1402 
1403 			if (BNXT_CHIP_THOR(bp))
1404 				qid = bnxt_rss_to_qid(bp,
1405 						      vnic->rss_table[i * 2]);
1406 			else
1407 				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1408 
1409 			if (qid == INVALID_HW_RING_ID) {
1410 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1411 				return -EINVAL;
1412 			}
1413 			reta_conf[idx].reta[sft] = qid;
1414 		}
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1421 				   struct rte_eth_rss_conf *rss_conf)
1422 {
1423 	struct bnxt *bp = eth_dev->data->dev_private;
1424 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1425 	struct bnxt_vnic_info *vnic;
1426 	int rc;
1427 
1428 	rc = is_bnxt_in_error(bp);
1429 	if (rc)
1430 		return rc;
1431 
1432 	/*
1433 	 * If RSS enablement were different than dev_configure,
1434 	 * then return -EINVAL
1435 	 */
1436 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1437 		if (!rss_conf->rss_hf)
1438 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
1439 	} else {
1440 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1441 			return -EINVAL;
1442 	}
1443 
1444 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
1445 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
1446 
1447 	/* Update the default RSS VNIC(s) */
1448 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1449 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1450 
1451 	/*
1452 	 * If hashkey is not specified, use the previously configured
1453 	 * hashkey
1454 	 */
1455 	if (!rss_conf->rss_key)
1456 		goto rss_config;
1457 
1458 	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1459 		PMD_DRV_LOG(ERR,
1460 			    "Invalid hashkey length, should be 16 bytes\n");
1461 		return -EINVAL;
1462 	}
1463 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1464 
1465 rss_config:
1466 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1467 	return 0;
1468 }
1469 
1470 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1471 				     struct rte_eth_rss_conf *rss_conf)
1472 {
1473 	struct bnxt *bp = eth_dev->data->dev_private;
1474 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1475 	int len, rc;
1476 	uint32_t hash_types;
1477 
1478 	rc = is_bnxt_in_error(bp);
1479 	if (rc)
1480 		return rc;
1481 
1482 	/* RSS configuration is the same for all VNICs */
1483 	if (vnic && vnic->rss_hash_key) {
1484 		if (rss_conf->rss_key) {
1485 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1486 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1487 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1488 		}
1489 
1490 		hash_types = vnic->hash_type;
1491 		rss_conf->rss_hf = 0;
1492 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1493 			rss_conf->rss_hf |= ETH_RSS_IPV4;
1494 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1495 		}
1496 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1497 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1498 			hash_types &=
1499 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1500 		}
1501 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1502 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1503 			hash_types &=
1504 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1505 		}
1506 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1507 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1508 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1509 		}
1510 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1511 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1512 			hash_types &=
1513 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1514 		}
1515 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1516 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1517 			hash_types &=
1518 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1519 		}
1520 		if (hash_types) {
1521 			PMD_DRV_LOG(ERR,
1522 				"Unknwon RSS config from firmware (%08x), RSS disabled",
1523 				vnic->hash_type);
1524 			return -ENOTSUP;
1525 		}
1526 	} else {
1527 		rss_conf->rss_hf = 0;
1528 	}
1529 	return 0;
1530 }
1531 
1532 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1533 			       struct rte_eth_fc_conf *fc_conf)
1534 {
1535 	struct bnxt *bp = dev->data->dev_private;
1536 	struct rte_eth_link link_info;
1537 	int rc;
1538 
1539 	rc = is_bnxt_in_error(bp);
1540 	if (rc)
1541 		return rc;
1542 
1543 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1544 	if (rc)
1545 		return rc;
1546 
1547 	memset(fc_conf, 0, sizeof(*fc_conf));
1548 	if (bp->link_info.auto_pause)
1549 		fc_conf->autoneg = 1;
1550 	switch (bp->link_info.pause) {
1551 	case 0:
1552 		fc_conf->mode = RTE_FC_NONE;
1553 		break;
1554 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1555 		fc_conf->mode = RTE_FC_TX_PAUSE;
1556 		break;
1557 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1558 		fc_conf->mode = RTE_FC_RX_PAUSE;
1559 		break;
1560 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1561 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1562 		fc_conf->mode = RTE_FC_FULL;
1563 		break;
1564 	}
1565 	return 0;
1566 }
1567 
1568 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1569 			       struct rte_eth_fc_conf *fc_conf)
1570 {
1571 	struct bnxt *bp = dev->data->dev_private;
1572 	int rc;
1573 
1574 	rc = is_bnxt_in_error(bp);
1575 	if (rc)
1576 		return rc;
1577 
1578 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1579 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1580 		return -ENOTSUP;
1581 	}
1582 
1583 	switch (fc_conf->mode) {
1584 	case RTE_FC_NONE:
1585 		bp->link_info.auto_pause = 0;
1586 		bp->link_info.force_pause = 0;
1587 		break;
1588 	case RTE_FC_RX_PAUSE:
1589 		if (fc_conf->autoneg) {
1590 			bp->link_info.auto_pause =
1591 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1592 			bp->link_info.force_pause = 0;
1593 		} else {
1594 			bp->link_info.auto_pause = 0;
1595 			bp->link_info.force_pause =
1596 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1597 		}
1598 		break;
1599 	case RTE_FC_TX_PAUSE:
1600 		if (fc_conf->autoneg) {
1601 			bp->link_info.auto_pause =
1602 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1603 			bp->link_info.force_pause = 0;
1604 		} else {
1605 			bp->link_info.auto_pause = 0;
1606 			bp->link_info.force_pause =
1607 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1608 		}
1609 		break;
1610 	case RTE_FC_FULL:
1611 		if (fc_conf->autoneg) {
1612 			bp->link_info.auto_pause =
1613 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1614 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1615 			bp->link_info.force_pause = 0;
1616 		} else {
1617 			bp->link_info.auto_pause = 0;
1618 			bp->link_info.force_pause =
1619 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1620 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1621 		}
1622 		break;
1623 	}
1624 	return bnxt_set_hwrm_link_config(bp, true);
1625 }
1626 
1627 /* Add UDP tunneling port */
1628 static int
1629 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1630 			 struct rte_eth_udp_tunnel *udp_tunnel)
1631 {
1632 	struct bnxt *bp = eth_dev->data->dev_private;
1633 	uint16_t tunnel_type = 0;
1634 	int rc = 0;
1635 
1636 	rc = is_bnxt_in_error(bp);
1637 	if (rc)
1638 		return rc;
1639 
1640 	switch (udp_tunnel->prot_type) {
1641 	case RTE_TUNNEL_TYPE_VXLAN:
1642 		if (bp->vxlan_port_cnt) {
1643 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1644 				udp_tunnel->udp_port);
1645 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1646 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1647 				return -ENOSPC;
1648 			}
1649 			bp->vxlan_port_cnt++;
1650 			return 0;
1651 		}
1652 		tunnel_type =
1653 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1654 		bp->vxlan_port_cnt++;
1655 		break;
1656 	case RTE_TUNNEL_TYPE_GENEVE:
1657 		if (bp->geneve_port_cnt) {
1658 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1659 				udp_tunnel->udp_port);
1660 			if (bp->geneve_port != udp_tunnel->udp_port) {
1661 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1662 				return -ENOSPC;
1663 			}
1664 			bp->geneve_port_cnt++;
1665 			return 0;
1666 		}
1667 		tunnel_type =
1668 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1669 		bp->geneve_port_cnt++;
1670 		break;
1671 	default:
1672 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1673 		return -ENOTSUP;
1674 	}
1675 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1676 					     tunnel_type);
1677 	return rc;
1678 }
1679 
1680 static int
1681 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1682 			 struct rte_eth_udp_tunnel *udp_tunnel)
1683 {
1684 	struct bnxt *bp = eth_dev->data->dev_private;
1685 	uint16_t tunnel_type = 0;
1686 	uint16_t port = 0;
1687 	int rc = 0;
1688 
1689 	rc = is_bnxt_in_error(bp);
1690 	if (rc)
1691 		return rc;
1692 
1693 	switch (udp_tunnel->prot_type) {
1694 	case RTE_TUNNEL_TYPE_VXLAN:
1695 		if (!bp->vxlan_port_cnt) {
1696 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1697 			return -EINVAL;
1698 		}
1699 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1700 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1701 				udp_tunnel->udp_port, bp->vxlan_port);
1702 			return -EINVAL;
1703 		}
1704 		if (--bp->vxlan_port_cnt)
1705 			return 0;
1706 
1707 		tunnel_type =
1708 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1709 		port = bp->vxlan_fw_dst_port_id;
1710 		break;
1711 	case RTE_TUNNEL_TYPE_GENEVE:
1712 		if (!bp->geneve_port_cnt) {
1713 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1714 			return -EINVAL;
1715 		}
1716 		if (bp->geneve_port != udp_tunnel->udp_port) {
1717 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1718 				udp_tunnel->udp_port, bp->geneve_port);
1719 			return -EINVAL;
1720 		}
1721 		if (--bp->geneve_port_cnt)
1722 			return 0;
1723 
1724 		tunnel_type =
1725 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1726 		port = bp->geneve_fw_dst_port_id;
1727 		break;
1728 	default:
1729 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1730 		return -ENOTSUP;
1731 	}
1732 
1733 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1734 	if (!rc) {
1735 		if (tunnel_type ==
1736 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1737 			bp->vxlan_port = 0;
1738 		if (tunnel_type ==
1739 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1740 			bp->geneve_port = 0;
1741 	}
1742 	return rc;
1743 }
1744 
1745 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1746 {
1747 	struct bnxt_filter_info *filter;
1748 	struct bnxt_vnic_info *vnic;
1749 	int rc = 0;
1750 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1751 
1752 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1753 	filter = STAILQ_FIRST(&vnic->filter);
1754 	while (filter) {
1755 		/* Search for this matching MAC+VLAN filter */
1756 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
1757 			/* Delete the filter */
1758 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1759 			if (rc)
1760 				return rc;
1761 			STAILQ_REMOVE(&vnic->filter, filter,
1762 				      bnxt_filter_info, next);
1763 			bnxt_free_filter(bp, filter);
1764 			PMD_DRV_LOG(INFO,
1765 				    "Deleted vlan filter for %d\n",
1766 				    vlan_id);
1767 			return 0;
1768 		}
1769 		filter = STAILQ_NEXT(filter, next);
1770 	}
1771 	return -ENOENT;
1772 }
1773 
1774 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1775 {
1776 	struct bnxt_filter_info *filter;
1777 	struct bnxt_vnic_info *vnic;
1778 	int rc = 0;
1779 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
1780 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
1781 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1782 
1783 	/* Implementation notes on the use of VNIC in this command:
1784 	 *
1785 	 * By default, these filters belong to default vnic for the function.
1786 	 * Once these filters are set up, only destination VNIC can be modified.
1787 	 * If the destination VNIC is not specified in this command,
1788 	 * then the HWRM shall only create an l2 context id.
1789 	 */
1790 
1791 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1792 	filter = STAILQ_FIRST(&vnic->filter);
1793 	/* Check if the VLAN has already been added */
1794 	while (filter) {
1795 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
1796 			return -EEXIST;
1797 
1798 		filter = STAILQ_NEXT(filter, next);
1799 	}
1800 
1801 	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
1802 	 * command to create MAC+VLAN filter with the right flags, enables set.
1803 	 */
1804 	filter = bnxt_alloc_filter(bp);
1805 	if (!filter) {
1806 		PMD_DRV_LOG(ERR,
1807 			    "MAC/VLAN filter alloc failed\n");
1808 		return -ENOMEM;
1809 	}
1810 	/* MAC + VLAN ID filter */
1811 	/* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
1812 	 * untagged packets are received
1813 	 *
1814 	 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged
1815 	 * packets and only the programmed vlan's packets are received
1816 	 */
1817 	filter->l2_ivlan = vlan_id;
1818 	filter->l2_ivlan_mask = 0x0FFF;
1819 	filter->enables |= en;
1820 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1821 
1822 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1823 	if (rc) {
1824 		/* Free the newly allocated filter as we were
1825 		 * not able to create the filter in hardware.
1826 		 */
1827 		bnxt_free_filter(bp, filter);
1828 		return rc;
1829 	}
1830 
1831 	filter->mac_index = 0;
1832 	/* Add this new filter to the list */
1833 	if (vlan_id == 0)
1834 		STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1835 	else
1836 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1837 
1838 	PMD_DRV_LOG(INFO,
1839 		    "Added Vlan filter for %d\n", vlan_id);
1840 	return rc;
1841 }
1842 
1843 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1844 		uint16_t vlan_id, int on)
1845 {
1846 	struct bnxt *bp = eth_dev->data->dev_private;
1847 	int rc;
1848 
1849 	rc = is_bnxt_in_error(bp);
1850 	if (rc)
1851 		return rc;
1852 
1853 	/* These operations apply to ALL existing MAC/VLAN filters */
1854 	if (on)
1855 		return bnxt_add_vlan_filter(bp, vlan_id);
1856 	else
1857 		return bnxt_del_vlan_filter(bp, vlan_id);
1858 }
1859 
1860 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
1861 				    struct bnxt_vnic_info *vnic)
1862 {
1863 	struct bnxt_filter_info *filter;
1864 	int rc;
1865 
1866 	filter = STAILQ_FIRST(&vnic->filter);
1867 	while (filter) {
1868 		if (filter->mac_index == 0 &&
1869 		    !memcmp(filter->l2_addr, bp->mac_addr,
1870 			    RTE_ETHER_ADDR_LEN)) {
1871 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1872 			if (!rc) {
1873 				STAILQ_REMOVE(&vnic->filter, filter,
1874 					      bnxt_filter_info, next);
1875 				bnxt_free_filter(bp, filter);
1876 			}
1877 			return rc;
1878 		}
1879 		filter = STAILQ_NEXT(filter, next);
1880 	}
1881 	return 0;
1882 }
1883 
1884 static int
1885 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
1886 {
1887 	struct bnxt_vnic_info *vnic;
1888 	unsigned int i;
1889 	int rc;
1890 
1891 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1892 	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
1893 		/* Remove any VLAN filters programmed */
1894 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
1895 			bnxt_del_vlan_filter(bp, i);
1896 
1897 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
1898 		if (rc)
1899 			return rc;
1900 	} else {
1901 		/* Default filter will allow packets that match the
1902 		 * dest mac. So, it has to be deleted, otherwise, we
1903 		 * will endup receiving vlan packets for which the
1904 		 * filter is not programmed, when hw-vlan-filter
1905 		 * configuration is ON
1906 		 */
1907 		bnxt_del_dflt_mac_filter(bp, vnic);
1908 		/* This filter will allow only untagged packets */
1909 		bnxt_add_vlan_filter(bp, 0);
1910 	}
1911 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
1912 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
1913 
1914 	return 0;
1915 }
1916 
1917 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
1918 {
1919 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
1920 	unsigned int i;
1921 	int rc;
1922 
1923 	/* Destroy vnic filters and vnic */
1924 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
1925 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
1926 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
1927 			bnxt_del_vlan_filter(bp, i);
1928 	}
1929 	bnxt_del_dflt_mac_filter(bp, vnic);
1930 
1931 	rc = bnxt_hwrm_vnic_free(bp, vnic);
1932 	if (rc)
1933 		return rc;
1934 
1935 	rte_free(vnic->fw_grp_ids);
1936 	vnic->fw_grp_ids = NULL;
1937 
1938 	return 0;
1939 }
1940 
1941 static int
1942 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
1943 {
1944 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1945 	int rc;
1946 
1947 	/* Destroy, recreate and reconfigure the default vnic */
1948 	rc = bnxt_free_one_vnic(bp, 0);
1949 	if (rc)
1950 		return rc;
1951 
1952 	/* default vnic 0 */
1953 	rc = bnxt_setup_one_vnic(bp, 0);
1954 	if (rc)
1955 		return rc;
1956 
1957 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
1958 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
1959 		rc = bnxt_add_vlan_filter(bp, 0);
1960 		if (rc)
1961 			return rc;
1962 		rc = bnxt_restore_vlan_filters(bp);
1963 		if (rc)
1964 			return rc;
1965 	} else {
1966 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
1967 		if (rc)
1968 			return rc;
1969 	}
1970 
1971 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1972 	if (rc)
1973 		return rc;
1974 
1975 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
1976 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
1977 
1978 	return rc;
1979 }
1980 
1981 static int
1982 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1983 {
1984 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1985 	struct bnxt *bp = dev->data->dev_private;
1986 	int rc;
1987 
1988 	rc = is_bnxt_in_error(bp);
1989 	if (rc)
1990 		return rc;
1991 
1992 	/* Filter settings will get applied when port is started */
1993 	if (!dev->data->dev_started)
1994 		return 0;
1995 
1996 	if (mask & ETH_VLAN_FILTER_MASK) {
1997 		/* Enable or disable VLAN filtering */
1998 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
1999 		if (rc)
2000 			return rc;
2001 	}
2002 
2003 	if (mask & ETH_VLAN_STRIP_MASK) {
2004 		/* Enable or disable VLAN stripping */
2005 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2006 		if (rc)
2007 			return rc;
2008 	}
2009 
2010 	if (mask & ETH_VLAN_EXTEND_MASK) {
2011 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2012 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2013 		else
2014 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2015 	}
2016 
2017 	return 0;
2018 }
2019 
2020 static int
2021 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2022 		      uint16_t tpid)
2023 {
2024 	struct bnxt *bp = dev->data->dev_private;
2025 	int qinq = dev->data->dev_conf.rxmode.offloads &
2026 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
2027 
2028 	if (vlan_type != ETH_VLAN_TYPE_INNER &&
2029 	    vlan_type != ETH_VLAN_TYPE_OUTER) {
2030 		PMD_DRV_LOG(ERR,
2031 			    "Unsupported vlan type.");
2032 		return -EINVAL;
2033 	}
2034 	if (!qinq) {
2035 		PMD_DRV_LOG(ERR,
2036 			    "QinQ not enabled. Needs to be ON as we can "
2037 			    "accelerate only outer vlan\n");
2038 		return -EINVAL;
2039 	}
2040 
2041 	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2042 		switch (tpid) {
2043 		case RTE_ETHER_TYPE_QINQ:
2044 			bp->outer_tpid_bd =
2045 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2046 				break;
2047 		case RTE_ETHER_TYPE_VLAN:
2048 			bp->outer_tpid_bd =
2049 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2050 				break;
2051 		case 0x9100:
2052 			bp->outer_tpid_bd =
2053 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2054 				break;
2055 		case 0x9200:
2056 			bp->outer_tpid_bd =
2057 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2058 				break;
2059 		case 0x9300:
2060 			bp->outer_tpid_bd =
2061 				 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2062 				break;
2063 		default:
2064 			PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2065 			return -EINVAL;
2066 		}
2067 		bp->outer_tpid_bd |= tpid;
2068 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2069 	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2070 		PMD_DRV_LOG(ERR,
2071 			    "Can accelerate only outer vlan in QinQ\n");
2072 		return -EINVAL;
2073 	}
2074 
2075 	return 0;
2076 }
2077 
2078 static int
2079 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2080 			     struct rte_ether_addr *addr)
2081 {
2082 	struct bnxt *bp = dev->data->dev_private;
2083 	/* Default Filter is tied to VNIC 0 */
2084 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2085 	int rc;
2086 
2087 	rc = is_bnxt_in_error(bp);
2088 	if (rc)
2089 		return rc;
2090 
2091 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2092 		return -EPERM;
2093 
2094 	if (rte_is_zero_ether_addr(addr))
2095 		return -EINVAL;
2096 
2097 	/* Check if the requested MAC is already added */
2098 	if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2099 		return 0;
2100 
2101 	/* Destroy filter and re-create it */
2102 	bnxt_del_dflt_mac_filter(bp, vnic);
2103 
2104 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2105 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2106 		/* This filter will allow only untagged packets */
2107 		rc = bnxt_add_vlan_filter(bp, 0);
2108 	} else {
2109 		rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2110 	}
2111 
2112 	PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2113 	return rc;
2114 }
2115 
2116 static int
2117 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2118 			  struct rte_ether_addr *mc_addr_set,
2119 			  uint32_t nb_mc_addr)
2120 {
2121 	struct bnxt *bp = eth_dev->data->dev_private;
2122 	char *mc_addr_list = (char *)mc_addr_set;
2123 	struct bnxt_vnic_info *vnic;
2124 	uint32_t off = 0, i = 0;
2125 	int rc;
2126 
2127 	rc = is_bnxt_in_error(bp);
2128 	if (rc)
2129 		return rc;
2130 
2131 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2132 
2133 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2134 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2135 		goto allmulti;
2136 	}
2137 
2138 	/* TODO Check for Duplicate mcast addresses */
2139 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2140 	for (i = 0; i < nb_mc_addr; i++) {
2141 		memcpy(vnic->mc_list + off, &mc_addr_list[i],
2142 			RTE_ETHER_ADDR_LEN);
2143 		off += RTE_ETHER_ADDR_LEN;
2144 	}
2145 
2146 	vnic->mc_addr_cnt = i;
2147 	if (vnic->mc_addr_cnt)
2148 		vnic->flags |= BNXT_VNIC_INFO_MCAST;
2149 	else
2150 		vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2151 
2152 allmulti:
2153 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2154 }
2155 
2156 static int
2157 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2158 {
2159 	struct bnxt *bp = dev->data->dev_private;
2160 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2161 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2162 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2163 	int ret;
2164 
2165 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
2166 			fw_major, fw_minor, fw_updt);
2167 
2168 	ret += 1; /* add the size of '\0' */
2169 	if (fw_size < (uint32_t)ret)
2170 		return ret;
2171 	else
2172 		return 0;
2173 }
2174 
2175 static void
2176 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2177 	struct rte_eth_rxq_info *qinfo)
2178 {
2179 	struct bnxt *bp = dev->data->dev_private;
2180 	struct bnxt_rx_queue *rxq;
2181 
2182 	if (is_bnxt_in_error(bp))
2183 		return;
2184 
2185 	rxq = dev->data->rx_queues[queue_id];
2186 
2187 	qinfo->mp = rxq->mb_pool;
2188 	qinfo->scattered_rx = dev->data->scattered_rx;
2189 	qinfo->nb_desc = rxq->nb_rx_desc;
2190 
2191 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2192 	qinfo->conf.rx_drop_en = 0;
2193 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2194 }
2195 
2196 static void
2197 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2198 	struct rte_eth_txq_info *qinfo)
2199 {
2200 	struct bnxt *bp = dev->data->dev_private;
2201 	struct bnxt_tx_queue *txq;
2202 
2203 	if (is_bnxt_in_error(bp))
2204 		return;
2205 
2206 	txq = dev->data->tx_queues[queue_id];
2207 
2208 	qinfo->nb_desc = txq->nb_tx_desc;
2209 
2210 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2211 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2212 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2213 
2214 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2215 	qinfo->conf.tx_rs_thresh = 0;
2216 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2217 }
2218 
2219 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2220 {
2221 	struct bnxt *bp = eth_dev->data->dev_private;
2222 	uint32_t new_pkt_size;
2223 	uint32_t rc = 0;
2224 	uint32_t i;
2225 
2226 	rc = is_bnxt_in_error(bp);
2227 	if (rc)
2228 		return rc;
2229 
2230 	/* Exit if receive queues are not configured yet */
2231 	if (!eth_dev->data->nb_rx_queues)
2232 		return rc;
2233 
2234 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2235 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2236 
2237 #ifdef RTE_ARCH_X86
2238 	/*
2239 	 * If vector-mode tx/rx is active, disallow any MTU change that would
2240 	 * require scattered receive support.
2241 	 */
2242 	if (eth_dev->data->dev_started &&
2243 	    (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2244 	     eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2245 	    (new_pkt_size >
2246 	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2247 		PMD_DRV_LOG(ERR,
2248 			    "MTU change would require scattered rx support. ");
2249 		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2250 		return -EINVAL;
2251 	}
2252 #endif
2253 
2254 	if (new_mtu > RTE_ETHER_MTU) {
2255 		bp->flags |= BNXT_FLAG_JUMBO;
2256 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
2257 			DEV_RX_OFFLOAD_JUMBO_FRAME;
2258 	} else {
2259 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
2260 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
2261 		bp->flags &= ~BNXT_FLAG_JUMBO;
2262 	}
2263 
2264 	/* Is there a change in mtu setting? */
2265 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2266 		return rc;
2267 
2268 	for (i = 0; i < bp->nr_vnics; i++) {
2269 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2270 		uint16_t size = 0;
2271 
2272 		vnic->mru = BNXT_VNIC_MRU(new_mtu);
2273 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2274 		if (rc)
2275 			break;
2276 
2277 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2278 		size -= RTE_PKTMBUF_HEADROOM;
2279 
2280 		if (size < new_mtu) {
2281 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2282 			if (rc)
2283 				return rc;
2284 		}
2285 	}
2286 
2287 	if (!rc)
2288 		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2289 
2290 	PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2291 
2292 	return rc;
2293 }
2294 
2295 static int
2296 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2297 {
2298 	struct bnxt *bp = dev->data->dev_private;
2299 	uint16_t vlan = bp->vlan;
2300 	int rc;
2301 
2302 	rc = is_bnxt_in_error(bp);
2303 	if (rc)
2304 		return rc;
2305 
2306 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2307 		PMD_DRV_LOG(ERR,
2308 			"PVID cannot be modified for this function\n");
2309 		return -ENOTSUP;
2310 	}
2311 	bp->vlan = on ? pvid : 0;
2312 
2313 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2314 	if (rc)
2315 		bp->vlan = vlan;
2316 	return rc;
2317 }
2318 
2319 static int
2320 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2321 {
2322 	struct bnxt *bp = dev->data->dev_private;
2323 	int rc;
2324 
2325 	rc = is_bnxt_in_error(bp);
2326 	if (rc)
2327 		return rc;
2328 
2329 	return bnxt_hwrm_port_led_cfg(bp, true);
2330 }
2331 
2332 static int
2333 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2334 {
2335 	struct bnxt *bp = dev->data->dev_private;
2336 	int rc;
2337 
2338 	rc = is_bnxt_in_error(bp);
2339 	if (rc)
2340 		return rc;
2341 
2342 	return bnxt_hwrm_port_led_cfg(bp, false);
2343 }
2344 
2345 static uint32_t
2346 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2347 {
2348 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2349 	uint32_t desc = 0, raw_cons = 0, cons;
2350 	struct bnxt_cp_ring_info *cpr;
2351 	struct bnxt_rx_queue *rxq;
2352 	struct rx_pkt_cmpl *rxcmp;
2353 	int rc;
2354 
2355 	rc = is_bnxt_in_error(bp);
2356 	if (rc)
2357 		return rc;
2358 
2359 	rxq = dev->data->rx_queues[rx_queue_id];
2360 	cpr = rxq->cp_ring;
2361 	raw_cons = cpr->cp_raw_cons;
2362 
2363 	while (1) {
2364 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2365 		rte_prefetch0(&cpr->cp_desc_ring[cons]);
2366 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2367 
2368 		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2369 			break;
2370 		} else {
2371 			raw_cons++;
2372 			desc++;
2373 		}
2374 	}
2375 
2376 	return desc;
2377 }
2378 
2379 static int
2380 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2381 {
2382 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2383 	struct bnxt_rx_ring_info *rxr;
2384 	struct bnxt_cp_ring_info *cpr;
2385 	struct bnxt_sw_rx_bd *rx_buf;
2386 	struct rx_pkt_cmpl *rxcmp;
2387 	uint32_t cons, cp_cons;
2388 	int rc;
2389 
2390 	if (!rxq)
2391 		return -EINVAL;
2392 
2393 	rc = is_bnxt_in_error(rxq->bp);
2394 	if (rc)
2395 		return rc;
2396 
2397 	cpr = rxq->cp_ring;
2398 	rxr = rxq->rx_ring;
2399 
2400 	if (offset >= rxq->nb_rx_desc)
2401 		return -EINVAL;
2402 
2403 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2404 	cp_cons = cpr->cp_raw_cons;
2405 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2406 
2407 	if (cons > cp_cons) {
2408 		if (CMPL_VALID(rxcmp, cpr->valid))
2409 			return RTE_ETH_RX_DESC_DONE;
2410 	} else {
2411 		if (CMPL_VALID(rxcmp, !cpr->valid))
2412 			return RTE_ETH_RX_DESC_DONE;
2413 	}
2414 	rx_buf = &rxr->rx_buf_ring[cons];
2415 	if (rx_buf->mbuf == NULL)
2416 		return RTE_ETH_RX_DESC_UNAVAIL;
2417 
2418 
2419 	return RTE_ETH_RX_DESC_AVAIL;
2420 }
2421 
2422 static int
2423 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2424 {
2425 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2426 	struct bnxt_tx_ring_info *txr;
2427 	struct bnxt_cp_ring_info *cpr;
2428 	struct bnxt_sw_tx_bd *tx_buf;
2429 	struct tx_pkt_cmpl *txcmp;
2430 	uint32_t cons, cp_cons;
2431 	int rc;
2432 
2433 	if (!txq)
2434 		return -EINVAL;
2435 
2436 	rc = is_bnxt_in_error(txq->bp);
2437 	if (rc)
2438 		return rc;
2439 
2440 	cpr = txq->cp_ring;
2441 	txr = txq->tx_ring;
2442 
2443 	if (offset >= txq->nb_tx_desc)
2444 		return -EINVAL;
2445 
2446 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2447 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2448 	cp_cons = cpr->cp_raw_cons;
2449 
2450 	if (cons > cp_cons) {
2451 		if (CMPL_VALID(txcmp, cpr->valid))
2452 			return RTE_ETH_TX_DESC_UNAVAIL;
2453 	} else {
2454 		if (CMPL_VALID(txcmp, !cpr->valid))
2455 			return RTE_ETH_TX_DESC_UNAVAIL;
2456 	}
2457 	tx_buf = &txr->tx_buf_ring[cons];
2458 	if (tx_buf->mbuf == NULL)
2459 		return RTE_ETH_TX_DESC_DONE;
2460 
2461 	return RTE_ETH_TX_DESC_FULL;
2462 }
2463 
2464 static struct bnxt_filter_info *
2465 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2466 				struct rte_eth_ethertype_filter *efilter,
2467 				struct bnxt_vnic_info *vnic0,
2468 				struct bnxt_vnic_info *vnic,
2469 				int *ret)
2470 {
2471 	struct bnxt_filter_info *mfilter = NULL;
2472 	int match = 0;
2473 	*ret = 0;
2474 
2475 	if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2476 		efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2477 		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2478 			" ethertype filter.", efilter->ether_type);
2479 		*ret = -EINVAL;
2480 		goto exit;
2481 	}
2482 	if (efilter->queue >= bp->rx_nr_rings) {
2483 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2484 		*ret = -EINVAL;
2485 		goto exit;
2486 	}
2487 
2488 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2489 	vnic = &bp->vnic_info[efilter->queue];
2490 	if (vnic == NULL) {
2491 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2492 		*ret = -EINVAL;
2493 		goto exit;
2494 	}
2495 
2496 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2497 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2498 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2499 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2500 			     mfilter->flags ==
2501 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2502 			     mfilter->ethertype == efilter->ether_type)) {
2503 				match = 1;
2504 				break;
2505 			}
2506 		}
2507 	} else {
2508 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
2509 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2510 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2511 			     mfilter->ethertype == efilter->ether_type &&
2512 			     mfilter->flags ==
2513 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2514 				match = 1;
2515 				break;
2516 			}
2517 	}
2518 
2519 	if (match)
2520 		*ret = -EEXIST;
2521 
2522 exit:
2523 	return mfilter;
2524 }
2525 
2526 static int
2527 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2528 			enum rte_filter_op filter_op,
2529 			void *arg)
2530 {
2531 	struct bnxt *bp = dev->data->dev_private;
2532 	struct rte_eth_ethertype_filter *efilter =
2533 			(struct rte_eth_ethertype_filter *)arg;
2534 	struct bnxt_filter_info *bfilter, *filter1;
2535 	struct bnxt_vnic_info *vnic, *vnic0;
2536 	int ret;
2537 
2538 	if (filter_op == RTE_ETH_FILTER_NOP)
2539 		return 0;
2540 
2541 	if (arg == NULL) {
2542 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2543 			    filter_op);
2544 		return -EINVAL;
2545 	}
2546 
2547 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2548 	vnic = &bp->vnic_info[efilter->queue];
2549 
2550 	switch (filter_op) {
2551 	case RTE_ETH_FILTER_ADD:
2552 		bnxt_match_and_validate_ether_filter(bp, efilter,
2553 							vnic0, vnic, &ret);
2554 		if (ret < 0)
2555 			return ret;
2556 
2557 		bfilter = bnxt_get_unused_filter(bp);
2558 		if (bfilter == NULL) {
2559 			PMD_DRV_LOG(ERR,
2560 				"Not enough resources for a new filter.\n");
2561 			return -ENOMEM;
2562 		}
2563 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2564 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2565 		       RTE_ETHER_ADDR_LEN);
2566 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2567 		       RTE_ETHER_ADDR_LEN);
2568 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2569 		bfilter->ethertype = efilter->ether_type;
2570 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2571 
2572 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2573 		if (filter1 == NULL) {
2574 			ret = -EINVAL;
2575 			goto cleanup;
2576 		}
2577 		bfilter->enables |=
2578 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2579 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2580 
2581 		bfilter->dst_id = vnic->fw_vnic_id;
2582 
2583 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2584 			bfilter->flags =
2585 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2586 		}
2587 
2588 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2589 		if (ret)
2590 			goto cleanup;
2591 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2592 		break;
2593 	case RTE_ETH_FILTER_DELETE:
2594 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2595 							vnic0, vnic, &ret);
2596 		if (ret == -EEXIST) {
2597 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2598 
2599 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2600 				      next);
2601 			bnxt_free_filter(bp, filter1);
2602 		} else if (ret == 0) {
2603 			PMD_DRV_LOG(ERR, "No matching filter found\n");
2604 		}
2605 		break;
2606 	default:
2607 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2608 		ret = -EINVAL;
2609 		goto error;
2610 	}
2611 	return ret;
2612 cleanup:
2613 	bnxt_free_filter(bp, bfilter);
2614 error:
2615 	return ret;
2616 }
2617 
2618 static inline int
2619 parse_ntuple_filter(struct bnxt *bp,
2620 		    struct rte_eth_ntuple_filter *nfilter,
2621 		    struct bnxt_filter_info *bfilter)
2622 {
2623 	uint32_t en = 0;
2624 
2625 	if (nfilter->queue >= bp->rx_nr_rings) {
2626 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
2627 		return -EINVAL;
2628 	}
2629 
2630 	switch (nfilter->dst_port_mask) {
2631 	case UINT16_MAX:
2632 		bfilter->dst_port_mask = -1;
2633 		bfilter->dst_port = nfilter->dst_port;
2634 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
2635 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2636 		break;
2637 	default:
2638 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2639 		return -EINVAL;
2640 	}
2641 
2642 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2643 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2644 
2645 	switch (nfilter->proto_mask) {
2646 	case UINT8_MAX:
2647 		if (nfilter->proto == 17) /* IPPROTO_UDP */
2648 			bfilter->ip_protocol = 17;
2649 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
2650 			bfilter->ip_protocol = 6;
2651 		else
2652 			return -EINVAL;
2653 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2654 		break;
2655 	default:
2656 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
2657 		return -EINVAL;
2658 	}
2659 
2660 	switch (nfilter->dst_ip_mask) {
2661 	case UINT32_MAX:
2662 		bfilter->dst_ipaddr_mask[0] = -1;
2663 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
2664 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
2665 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2666 		break;
2667 	default:
2668 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2669 		return -EINVAL;
2670 	}
2671 
2672 	switch (nfilter->src_ip_mask) {
2673 	case UINT32_MAX:
2674 		bfilter->src_ipaddr_mask[0] = -1;
2675 		bfilter->src_ipaddr[0] = nfilter->src_ip;
2676 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
2677 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2678 		break;
2679 	default:
2680 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2681 		return -EINVAL;
2682 	}
2683 
2684 	switch (nfilter->src_port_mask) {
2685 	case UINT16_MAX:
2686 		bfilter->src_port_mask = -1;
2687 		bfilter->src_port = nfilter->src_port;
2688 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
2689 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2690 		break;
2691 	default:
2692 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
2693 		return -EINVAL;
2694 	}
2695 
2696 	bfilter->enables = en;
2697 	return 0;
2698 }
2699 
2700 static struct bnxt_filter_info*
2701 bnxt_match_ntuple_filter(struct bnxt *bp,
2702 			 struct bnxt_filter_info *bfilter,
2703 			 struct bnxt_vnic_info **mvnic)
2704 {
2705 	struct bnxt_filter_info *mfilter = NULL;
2706 	int i;
2707 
2708 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2709 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2710 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
2711 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
2712 			    bfilter->src_ipaddr_mask[0] ==
2713 			    mfilter->src_ipaddr_mask[0] &&
2714 			    bfilter->src_port == mfilter->src_port &&
2715 			    bfilter->src_port_mask == mfilter->src_port_mask &&
2716 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
2717 			    bfilter->dst_ipaddr_mask[0] ==
2718 			    mfilter->dst_ipaddr_mask[0] &&
2719 			    bfilter->dst_port == mfilter->dst_port &&
2720 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
2721 			    bfilter->flags == mfilter->flags &&
2722 			    bfilter->enables == mfilter->enables) {
2723 				if (mvnic)
2724 					*mvnic = vnic;
2725 				return mfilter;
2726 			}
2727 		}
2728 	}
2729 	return NULL;
2730 }
2731 
2732 static int
2733 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2734 		       struct rte_eth_ntuple_filter *nfilter,
2735 		       enum rte_filter_op filter_op)
2736 {
2737 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2738 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2739 	int ret;
2740 
2741 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2742 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2743 		return -EINVAL;
2744 	}
2745 
2746 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2747 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2748 		return -EINVAL;
2749 	}
2750 
2751 	bfilter = bnxt_get_unused_filter(bp);
2752 	if (bfilter == NULL) {
2753 		PMD_DRV_LOG(ERR,
2754 			"Not enough resources for a new filter.\n");
2755 		return -ENOMEM;
2756 	}
2757 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2758 	if (ret < 0)
2759 		goto free_filter;
2760 
2761 	vnic = &bp->vnic_info[nfilter->queue];
2762 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2763 	filter1 = STAILQ_FIRST(&vnic0->filter);
2764 	if (filter1 == NULL) {
2765 		ret = -EINVAL;
2766 		goto free_filter;
2767 	}
2768 
2769 	bfilter->dst_id = vnic->fw_vnic_id;
2770 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2771 	bfilter->enables |=
2772 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2773 	bfilter->ethertype = 0x800;
2774 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2775 
2776 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2777 
2778 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2779 	    bfilter->dst_id == mfilter->dst_id) {
2780 		PMD_DRV_LOG(ERR, "filter exists.\n");
2781 		ret = -EEXIST;
2782 		goto free_filter;
2783 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2784 		   bfilter->dst_id != mfilter->dst_id) {
2785 		mfilter->dst_id = vnic->fw_vnic_id;
2786 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2787 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2788 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2789 		PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2790 		PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2791 		goto free_filter;
2792 	}
2793 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2794 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
2795 		ret = -ENOENT;
2796 		goto free_filter;
2797 	}
2798 
2799 	if (filter_op == RTE_ETH_FILTER_ADD) {
2800 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2801 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2802 		if (ret)
2803 			goto free_filter;
2804 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2805 	} else {
2806 		if (mfilter == NULL) {
2807 			/* This should not happen. But for Coverity! */
2808 			ret = -ENOENT;
2809 			goto free_filter;
2810 		}
2811 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2812 
2813 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2814 		bnxt_free_filter(bp, mfilter);
2815 		bnxt_free_filter(bp, bfilter);
2816 	}
2817 
2818 	return 0;
2819 free_filter:
2820 	bnxt_free_filter(bp, bfilter);
2821 	return ret;
2822 }
2823 
2824 static int
2825 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2826 			enum rte_filter_op filter_op,
2827 			void *arg)
2828 {
2829 	struct bnxt *bp = dev->data->dev_private;
2830 	int ret;
2831 
2832 	if (filter_op == RTE_ETH_FILTER_NOP)
2833 		return 0;
2834 
2835 	if (arg == NULL) {
2836 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2837 			    filter_op);
2838 		return -EINVAL;
2839 	}
2840 
2841 	switch (filter_op) {
2842 	case RTE_ETH_FILTER_ADD:
2843 		ret = bnxt_cfg_ntuple_filter(bp,
2844 			(struct rte_eth_ntuple_filter *)arg,
2845 			filter_op);
2846 		break;
2847 	case RTE_ETH_FILTER_DELETE:
2848 		ret = bnxt_cfg_ntuple_filter(bp,
2849 			(struct rte_eth_ntuple_filter *)arg,
2850 			filter_op);
2851 		break;
2852 	default:
2853 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2854 		ret = -EINVAL;
2855 		break;
2856 	}
2857 	return ret;
2858 }
2859 
2860 static int
2861 bnxt_parse_fdir_filter(struct bnxt *bp,
2862 		       struct rte_eth_fdir_filter *fdir,
2863 		       struct bnxt_filter_info *filter)
2864 {
2865 	enum rte_fdir_mode fdir_mode =
2866 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2867 	struct bnxt_vnic_info *vnic0, *vnic;
2868 	struct bnxt_filter_info *filter1;
2869 	uint32_t en = 0;
2870 	int i;
2871 
2872 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2873 		return -EINVAL;
2874 
2875 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2876 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2877 
2878 	switch (fdir->input.flow_type) {
2879 	case RTE_ETH_FLOW_IPV4:
2880 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2881 		/* FALLTHROUGH */
2882 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2883 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2884 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2885 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2886 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2887 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2888 		filter->ip_addr_type =
2889 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2890 		filter->src_ipaddr_mask[0] = 0xffffffff;
2891 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2892 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2893 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2894 		filter->ethertype = 0x800;
2895 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2896 		break;
2897 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2898 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2899 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2900 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2901 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2902 		filter->dst_port_mask = 0xffff;
2903 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2904 		filter->src_port_mask = 0xffff;
2905 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2906 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2907 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2908 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2909 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2910 		filter->ip_protocol = 6;
2911 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2912 		filter->ip_addr_type =
2913 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2914 		filter->src_ipaddr_mask[0] = 0xffffffff;
2915 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2916 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2917 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2918 		filter->ethertype = 0x800;
2919 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2920 		break;
2921 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2922 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2923 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2924 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2925 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2926 		filter->dst_port_mask = 0xffff;
2927 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2928 		filter->src_port_mask = 0xffff;
2929 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2930 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2931 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2932 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2933 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2934 		filter->ip_protocol = 17;
2935 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2936 		filter->ip_addr_type =
2937 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2938 		filter->src_ipaddr_mask[0] = 0xffffffff;
2939 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2940 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2941 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2942 		filter->ethertype = 0x800;
2943 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2944 		break;
2945 	case RTE_ETH_FLOW_IPV6:
2946 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2947 		/* FALLTHROUGH */
2948 		filter->ip_addr_type =
2949 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2950 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2951 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2952 		rte_memcpy(filter->src_ipaddr,
2953 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2954 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2955 		rte_memcpy(filter->dst_ipaddr,
2956 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2957 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2958 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2959 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2960 		memset(filter->src_ipaddr_mask, 0xff, 16);
2961 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2962 		filter->ethertype = 0x86dd;
2963 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2964 		break;
2965 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2966 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2967 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2968 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2969 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2970 		filter->dst_port_mask = 0xffff;
2971 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2972 		filter->src_port_mask = 0xffff;
2973 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2974 		filter->ip_addr_type =
2975 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2976 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2977 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2978 		rte_memcpy(filter->src_ipaddr,
2979 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2980 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2981 		rte_memcpy(filter->dst_ipaddr,
2982 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2983 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2984 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2985 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2986 		memset(filter->src_ipaddr_mask, 0xff, 16);
2987 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2988 		filter->ethertype = 0x86dd;
2989 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2990 		break;
2991 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2992 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2993 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2994 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2995 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2996 		filter->dst_port_mask = 0xffff;
2997 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2998 		filter->src_port_mask = 0xffff;
2999 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3000 		filter->ip_addr_type =
3001 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3002 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3003 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3004 		rte_memcpy(filter->src_ipaddr,
3005 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
3006 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3007 		rte_memcpy(filter->dst_ipaddr,
3008 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3009 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3010 		memset(filter->dst_ipaddr_mask, 0xff, 16);
3011 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3012 		memset(filter->src_ipaddr_mask, 0xff, 16);
3013 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3014 		filter->ethertype = 0x86dd;
3015 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3016 		break;
3017 	case RTE_ETH_FLOW_L2_PAYLOAD:
3018 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3019 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3020 		break;
3021 	case RTE_ETH_FLOW_VXLAN:
3022 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3023 			return -EINVAL;
3024 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3025 		filter->tunnel_type =
3026 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3027 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3028 		break;
3029 	case RTE_ETH_FLOW_NVGRE:
3030 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3031 			return -EINVAL;
3032 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3033 		filter->tunnel_type =
3034 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3035 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3036 		break;
3037 	case RTE_ETH_FLOW_UNKNOWN:
3038 	case RTE_ETH_FLOW_RAW:
3039 	case RTE_ETH_FLOW_FRAG_IPV4:
3040 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3041 	case RTE_ETH_FLOW_FRAG_IPV6:
3042 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3043 	case RTE_ETH_FLOW_IPV6_EX:
3044 	case RTE_ETH_FLOW_IPV6_TCP_EX:
3045 	case RTE_ETH_FLOW_IPV6_UDP_EX:
3046 	case RTE_ETH_FLOW_GENEVE:
3047 		/* FALLTHROUGH */
3048 	default:
3049 		return -EINVAL;
3050 	}
3051 
3052 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3053 	vnic = &bp->vnic_info[fdir->action.rx_queue];
3054 	if (vnic == NULL) {
3055 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3056 		return -EINVAL;
3057 	}
3058 
3059 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3060 		rte_memcpy(filter->dst_macaddr,
3061 			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
3062 			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3063 	}
3064 
3065 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3066 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3067 		filter1 = STAILQ_FIRST(&vnic0->filter);
3068 		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
3069 	} else {
3070 		filter->dst_id = vnic->fw_vnic_id;
3071 		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
3072 			if (filter->dst_macaddr[i] == 0x00)
3073 				filter1 = STAILQ_FIRST(&vnic0->filter);
3074 			else
3075 				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
3076 	}
3077 
3078 	if (filter1 == NULL)
3079 		return -EINVAL;
3080 
3081 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3082 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3083 
3084 	filter->enables = en;
3085 
3086 	return 0;
3087 }
3088 
3089 static struct bnxt_filter_info *
3090 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3091 		struct bnxt_vnic_info **mvnic)
3092 {
3093 	struct bnxt_filter_info *mf = NULL;
3094 	int i;
3095 
3096 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
3097 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3098 
3099 		STAILQ_FOREACH(mf, &vnic->filter, next) {
3100 			if (mf->filter_type == nf->filter_type &&
3101 			    mf->flags == nf->flags &&
3102 			    mf->src_port == nf->src_port &&
3103 			    mf->src_port_mask == nf->src_port_mask &&
3104 			    mf->dst_port == nf->dst_port &&
3105 			    mf->dst_port_mask == nf->dst_port_mask &&
3106 			    mf->ip_protocol == nf->ip_protocol &&
3107 			    mf->ip_addr_type == nf->ip_addr_type &&
3108 			    mf->ethertype == nf->ethertype &&
3109 			    mf->vni == nf->vni &&
3110 			    mf->tunnel_type == nf->tunnel_type &&
3111 			    mf->l2_ovlan == nf->l2_ovlan &&
3112 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3113 			    mf->l2_ivlan == nf->l2_ivlan &&
3114 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3115 			    !memcmp(mf->l2_addr, nf->l2_addr,
3116 				    RTE_ETHER_ADDR_LEN) &&
3117 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3118 				    RTE_ETHER_ADDR_LEN) &&
3119 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
3120 				    RTE_ETHER_ADDR_LEN) &&
3121 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3122 				    RTE_ETHER_ADDR_LEN) &&
3123 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3124 				    sizeof(nf->src_ipaddr)) &&
3125 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3126 				    sizeof(nf->src_ipaddr_mask)) &&
3127 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3128 				    sizeof(nf->dst_ipaddr)) &&
3129 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3130 				    sizeof(nf->dst_ipaddr_mask))) {
3131 				if (mvnic)
3132 					*mvnic = vnic;
3133 				return mf;
3134 			}
3135 		}
3136 	}
3137 	return NULL;
3138 }
3139 
3140 static int
3141 bnxt_fdir_filter(struct rte_eth_dev *dev,
3142 		 enum rte_filter_op filter_op,
3143 		 void *arg)
3144 {
3145 	struct bnxt *bp = dev->data->dev_private;
3146 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3147 	struct bnxt_filter_info *filter, *match;
3148 	struct bnxt_vnic_info *vnic, *mvnic;
3149 	int ret = 0, i;
3150 
3151 	if (filter_op == RTE_ETH_FILTER_NOP)
3152 		return 0;
3153 
3154 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3155 		return -EINVAL;
3156 
3157 	switch (filter_op) {
3158 	case RTE_ETH_FILTER_ADD:
3159 	case RTE_ETH_FILTER_DELETE:
3160 		/* FALLTHROUGH */
3161 		filter = bnxt_get_unused_filter(bp);
3162 		if (filter == NULL) {
3163 			PMD_DRV_LOG(ERR,
3164 				"Not enough resources for a new flow.\n");
3165 			return -ENOMEM;
3166 		}
3167 
3168 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3169 		if (ret != 0)
3170 			goto free_filter;
3171 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3172 
3173 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3174 			vnic = &bp->vnic_info[0];
3175 		else
3176 			vnic = &bp->vnic_info[fdir->action.rx_queue];
3177 
3178 		match = bnxt_match_fdir(bp, filter, &mvnic);
3179 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3180 			if (match->dst_id == vnic->fw_vnic_id) {
3181 				PMD_DRV_LOG(ERR, "Flow already exists.\n");
3182 				ret = -EEXIST;
3183 				goto free_filter;
3184 			} else {
3185 				match->dst_id = vnic->fw_vnic_id;
3186 				ret = bnxt_hwrm_set_ntuple_filter(bp,
3187 								  match->dst_id,
3188 								  match);
3189 				STAILQ_REMOVE(&mvnic->filter, match,
3190 					      bnxt_filter_info, next);
3191 				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3192 				PMD_DRV_LOG(ERR,
3193 					"Filter with matching pattern exist\n");
3194 				PMD_DRV_LOG(ERR,
3195 					"Updated it to new destination q\n");
3196 				goto free_filter;
3197 			}
3198 		}
3199 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3200 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3201 			ret = -ENOENT;
3202 			goto free_filter;
3203 		}
3204 
3205 		if (filter_op == RTE_ETH_FILTER_ADD) {
3206 			ret = bnxt_hwrm_set_ntuple_filter(bp,
3207 							  filter->dst_id,
3208 							  filter);
3209 			if (ret)
3210 				goto free_filter;
3211 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3212 		} else {
3213 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3214 			STAILQ_REMOVE(&vnic->filter, match,
3215 				      bnxt_filter_info, next);
3216 			bnxt_free_filter(bp, match);
3217 			bnxt_free_filter(bp, filter);
3218 		}
3219 		break;
3220 	case RTE_ETH_FILTER_FLUSH:
3221 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
3222 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3223 
3224 			STAILQ_FOREACH(filter, &vnic->filter, next) {
3225 				if (filter->filter_type ==
3226 				    HWRM_CFA_NTUPLE_FILTER) {
3227 					ret =
3228 					bnxt_hwrm_clear_ntuple_filter(bp,
3229 								      filter);
3230 					STAILQ_REMOVE(&vnic->filter, filter,
3231 						      bnxt_filter_info, next);
3232 				}
3233 			}
3234 		}
3235 		return ret;
3236 	case RTE_ETH_FILTER_UPDATE:
3237 	case RTE_ETH_FILTER_STATS:
3238 	case RTE_ETH_FILTER_INFO:
3239 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3240 		break;
3241 	default:
3242 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3243 		ret = -EINVAL;
3244 		break;
3245 	}
3246 	return ret;
3247 
3248 free_filter:
3249 	bnxt_free_filter(bp, filter);
3250 	return ret;
3251 }
3252 
3253 static int
3254 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3255 		    enum rte_filter_type filter_type,
3256 		    enum rte_filter_op filter_op, void *arg)
3257 {
3258 	int ret = 0;
3259 
3260 	ret = is_bnxt_in_error(dev->data->dev_private);
3261 	if (ret)
3262 		return ret;
3263 
3264 	switch (filter_type) {
3265 	case RTE_ETH_FILTER_TUNNEL:
3266 		PMD_DRV_LOG(ERR,
3267 			"filter type: %d: To be implemented\n", filter_type);
3268 		break;
3269 	case RTE_ETH_FILTER_FDIR:
3270 		ret = bnxt_fdir_filter(dev, filter_op, arg);
3271 		break;
3272 	case RTE_ETH_FILTER_NTUPLE:
3273 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
3274 		break;
3275 	case RTE_ETH_FILTER_ETHERTYPE:
3276 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
3277 		break;
3278 	case RTE_ETH_FILTER_GENERIC:
3279 		if (filter_op != RTE_ETH_FILTER_GET)
3280 			return -EINVAL;
3281 		*(const void **)arg = &bnxt_flow_ops;
3282 		break;
3283 	default:
3284 		PMD_DRV_LOG(ERR,
3285 			"Filter type (%d) not supported", filter_type);
3286 		ret = -EINVAL;
3287 		break;
3288 	}
3289 	return ret;
3290 }
3291 
3292 static const uint32_t *
3293 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3294 {
3295 	static const uint32_t ptypes[] = {
3296 		RTE_PTYPE_L2_ETHER_VLAN,
3297 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3298 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3299 		RTE_PTYPE_L4_ICMP,
3300 		RTE_PTYPE_L4_TCP,
3301 		RTE_PTYPE_L4_UDP,
3302 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3303 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3304 		RTE_PTYPE_INNER_L4_ICMP,
3305 		RTE_PTYPE_INNER_L4_TCP,
3306 		RTE_PTYPE_INNER_L4_UDP,
3307 		RTE_PTYPE_UNKNOWN
3308 	};
3309 
3310 	if (!dev->rx_pkt_burst)
3311 		return NULL;
3312 
3313 	return ptypes;
3314 }
3315 
3316 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3317 			 int reg_win)
3318 {
3319 	uint32_t reg_base = *reg_arr & 0xfffff000;
3320 	uint32_t win_off;
3321 	int i;
3322 
3323 	for (i = 0; i < count; i++) {
3324 		if ((reg_arr[i] & 0xfffff000) != reg_base)
3325 			return -ERANGE;
3326 	}
3327 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3328 	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3329 	return 0;
3330 }
3331 
3332 static int bnxt_map_ptp_regs(struct bnxt *bp)
3333 {
3334 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3335 	uint32_t *reg_arr;
3336 	int rc, i;
3337 
3338 	reg_arr = ptp->rx_regs;
3339 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3340 	if (rc)
3341 		return rc;
3342 
3343 	reg_arr = ptp->tx_regs;
3344 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3345 	if (rc)
3346 		return rc;
3347 
3348 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3349 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3350 
3351 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3352 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3353 
3354 	return 0;
3355 }
3356 
3357 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3358 {
3359 	rte_write32(0, (uint8_t *)bp->bar0 +
3360 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3361 	rte_write32(0, (uint8_t *)bp->bar0 +
3362 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3363 }
3364 
3365 static uint64_t bnxt_cc_read(struct bnxt *bp)
3366 {
3367 	uint64_t ns;
3368 
3369 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3370 			      BNXT_GRCPF_REG_SYNC_TIME));
3371 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3372 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3373 	return ns;
3374 }
3375 
3376 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3377 {
3378 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3379 	uint32_t fifo;
3380 
3381 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3382 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3383 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3384 		return -EAGAIN;
3385 
3386 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3387 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3388 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3389 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3390 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3391 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3392 
3393 	return 0;
3394 }
3395 
3396 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3397 {
3398 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3399 	struct bnxt_pf_info *pf = &bp->pf;
3400 	uint16_t port_id;
3401 	uint32_t fifo;
3402 
3403 	if (!ptp)
3404 		return -ENODEV;
3405 
3406 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3407 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3408 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3409 		return -EAGAIN;
3410 
3411 	port_id = pf->port_id;
3412 	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3413 	       ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3414 
3415 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3416 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3417 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3418 /*		bnxt_clr_rx_ts(bp);	  TBD  */
3419 		return -EBUSY;
3420 	}
3421 
3422 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3423 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3424 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3425 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3426 
3427 	return 0;
3428 }
3429 
3430 static int
3431 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3432 {
3433 	uint64_t ns;
3434 	struct bnxt *bp = dev->data->dev_private;
3435 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3436 
3437 	if (!ptp)
3438 		return 0;
3439 
3440 	ns = rte_timespec_to_ns(ts);
3441 	/* Set the timecounters to a new value. */
3442 	ptp->tc.nsec = ns;
3443 
3444 	return 0;
3445 }
3446 
3447 static int
3448 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3449 {
3450 	struct bnxt *bp = dev->data->dev_private;
3451 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3452 	uint64_t ns, systime_cycles = 0;
3453 	int rc = 0;
3454 
3455 	if (!ptp)
3456 		return 0;
3457 
3458 	if (BNXT_CHIP_THOR(bp))
3459 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3460 					     &systime_cycles);
3461 	else
3462 		systime_cycles = bnxt_cc_read(bp);
3463 
3464 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3465 	*ts = rte_ns_to_timespec(ns);
3466 
3467 	return rc;
3468 }
3469 static int
3470 bnxt_timesync_enable(struct rte_eth_dev *dev)
3471 {
3472 	struct bnxt *bp = dev->data->dev_private;
3473 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3474 	uint32_t shift = 0;
3475 	int rc;
3476 
3477 	if (!ptp)
3478 		return 0;
3479 
3480 	ptp->rx_filter = 1;
3481 	ptp->tx_tstamp_en = 1;
3482 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3483 
3484 	rc = bnxt_hwrm_ptp_cfg(bp);
3485 	if (rc)
3486 		return rc;
3487 
3488 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3489 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3490 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3491 
3492 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3493 	ptp->tc.cc_shift = shift;
3494 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
3495 
3496 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3497 	ptp->rx_tstamp_tc.cc_shift = shift;
3498 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3499 
3500 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3501 	ptp->tx_tstamp_tc.cc_shift = shift;
3502 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3503 
3504 	if (!BNXT_CHIP_THOR(bp))
3505 		bnxt_map_ptp_regs(bp);
3506 
3507 	return 0;
3508 }
3509 
3510 static int
3511 bnxt_timesync_disable(struct rte_eth_dev *dev)
3512 {
3513 	struct bnxt *bp = dev->data->dev_private;
3514 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3515 
3516 	if (!ptp)
3517 		return 0;
3518 
3519 	ptp->rx_filter = 0;
3520 	ptp->tx_tstamp_en = 0;
3521 	ptp->rxctl = 0;
3522 
3523 	bnxt_hwrm_ptp_cfg(bp);
3524 
3525 	if (!BNXT_CHIP_THOR(bp))
3526 		bnxt_unmap_ptp_regs(bp);
3527 
3528 	return 0;
3529 }
3530 
3531 static int
3532 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3533 				 struct timespec *timestamp,
3534 				 uint32_t flags __rte_unused)
3535 {
3536 	struct bnxt *bp = dev->data->dev_private;
3537 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3538 	uint64_t rx_tstamp_cycles = 0;
3539 	uint64_t ns;
3540 
3541 	if (!ptp)
3542 		return 0;
3543 
3544 	if (BNXT_CHIP_THOR(bp))
3545 		rx_tstamp_cycles = ptp->rx_timestamp;
3546 	else
3547 		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3548 
3549 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3550 	*timestamp = rte_ns_to_timespec(ns);
3551 	return  0;
3552 }
3553 
3554 static int
3555 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3556 				 struct timespec *timestamp)
3557 {
3558 	struct bnxt *bp = dev->data->dev_private;
3559 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3560 	uint64_t tx_tstamp_cycles = 0;
3561 	uint64_t ns;
3562 	int rc = 0;
3563 
3564 	if (!ptp)
3565 		return 0;
3566 
3567 	if (BNXT_CHIP_THOR(bp))
3568 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3569 					     &tx_tstamp_cycles);
3570 	else
3571 		rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3572 
3573 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3574 	*timestamp = rte_ns_to_timespec(ns);
3575 
3576 	return rc;
3577 }
3578 
3579 static int
3580 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3581 {
3582 	struct bnxt *bp = dev->data->dev_private;
3583 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3584 
3585 	if (!ptp)
3586 		return 0;
3587 
3588 	ptp->tc.nsec += delta;
3589 
3590 	return 0;
3591 }
3592 
3593 static int
3594 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3595 {
3596 	struct bnxt *bp = dev->data->dev_private;
3597 	int rc;
3598 	uint32_t dir_entries;
3599 	uint32_t entry_length;
3600 
3601 	rc = is_bnxt_in_error(bp);
3602 	if (rc)
3603 		return rc;
3604 
3605 	PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3606 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3607 		    bp->pdev->addr.devid, bp->pdev->addr.function);
3608 
3609 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3610 	if (rc != 0)
3611 		return rc;
3612 
3613 	return dir_entries * entry_length;
3614 }
3615 
3616 static int
3617 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3618 		struct rte_dev_eeprom_info *in_eeprom)
3619 {
3620 	struct bnxt *bp = dev->data->dev_private;
3621 	uint32_t index;
3622 	uint32_t offset;
3623 	int rc;
3624 
3625 	rc = is_bnxt_in_error(bp);
3626 	if (rc)
3627 		return rc;
3628 
3629 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3630 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3631 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3632 		    in_eeprom->offset, in_eeprom->length);
3633 
3634 	if (in_eeprom->offset == 0) /* special offset value to get directory */
3635 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
3636 						in_eeprom->data);
3637 
3638 	index = in_eeprom->offset >> 24;
3639 	offset = in_eeprom->offset & 0xffffff;
3640 
3641 	if (index != 0)
3642 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
3643 					   in_eeprom->length, in_eeprom->data);
3644 
3645 	return 0;
3646 }
3647 
3648 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
3649 {
3650 	switch (dir_type) {
3651 	case BNX_DIR_TYPE_CHIMP_PATCH:
3652 	case BNX_DIR_TYPE_BOOTCODE:
3653 	case BNX_DIR_TYPE_BOOTCODE_2:
3654 	case BNX_DIR_TYPE_APE_FW:
3655 	case BNX_DIR_TYPE_APE_PATCH:
3656 	case BNX_DIR_TYPE_KONG_FW:
3657 	case BNX_DIR_TYPE_KONG_PATCH:
3658 	case BNX_DIR_TYPE_BONO_FW:
3659 	case BNX_DIR_TYPE_BONO_PATCH:
3660 		/* FALLTHROUGH */
3661 		return true;
3662 	}
3663 
3664 	return false;
3665 }
3666 
3667 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
3668 {
3669 	switch (dir_type) {
3670 	case BNX_DIR_TYPE_AVS:
3671 	case BNX_DIR_TYPE_EXP_ROM_MBA:
3672 	case BNX_DIR_TYPE_PCIE:
3673 	case BNX_DIR_TYPE_TSCF_UCODE:
3674 	case BNX_DIR_TYPE_EXT_PHY:
3675 	case BNX_DIR_TYPE_CCM:
3676 	case BNX_DIR_TYPE_ISCSI_BOOT:
3677 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3678 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3679 		/* FALLTHROUGH */
3680 		return true;
3681 	}
3682 
3683 	return false;
3684 }
3685 
3686 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
3687 {
3688 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3689 		bnxt_dir_type_is_other_exec_format(dir_type);
3690 }
3691 
3692 static int
3693 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
3694 		struct rte_dev_eeprom_info *in_eeprom)
3695 {
3696 	struct bnxt *bp = dev->data->dev_private;
3697 	uint8_t index, dir_op;
3698 	uint16_t type, ext, ordinal, attr;
3699 	int rc;
3700 
3701 	rc = is_bnxt_in_error(bp);
3702 	if (rc)
3703 		return rc;
3704 
3705 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3706 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3707 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3708 		    in_eeprom->offset, in_eeprom->length);
3709 
3710 	if (!BNXT_PF(bp)) {
3711 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
3712 		return -EINVAL;
3713 	}
3714 
3715 	type = in_eeprom->magic >> 16;
3716 
3717 	if (type == 0xffff) { /* special value for directory operations */
3718 		index = in_eeprom->magic & 0xff;
3719 		dir_op = in_eeprom->magic >> 8;
3720 		if (index == 0)
3721 			return -EINVAL;
3722 		switch (dir_op) {
3723 		case 0x0e: /* erase */
3724 			if (in_eeprom->offset != ~in_eeprom->magic)
3725 				return -EINVAL;
3726 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
3727 		default:
3728 			return -EINVAL;
3729 		}
3730 	}
3731 
3732 	/* Create or re-write an NVM item: */
3733 	if (bnxt_dir_type_is_executable(type) == true)
3734 		return -EOPNOTSUPP;
3735 	ext = in_eeprom->magic & 0xffff;
3736 	ordinal = in_eeprom->offset >> 16;
3737 	attr = in_eeprom->offset & 0xffff;
3738 
3739 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
3740 				     in_eeprom->data, in_eeprom->length);
3741 }
3742 
3743 /*
3744  * Initialization
3745  */
3746 
3747 static const struct eth_dev_ops bnxt_dev_ops = {
3748 	.dev_infos_get = bnxt_dev_info_get_op,
3749 	.dev_close = bnxt_dev_close_op,
3750 	.dev_configure = bnxt_dev_configure_op,
3751 	.dev_start = bnxt_dev_start_op,
3752 	.dev_stop = bnxt_dev_stop_op,
3753 	.dev_set_link_up = bnxt_dev_set_link_up_op,
3754 	.dev_set_link_down = bnxt_dev_set_link_down_op,
3755 	.stats_get = bnxt_stats_get_op,
3756 	.stats_reset = bnxt_stats_reset_op,
3757 	.rx_queue_setup = bnxt_rx_queue_setup_op,
3758 	.rx_queue_release = bnxt_rx_queue_release_op,
3759 	.tx_queue_setup = bnxt_tx_queue_setup_op,
3760 	.tx_queue_release = bnxt_tx_queue_release_op,
3761 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3762 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3763 	.reta_update = bnxt_reta_update_op,
3764 	.reta_query = bnxt_reta_query_op,
3765 	.rss_hash_update = bnxt_rss_hash_update_op,
3766 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3767 	.link_update = bnxt_link_update_op,
3768 	.promiscuous_enable = bnxt_promiscuous_enable_op,
3769 	.promiscuous_disable = bnxt_promiscuous_disable_op,
3770 	.allmulticast_enable = bnxt_allmulticast_enable_op,
3771 	.allmulticast_disable = bnxt_allmulticast_disable_op,
3772 	.mac_addr_add = bnxt_mac_addr_add_op,
3773 	.mac_addr_remove = bnxt_mac_addr_remove_op,
3774 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
3775 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
3776 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3777 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3778 	.vlan_filter_set = bnxt_vlan_filter_set_op,
3779 	.vlan_offload_set = bnxt_vlan_offload_set_op,
3780 	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
3781 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
3782 	.mtu_set = bnxt_mtu_set_op,
3783 	.mac_addr_set = bnxt_set_default_mac_addr_op,
3784 	.xstats_get = bnxt_dev_xstats_get_op,
3785 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
3786 	.xstats_reset = bnxt_dev_xstats_reset_op,
3787 	.fw_version_get = bnxt_fw_version_get,
3788 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3789 	.rxq_info_get = bnxt_rxq_info_get_op,
3790 	.txq_info_get = bnxt_txq_info_get_op,
3791 	.dev_led_on = bnxt_dev_led_on_op,
3792 	.dev_led_off = bnxt_dev_led_off_op,
3793 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3794 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3795 	.rx_queue_count = bnxt_rx_queue_count_op,
3796 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
3797 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
3798 	.rx_queue_start = bnxt_rx_queue_start,
3799 	.rx_queue_stop = bnxt_rx_queue_stop,
3800 	.tx_queue_start = bnxt_tx_queue_start,
3801 	.tx_queue_stop = bnxt_tx_queue_stop,
3802 	.filter_ctrl = bnxt_filter_ctrl_op,
3803 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3804 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
3805 	.get_eeprom           = bnxt_get_eeprom_op,
3806 	.set_eeprom           = bnxt_set_eeprom_op,
3807 	.timesync_enable      = bnxt_timesync_enable,
3808 	.timesync_disable     = bnxt_timesync_disable,
3809 	.timesync_read_time   = bnxt_timesync_read_time,
3810 	.timesync_write_time   = bnxt_timesync_write_time,
3811 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3812 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3813 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3814 };
3815 
3816 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
3817 {
3818 	uint32_t offset;
3819 
3820 	/* Only pre-map the reset GRC registers using window 3 */
3821 	rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
3822 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
3823 
3824 	offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
3825 
3826 	return offset;
3827 }
3828 
3829 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
3830 {
3831 	struct bnxt_error_recovery_info *info = bp->recovery_info;
3832 	uint32_t reg_base = 0xffffffff;
3833 	int i;
3834 
3835 	/* Only pre-map the monitoring GRC registers using window 2 */
3836 	for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
3837 		uint32_t reg = info->status_regs[i];
3838 
3839 		if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
3840 			continue;
3841 
3842 		if (reg_base == 0xffffffff)
3843 			reg_base = reg & 0xfffff000;
3844 		if ((reg & 0xfffff000) != reg_base)
3845 			return -ERANGE;
3846 
3847 		/* Use mask 0xffc as the Lower 2 bits indicates
3848 		 * address space location
3849 		 */
3850 		info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
3851 						(reg & 0xffc);
3852 	}
3853 
3854 	if (reg_base == 0xffffffff)
3855 		return 0;
3856 
3857 	rte_write32(reg_base, (uint8_t *)bp->bar0 +
3858 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
3859 
3860 	return 0;
3861 }
3862 
3863 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
3864 {
3865 	struct bnxt_error_recovery_info *info = bp->recovery_info;
3866 	uint32_t delay = info->delay_after_reset[index];
3867 	uint32_t val = info->reset_reg_val[index];
3868 	uint32_t reg = info->reset_reg[index];
3869 	uint32_t type, offset;
3870 
3871 	type = BNXT_FW_STATUS_REG_TYPE(reg);
3872 	offset = BNXT_FW_STATUS_REG_OFF(reg);
3873 
3874 	switch (type) {
3875 	case BNXT_FW_STATUS_REG_TYPE_CFG:
3876 		rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
3877 		break;
3878 	case BNXT_FW_STATUS_REG_TYPE_GRC:
3879 		offset = bnxt_map_reset_regs(bp, offset);
3880 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
3881 		break;
3882 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
3883 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
3884 		break;
3885 	}
3886 	/* wait on a specific interval of time until core reset is complete */
3887 	if (delay)
3888 		rte_delay_ms(delay);
3889 }
3890 
3891 static void bnxt_dev_cleanup(struct bnxt *bp)
3892 {
3893 	bnxt_set_hwrm_link_config(bp, false);
3894 	bp->link_info.link_up = 0;
3895 	if (bp->eth_dev->data->dev_started)
3896 		bnxt_dev_stop_op(bp->eth_dev);
3897 
3898 	bnxt_uninit_resources(bp, true);
3899 }
3900 
3901 static int bnxt_restore_vlan_filters(struct bnxt *bp)
3902 {
3903 	struct rte_eth_dev *dev = bp->eth_dev;
3904 	struct rte_vlan_filter_conf *vfc;
3905 	int vidx, vbit, rc;
3906 	uint16_t vlan_id;
3907 
3908 	for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
3909 		vfc = &dev->data->vlan_filter_conf;
3910 		vidx = vlan_id / 64;
3911 		vbit = vlan_id % 64;
3912 
3913 		/* Each bit corresponds to a VLAN id */
3914 		if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
3915 			rc = bnxt_add_vlan_filter(bp, vlan_id);
3916 			if (rc)
3917 				return rc;
3918 		}
3919 	}
3920 
3921 	return 0;
3922 }
3923 
3924 static int bnxt_restore_mac_filters(struct bnxt *bp)
3925 {
3926 	struct rte_eth_dev *dev = bp->eth_dev;
3927 	struct rte_eth_dev_info dev_info;
3928 	struct rte_ether_addr *addr;
3929 	uint64_t pool_mask;
3930 	uint32_t pool = 0;
3931 	uint16_t i;
3932 	int rc;
3933 
3934 	if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp))
3935 		return 0;
3936 
3937 	rc = bnxt_dev_info_get_op(dev, &dev_info);
3938 	if (rc)
3939 		return rc;
3940 
3941 	/* replay MAC address configuration */
3942 	for (i = 1; i < dev_info.max_mac_addrs; i++) {
3943 		addr = &dev->data->mac_addrs[i];
3944 
3945 		/* skip zero address */
3946 		if (rte_is_zero_ether_addr(addr))
3947 			continue;
3948 
3949 		pool = 0;
3950 		pool_mask = dev->data->mac_pool_sel[i];
3951 
3952 		do {
3953 			if (pool_mask & 1ULL) {
3954 				rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
3955 				if (rc)
3956 					return rc;
3957 			}
3958 			pool_mask >>= 1;
3959 			pool++;
3960 		} while (pool_mask);
3961 	}
3962 
3963 	return 0;
3964 }
3965 
3966 static int bnxt_restore_filters(struct bnxt *bp)
3967 {
3968 	struct rte_eth_dev *dev = bp->eth_dev;
3969 	int ret = 0;
3970 
3971 	if (dev->data->all_multicast) {
3972 		ret = bnxt_allmulticast_enable_op(dev);
3973 		if (ret)
3974 			return ret;
3975 	}
3976 	if (dev->data->promiscuous) {
3977 		ret = bnxt_promiscuous_enable_op(dev);
3978 		if (ret)
3979 			return ret;
3980 	}
3981 
3982 	ret = bnxt_restore_mac_filters(bp);
3983 	if (ret)
3984 		return ret;
3985 
3986 	ret = bnxt_restore_vlan_filters(bp);
3987 	/* TODO restore other filters as well */
3988 	return ret;
3989 }
3990 
3991 static void bnxt_dev_recover(void *arg)
3992 {
3993 	struct bnxt *bp = arg;
3994 	int timeout = bp->fw_reset_max_msecs;
3995 	int rc = 0;
3996 
3997 	/* Clear Error flag so that device re-init should happen */
3998 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
3999 
4000 	do {
4001 		rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4002 		if (rc == 0)
4003 			break;
4004 		rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4005 		timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4006 	} while (rc && timeout);
4007 
4008 	if (rc) {
4009 		PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
4010 		goto err;
4011 	}
4012 
4013 	rc = bnxt_init_resources(bp, true);
4014 	if (rc) {
4015 		PMD_DRV_LOG(ERR,
4016 			    "Failed to initialize resources after reset\n");
4017 		goto err;
4018 	}
4019 	/* clear reset flag as the device is initialized now */
4020 	bp->flags &= ~BNXT_FLAG_FW_RESET;
4021 
4022 	rc = bnxt_dev_start_op(bp->eth_dev);
4023 	if (rc) {
4024 		PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4025 		goto err_start;
4026 	}
4027 
4028 	rc = bnxt_restore_filters(bp);
4029 	if (rc)
4030 		goto err_start;
4031 
4032 	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4033 	return;
4034 err_start:
4035 	bnxt_dev_stop_op(bp->eth_dev);
4036 err:
4037 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4038 	bnxt_uninit_resources(bp, false);
4039 	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4040 }
4041 
4042 void bnxt_dev_reset_and_resume(void *arg)
4043 {
4044 	struct bnxt *bp = arg;
4045 	int rc;
4046 
4047 	bnxt_dev_cleanup(bp);
4048 
4049 	bnxt_wait_for_device_shutdown(bp);
4050 
4051 	rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4052 			       bnxt_dev_recover, (void *)bp);
4053 	if (rc)
4054 		PMD_DRV_LOG(ERR, "Error setting recovery alarm");
4055 }
4056 
4057 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4058 {
4059 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4060 	uint32_t reg = info->status_regs[index];
4061 	uint32_t type, offset, val = 0;
4062 
4063 	type = BNXT_FW_STATUS_REG_TYPE(reg);
4064 	offset = BNXT_FW_STATUS_REG_OFF(reg);
4065 
4066 	switch (type) {
4067 	case BNXT_FW_STATUS_REG_TYPE_CFG:
4068 		rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4069 		break;
4070 	case BNXT_FW_STATUS_REG_TYPE_GRC:
4071 		offset = info->mapped_status_regs[index];
4072 		/* FALLTHROUGH */
4073 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
4074 		val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4075 				       offset));
4076 		break;
4077 	}
4078 
4079 	return val;
4080 }
4081 
4082 static int bnxt_fw_reset_all(struct bnxt *bp)
4083 {
4084 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4085 	uint32_t i;
4086 	int rc = 0;
4087 
4088 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4089 		/* Reset through master function driver */
4090 		for (i = 0; i < info->reg_array_cnt; i++)
4091 			bnxt_write_fw_reset_reg(bp, i);
4092 		/* Wait for time specified by FW after triggering reset */
4093 		rte_delay_ms(info->master_func_wait_period_after_reset);
4094 	} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4095 		/* Reset with the help of Kong processor */
4096 		rc = bnxt_hwrm_fw_reset(bp);
4097 		if (rc)
4098 			PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4099 	}
4100 
4101 	return rc;
4102 }
4103 
4104 static void bnxt_fw_reset_cb(void *arg)
4105 {
4106 	struct bnxt *bp = arg;
4107 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4108 	int rc = 0;
4109 
4110 	/* Only Master function can do FW reset */
4111 	if (bnxt_is_master_func(bp) &&
4112 	    bnxt_is_recovery_enabled(bp)) {
4113 		rc = bnxt_fw_reset_all(bp);
4114 		if (rc) {
4115 			PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4116 			return;
4117 		}
4118 	}
4119 
4120 	/* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send
4121 	 * EXCEPTION_FATAL_ASYNC event to all the functions
4122 	 * (including MASTER FUNC). After receiving this Async, all the active
4123 	 * drivers should treat this case as FW initiated recovery
4124 	 */
4125 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4126 		bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4127 		bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4128 
4129 		/* To recover from error */
4130 		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4131 				  (void *)bp);
4132 	}
4133 }
4134 
4135 /* Driver should poll FW heartbeat, reset_counter with the frequency
4136  * advertised by FW in HWRM_ERROR_RECOVERY_QCFG.
4137  * When the driver detects heartbeat stop or change in reset_counter,
4138  * it has to trigger a reset to recover from the error condition.
4139  * A “master PF” is the function who will have the privilege to
4140  * initiate the chimp reset. The master PF will be elected by the
4141  * firmware and will be notified through async message.
4142  */
4143 static void bnxt_check_fw_health(void *arg)
4144 {
4145 	struct bnxt *bp = arg;
4146 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4147 	uint32_t val = 0, wait_msec;
4148 
4149 	if (!info || !bnxt_is_recovery_enabled(bp) ||
4150 	    is_bnxt_in_error(bp))
4151 		return;
4152 
4153 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4154 	if (val == info->last_heart_beat)
4155 		goto reset;
4156 
4157 	info->last_heart_beat = val;
4158 
4159 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4160 	if (val != info->last_reset_counter)
4161 		goto reset;
4162 
4163 	info->last_reset_counter = val;
4164 
4165 	rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4166 			  bnxt_check_fw_health, (void *)bp);
4167 
4168 	return;
4169 reset:
4170 	/* Stop DMA to/from device */
4171 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4172 	bp->flags |= BNXT_FLAG_FW_RESET;
4173 
4174 	PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4175 
4176 	if (bnxt_is_master_func(bp))
4177 		wait_msec = info->master_func_wait_period;
4178 	else
4179 		wait_msec = info->normal_func_wait_period;
4180 
4181 	rte_eal_alarm_set(US_PER_MS * wait_msec,
4182 			  bnxt_fw_reset_cb, (void *)bp);
4183 }
4184 
4185 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4186 {
4187 	uint32_t polling_freq;
4188 
4189 	if (!bnxt_is_recovery_enabled(bp))
4190 		return;
4191 
4192 	if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4193 		return;
4194 
4195 	polling_freq = bp->recovery_info->driver_polling_freq;
4196 
4197 	rte_eal_alarm_set(US_PER_MS * polling_freq,
4198 			  bnxt_check_fw_health, (void *)bp);
4199 	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4200 }
4201 
4202 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4203 {
4204 	if (!bnxt_is_recovery_enabled(bp))
4205 		return;
4206 
4207 	rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4208 	bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4209 }
4210 
4211 static bool bnxt_vf_pciid(uint16_t device_id)
4212 {
4213 	switch (device_id) {
4214 	case BROADCOM_DEV_ID_57304_VF:
4215 	case BROADCOM_DEV_ID_57406_VF:
4216 	case BROADCOM_DEV_ID_5731X_VF:
4217 	case BROADCOM_DEV_ID_5741X_VF:
4218 	case BROADCOM_DEV_ID_57414_VF:
4219 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4220 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4221 	case BROADCOM_DEV_ID_58802_VF:
4222 	case BROADCOM_DEV_ID_57500_VF1:
4223 	case BROADCOM_DEV_ID_57500_VF2:
4224 		/* FALLTHROUGH */
4225 		return true;
4226 	default:
4227 		return false;
4228 	}
4229 }
4230 
4231 static bool bnxt_thor_device(uint16_t device_id)
4232 {
4233 	switch (device_id) {
4234 	case BROADCOM_DEV_ID_57508:
4235 	case BROADCOM_DEV_ID_57504:
4236 	case BROADCOM_DEV_ID_57502:
4237 	case BROADCOM_DEV_ID_57508_MF1:
4238 	case BROADCOM_DEV_ID_57504_MF1:
4239 	case BROADCOM_DEV_ID_57502_MF1:
4240 	case BROADCOM_DEV_ID_57508_MF2:
4241 	case BROADCOM_DEV_ID_57504_MF2:
4242 	case BROADCOM_DEV_ID_57502_MF2:
4243 	case BROADCOM_DEV_ID_57500_VF1:
4244 	case BROADCOM_DEV_ID_57500_VF2:
4245 		/* FALLTHROUGH */
4246 		return true;
4247 	default:
4248 		return false;
4249 	}
4250 }
4251 
4252 bool bnxt_stratus_device(struct bnxt *bp)
4253 {
4254 	uint16_t device_id = bp->pdev->id.device_id;
4255 
4256 	switch (device_id) {
4257 	case BROADCOM_DEV_ID_STRATUS_NIC:
4258 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4259 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4260 		/* FALLTHROUGH */
4261 		return true;
4262 	default:
4263 		return false;
4264 	}
4265 }
4266 
4267 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4268 {
4269 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4270 	struct bnxt *bp = eth_dev->data->dev_private;
4271 
4272 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
4273 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4274 	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4275 	if (!bp->bar0 || !bp->doorbell_base) {
4276 		PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4277 		return -ENODEV;
4278 	}
4279 
4280 	bp->eth_dev = eth_dev;
4281 	bp->pdev = pci_dev;
4282 
4283 	return 0;
4284 }
4285 
4286 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4287 				  struct bnxt_ctx_pg_info *ctx_pg,
4288 				  uint32_t mem_size,
4289 				  const char *suffix,
4290 				  uint16_t idx)
4291 {
4292 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4293 	const struct rte_memzone *mz = NULL;
4294 	char mz_name[RTE_MEMZONE_NAMESIZE];
4295 	rte_iova_t mz_phys_addr;
4296 	uint64_t valid_bits = 0;
4297 	uint32_t sz;
4298 	int i;
4299 
4300 	if (!mem_size)
4301 		return 0;
4302 
4303 	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4304 			 BNXT_PAGE_SIZE;
4305 	rmem->page_size = BNXT_PAGE_SIZE;
4306 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
4307 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
4308 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4309 
4310 	valid_bits = PTU_PTE_VALID;
4311 
4312 	if (rmem->nr_pages > 1) {
4313 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4314 			 "bnxt_ctx_pg_tbl%s_%x_%d",
4315 			 suffix, idx, bp->eth_dev->data->port_id);
4316 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4317 		mz = rte_memzone_lookup(mz_name);
4318 		if (!mz) {
4319 			mz = rte_memzone_reserve_aligned(mz_name,
4320 						rmem->nr_pages * 8,
4321 						SOCKET_ID_ANY,
4322 						RTE_MEMZONE_2MB |
4323 						RTE_MEMZONE_SIZE_HINT_ONLY |
4324 						RTE_MEMZONE_IOVA_CONTIG,
4325 						BNXT_PAGE_SIZE);
4326 			if (mz == NULL)
4327 				return -ENOMEM;
4328 		}
4329 
4330 		memset(mz->addr, 0, mz->len);
4331 		mz_phys_addr = mz->iova;
4332 
4333 		rmem->pg_tbl = mz->addr;
4334 		rmem->pg_tbl_map = mz_phys_addr;
4335 		rmem->pg_tbl_mz = mz;
4336 	}
4337 
4338 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4339 		 suffix, idx, bp->eth_dev->data->port_id);
4340 	mz = rte_memzone_lookup(mz_name);
4341 	if (!mz) {
4342 		mz = rte_memzone_reserve_aligned(mz_name,
4343 						 mem_size,
4344 						 SOCKET_ID_ANY,
4345 						 RTE_MEMZONE_1GB |
4346 						 RTE_MEMZONE_SIZE_HINT_ONLY |
4347 						 RTE_MEMZONE_IOVA_CONTIG,
4348 						 BNXT_PAGE_SIZE);
4349 		if (mz == NULL)
4350 			return -ENOMEM;
4351 	}
4352 
4353 	memset(mz->addr, 0, mz->len);
4354 	mz_phys_addr = mz->iova;
4355 
4356 	for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4357 		rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4358 		rmem->dma_arr[i] = mz_phys_addr + sz;
4359 
4360 		if (rmem->nr_pages > 1) {
4361 			if (i == rmem->nr_pages - 2 &&
4362 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4363 				valid_bits |= PTU_PTE_NEXT_TO_LAST;
4364 			else if (i == rmem->nr_pages - 1 &&
4365 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4366 				valid_bits |= PTU_PTE_LAST;
4367 
4368 			rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4369 							   valid_bits);
4370 		}
4371 	}
4372 
4373 	rmem->mz = mz;
4374 	if (rmem->vmem_size)
4375 		rmem->vmem = (void **)mz->addr;
4376 	rmem->dma_arr[0] = mz_phys_addr;
4377 	return 0;
4378 }
4379 
4380 static void bnxt_free_ctx_mem(struct bnxt *bp)
4381 {
4382 	int i;
4383 
4384 	if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4385 		return;
4386 
4387 	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4388 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4389 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4390 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4391 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4392 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4393 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4394 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4395 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4396 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4397 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4398 
4399 	for (i = 0; i < BNXT_MAX_Q; i++) {
4400 		if (bp->ctx->tqm_mem[i])
4401 			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4402 	}
4403 
4404 	rte_free(bp->ctx);
4405 	bp->ctx = NULL;
4406 }
4407 
4408 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4409 
4410 #define min_t(type, x, y) ({                    \
4411 	type __min1 = (x);                      \
4412 	type __min2 = (y);                      \
4413 	__min1 < __min2 ? __min1 : __min2; })
4414 
4415 #define max_t(type, x, y) ({                    \
4416 	type __max1 = (x);                      \
4417 	type __max2 = (y);                      \
4418 	__max1 > __max2 ? __max1 : __max2; })
4419 
4420 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4421 
4422 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4423 {
4424 	struct bnxt_ctx_pg_info *ctx_pg;
4425 	struct bnxt_ctx_mem_info *ctx;
4426 	uint32_t mem_size, ena, entries;
4427 	int i, rc;
4428 
4429 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4430 	if (rc) {
4431 		PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4432 		return rc;
4433 	}
4434 	ctx = bp->ctx;
4435 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4436 		return 0;
4437 
4438 	ctx_pg = &ctx->qp_mem;
4439 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4440 	mem_size = ctx->qp_entry_size * ctx_pg->entries;
4441 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4442 	if (rc)
4443 		return rc;
4444 
4445 	ctx_pg = &ctx->srq_mem;
4446 	ctx_pg->entries = ctx->srq_max_l2_entries;
4447 	mem_size = ctx->srq_entry_size * ctx_pg->entries;
4448 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4449 	if (rc)
4450 		return rc;
4451 
4452 	ctx_pg = &ctx->cq_mem;
4453 	ctx_pg->entries = ctx->cq_max_l2_entries;
4454 	mem_size = ctx->cq_entry_size * ctx_pg->entries;
4455 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4456 	if (rc)
4457 		return rc;
4458 
4459 	ctx_pg = &ctx->vnic_mem;
4460 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
4461 		ctx->vnic_max_ring_table_entries;
4462 	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4463 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4464 	if (rc)
4465 		return rc;
4466 
4467 	ctx_pg = &ctx->stat_mem;
4468 	ctx_pg->entries = ctx->stat_max_entries;
4469 	mem_size = ctx->stat_entry_size * ctx_pg->entries;
4470 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4471 	if (rc)
4472 		return rc;
4473 
4474 	entries = ctx->qp_max_l2_entries +
4475 		  ctx->vnic_max_vnic_entries +
4476 		  ctx->tqm_min_entries_per_ring;
4477 	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4478 	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
4479 			  ctx->tqm_max_entries_per_ring);
4480 	for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
4481 		ctx_pg = ctx->tqm_mem[i];
4482 		/* use min tqm entries for now. */
4483 		ctx_pg->entries = entries;
4484 		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4485 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4486 		if (rc)
4487 			return rc;
4488 		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4489 	}
4490 
4491 	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4492 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4493 	if (rc)
4494 		PMD_DRV_LOG(ERR,
4495 			    "Failed to configure context mem: rc = %d\n", rc);
4496 	else
4497 		ctx->flags |= BNXT_CTX_FLAG_INITED;
4498 
4499 	return rc;
4500 }
4501 
4502 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4503 {
4504 	struct rte_pci_device *pci_dev = bp->pdev;
4505 	char mz_name[RTE_MEMZONE_NAMESIZE];
4506 	const struct rte_memzone *mz = NULL;
4507 	uint32_t total_alloc_len;
4508 	rte_iova_t mz_phys_addr;
4509 
4510 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4511 		return 0;
4512 
4513 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4514 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4515 		 pci_dev->addr.bus, pci_dev->addr.devid,
4516 		 pci_dev->addr.function, "rx_port_stats");
4517 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4518 	mz = rte_memzone_lookup(mz_name);
4519 	total_alloc_len =
4520 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4521 				       sizeof(struct rx_port_stats_ext) + 512);
4522 	if (!mz) {
4523 		mz = rte_memzone_reserve(mz_name, total_alloc_len,
4524 					 SOCKET_ID_ANY,
4525 					 RTE_MEMZONE_2MB |
4526 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4527 					 RTE_MEMZONE_IOVA_CONTIG);
4528 		if (mz == NULL)
4529 			return -ENOMEM;
4530 	}
4531 	memset(mz->addr, 0, mz->len);
4532 	mz_phys_addr = mz->iova;
4533 
4534 	bp->rx_mem_zone = (const void *)mz;
4535 	bp->hw_rx_port_stats = mz->addr;
4536 	bp->hw_rx_port_stats_map = mz_phys_addr;
4537 
4538 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4539 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4540 		 pci_dev->addr.bus, pci_dev->addr.devid,
4541 		 pci_dev->addr.function, "tx_port_stats");
4542 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4543 	mz = rte_memzone_lookup(mz_name);
4544 	total_alloc_len =
4545 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4546 				       sizeof(struct tx_port_stats_ext) + 512);
4547 	if (!mz) {
4548 		mz = rte_memzone_reserve(mz_name,
4549 					 total_alloc_len,
4550 					 SOCKET_ID_ANY,
4551 					 RTE_MEMZONE_2MB |
4552 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4553 					 RTE_MEMZONE_IOVA_CONTIG);
4554 		if (mz == NULL)
4555 			return -ENOMEM;
4556 	}
4557 	memset(mz->addr, 0, mz->len);
4558 	mz_phys_addr = mz->iova;
4559 
4560 	bp->tx_mem_zone = (const void *)mz;
4561 	bp->hw_tx_port_stats = mz->addr;
4562 	bp->hw_tx_port_stats_map = mz_phys_addr;
4563 	bp->flags |= BNXT_FLAG_PORT_STATS;
4564 
4565 	/* Display extended statistics if FW supports it */
4566 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4567 	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4568 	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4569 		return 0;
4570 
4571 	bp->hw_rx_port_stats_ext = (void *)
4572 		((uint8_t *)bp->hw_rx_port_stats +
4573 		 sizeof(struct rx_port_stats));
4574 	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4575 		sizeof(struct rx_port_stats);
4576 	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4577 
4578 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4579 	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4580 		bp->hw_tx_port_stats_ext = (void *)
4581 			((uint8_t *)bp->hw_tx_port_stats +
4582 			 sizeof(struct tx_port_stats));
4583 		bp->hw_tx_port_stats_ext_map =
4584 			bp->hw_tx_port_stats_map +
4585 			sizeof(struct tx_port_stats);
4586 		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4587 	}
4588 
4589 	return 0;
4590 }
4591 
4592 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4593 {
4594 	struct bnxt *bp = eth_dev->data->dev_private;
4595 	int rc = 0;
4596 
4597 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4598 					       RTE_ETHER_ADDR_LEN *
4599 					       bp->max_l2_ctx,
4600 					       0);
4601 	if (eth_dev->data->mac_addrs == NULL) {
4602 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4603 		return -ENOMEM;
4604 	}
4605 
4606 	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
4607 		if (BNXT_PF(bp))
4608 			return -EINVAL;
4609 
4610 		/* Generate a random MAC address, if none was assigned by PF */
4611 		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4612 		bnxt_eth_hw_addr_random(bp->mac_addr);
4613 		PMD_DRV_LOG(INFO,
4614 			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4615 			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4616 			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4617 
4618 		rc = bnxt_hwrm_set_mac(bp);
4619 		if (!rc)
4620 			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
4621 			       RTE_ETHER_ADDR_LEN);
4622 		return rc;
4623 	}
4624 
4625 	/* Copy the permanent MAC from the FUNC_QCAPS response */
4626 	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
4627 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
4628 
4629 	return rc;
4630 }
4631 
4632 static int bnxt_restore_dflt_mac(struct bnxt *bp)
4633 {
4634 	int rc = 0;
4635 
4636 	/* MAC is already configured in FW */
4637 	if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
4638 		return 0;
4639 
4640 	/* Restore the old MAC configured */
4641 	rc = bnxt_hwrm_set_mac(bp);
4642 	if (rc)
4643 		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
4644 
4645 	return rc;
4646 }
4647 
4648 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
4649 {
4650 	if (!BNXT_PF(bp))
4651 		return;
4652 
4653 #define ALLOW_FUNC(x)	\
4654 	{ \
4655 		uint32_t arg = (x); \
4656 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
4657 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
4658 	}
4659 
4660 	/* Forward all requests if firmware is new enough */
4661 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
4662 	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
4663 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
4664 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
4665 	} else {
4666 		PMD_DRV_LOG(WARNING,
4667 			    "Firmware too old for VF mailbox functionality\n");
4668 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
4669 	}
4670 
4671 	/*
4672 	 * The following are used for driver cleanup. If we disallow these,
4673 	 * VF drivers can't clean up cleanly.
4674 	 */
4675 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
4676 	ALLOW_FUNC(HWRM_VNIC_FREE);
4677 	ALLOW_FUNC(HWRM_RING_FREE);
4678 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
4679 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
4680 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
4681 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
4682 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
4683 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
4684 }
4685 
4686 static int bnxt_init_fw(struct bnxt *bp)
4687 {
4688 	uint16_t mtu;
4689 	int rc = 0;
4690 
4691 	bp->fw_cap = 0;
4692 
4693 	rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
4694 	if (rc)
4695 		return rc;
4696 
4697 	rc = bnxt_hwrm_func_reset(bp);
4698 	if (rc)
4699 		return -EIO;
4700 
4701 	rc = bnxt_hwrm_vnic_qcaps(bp);
4702 	if (rc)
4703 		return rc;
4704 
4705 	rc = bnxt_hwrm_queue_qportcfg(bp);
4706 	if (rc)
4707 		return rc;
4708 
4709 	/* Get the MAX capabilities for this function.
4710 	 * This function also allocates context memory for TQM rings and
4711 	 * informs the firmware about this allocated backing store memory.
4712 	 */
4713 	rc = bnxt_hwrm_func_qcaps(bp);
4714 	if (rc)
4715 		return rc;
4716 
4717 	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
4718 	if (rc)
4719 		return rc;
4720 
4721 	rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
4722 	if (rc)
4723 		return rc;
4724 
4725 	/* Get the adapter error recovery support info */
4726 	rc = bnxt_hwrm_error_recovery_qcfg(bp);
4727 	if (rc)
4728 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4729 
4730 	bnxt_hwrm_port_led_qcaps(bp);
4731 
4732 	return 0;
4733 }
4734 
4735 static int
4736 bnxt_init_locks(struct bnxt *bp)
4737 {
4738 	int err;
4739 
4740 	err = pthread_mutex_init(&bp->flow_lock, NULL);
4741 	if (err) {
4742 		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
4743 		return err;
4744 	}
4745 
4746 	err = pthread_mutex_init(&bp->def_cp_lock, NULL);
4747 	if (err)
4748 		PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
4749 	return err;
4750 }
4751 
4752 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
4753 {
4754 	int rc;
4755 
4756 	rc = bnxt_init_fw(bp);
4757 	if (rc)
4758 		return rc;
4759 
4760 	if (!reconfig_dev) {
4761 		rc = bnxt_setup_mac_addr(bp->eth_dev);
4762 		if (rc)
4763 			return rc;
4764 	} else {
4765 		rc = bnxt_restore_dflt_mac(bp);
4766 		if (rc)
4767 			return rc;
4768 	}
4769 
4770 	bnxt_config_vf_req_fwd(bp);
4771 
4772 	rc = bnxt_hwrm_func_driver_register(bp);
4773 	if (rc) {
4774 		PMD_DRV_LOG(ERR, "Failed to register driver");
4775 		return -EBUSY;
4776 	}
4777 
4778 	if (BNXT_PF(bp)) {
4779 		if (bp->pdev->max_vfs) {
4780 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
4781 			if (rc) {
4782 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
4783 				return rc;
4784 			}
4785 		} else {
4786 			rc = bnxt_hwrm_allocate_pf_only(bp);
4787 			if (rc) {
4788 				PMD_DRV_LOG(ERR,
4789 					    "Failed to allocate PF resources");
4790 				return rc;
4791 			}
4792 		}
4793 	}
4794 
4795 	rc = bnxt_alloc_mem(bp, reconfig_dev);
4796 	if (rc)
4797 		return rc;
4798 
4799 	rc = bnxt_setup_int(bp);
4800 	if (rc)
4801 		return rc;
4802 
4803 	rc = bnxt_request_int(bp);
4804 	if (rc)
4805 		return rc;
4806 
4807 	rc = bnxt_init_locks(bp);
4808 	if (rc)
4809 		return rc;
4810 
4811 	return 0;
4812 }
4813 
4814 static int
4815 bnxt_dev_init(struct rte_eth_dev *eth_dev)
4816 {
4817 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4818 	static int version_printed;
4819 	struct bnxt *bp;
4820 	int rc;
4821 
4822 	if (version_printed++ == 0)
4823 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
4824 
4825 	eth_dev->dev_ops = &bnxt_dev_ops;
4826 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
4827 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
4828 
4829 	/*
4830 	 * For secondary processes, we don't initialise any further
4831 	 * as primary has already done this work.
4832 	 */
4833 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4834 		return 0;
4835 
4836 	rte_eth_copy_pci_info(eth_dev, pci_dev);
4837 
4838 	bp = eth_dev->data->dev_private;
4839 
4840 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
4841 
4842 	if (bnxt_vf_pciid(pci_dev->id.device_id))
4843 		bp->flags |= BNXT_FLAG_VF;
4844 
4845 	if (bnxt_thor_device(pci_dev->id.device_id))
4846 		bp->flags |= BNXT_FLAG_THOR_CHIP;
4847 
4848 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
4849 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
4850 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
4851 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
4852 		bp->flags |= BNXT_FLAG_STINGRAY;
4853 
4854 	rc = bnxt_init_board(eth_dev);
4855 	if (rc) {
4856 		PMD_DRV_LOG(ERR,
4857 			    "Failed to initialize board rc: %x\n", rc);
4858 		return rc;
4859 	}
4860 
4861 	rc = bnxt_alloc_hwrm_resources(bp);
4862 	if (rc) {
4863 		PMD_DRV_LOG(ERR,
4864 			    "Failed to allocate hwrm resource rc: %x\n", rc);
4865 		goto error_free;
4866 	}
4867 	rc = bnxt_init_resources(bp, false);
4868 	if (rc)
4869 		goto error_free;
4870 
4871 	rc = bnxt_alloc_stats_mem(bp);
4872 	if (rc)
4873 		goto error_free;
4874 
4875 	/* Pass the information to the rte_eth_dev_close() that it should also
4876 	 * release the private port resources.
4877 	 */
4878 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
4879 
4880 	PMD_DRV_LOG(INFO,
4881 		    DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
4882 		    pci_dev->mem_resource[0].phys_addr,
4883 		    pci_dev->mem_resource[0].addr);
4884 
4885 	return 0;
4886 
4887 error_free:
4888 	bnxt_dev_uninit(eth_dev);
4889 	return rc;
4890 }
4891 
4892 static void
4893 bnxt_uninit_locks(struct bnxt *bp)
4894 {
4895 	pthread_mutex_destroy(&bp->flow_lock);
4896 	pthread_mutex_destroy(&bp->def_cp_lock);
4897 }
4898 
4899 static int
4900 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
4901 {
4902 	int rc;
4903 
4904 	bnxt_free_int(bp);
4905 	bnxt_free_mem(bp, reconfig_dev);
4906 	bnxt_hwrm_func_buf_unrgtr(bp);
4907 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
4908 	bp->flags &= ~BNXT_FLAG_REGISTERED;
4909 	bnxt_free_ctx_mem(bp);
4910 	if (!reconfig_dev) {
4911 		bnxt_free_hwrm_resources(bp);
4912 
4913 		if (bp->recovery_info != NULL) {
4914 			rte_free(bp->recovery_info);
4915 			bp->recovery_info = NULL;
4916 		}
4917 	}
4918 
4919 	bnxt_uninit_locks(bp);
4920 	rte_free(bp->ptp_cfg);
4921 	bp->ptp_cfg = NULL;
4922 	return rc;
4923 }
4924 
4925 static int
4926 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
4927 {
4928 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4929 		return -EPERM;
4930 
4931 	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
4932 
4933 	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
4934 		bnxt_dev_close_op(eth_dev);
4935 
4936 	return 0;
4937 }
4938 
4939 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4940 	struct rte_pci_device *pci_dev)
4941 {
4942 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
4943 		bnxt_dev_init);
4944 }
4945 
4946 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
4947 {
4948 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4949 		return rte_eth_dev_pci_generic_remove(pci_dev,
4950 				bnxt_dev_uninit);
4951 	else
4952 		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
4953 }
4954 
4955 static struct rte_pci_driver bnxt_rte_pmd = {
4956 	.id_table = bnxt_pci_id_map,
4957 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
4958 	.probe = bnxt_pci_probe,
4959 	.remove = bnxt_pci_remove,
4960 };
4961 
4962 static bool
4963 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4964 {
4965 	if (strcmp(dev->device->driver->name, drv->driver.name))
4966 		return false;
4967 
4968 	return true;
4969 }
4970 
4971 bool is_bnxt_supported(struct rte_eth_dev *dev)
4972 {
4973 	return is_device_supported(dev, &bnxt_rte_pmd);
4974 }
4975 
4976 RTE_INIT(bnxt_init_log)
4977 {
4978 	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
4979 	if (bnxt_logtype_driver >= 0)
4980 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
4981 }
4982 
4983 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
4984 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
4985 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
4986