xref: /dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision 2f8cc21f0943e1fde68b38e09f9bf238d0eb7811)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
int bnxt_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_CKSUM | \
				     DEV_TX_OFFLOAD_UDP_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_TSO | \
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
				     DEV_TX_OFFLOAD_QINQ_INSERT | \
				     DEV_TX_OFFLOAD_MULTI_SEGS)

#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
				     DEV_RX_OFFLOAD_VLAN_STRIP | \
				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_UDP_CKSUM | \
				     DEV_RX_OFFLOAD_TCP_CKSUM | \
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
				     DEV_RX_OFFLOAD_KEEP_CRC | \
				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
				     DEV_RX_OFFLOAD_TCP_LRO | \
				     DEV_RX_OFFLOAD_SCATTER | \
				     DEV_RX_OFFLOAD_RSS_HASH)

#define BNXT_DEVARG_TRUFLOW	"host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT	"flow-xstat"
static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_TRUFLOW,
	BNXT_DEVARG_FLOW_XSTAT,
	NULL
};

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define	BNXT_DEVARG_TRUFLOW_INVALID(truflow)	((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define	BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
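
/*
 * Illustrative example (assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64, as
 * defined in bnxt.h at the time of writing): a Thor adapter with 100 Rx
 * rings needs ceil(100 / 64) = 2 RSS contexts, and the hash table reported
 * below then spans 2 * 64 = 128 entries.
 */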

static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a firmware update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}

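/*
 * Bring up a single VNIC: allocate its ring group table and the HW VNIC,
 * optionally allocate RSS contexts (RSS mode only), then program the VLAN
 * stripping, L2 filter, per-queue membership, RSS and LRO settings via
 * HWRM calls.
 */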
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
	else
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_in_tbl.dma,
				&bp->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->rx_fc_in_tbl.dma),
		    bp->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_out_tbl.dma,
				&bp->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->rx_fc_out_tbl.dma),
		    bp->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_in_tbl.dma,
				&bp->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->tx_fc_in_tbl.dma),
		    bp->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_out_tbl.dma,
				&bp->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->tx_fc_out_tbl.dma),
		    bp->tx_fc_out_tbl.ctx_id);

	memset(bp->rx_fc_out_tbl.va, 0, bp->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->rx_fc_out_tbl.ctx_id,
				       bp->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->tx_fc_out_tbl.va, 0, bp->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->tx_fc_out_tbl.ctx_id,
				       bp->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc(type, size, 0);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}
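
/*
 * Illustrative sizing (assuming the firmware reports max_fc == 1024 flow
 * counters): each *_fc_in table above holds 1024 * 4 B = 4 KiB of counter
 * IDs, while each *_fc_out table holds 1024 * 16 B = 16 KiB, i.e. an 8 B
 * packet count plus an 8 B byte count per counter.
 */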

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}

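/*
 * One-time chip initialization done at device start: allocate stat
 * contexts, rings and ring groups, configure CoS queues and VNICs,
 * program the L2 Rx mask, wire up the Rx queue interrupt vectors and
 * bring the link up.
 */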
static int bnxt_init_chip(struct bnxt *bp)
{
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_setup_one_vnic(bp, i);
		if (rc)
			goto err_out;
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
					bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec\n", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_disable;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			 intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
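		/*
		 * Map each Rx queue to an eventfd vector: vectors are
		 * handed out sequentially starting at BNXT_RX_VEC_START,
		 * and once the nb_efd vectors are exhausted the remaining
		 * queues share the last one.
		 */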
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] =
							vec + BNXT_RX_VEC_START;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_free;
#endif

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_free;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM link config failure rc: %x\n", rc);
			goto err_free;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_free:
	rte_free(intr_handle->intr_vec);
err_disable:
	rte_intr_efd_disable(intr_handle);
err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

static uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = bp->link_info.support_speeds;
	uint32_t speed_capa = 0;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;

	if (bp->link_info.auto_mode == HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;
	else
		speed_capa |= ETH_LINK_SPEED_AUTONEG;

	return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = BNXT_MAX_RINGS(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
		.rx_drop_en = 1,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
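	/*
	 * Search for the largest supportable pool count: walk pool sizes
	 * 64/32/16/8 against queue counts 128/64/32/16/8 and pick the first
	 * (vpool, vrxq) pair that fits within max_vnics and max_rx_queues;
	 * report 0/0 if even the smallest combination does not fit.
	 */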
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Validate the requested queue configuration against resource limits */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
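		/*
		 * Illustrative arithmetic: with max_rx_pkt_len == 9018, a
		 * 14 B Ethernet header, 4 B CRC and two 4 B VLAN tags, the
		 * derived MTU is 9018 - 14 - 4 - 8 = 8992.
		 */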
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}
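
/*
 * Illustrative example: with a mempool data room of 2048 B and the default
 * RTE_PKTMBUF_HEADROOM of 128 B, buf_size is 1920 B, so any max_rx_pkt_len
 * above 1920 (e.g. a 9000 B jumbo frame) forces scattered receive.
 */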

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode receive can be enabled only if scattered rx is not
	 * in use and the enabled rx offloads are limited to the subset
	 * checked below (VLAN strip, keep CRC, jumbo frame, checksums,
	 * RSS hash and VLAN filter).
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.rxmode.offloads &
	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH |
		DEV_RX_OFFLOAD_VLAN_FILTER)) &&
	    !bp->truflow) {
		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode transmit can be enabled only when scattered rx is
	 * not in use and no tx offloads are enabled.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !eth_dev->data->dev_conf.txmode.offloads) {
		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since the fw has undergone a reset and lost all contexts,
	 * set the fatal flag so that no hwrm commands are issued
	 * during cleanup.
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}

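	/*
	 * Register the driver's state change with the firmware. -EAGAIN
	 * means the firmware is still mid-reset, so retry up to
	 * BNXT_IF_CHANGE_RETRY_COUNT times with a short delay between
	 * attempts before giving up.
	 */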
	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
	eth_dev->data->dev_started = 1;

	bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	pthread_mutex_lock(&bp->def_cp_lock);
	bnxt_schedule_fw_health_check(bp);
	pthread_mutex_unlock(&bp->def_cp_lock);

	if (bp->truflow)
		bnxt_ulp_init(bp);

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_hwrm_if_change(bp, false);
	eth_dev->data->dev_started = 0;
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info.link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (bp->truflow)
		bnxt_ulp_deinit(bp);

	eth_dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	bnxt_cancel_fw_health_check(bp);

	bnxt_dev_set_link_down_op(eth_dev);

	/* Wait for link to be reset and the async notification to process.
	 * During reset recovery, there is no need to wait and
	 * VF/NPAR functions do not have privilege to change PHY config.
	 */
	if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
		bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (eth_dev->data->dev_started)
		bnxt_dev_stop_op(eth_dev);

	bnxt_uninit_resources(bp, false);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	rte_free(bp->pf.vf_info);
	bp->pf.vf_info = NULL;

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
						bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already existed for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter() copies the default MAC to filter->l2_addr.
	 * If a different MAC is being programmed now, copy that address
	 * to filter->l2_addr instead.
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
		     bool exp_link_status)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
		  BNXT_LINK_DOWN_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status == exp_link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_eth_linkstatus_set(eth_dev, &new);

		_rte_eth_dev_callback_process(eth_dev,
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL);

		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
			       int wait_to_complete)
{
	return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
}

static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

/* Return bnxt_rx_queue pointer corresponding to a given queue id. */
static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
{
	if (qid >= bp->rx_nr_rings)
		return NULL;

	return bp->eth_dev->data->rx_queues[qid];
}

/* Return the queue id corresponding to a given rss table ring/group ID. */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	if (!BNXT_HAS_RING_GRPS(bp)) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
				return rxq->index;
		}
	} else {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			if (bp->grp_info[i].fw_grp_id == fwr)
				return i;
		}
	}

	return INVALID_HW_RING_ID;
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft;
	int i, rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (!vnic->rss_table)
		return -EINVAL;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		struct bnxt_rx_queue *rxq;

		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (!(reta_conf[idx].mask & (1ULL << sft)))
			continue;

		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
		if (!rxq) {
			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
			return -EINVAL;
		}

		if (BNXT_CHIP_THOR(bp)) {
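			/*
			 * Thor RSS table entries are stored in pairs: the
			 * Rx ring FW ID followed by its completion ring FW
			 * ID, hence the i * 2 indexing.
			 */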
1588 			vnic->rss_table[i * 2] =
1589 				rxq->rx_ring->rx_ring_struct->fw_ring_id;
1590 			vnic->rss_table[i * 2 + 1] =
1591 				rxq->cp_ring->cp_ring_struct->fw_ring_id;
1592 		} else {
1593 			vnic->rss_table[i] =
1594 			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1595 		}
1596 	}
1597 
1598 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1599 	return 0;
1600 }
1601 
1602 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1603 			      struct rte_eth_rss_reta_entry64 *reta_conf,
1604 			      uint16_t reta_size)
1605 {
1606 	struct bnxt *bp = eth_dev->data->dev_private;
1607 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1608 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1609 	uint16_t idx, sft, i;
1610 	int rc;
1611 
1612 	rc = is_bnxt_in_error(bp);
1613 	if (rc)
1614 		return rc;
1615 
1616 	/* Retrieve from the default VNIC */
1617 	if (!vnic)
1618 		return -EINVAL;
1619 	if (!vnic->rss_table)
1620 		return -EINVAL;
1621 
1622 	if (reta_size != tbl_size) {
1623 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1624 			"(%d) must equal the size supported by the hardware "
1625 			"(%d)\n", reta_size, tbl_size);
1626 		return -EINVAL;
1627 	}
1628 
1629 	for (idx = 0, i = 0; i < reta_size; i++) {
1630 		idx = i / RTE_RETA_GROUP_SIZE;
1631 		sft = i % RTE_RETA_GROUP_SIZE;
1632 
1633 		if (reta_conf[idx].mask & (1ULL << sft)) {
1634 			uint16_t qid;
1635 
1636 			if (BNXT_CHIP_THOR(bp))
1637 				qid = bnxt_rss_to_qid(bp,
1638 						      vnic->rss_table[i * 2]);
1639 			else
1640 				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1641 
1642 			if (qid == INVALID_HW_RING_ID) {
1643 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1644 				return -EINVAL;
1645 			}
1646 			reta_conf[idx].reta[sft] = qid;
1647 		}
1648 	}
1649 
1650 	return 0;
1651 }
1652 
1653 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1654 				   struct rte_eth_rss_conf *rss_conf)
1655 {
1656 	struct bnxt *bp = eth_dev->data->dev_private;
1657 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1658 	struct bnxt_vnic_info *vnic;
1659 	int rc;
1660 
1661 	rc = is_bnxt_in_error(bp);
1662 	if (rc)
1663 		return rc;
1664 
1665 	/*
1666 	 * If RSS enablement were different than dev_configure,
1667 	 * then return -EINVAL
1668 	 */
1669 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1670 		if (!rss_conf->rss_hf)
1671 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
1672 	} else {
1673 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1674 			return -EINVAL;
1675 	}
1676 
1677 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
1678 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
1679 
1680 	/* Update the default RSS VNIC(s) */
1681 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1682 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1683 
1684 	/*
1685 	 * If hashkey is not specified, use the previously configured
1686 	 * hashkey
1687 	 */
1688 	if (!rss_conf->rss_key)
1689 		goto rss_config;
1690 
1691 	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1692 		PMD_DRV_LOG(ERR,
1693 			    "Invalid hashkey length, should be 16 bytes\n");
1694 		return -EINVAL;
1695 	}
1696 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1697 
1698 rss_config:
1699 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1700 	return 0;
1701 }
1702 
1703 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1704 				     struct rte_eth_rss_conf *rss_conf)
1705 {
1706 	struct bnxt *bp = eth_dev->data->dev_private;
1707 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1708 	int len, rc;
1709 	uint32_t hash_types;
1710 
1711 	rc = is_bnxt_in_error(bp);
1712 	if (rc)
1713 		return rc;
1714 
1715 	/* RSS configuration is the same for all VNICs */
1716 	if (vnic && vnic->rss_hash_key) {
1717 		if (rss_conf->rss_key) {
1718 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1719 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1720 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1721 		}
1722 
1723 		hash_types = vnic->hash_type;
1724 		rss_conf->rss_hf = 0;
1725 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1726 			rss_conf->rss_hf |= ETH_RSS_IPV4;
1727 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1728 		}
1729 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1730 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1731 			hash_types &=
1732 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1733 		}
1734 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1735 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1736 			hash_types &=
1737 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1738 		}
1739 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1740 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1741 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1742 		}
1743 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1744 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1745 			hash_types &=
1746 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1747 		}
1748 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1749 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1750 			hash_types &=
1751 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1752 		}
1753 		if (hash_types) {
1754 			PMD_DRV_LOG(ERR,
1755 				"Unknown RSS config from firmware (%08x), RSS disabled",
1756 				vnic->hash_type);
1757 			return -ENOTSUP;
1758 		}
1759 	} else {
1760 		rss_conf->rss_hf = 0;
1761 	}
1762 	return 0;
1763 }
1764 
1765 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1766 			       struct rte_eth_fc_conf *fc_conf)
1767 {
1768 	struct bnxt *bp = dev->data->dev_private;
1769 	struct rte_eth_link link_info;
1770 	int rc;
1771 
1772 	rc = is_bnxt_in_error(bp);
1773 	if (rc)
1774 		return rc;
1775 
1776 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1777 	if (rc)
1778 		return rc;
1779 
1780 	memset(fc_conf, 0, sizeof(*fc_conf));
1781 	if (bp->link_info.auto_pause)
1782 		fc_conf->autoneg = 1;
1783 	switch (bp->link_info.pause) {
1784 	case 0:
1785 		fc_conf->mode = RTE_FC_NONE;
1786 		break;
1787 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1788 		fc_conf->mode = RTE_FC_TX_PAUSE;
1789 		break;
1790 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1791 		fc_conf->mode = RTE_FC_RX_PAUSE;
1792 		break;
1793 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1794 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1795 		fc_conf->mode = RTE_FC_FULL;
1796 		break;
1797 	}
1798 	return 0;
1799 }
1800 
1801 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1802 			       struct rte_eth_fc_conf *fc_conf)
1803 {
1804 	struct bnxt *bp = dev->data->dev_private;
1805 	int rc;
1806 
1807 	rc = is_bnxt_in_error(bp);
1808 	if (rc)
1809 		return rc;
1810 
1811 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1812 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1813 		return -ENOTSUP;
1814 	}
1815 
1816 	switch (fc_conf->mode) {
1817 	case RTE_FC_NONE:
1818 		bp->link_info.auto_pause = 0;
1819 		bp->link_info.force_pause = 0;
1820 		break;
1821 	case RTE_FC_RX_PAUSE:
1822 		if (fc_conf->autoneg) {
1823 			bp->link_info.auto_pause =
1824 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1825 			bp->link_info.force_pause = 0;
1826 		} else {
1827 			bp->link_info.auto_pause = 0;
1828 			bp->link_info.force_pause =
1829 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1830 		}
1831 		break;
1832 	case RTE_FC_TX_PAUSE:
1833 		if (fc_conf->autoneg) {
1834 			bp->link_info.auto_pause =
1835 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1836 			bp->link_info.force_pause = 0;
1837 		} else {
1838 			bp->link_info.auto_pause = 0;
1839 			bp->link_info.force_pause =
1840 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1841 		}
1842 		break;
1843 	case RTE_FC_FULL:
1844 		if (fc_conf->autoneg) {
1845 			bp->link_info.auto_pause =
1846 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1847 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1848 			bp->link_info.force_pause = 0;
1849 		} else {
1850 			bp->link_info.auto_pause = 0;
1851 			bp->link_info.force_pause =
1852 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1853 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1854 		}
1855 		break;
1856 	}
1857 	return bnxt_set_hwrm_link_config(bp, true);
1858 }
1859 
/*
 * Add a UDP tunnel port (VXLAN or Geneve). Only one port per tunnel
 * type is supported; adding the same port again only bumps the
 * reference count.
 */
1861 static int
1862 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1863 			 struct rte_eth_udp_tunnel *udp_tunnel)
1864 {
1865 	struct bnxt *bp = eth_dev->data->dev_private;
1866 	uint16_t tunnel_type = 0;
1867 	int rc = 0;
1868 
1869 	rc = is_bnxt_in_error(bp);
1870 	if (rc)
1871 		return rc;
1872 
1873 	switch (udp_tunnel->prot_type) {
1874 	case RTE_TUNNEL_TYPE_VXLAN:
1875 		if (bp->vxlan_port_cnt) {
1876 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1877 				udp_tunnel->udp_port);
1878 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1879 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1880 				return -ENOSPC;
1881 			}
1882 			bp->vxlan_port_cnt++;
1883 			return 0;
1884 		}
1885 		tunnel_type =
1886 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1887 		bp->vxlan_port_cnt++;
1888 		break;
1889 	case RTE_TUNNEL_TYPE_GENEVE:
1890 		if (bp->geneve_port_cnt) {
1891 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1892 				udp_tunnel->udp_port);
1893 			if (bp->geneve_port != udp_tunnel->udp_port) {
1894 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1895 				return -ENOSPC;
1896 			}
1897 			bp->geneve_port_cnt++;
1898 			return 0;
1899 		}
1900 		tunnel_type =
1901 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1902 		bp->geneve_port_cnt++;
1903 		break;
1904 	default:
1905 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1906 		return -ENOTSUP;
1907 	}
1908 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1909 					     tunnel_type);
1910 	return rc;
1911 }
1912 
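/*
 * Delete a UDP tunnel port. The port is reference counted; the HWRM
 * free command is issued only when the last reference is dropped.
 */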
1913 static int
1914 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1915 			 struct rte_eth_udp_tunnel *udp_tunnel)
1916 {
1917 	struct bnxt *bp = eth_dev->data->dev_private;
1918 	uint16_t tunnel_type = 0;
1919 	uint16_t port = 0;
1920 	int rc = 0;
1921 
1922 	rc = is_bnxt_in_error(bp);
1923 	if (rc)
1924 		return rc;
1925 
1926 	switch (udp_tunnel->prot_type) {
1927 	case RTE_TUNNEL_TYPE_VXLAN:
1928 		if (!bp->vxlan_port_cnt) {
1929 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1930 			return -EINVAL;
1931 		}
1932 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1933 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1934 				udp_tunnel->udp_port, bp->vxlan_port);
1935 			return -EINVAL;
1936 		}
1937 		if (--bp->vxlan_port_cnt)
1938 			return 0;
1939 
1940 		tunnel_type =
1941 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1942 		port = bp->vxlan_fw_dst_port_id;
1943 		break;
1944 	case RTE_TUNNEL_TYPE_GENEVE:
1945 		if (!bp->geneve_port_cnt) {
1946 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1947 			return -EINVAL;
1948 		}
1949 		if (bp->geneve_port != udp_tunnel->udp_port) {
1950 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1951 				udp_tunnel->udp_port, bp->geneve_port);
1952 			return -EINVAL;
1953 		}
1954 		if (--bp->geneve_port_cnt)
1955 			return 0;
1956 
1957 		tunnel_type =
1958 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1959 		port = bp->geneve_fw_dst_port_id;
1960 		break;
1961 	default:
1962 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1963 		return -ENOTSUP;
1964 	}
1965 
1966 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1967 	if (!rc) {
1968 		if (tunnel_type ==
1969 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1970 			bp->vxlan_port = 0;
1971 		if (tunnel_type ==
1972 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1973 			bp->geneve_port = 0;
1974 	}
1975 	return rc;
1976 }
1977 
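/*
 * Remove the MAC+VLAN filter matching the given VLAN ID from the
 * default VNIC: clear it in hardware, then unlink and free it.
 */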
1978 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1979 {
1980 	struct bnxt_filter_info *filter;
1981 	struct bnxt_vnic_info *vnic;
1982 	int rc = 0;
1983 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1984 
1985 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1986 	filter = STAILQ_FIRST(&vnic->filter);
1987 	while (filter) {
1988 		/* Search for this matching MAC+VLAN filter */
1989 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
1990 			/* Delete the filter */
1991 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1992 			if (rc)
1993 				return rc;
1994 			STAILQ_REMOVE(&vnic->filter, filter,
1995 				      bnxt_filter_info, next);
1996 			bnxt_free_filter(bp, filter);
1997 			PMD_DRV_LOG(INFO,
1998 				    "Deleted vlan filter for %d\n",
1999 				    vlan_id);
2000 			return 0;
2001 		}
2002 		filter = STAILQ_NEXT(filter, next);
2003 	}
2004 	return -ENOENT;
2005 }
2006 
2007 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2008 {
2009 	struct bnxt_filter_info *filter;
2010 	struct bnxt_vnic_info *vnic;
2011 	int rc = 0;
2012 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2013 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2014 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2015 
	/* Implementation notes on the use of VNIC in this command:
	 *
	 * By default, these filters belong to the default VNIC for the
	 * function. Once they are set up, only the destination VNIC can
	 * be modified. If the destination VNIC is not specified in this
	 * command, the HWRM shall only create an L2 context id.
	 */
2023 
2024 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2025 	filter = STAILQ_FIRST(&vnic->filter);
2026 	/* Check if the VLAN has already been added */
2027 	while (filter) {
2028 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2029 			return -EEXIST;
2030 
2031 		filter = STAILQ_NEXT(filter, next);
2032 	}
2033 
2034 	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2035 	 * command to create MAC+VLAN filter with the right flags, enables set.
2036 	 */
2037 	filter = bnxt_alloc_filter(bp);
2038 	if (!filter) {
2039 		PMD_DRV_LOG(ERR,
2040 			    "MAC/VLAN filter alloc failed\n");
2041 		return -ENOMEM;
2042 	}
	/* MAC + VLAN ID filter:
	 * If l2_ivlan == 0 and l2_ivlan_mask != 0, only untagged
	 * packets are received.
	 *
	 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged packets
	 * and packets tagged with the programmed VLAN are received.
	 */
2050 	filter->l2_ivlan = vlan_id;
2051 	filter->l2_ivlan_mask = 0x0FFF;
2052 	filter->enables |= en;
2053 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2054 
2055 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2056 	if (rc) {
2057 		/* Free the newly allocated filter as we were
2058 		 * not able to create the filter in hardware.
2059 		 */
2060 		bnxt_free_filter(bp, filter);
2061 		return rc;
2062 	}
2063 
2064 	filter->mac_index = 0;
2065 	/* Add this new filter to the list */
2066 	if (vlan_id == 0)
2067 		STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2068 	else
2069 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2070 
2071 	PMD_DRV_LOG(INFO,
2072 		    "Added Vlan filter for %d\n", vlan_id);
2073 	return rc;
2074 }
2075 
2076 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2077 		uint16_t vlan_id, int on)
2078 {
2079 	struct bnxt *bp = eth_dev->data->dev_private;
2080 	int rc;
2081 
2082 	rc = is_bnxt_in_error(bp);
2083 	if (rc)
2084 		return rc;
2085 
2086 	if (!eth_dev->data->dev_started) {
2087 		PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2088 		return -EINVAL;
2089 	}
2090 
2091 	/* These operations apply to ALL existing MAC/VLAN filters */
2092 	if (on)
2093 		return bnxt_add_vlan_filter(bp, vlan_id);
2094 	else
2095 		return bnxt_del_vlan_filter(bp, vlan_id);
2096 }
2097 
2098 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2099 				    struct bnxt_vnic_info *vnic)
2100 {
2101 	struct bnxt_filter_info *filter;
2102 	int rc;
2103 
2104 	filter = STAILQ_FIRST(&vnic->filter);
2105 	while (filter) {
2106 		if (filter->mac_index == 0 &&
2107 		    !memcmp(filter->l2_addr, bp->mac_addr,
2108 			    RTE_ETHER_ADDR_LEN)) {
2109 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2110 			if (!rc) {
2111 				STAILQ_REMOVE(&vnic->filter, filter,
2112 					      bnxt_filter_info, next);
2113 				bnxt_free_filter(bp, filter);
2114 			}
2115 			return rc;
2116 		}
2117 		filter = STAILQ_NEXT(filter, next);
2118 	}
2119 	return 0;
2120 }
2121 
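/*
 * Enable or disable VLAN filtering according to the requested Rx
 * offloads. With filtering off, all programmed VLAN filters are
 * removed and the plain MAC filter is restored. With filtering on,
 * the default MAC filter is replaced by a MAC+VLAN filter for VLAN 0
 * so that only untagged traffic and explicitly added VLANs pass.
 */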
2122 static int
2123 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2124 {
2125 	struct bnxt_vnic_info *vnic;
2126 	unsigned int i;
2127 	int rc;
2128 
2129 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2130 	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2131 		/* Remove any VLAN filters programmed */
2132 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2133 			bnxt_del_vlan_filter(bp, i);
2134 
2135 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2136 		if (rc)
2137 			return rc;
	} else {
		/* The default filter allows any packet that matches the
		 * destination MAC. It has to be deleted; otherwise, with
		 * hw-vlan-filter enabled, we would end up receiving VLAN
		 * packets for which no filter is programmed.
		 */
2145 		bnxt_del_dflt_mac_filter(bp, vnic);
2146 		/* This filter will allow only untagged packets */
2147 		bnxt_add_vlan_filter(bp, 0);
2148 	}
2149 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2150 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2151 
2152 	return 0;
2153 }
2154 
2155 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2156 {
2157 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2158 	unsigned int i;
2159 	int rc;
2160 
2161 	/* Destroy vnic filters and vnic */
2162 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2163 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
2164 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2165 			bnxt_del_vlan_filter(bp, i);
2166 	}
2167 	bnxt_del_dflt_mac_filter(bp, vnic);
2168 
2169 	rc = bnxt_hwrm_vnic_free(bp, vnic);
2170 	if (rc)
2171 		return rc;
2172 
2173 	rte_free(vnic->fw_grp_ids);
2174 	vnic->fw_grp_ids = NULL;
2175 
2176 	vnic->rx_queue_cnt = 0;
2177 
2178 	return 0;
2179 }
2180 
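/*
 * Reconfigure VLAN stripping. The strip setting is applied when the
 * VNIC is configured, so the default VNIC is destroyed, recreated and
 * its filters restored.
 */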
2181 static int
2182 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2183 {
2184 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2185 	int rc;
2186 
2187 	/* Destroy, recreate and reconfigure the default vnic */
2188 	rc = bnxt_free_one_vnic(bp, 0);
2189 	if (rc)
2190 		return rc;
2191 
2192 	/* default vnic 0 */
2193 	rc = bnxt_setup_one_vnic(bp, 0);
2194 	if (rc)
2195 		return rc;
2196 
2197 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2198 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
2199 		rc = bnxt_add_vlan_filter(bp, 0);
2200 		if (rc)
2201 			return rc;
2202 		rc = bnxt_restore_vlan_filters(bp);
2203 		if (rc)
2204 			return rc;
2205 	} else {
2206 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2207 		if (rc)
2208 			return rc;
2209 	}
2210 
2211 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2212 	if (rc)
2213 		return rc;
2214 
2215 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2216 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2217 
2218 	return rc;
2219 }
2220 
2221 static int
2222 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2223 {
2224 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2225 	struct bnxt *bp = dev->data->dev_private;
2226 	int rc;
2227 
2228 	rc = is_bnxt_in_error(bp);
2229 	if (rc)
2230 		return rc;
2231 
2232 	/* Filter settings will get applied when port is started */
2233 	if (!dev->data->dev_started)
2234 		return 0;
2235 
2236 	if (mask & ETH_VLAN_FILTER_MASK) {
2237 		/* Enable or disable VLAN filtering */
2238 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2239 		if (rc)
2240 			return rc;
2241 	}
2242 
2243 	if (mask & ETH_VLAN_STRIP_MASK) {
2244 		/* Enable or disable VLAN stripping */
2245 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2246 		if (rc)
2247 			return rc;
2248 	}
2249 
2250 	if (mask & ETH_VLAN_EXTEND_MASK) {
2251 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2252 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2253 		else
2254 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2255 	}
2256 
2257 	return 0;
2258 }
2259 
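/*
 * Select the outer TPID used for QinQ. The chosen TPID is encoded into
 * the CFA metadata of the Tx buffer descriptor (outer_tpid_bd); only
 * the outer VLAN can be accelerated.
 */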
2260 static int
2261 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2262 		      uint16_t tpid)
2263 {
2264 	struct bnxt *bp = dev->data->dev_private;
2265 	int qinq = dev->data->dev_conf.rxmode.offloads &
2266 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
2267 
2268 	if (vlan_type != ETH_VLAN_TYPE_INNER &&
2269 	    vlan_type != ETH_VLAN_TYPE_OUTER) {
2270 		PMD_DRV_LOG(ERR,
			    "Unsupported VLAN type.\n");
2272 		return -EINVAL;
2273 	}
2274 	if (!qinq) {
2275 		PMD_DRV_LOG(ERR,
			    "QinQ not enabled. It must be enabled since only "
			    "the outer VLAN can be accelerated\n");
2278 		return -EINVAL;
2279 	}
2280 
2281 	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		switch (tpid) {
		case RTE_ETHER_TYPE_QINQ:
			bp->outer_tpid_bd =
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
			break;
		case RTE_ETHER_TYPE_VLAN:
			bp->outer_tpid_bd =
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
			break;
		case 0x9100:
			bp->outer_tpid_bd =
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
			break;
		case 0x9200:
			bp->outer_tpid_bd =
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
			break;
		case 0x9300:
			bp->outer_tpid_bd =
				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
			break;
		default:
			PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
			return -EINVAL;
		}
2307 		bp->outer_tpid_bd |= tpid;
2308 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2309 	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2310 		PMD_DRV_LOG(ERR,
2311 			    "Can accelerate only outer vlan in QinQ\n");
2312 		return -EINVAL;
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 static int
2319 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2320 			     struct rte_ether_addr *addr)
2321 {
2322 	struct bnxt *bp = dev->data->dev_private;
2323 	/* Default Filter is tied to VNIC 0 */
2324 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2325 	int rc;
2326 
2327 	rc = is_bnxt_in_error(bp);
2328 	if (rc)
2329 		return rc;
2330 
2331 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2332 		return -EPERM;
2333 
2334 	if (rte_is_zero_ether_addr(addr))
2335 		return -EINVAL;
2336 
2337 	/* Filter settings will get applied when port is started */
2338 	if (!dev->data->dev_started)
2339 		return 0;
2340 
2341 	/* Check if the requested MAC is already added */
2342 	if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2343 		return 0;
2344 
2345 	/* Destroy filter and re-create it */
2346 	bnxt_del_dflt_mac_filter(bp, vnic);
2347 
2348 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2349 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2350 		/* This filter will allow only untagged packets */
2351 		rc = bnxt_add_vlan_filter(bp, 0);
2352 	} else {
2353 		rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2354 	}
2355 
2356 	PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2357 	return rc;
2358 }
2359 
2360 static int
2361 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2362 			  struct rte_ether_addr *mc_addr_set,
2363 			  uint32_t nb_mc_addr)
2364 {
2365 	struct bnxt *bp = eth_dev->data->dev_private;
2367 	struct bnxt_vnic_info *vnic;
2368 	uint32_t off = 0, i = 0;
2369 	int rc;
2370 
2371 	rc = is_bnxt_in_error(bp);
2372 	if (rc)
2373 		return rc;
2374 
2375 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2376 
2377 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2378 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2379 		goto allmulti;
2380 	}
2381 
2382 	/* TODO Check for Duplicate mcast addresses */
2383 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		/* Advance one full Ethernet address per entry. */
		memcpy(vnic->mc_list + off, &mc_addr_set[i],
		       RTE_ETHER_ADDR_LEN);
		off += RTE_ETHER_ADDR_LEN;
	}
2389 
2390 	vnic->mc_addr_cnt = i;
2391 	if (vnic->mc_addr_cnt)
2392 		vnic->flags |= BNXT_VNIC_INFO_MCAST;
2393 	else
2394 		vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2395 
2396 allmulti:
2397 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2398 }
2399 
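/*
 * Format the firmware version packed into bp->fw_ver as one byte each
 * of major, minor and update: e.g. fw_ver 0x14060800 yields "20.6.8".
 */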
2400 static int
2401 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2402 {
2403 	struct bnxt *bp = dev->data->dev_private;
2404 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2405 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2406 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2407 	int ret;
2408 
2409 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
2410 			fw_major, fw_minor, fw_updt);
2411 
	ret += 1; /* account for the terminating '\0' */
2413 	if (fw_size < (uint32_t)ret)
2414 		return ret;
2415 	else
2416 		return 0;
2417 }
2418 
2419 static void
2420 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2421 	struct rte_eth_rxq_info *qinfo)
2422 {
2423 	struct bnxt *bp = dev->data->dev_private;
2424 	struct bnxt_rx_queue *rxq;
2425 
2426 	if (is_bnxt_in_error(bp))
2427 		return;
2428 
2429 	rxq = dev->data->rx_queues[queue_id];
2430 
2431 	qinfo->mp = rxq->mb_pool;
2432 	qinfo->scattered_rx = dev->data->scattered_rx;
2433 	qinfo->nb_desc = rxq->nb_rx_desc;
2434 
2435 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2436 	qinfo->conf.rx_drop_en = 0;
2437 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2438 }
2439 
2440 static void
2441 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2442 	struct rte_eth_txq_info *qinfo)
2443 {
2444 	struct bnxt *bp = dev->data->dev_private;
2445 	struct bnxt_tx_queue *txq;
2446 
2447 	if (is_bnxt_in_error(bp))
2448 		return;
2449 
2450 	txq = dev->data->tx_queues[queue_id];
2451 
2452 	qinfo->nb_desc = txq->nb_tx_desc;
2453 
2454 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2455 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2456 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2457 
2458 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2459 	qinfo->conf.tx_rs_thresh = 0;
2460 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2461 }
2462 
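/*
 * Change the MTU. The resulting frame size accounts for the Ethernet
 * header, CRC and two VLAN tags: with an MTU of 1500 this gives
 * 1500 + 14 + 4 + 2 * 4 = 1526 bytes (assuming the usual 4-byte
 * VLAN_TAG_SIZE and BNXT_NUM_VLANS of 2).
 */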
2463 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2464 {
2465 	struct bnxt *bp = eth_dev->data->dev_private;
2466 	uint32_t new_pkt_size;
	int rc = 0;
2468 	uint32_t i;
2469 
2470 	rc = is_bnxt_in_error(bp);
2471 	if (rc)
2472 		return rc;
2473 
2474 	/* Exit if receive queues are not configured yet */
2475 	if (!eth_dev->data->nb_rx_queues)
2476 		return rc;
2477 
2478 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2479 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2480 
2481 #ifdef RTE_ARCH_X86
2482 	/*
2483 	 * If vector-mode tx/rx is active, disallow any MTU change that would
2484 	 * require scattered receive support.
2485 	 */
2486 	if (eth_dev->data->dev_started &&
2487 	    (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2488 	     eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2489 	    (new_pkt_size >
2490 	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2491 		PMD_DRV_LOG(ERR,
2492 			    "MTU change would require scattered rx support. ");
2493 		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2494 		return -EINVAL;
2495 	}
2496 #endif
2497 
2498 	if (new_mtu > RTE_ETHER_MTU) {
2499 		bp->flags |= BNXT_FLAG_JUMBO;
2500 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
2501 			DEV_RX_OFFLOAD_JUMBO_FRAME;
2502 	} else {
2503 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
2504 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
2505 		bp->flags &= ~BNXT_FLAG_JUMBO;
2506 	}
2507 
2508 	/* Is there a change in mtu setting? */
2509 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2510 		return rc;
2511 
2512 	for (i = 0; i < bp->nr_vnics; i++) {
2513 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2514 		uint16_t size = 0;
2515 
2516 		vnic->mru = BNXT_VNIC_MRU(new_mtu);
2517 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2518 		if (rc)
2519 			break;
2520 
2521 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2522 		size -= RTE_PKTMBUF_HEADROOM;
2523 
2524 		if (size < new_mtu) {
2525 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2526 			if (rc)
2527 				return rc;
2528 		}
2529 	}
2530 
2531 	if (!rc)
2532 		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2533 
2534 	PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2535 
2536 	return rc;
2537 }
2538 
2539 static int
2540 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2541 {
2542 	struct bnxt *bp = dev->data->dev_private;
2543 	uint16_t vlan = bp->vlan;
2544 	int rc;
2545 
2546 	rc = is_bnxt_in_error(bp);
2547 	if (rc)
2548 		return rc;
2549 
2550 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2551 		PMD_DRV_LOG(ERR,
2552 			"PVID cannot be modified for this function\n");
2553 		return -ENOTSUP;
2554 	}
2555 	bp->vlan = on ? pvid : 0;
2556 
2557 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2558 	if (rc)
2559 		bp->vlan = vlan;
2560 	return rc;
2561 }
2562 
2563 static int
2564 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2565 {
2566 	struct bnxt *bp = dev->data->dev_private;
2567 	int rc;
2568 
2569 	rc = is_bnxt_in_error(bp);
2570 	if (rc)
2571 		return rc;
2572 
2573 	return bnxt_hwrm_port_led_cfg(bp, true);
2574 }
2575 
2576 static int
2577 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2578 {
2579 	struct bnxt *bp = dev->data->dev_private;
2580 	int rc;
2581 
2582 	rc = is_bnxt_in_error(bp);
2583 	if (rc)
2584 		return rc;
2585 
2586 	return bnxt_hwrm_port_led_cfg(bp, false);
2587 }
2588 
2589 static uint32_t
2590 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2591 {
2592 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2593 	uint32_t desc = 0, raw_cons = 0, cons;
2594 	struct bnxt_cp_ring_info *cpr;
2595 	struct bnxt_rx_queue *rxq;
2596 	struct rx_pkt_cmpl *rxcmp;
2597 	int rc;
2598 
2599 	rc = is_bnxt_in_error(bp);
2600 	if (rc)
2601 		return rc;
2602 
2603 	rxq = dev->data->rx_queues[rx_queue_id];
2604 	cpr = rxq->cp_ring;
2605 	raw_cons = cpr->cp_raw_cons;
2606 
2607 	while (1) {
2608 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2609 		rte_prefetch0(&cpr->cp_desc_ring[cons]);
2610 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2611 
		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;

		raw_cons++;
		desc++;
2618 	}
2619 
2620 	return desc;
2621 }
2622 
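/*
 * Report the state of an Rx descriptor. The completion ring valid bit
 * flips on every pass over the ring, so the expected phase of the bit
 * depends on whether the requested offset lies ahead of or behind the
 * current raw consumer index.
 */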
2623 static int
2624 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2625 {
2626 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2627 	struct bnxt_rx_ring_info *rxr;
2628 	struct bnxt_cp_ring_info *cpr;
2629 	struct bnxt_sw_rx_bd *rx_buf;
2630 	struct rx_pkt_cmpl *rxcmp;
2631 	uint32_t cons, cp_cons;
2632 	int rc;
2633 
2634 	if (!rxq)
2635 		return -EINVAL;
2636 
2637 	rc = is_bnxt_in_error(rxq->bp);
2638 	if (rc)
2639 		return rc;
2640 
2641 	cpr = rxq->cp_ring;
2642 	rxr = rxq->rx_ring;
2643 
2644 	if (offset >= rxq->nb_rx_desc)
2645 		return -EINVAL;
2646 
2647 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2648 	cp_cons = cpr->cp_raw_cons;
2649 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2650 
2651 	if (cons > cp_cons) {
2652 		if (CMPL_VALID(rxcmp, cpr->valid))
2653 			return RTE_ETH_RX_DESC_DONE;
2654 	} else {
2655 		if (CMPL_VALID(rxcmp, !cpr->valid))
2656 			return RTE_ETH_RX_DESC_DONE;
2657 	}
2658 	rx_buf = &rxr->rx_buf_ring[cons];
	if (rx_buf->mbuf == NULL)
		return RTE_ETH_RX_DESC_UNAVAIL;

	return RTE_ETH_RX_DESC_AVAIL;
2664 }
2665 
2666 static int
2667 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2668 {
2669 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2670 	struct bnxt_tx_ring_info *txr;
2671 	struct bnxt_cp_ring_info *cpr;
2672 	struct bnxt_sw_tx_bd *tx_buf;
2673 	struct tx_pkt_cmpl *txcmp;
2674 	uint32_t cons, cp_cons;
2675 	int rc;
2676 
2677 	if (!txq)
2678 		return -EINVAL;
2679 
2680 	rc = is_bnxt_in_error(txq->bp);
2681 	if (rc)
2682 		return rc;
2683 
2684 	cpr = txq->cp_ring;
2685 	txr = txq->tx_ring;
2686 
2687 	if (offset >= txq->nb_tx_desc)
2688 		return -EINVAL;
2689 
2690 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2691 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2692 	cp_cons = cpr->cp_raw_cons;
2693 
2694 	if (cons > cp_cons) {
2695 		if (CMPL_VALID(txcmp, cpr->valid))
2696 			return RTE_ETH_TX_DESC_UNAVAIL;
2697 	} else {
2698 		if (CMPL_VALID(txcmp, !cpr->valid))
2699 			return RTE_ETH_TX_DESC_UNAVAIL;
2700 	}
2701 	tx_buf = &txr->tx_buf_ring[cons];
2702 	if (tx_buf->mbuf == NULL)
2703 		return RTE_ETH_TX_DESC_DONE;
2704 
2705 	return RTE_ETH_TX_DESC_FULL;
2706 }
2707 
2708 static struct bnxt_filter_info *
2709 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2710 				struct rte_eth_ethertype_filter *efilter,
2711 				struct bnxt_vnic_info *vnic0,
2712 				struct bnxt_vnic_info *vnic,
2713 				int *ret)
2714 {
2715 	struct bnxt_filter_info *mfilter = NULL;
2716 	int match = 0;
2717 	*ret = 0;
2718 
2719 	if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2720 		efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2721 		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2722 			" ethertype filter.", efilter->ether_type);
2723 		*ret = -EINVAL;
2724 		goto exit;
2725 	}
2726 	if (efilter->queue >= bp->rx_nr_rings) {
2727 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2728 		*ret = -EINVAL;
2729 		goto exit;
2730 	}
2731 
2732 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2733 	vnic = &bp->vnic_info[efilter->queue];
2734 	if (vnic == NULL) {
2735 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2736 		*ret = -EINVAL;
2737 		goto exit;
2738 	}
2739 
2740 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2741 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2742 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2743 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2744 			     mfilter->flags ==
2745 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2746 			     mfilter->ethertype == efilter->ether_type)) {
2747 				match = 1;
2748 				break;
2749 			}
2750 		}
2751 	} else {
2752 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
2753 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2754 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2755 			     mfilter->ethertype == efilter->ether_type &&
2756 			     mfilter->flags ==
2757 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2758 				match = 1;
2759 				break;
2760 			}
2761 	}
2762 
2763 	if (match)
2764 		*ret = -EEXIST;
2765 
2766 exit:
2767 	return mfilter;
2768 }
2769 
2770 static int
2771 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2772 			enum rte_filter_op filter_op,
2773 			void *arg)
2774 {
2775 	struct bnxt *bp = dev->data->dev_private;
2776 	struct rte_eth_ethertype_filter *efilter =
2777 			(struct rte_eth_ethertype_filter *)arg;
2778 	struct bnxt_filter_info *bfilter, *filter1;
2779 	struct bnxt_vnic_info *vnic, *vnic0;
2780 	int ret;
2781 
2782 	if (filter_op == RTE_ETH_FILTER_NOP)
2783 		return 0;
2784 
2785 	if (arg == NULL) {
2786 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2787 			    filter_op);
2788 		return -EINVAL;
2789 	}
2790 
2791 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2792 	vnic = &bp->vnic_info[efilter->queue];
2793 
2794 	switch (filter_op) {
2795 	case RTE_ETH_FILTER_ADD:
2796 		bnxt_match_and_validate_ether_filter(bp, efilter,
2797 							vnic0, vnic, &ret);
2798 		if (ret < 0)
2799 			return ret;
2800 
2801 		bfilter = bnxt_get_unused_filter(bp);
2802 		if (bfilter == NULL) {
2803 			PMD_DRV_LOG(ERR,
2804 				"Not enough resources for a new filter.\n");
2805 			return -ENOMEM;
2806 		}
2807 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2808 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2809 		       RTE_ETHER_ADDR_LEN);
2810 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2811 		       RTE_ETHER_ADDR_LEN);
2812 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2813 		bfilter->ethertype = efilter->ether_type;
2814 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2815 
2816 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2817 		if (filter1 == NULL) {
2818 			ret = -EINVAL;
2819 			goto cleanup;
2820 		}
2821 		bfilter->enables |=
2822 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2823 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2824 
2825 		bfilter->dst_id = vnic->fw_vnic_id;
2826 
2827 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2828 			bfilter->flags =
2829 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2830 		}
2831 
2832 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2833 		if (ret)
2834 			goto cleanup;
2835 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2836 		break;
2837 	case RTE_ETH_FILTER_DELETE:
2838 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2839 							vnic0, vnic, &ret);
2840 		if (ret == -EEXIST) {
2841 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2842 
2843 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2844 				      next);
2845 			bnxt_free_filter(bp, filter1);
2846 		} else if (ret == 0) {
2847 			PMD_DRV_LOG(ERR, "No matching filter found\n");
2848 		}
2849 		break;
2850 	default:
2851 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2852 		ret = -EINVAL;
2853 		goto error;
2854 	}
2855 	return ret;
2856 cleanup:
2857 	bnxt_free_filter(bp, bfilter);
2858 error:
2859 	return ret;
2860 }
2861 
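/*
 * Translate an rte_eth_ntuple_filter into a bnxt filter. Only exact
 * matches are supported: every supplied mask must be all ones, and the
 * protocol must be TCP or UDP over IPv4.
 */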
2862 static inline int
2863 parse_ntuple_filter(struct bnxt *bp,
2864 		    struct rte_eth_ntuple_filter *nfilter,
2865 		    struct bnxt_filter_info *bfilter)
2866 {
2867 	uint32_t en = 0;
2868 
2869 	if (nfilter->queue >= bp->rx_nr_rings) {
2870 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
2871 		return -EINVAL;
2872 	}
2873 
2874 	switch (nfilter->dst_port_mask) {
2875 	case UINT16_MAX:
2876 		bfilter->dst_port_mask = -1;
2877 		bfilter->dst_port = nfilter->dst_port;
2878 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
2879 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2880 		break;
2881 	default:
2882 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2883 		return -EINVAL;
2884 	}
2885 
2886 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2887 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2888 
2889 	switch (nfilter->proto_mask) {
2890 	case UINT8_MAX:
2891 		if (nfilter->proto == 17) /* IPPROTO_UDP */
2892 			bfilter->ip_protocol = 17;
2893 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
2894 			bfilter->ip_protocol = 6;
2895 		else
2896 			return -EINVAL;
2897 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2898 		break;
2899 	default:
2900 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
2901 		return -EINVAL;
2902 	}
2903 
2904 	switch (nfilter->dst_ip_mask) {
2905 	case UINT32_MAX:
2906 		bfilter->dst_ipaddr_mask[0] = -1;
2907 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
2908 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
2909 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2910 		break;
2911 	default:
2912 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2913 		return -EINVAL;
2914 	}
2915 
2916 	switch (nfilter->src_ip_mask) {
2917 	case UINT32_MAX:
2918 		bfilter->src_ipaddr_mask[0] = -1;
2919 		bfilter->src_ipaddr[0] = nfilter->src_ip;
2920 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
2921 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2922 		break;
2923 	default:
2924 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2925 		return -EINVAL;
2926 	}
2927 
2928 	switch (nfilter->src_port_mask) {
2929 	case UINT16_MAX:
2930 		bfilter->src_port_mask = -1;
2931 		bfilter->src_port = nfilter->src_port;
2932 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
2933 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2934 		break;
2935 	default:
2936 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
2937 		return -EINVAL;
2938 	}
2939 
2940 	bfilter->enables = en;
2941 	return 0;
2942 }
2943 
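/*
 * Search all VNICs, newest first, for a filter with an identical
 * 5-tuple, flags and enables. On a match, the owning VNIC is
 * optionally returned through mvnic.
 */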
2944 static struct bnxt_filter_info*
2945 bnxt_match_ntuple_filter(struct bnxt *bp,
2946 			 struct bnxt_filter_info *bfilter,
2947 			 struct bnxt_vnic_info **mvnic)
2948 {
2949 	struct bnxt_filter_info *mfilter = NULL;
2950 	int i;
2951 
2952 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2953 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2954 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
2955 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
2956 			    bfilter->src_ipaddr_mask[0] ==
2957 			    mfilter->src_ipaddr_mask[0] &&
2958 			    bfilter->src_port == mfilter->src_port &&
2959 			    bfilter->src_port_mask == mfilter->src_port_mask &&
2960 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
2961 			    bfilter->dst_ipaddr_mask[0] ==
2962 			    mfilter->dst_ipaddr_mask[0] &&
2963 			    bfilter->dst_port == mfilter->dst_port &&
2964 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
2965 			    bfilter->flags == mfilter->flags &&
2966 			    bfilter->enables == mfilter->enables) {
2967 				if (mvnic)
2968 					*mvnic = vnic;
2969 				return mfilter;
2970 			}
2971 		}
2972 	}
2973 	return NULL;
2974 }
2975 
2976 static int
2977 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2978 		       struct rte_eth_ntuple_filter *nfilter,
2979 		       enum rte_filter_op filter_op)
2980 {
2981 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2982 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2983 	int ret;
2984 
2985 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5-tuple filters are supported.\n");
2987 		return -EINVAL;
2988 	}
2989 
2990 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2991 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2992 		return -EINVAL;
2993 	}
2994 
2995 	bfilter = bnxt_get_unused_filter(bp);
2996 	if (bfilter == NULL) {
2997 		PMD_DRV_LOG(ERR,
2998 			"Not enough resources for a new filter.\n");
2999 		return -ENOMEM;
3000 	}
3001 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
3002 	if (ret < 0)
3003 		goto free_filter;
3004 
3005 	vnic = &bp->vnic_info[nfilter->queue];
3006 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3007 	filter1 = STAILQ_FIRST(&vnic0->filter);
3008 	if (filter1 == NULL) {
3009 		ret = -EINVAL;
3010 		goto free_filter;
3011 	}
3012 
3013 	bfilter->dst_id = vnic->fw_vnic_id;
3014 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3015 	bfilter->enables |=
3016 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3017 	bfilter->ethertype = 0x800;
3018 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3019 
3020 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3021 
3022 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3023 	    bfilter->dst_id == mfilter->dst_id) {
3024 		PMD_DRV_LOG(ERR, "filter exists.\n");
3025 		ret = -EEXIST;
3026 		goto free_filter;
3027 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3028 		   bfilter->dst_id != mfilter->dst_id) {
3029 		mfilter->dst_id = vnic->fw_vnic_id;
3030 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3031 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3032 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
		PMD_DRV_LOG(ERR, "Filter with matching pattern exists.\n");
		PMD_DRV_LOG(ERR, "Updated it to the new destination queue\n");
3035 		goto free_filter;
3036 	}
3037 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3038 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
3039 		ret = -ENOENT;
3040 		goto free_filter;
3041 	}
3042 
3043 	if (filter_op == RTE_ETH_FILTER_ADD) {
3044 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3045 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3046 		if (ret)
3047 			goto free_filter;
3048 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3049 	} else {
3050 		if (mfilter == NULL) {
			/* Should not happen; kept to satisfy static analysis. */
3052 			ret = -ENOENT;
3053 			goto free_filter;
3054 		}
3055 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3056 
3057 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3058 		bnxt_free_filter(bp, mfilter);
3059 		bnxt_free_filter(bp, bfilter);
3060 	}
3061 
3062 	return 0;
3063 free_filter:
3064 	bnxt_free_filter(bp, bfilter);
3065 	return ret;
3066 }
3067 
3068 static int
3069 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3070 			enum rte_filter_op filter_op,
3071 			void *arg)
3072 {
3073 	struct bnxt *bp = dev->data->dev_private;
3074 	int ret;
3075 
3076 	if (filter_op == RTE_ETH_FILTER_NOP)
3077 		return 0;
3078 
3079 	if (arg == NULL) {
3080 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3081 			    filter_op);
3082 		return -EINVAL;
3083 	}
3084 
3085 	switch (filter_op) {
3086 	case RTE_ETH_FILTER_ADD:
3087 		ret = bnxt_cfg_ntuple_filter(bp,
3088 			(struct rte_eth_ntuple_filter *)arg,
3089 			filter_op);
3090 		break;
3091 	case RTE_ETH_FILTER_DELETE:
3092 		ret = bnxt_cfg_ntuple_filter(bp,
3093 			(struct rte_eth_ntuple_filter *)arg,
3094 			filter_op);
3095 		break;
3096 	default:
3097 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3098 		ret = -EINVAL;
3099 		break;
3100 	}
3101 	return ret;
3102 }
3103 
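/*
 * Translate an rte_eth_fdir_filter into a bnxt filter. Flow director
 * is emulated with HWRM ntuple filters: each supported flow type sets
 * the corresponding match fields, masks and enables bits.
 */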
3104 static int
3105 bnxt_parse_fdir_filter(struct bnxt *bp,
3106 		       struct rte_eth_fdir_filter *fdir,
3107 		       struct bnxt_filter_info *filter)
3108 {
3109 	enum rte_fdir_mode fdir_mode =
3110 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
3111 	struct bnxt_vnic_info *vnic0, *vnic;
3112 	struct bnxt_filter_info *filter1;
3113 	uint32_t en = 0;
3114 	int i;
3115 
3116 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3117 		return -EINVAL;
3118 
3119 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3120 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3121 
3122 	switch (fdir->input.flow_type) {
3123 	case RTE_ETH_FLOW_IPV4:
3124 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3125 		/* FALLTHROUGH */
3126 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3127 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3128 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3129 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3130 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3131 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3132 		filter->ip_addr_type =
3133 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3134 		filter->src_ipaddr_mask[0] = 0xffffffff;
3135 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3136 		filter->dst_ipaddr_mask[0] = 0xffffffff;
3137 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3138 		filter->ethertype = 0x800;
3139 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3140 		break;
3141 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3142 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3143 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3144 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3145 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3146 		filter->dst_port_mask = 0xffff;
3147 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3148 		filter->src_port_mask = 0xffff;
3149 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3150 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3151 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3152 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3153 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3154 		filter->ip_protocol = 6;
3155 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3156 		filter->ip_addr_type =
3157 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3158 		filter->src_ipaddr_mask[0] = 0xffffffff;
3159 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3160 		filter->dst_ipaddr_mask[0] = 0xffffffff;
3161 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3162 		filter->ethertype = 0x800;
3163 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3164 		break;
3165 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3166 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
3167 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3168 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3169 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3170 		filter->dst_port_mask = 0xffff;
3171 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3172 		filter->src_port_mask = 0xffff;
3173 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3174 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3175 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3176 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3177 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3178 		filter->ip_protocol = 17;
3179 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3180 		filter->ip_addr_type =
3181 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3182 		filter->src_ipaddr_mask[0] = 0xffffffff;
3183 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3184 		filter->dst_ipaddr_mask[0] = 0xffffffff;
3185 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3186 		filter->ethertype = 0x800;
3187 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3188 		break;
3189 	case RTE_ETH_FLOW_IPV6:
3190 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3191 		/* FALLTHROUGH */
3192 		filter->ip_addr_type =
3193 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3194 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3195 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3196 		rte_memcpy(filter->src_ipaddr,
3197 			   fdir->input.flow.ipv6_flow.src_ip, 16);
3198 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3199 		rte_memcpy(filter->dst_ipaddr,
3200 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
3201 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3202 		memset(filter->dst_ipaddr_mask, 0xff, 16);
3203 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3204 		memset(filter->src_ipaddr_mask, 0xff, 16);
3205 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3206 		filter->ethertype = 0x86dd;
3207 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3208 		break;
3209 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3210 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3211 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3212 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3213 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3214 		filter->dst_port_mask = 0xffff;
3215 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3216 		filter->src_port_mask = 0xffff;
3217 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3218 		filter->ip_addr_type =
3219 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3220 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3221 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3222 		rte_memcpy(filter->src_ipaddr,
3223 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3224 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3225 		rte_memcpy(filter->dst_ipaddr,
3226 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3227 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3228 		memset(filter->dst_ipaddr_mask, 0xff, 16);
3229 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3230 		memset(filter->src_ipaddr_mask, 0xff, 16);
3231 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3232 		filter->ethertype = 0x86dd;
3233 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3234 		break;
3235 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3236 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
3237 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3238 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3239 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3240 		filter->dst_port_mask = 0xffff;
3241 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3242 		filter->src_port_mask = 0xffff;
3243 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3244 		filter->ip_addr_type =
3245 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3246 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3247 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3248 		rte_memcpy(filter->src_ipaddr,
3249 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
3250 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3251 		rte_memcpy(filter->dst_ipaddr,
3252 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3253 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3254 		memset(filter->dst_ipaddr_mask, 0xff, 16);
3255 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3256 		memset(filter->src_ipaddr_mask, 0xff, 16);
3257 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3258 		filter->ethertype = 0x86dd;
3259 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3260 		break;
3261 	case RTE_ETH_FLOW_L2_PAYLOAD:
3262 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3263 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3264 		break;
3265 	case RTE_ETH_FLOW_VXLAN:
3266 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3267 			return -EINVAL;
3268 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3269 		filter->tunnel_type =
3270 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3271 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3272 		break;
3273 	case RTE_ETH_FLOW_NVGRE:
3274 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3275 			return -EINVAL;
3276 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3277 		filter->tunnel_type =
3278 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3279 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3280 		break;
3281 	case RTE_ETH_FLOW_UNKNOWN:
3282 	case RTE_ETH_FLOW_RAW:
3283 	case RTE_ETH_FLOW_FRAG_IPV4:
3284 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3285 	case RTE_ETH_FLOW_FRAG_IPV6:
3286 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3287 	case RTE_ETH_FLOW_IPV6_EX:
3288 	case RTE_ETH_FLOW_IPV6_TCP_EX:
3289 	case RTE_ETH_FLOW_IPV6_UDP_EX:
3290 	case RTE_ETH_FLOW_GENEVE:
3291 		/* FALLTHROUGH */
3292 	default:
3293 		return -EINVAL;
3294 	}
3295 
3296 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3297 	vnic = &bp->vnic_info[fdir->action.rx_queue];
3298 	if (vnic == NULL) {
3299 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3300 		return -EINVAL;
3301 	}
3302 
	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			   fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
			   6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}
3308 
3309 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3310 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3311 		filter1 = STAILQ_FIRST(&vnic0->filter);
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		/* Use the default L2 filter when no destination MAC is
		 * specified; otherwise allocate an L2 filter that steers
		 * to the target VNIC.
		 */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] != 0x00)
				break;
		if (i == RTE_ETHER_ADDR_LEN)
			filter1 = STAILQ_FIRST(&vnic0->filter);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}
3321 
3322 	if (filter1 == NULL)
3323 		return -EINVAL;
3324 
3325 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3326 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3327 
3328 	filter->enables = en;
3329 
3330 	return 0;
3331 }
3332 
3333 static struct bnxt_filter_info *
3334 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3335 		struct bnxt_vnic_info **mvnic)
3336 {
3337 	struct bnxt_filter_info *mf = NULL;
3338 	int i;
3339 
3340 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
3341 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3342 
3343 		STAILQ_FOREACH(mf, &vnic->filter, next) {
3344 			if (mf->filter_type == nf->filter_type &&
3345 			    mf->flags == nf->flags &&
3346 			    mf->src_port == nf->src_port &&
3347 			    mf->src_port_mask == nf->src_port_mask &&
3348 			    mf->dst_port == nf->dst_port &&
3349 			    mf->dst_port_mask == nf->dst_port_mask &&
3350 			    mf->ip_protocol == nf->ip_protocol &&
3351 			    mf->ip_addr_type == nf->ip_addr_type &&
3352 			    mf->ethertype == nf->ethertype &&
3353 			    mf->vni == nf->vni &&
3354 			    mf->tunnel_type == nf->tunnel_type &&
3355 			    mf->l2_ovlan == nf->l2_ovlan &&
3356 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3357 			    mf->l2_ivlan == nf->l2_ivlan &&
3358 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3359 			    !memcmp(mf->l2_addr, nf->l2_addr,
3360 				    RTE_ETHER_ADDR_LEN) &&
3361 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3362 				    RTE_ETHER_ADDR_LEN) &&
3363 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
3364 				    RTE_ETHER_ADDR_LEN) &&
3365 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3366 				    RTE_ETHER_ADDR_LEN) &&
3367 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3368 				    sizeof(nf->src_ipaddr)) &&
3369 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3370 				    sizeof(nf->src_ipaddr_mask)) &&
3371 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3372 				    sizeof(nf->dst_ipaddr)) &&
3373 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3374 				    sizeof(nf->dst_ipaddr_mask))) {
3375 				if (mvnic)
3376 					*mvnic = vnic;
3377 				return mf;
3378 			}
3379 		}
3380 	}
3381 	return NULL;
3382 }
3383 
3384 static int
3385 bnxt_fdir_filter(struct rte_eth_dev *dev,
3386 		 enum rte_filter_op filter_op,
3387 		 void *arg)
3388 {
3389 	struct bnxt *bp = dev->data->dev_private;
3390 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3391 	struct bnxt_filter_info *filter, *match;
3392 	struct bnxt_vnic_info *vnic, *mvnic;
3393 	int ret = 0, i;
3394 
3395 	if (filter_op == RTE_ETH_FILTER_NOP)
3396 		return 0;
3397 
3398 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3399 		return -EINVAL;
3400 
3401 	switch (filter_op) {
3402 	case RTE_ETH_FILTER_ADD:
3403 	case RTE_ETH_FILTER_DELETE:
3404 		/* FALLTHROUGH */
3405 		filter = bnxt_get_unused_filter(bp);
3406 		if (filter == NULL) {
3407 			PMD_DRV_LOG(ERR,
3408 				"Not enough resources for a new flow.\n");
3409 			return -ENOMEM;
3410 		}
3411 
3412 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3413 		if (ret != 0)
3414 			goto free_filter;
3415 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3416 
3417 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3418 			vnic = &bp->vnic_info[0];
3419 		else
3420 			vnic = &bp->vnic_info[fdir->action.rx_queue];
3421 
3422 		match = bnxt_match_fdir(bp, filter, &mvnic);
3423 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3424 			if (match->dst_id == vnic->fw_vnic_id) {
3425 				PMD_DRV_LOG(ERR, "Flow already exists.\n");
3426 				ret = -EEXIST;
3427 				goto free_filter;
3428 			} else {
3429 				match->dst_id = vnic->fw_vnic_id;
3430 				ret = bnxt_hwrm_set_ntuple_filter(bp,
3431 								  match->dst_id,
3432 								  match);
3433 				STAILQ_REMOVE(&mvnic->filter, match,
3434 					      bnxt_filter_info, next);
3435 				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
				PMD_DRV_LOG(ERR,
					"Filter with matching pattern exists\n");
				PMD_DRV_LOG(ERR,
					"Updated it to the new destination queue\n");
3440 				goto free_filter;
3441 			}
3442 		}
3443 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3444 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3445 			ret = -ENOENT;
3446 			goto free_filter;
3447 		}
3448 
3449 		if (filter_op == RTE_ETH_FILTER_ADD) {
3450 			ret = bnxt_hwrm_set_ntuple_filter(bp,
3451 							  filter->dst_id,
3452 							  filter);
3453 			if (ret)
3454 				goto free_filter;
3455 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3456 		} else {
3457 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3458 			STAILQ_REMOVE(&vnic->filter, match,
3459 				      bnxt_filter_info, next);
3460 			bnxt_free_filter(bp, match);
3461 			bnxt_free_filter(bp, filter);
3462 		}
3463 		break;
3464 	case RTE_ETH_FILTER_FLUSH:
3465 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
3466 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3467 
3468 			STAILQ_FOREACH(filter, &vnic->filter, next) {
3469 				if (filter->filter_type ==
3470 				    HWRM_CFA_NTUPLE_FILTER) {
3471 					ret =
3472 					bnxt_hwrm_clear_ntuple_filter(bp,
3473 								      filter);
3474 					STAILQ_REMOVE(&vnic->filter, filter,
3475 						      bnxt_filter_info, next);
3476 				}
3477 			}
3478 		}
3479 		return ret;
3480 	case RTE_ETH_FILTER_UPDATE:
3481 	case RTE_ETH_FILTER_STATS:
3482 	case RTE_ETH_FILTER_INFO:
3483 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3484 		break;
3485 	default:
3486 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3487 		ret = -EINVAL;
3488 		break;
3489 	}
3490 	return ret;
3491 
3492 free_filter:
3493 	bnxt_free_filter(bp, filter);
3494 	return ret;
3495 }
3496 
3497 static int
3498 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3499 		    enum rte_filter_type filter_type,
3500 		    enum rte_filter_op filter_op, void *arg)
3501 {
3502 	struct bnxt *bp = dev->data->dev_private;
3503 	int ret = 0;
3504 
	ret = is_bnxt_in_error(bp);
3506 	if (ret)
3507 		return ret;
3508 
3509 	switch (filter_type) {
3510 	case RTE_ETH_FILTER_TUNNEL:
3511 		PMD_DRV_LOG(ERR,
3512 			"filter type: %d: To be implemented\n", filter_type);
3513 		break;
3514 	case RTE_ETH_FILTER_FDIR:
3515 		ret = bnxt_fdir_filter(dev, filter_op, arg);
3516 		break;
3517 	case RTE_ETH_FILTER_NTUPLE:
3518 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
3519 		break;
3520 	case RTE_ETH_FILTER_ETHERTYPE:
3521 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
3522 		break;
3523 	case RTE_ETH_FILTER_GENERIC:
3524 		if (filter_op != RTE_ETH_FILTER_GET)
3525 			return -EINVAL;
3526 		if (bp->truflow)
3527 			*(const void **)arg = &bnxt_ulp_rte_flow_ops;
3528 		else
3529 			*(const void **)arg = &bnxt_flow_ops;
3530 		break;
3531 	default:
3532 		PMD_DRV_LOG(ERR,
3533 			"Filter type (%d) not supported", filter_type);
3534 		ret = -EINVAL;
3535 		break;
3536 	}
3537 	return ret;
3538 }
3539 
3540 static const uint32_t *
3541 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3542 {
3543 	static const uint32_t ptypes[] = {
3544 		RTE_PTYPE_L2_ETHER_VLAN,
3545 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3546 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3547 		RTE_PTYPE_L4_ICMP,
3548 		RTE_PTYPE_L4_TCP,
3549 		RTE_PTYPE_L4_UDP,
3550 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3551 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3552 		RTE_PTYPE_INNER_L4_ICMP,
3553 		RTE_PTYPE_INNER_L4_TCP,
3554 		RTE_PTYPE_INNER_L4_UDP,
3555 		RTE_PTYPE_UNKNOWN
3556 	};
3557 
3558 	if (!dev->rx_pkt_burst)
3559 		return NULL;
3560 
3561 	return ptypes;
3562 }
3563 
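/*
 * Map a group of GRC registers through a register window. All
 * registers must lie in the same 4KB page; the page base is programmed
 * into the window register and the registers are then accessed at
 * fixed offsets within the selected window (e.g. 0x5000 for window 5,
 * 0x6000 for window 6).
 */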
3564 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3565 			 int reg_win)
3566 {
3567 	uint32_t reg_base = *reg_arr & 0xfffff000;
3568 	uint32_t win_off;
3569 	int i;
3570 
3571 	for (i = 0; i < count; i++) {
3572 		if ((reg_arr[i] & 0xfffff000) != reg_base)
3573 			return -ERANGE;
3574 	}
3575 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3576 	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3577 	return 0;
3578 }
3579 
3580 static int bnxt_map_ptp_regs(struct bnxt *bp)
3581 {
3582 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3583 	uint32_t *reg_arr;
3584 	int rc, i;
3585 
3586 	reg_arr = ptp->rx_regs;
3587 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3588 	if (rc)
3589 		return rc;
3590 
3591 	reg_arr = ptp->tx_regs;
3592 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3593 	if (rc)
3594 		return rc;
3595 
3596 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3597 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3598 
3599 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3600 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3601 
3602 	return 0;
3603 }
3604 
3605 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3606 {
3607 	rte_write32(0, (uint8_t *)bp->bar0 +
3608 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3609 	rte_write32(0, (uint8_t *)bp->bar0 +
3610 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3611 }
3612 
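/*
 * Read the 64-bit free-running sync-time counter as two 32-bit halves
 * (low word first) from the GRC registers; this is the cycle source
 * for the PTP timecounter.
 */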
3613 static uint64_t bnxt_cc_read(struct bnxt *bp)
3614 {
3615 	uint64_t ns;
3616 
3617 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3618 			      BNXT_GRCPF_REG_SYNC_TIME));
3619 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3620 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3621 	return ns;
3622 }
3623 
3624 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3625 {
3626 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3627 	uint32_t fifo;
3628 
3629 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3630 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3631 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3632 		return -EAGAIN;
3633 
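	/*
	 * The FIFO register is read a second time before the timestamp
	 * words are fetched; this appears to latch/advance the TX timestamp
	 * FIFO entry (hardware-specific behavior inferred from the access
	 * pattern, not documented here).
	 */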
3634 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3635 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3636 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3637 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3638 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3639 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3640 
3641 	return 0;
3642 }
3643 
3644 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3645 {
3646 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3647 	struct bnxt_pf_info *pf = &bp->pf;
3648 	uint16_t port_id;
3649 	uint32_t fifo;
3650 
3651 	if (!ptp)
3652 		return -ENODEV;
3653 
3654 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3655 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3656 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3657 		return -EAGAIN;
3658 
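	/*
	 * Advance the per-port RX timestamp FIFO below; if the pending bit
	 * is still set afterwards, the entry could not be retired yet and
	 * the read is abandoned with -EBUSY.
	 */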
3659 	port_id = pf->port_id;
3660 	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3661 	       ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3662 
3663 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3664 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3665 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3666 /*		bnxt_clr_rx_ts(bp);	  TBD  */
3667 		return -EBUSY;
3668 	}
3669 
3670 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3671 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3672 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3673 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3674 
3675 	return 0;
3676 }
3677 
3678 static int
3679 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3680 {
3681 	uint64_t ns;
3682 	struct bnxt *bp = dev->data->dev_private;
3683 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3684 
3685 	if (!ptp)
3686 		return 0;
3687 
3688 	ns = rte_timespec_to_ns(ts);
3689 	/* Set the base timecounter to the new value. */
3690 	ptp->tc.nsec = ns;
3691 
3692 	return 0;
3693 }
3694 
3695 static int
3696 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3697 {
3698 	struct bnxt *bp = dev->data->dev_private;
3699 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3700 	uint64_t ns, systime_cycles = 0;
3701 	int rc = 0;
3702 
3703 	if (!ptp)
3704 		return 0;
3705 
3706 	if (BNXT_CHIP_THOR(bp))
3707 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3708 					     &systime_cycles);
3709 	else
3710 		systime_cycles = bnxt_cc_read(bp);
3711 
3712 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3713 	*ts = rte_ns_to_timespec(ns);
3714 
3715 	return rc;
3716 }
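
/*
 * Enable PTP: program RX filtering and TX timestamping through HWRM, then
 * reset the timecounters. With cc_shift == 0 the cycle counter is consumed
 * as nanoseconds directly, so nsec_mask carries no fractional state.
 */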
3717 static int
3718 bnxt_timesync_enable(struct rte_eth_dev *dev)
3719 {
3720 	struct bnxt *bp = dev->data->dev_private;
3721 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3722 	uint32_t shift = 0;
3723 	int rc;
3724 
3725 	if (!ptp)
3726 		return 0;
3727 
3728 	ptp->rx_filter = 1;
3729 	ptp->tx_tstamp_en = 1;
3730 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3731 
3732 	rc = bnxt_hwrm_ptp_cfg(bp);
3733 	if (rc)
3734 		return rc;
3735 
3736 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3737 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3738 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3739 
3740 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3741 	ptp->tc.cc_shift = shift;
3742 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
3743 
3744 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3745 	ptp->rx_tstamp_tc.cc_shift = shift;
3746 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3747 
3748 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3749 	ptp->tx_tstamp_tc.cc_shift = shift;
3750 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3751 
3752 	if (!BNXT_CHIP_THOR(bp))
3753 		bnxt_map_ptp_regs(bp);
3754 
3755 	return 0;
3756 }
3757 
3758 static int
3759 bnxt_timesync_disable(struct rte_eth_dev *dev)
3760 {
3761 	struct bnxt *bp = dev->data->dev_private;
3762 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3763 
3764 	if (!ptp)
3765 		return 0;
3766 
3767 	ptp->rx_filter = 0;
3768 	ptp->tx_tstamp_en = 0;
3769 	ptp->rxctl = 0;
3770 
3771 	bnxt_hwrm_ptp_cfg(bp);
3772 
3773 	if (!BNXT_CHIP_THOR(bp))
3774 		bnxt_unmap_ptp_regs(bp);
3775 
3776 	return 0;
3777 }
3778 
3779 static int
3780 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3781 				 struct timespec *timestamp,
3782 				 uint32_t flags __rte_unused)
3783 {
3784 	struct bnxt *bp = dev->data->dev_private;
3785 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3786 	uint64_t rx_tstamp_cycles = 0;
3787 	uint64_t ns;
3788 
3789 	if (!ptp)
3790 		return 0;
3791 
3792 	if (BNXT_CHIP_THOR(bp))
3793 		rx_tstamp_cycles = ptp->rx_timestamp;
3794 	else
3795 		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3796 
3797 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3798 	*timestamp = rte_ns_to_timespec(ns);
3799 	return  0;
3800 }
3801 
3802 static int
3803 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3804 				 struct timespec *timestamp)
3805 {
3806 	struct bnxt *bp = dev->data->dev_private;
3807 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3808 	uint64_t tx_tstamp_cycles = 0;
3809 	uint64_t ns;
3810 	int rc = 0;
3811 
3812 	if (!ptp)
3813 		return 0;
3814 
3815 	if (BNXT_CHIP_THOR(bp))
3816 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3817 					     &tx_tstamp_cycles);
3818 	else
3819 		rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3820 
3821 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3822 	*timestamp = rte_ns_to_timespec(ns);
3823 
3824 	return rc;
3825 }
3826 
3827 static int
3828 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3829 {
3830 	struct bnxt *bp = dev->data->dev_private;
3831 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3832 
3833 	if (!ptp)
3834 		return 0;
3835 
3836 	ptp->tc.nsec += delta;
3837 
3838 	return 0;
3839 }
3840 
3841 static int
3842 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3843 {
3844 	struct bnxt *bp = dev->data->dev_private;
3845 	int rc;
3846 	uint32_t dir_entries;
3847 	uint32_t entry_length;
3848 
3849 	rc = is_bnxt_in_error(bp);
3850 	if (rc)
3851 		return rc;
3852 
3853 	PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3854 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3855 		    bp->pdev->addr.devid, bp->pdev->addr.function);
3856 
3857 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3858 	if (rc != 0)
3859 		return rc;
3860 
3861 	return dir_entries * entry_length;
3862 }
3863 
3864 static int
3865 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3866 		struct rte_dev_eeprom_info *in_eeprom)
3867 {
3868 	struct bnxt *bp = dev->data->dev_private;
3869 	uint32_t index;
3870 	uint32_t offset;
3871 	int rc;
3872 
3873 	rc = is_bnxt_in_error(bp);
3874 	if (rc)
3875 		return rc;
3876 
3877 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3878 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3879 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3880 		    in_eeprom->offset, in_eeprom->length);
3881 
3882 	if (in_eeprom->offset == 0) /* special offset value to get directory */
3883 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
3884 						in_eeprom->data);
3885 
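	/*
	 * in_eeprom->offset encodes a 1-based directory index in its top
	 * byte and the byte offset within that item in the low 24 bits
	 * (offset 0 as a whole selects the directory listing above).
	 */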
3886 	index = in_eeprom->offset >> 24;
3887 	offset = in_eeprom->offset & 0xffffff;
3888 
3889 	if (index != 0)
3890 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
3891 					   in_eeprom->length, in_eeprom->data);
3892 
3893 	return 0;
3894 }
3895 
3896 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
3897 {
3898 	switch (dir_type) {
3899 	case BNX_DIR_TYPE_CHIMP_PATCH:
3900 	case BNX_DIR_TYPE_BOOTCODE:
3901 	case BNX_DIR_TYPE_BOOTCODE_2:
3902 	case BNX_DIR_TYPE_APE_FW:
3903 	case BNX_DIR_TYPE_APE_PATCH:
3904 	case BNX_DIR_TYPE_KONG_FW:
3905 	case BNX_DIR_TYPE_KONG_PATCH:
3906 	case BNX_DIR_TYPE_BONO_FW:
3907 	case BNX_DIR_TYPE_BONO_PATCH:
3908 		/* FALLTHROUGH */
3909 		return true;
3910 	}
3911 
3912 	return false;
3913 }
3914 
3915 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
3916 {
3917 	switch (dir_type) {
3918 	case BNX_DIR_TYPE_AVS:
3919 	case BNX_DIR_TYPE_EXP_ROM_MBA:
3920 	case BNX_DIR_TYPE_PCIE:
3921 	case BNX_DIR_TYPE_TSCF_UCODE:
3922 	case BNX_DIR_TYPE_EXT_PHY:
3923 	case BNX_DIR_TYPE_CCM:
3924 	case BNX_DIR_TYPE_ISCSI_BOOT:
3925 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3926 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3927 		/* FALLTHROUGH */
3928 		return true;
3929 	}
3930 
3931 	return false;
3932 }
3933 
3934 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
3935 {
3936 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3937 		bnxt_dir_type_is_other_exec_format(dir_type);
3938 }
3939 
3940 static int
3941 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
3942 		struct rte_dev_eeprom_info *in_eeprom)
3943 {
3944 	struct bnxt *bp = dev->data->dev_private;
3945 	uint8_t index, dir_op;
3946 	uint16_t type, ext, ordinal, attr;
3947 	int rc;
3948 
3949 	rc = is_bnxt_in_error(bp);
3950 	if (rc)
3951 		return rc;
3952 
3953 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3954 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3955 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3956 		    in_eeprom->offset, in_eeprom->length);
3957 
3958 	if (!BNXT_PF(bp)) {
3959 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
3960 		return -EINVAL;
3961 	}
3962 
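	/*
	 * in_eeprom->magic packs the NVM directory entry type in its upper
	 * 16 bits. The special type 0xffff selects a directory operation,
	 * with the 1-based entry index in bits 7:0 and the operation code
	 * in bits 15:8 (0x0e == erase).
	 */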
3963 	type = in_eeprom->magic >> 16;
3964 
3965 	if (type == 0xffff) { /* special value for directory operations */
3966 		index = in_eeprom->magic & 0xff;
3967 		dir_op = in_eeprom->magic >> 8;
3968 		if (index == 0)
3969 			return -EINVAL;
3970 		switch (dir_op) {
3971 		case 0x0e: /* erase */
3972 			if (in_eeprom->offset != ~in_eeprom->magic)
3973 				return -EINVAL;
3974 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
3975 		default:
3976 			return -EINVAL;
3977 		}
3978 	}
3979 
3980 	/* Create or re-write an NVM item: */
3981 	if (bnxt_dir_type_is_executable(type))
3982 		return -EOPNOTSUPP;
3983 	ext = in_eeprom->magic & 0xffff;
3984 	ordinal = in_eeprom->offset >> 16;
3985 	attr = in_eeprom->offset & 0xffff;
3986 
3987 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
3988 				     in_eeprom->data, in_eeprom->length);
3989 }
3990 
3991 /*
3992  * Initialization
3993  */
3994 
3995 static const struct eth_dev_ops bnxt_dev_ops = {
3996 	.dev_infos_get = bnxt_dev_info_get_op,
3997 	.dev_close = bnxt_dev_close_op,
3998 	.dev_configure = bnxt_dev_configure_op,
3999 	.dev_start = bnxt_dev_start_op,
4000 	.dev_stop = bnxt_dev_stop_op,
4001 	.dev_set_link_up = bnxt_dev_set_link_up_op,
4002 	.dev_set_link_down = bnxt_dev_set_link_down_op,
4003 	.stats_get = bnxt_stats_get_op,
4004 	.stats_reset = bnxt_stats_reset_op,
4005 	.rx_queue_setup = bnxt_rx_queue_setup_op,
4006 	.rx_queue_release = bnxt_rx_queue_release_op,
4007 	.tx_queue_setup = bnxt_tx_queue_setup_op,
4008 	.tx_queue_release = bnxt_tx_queue_release_op,
4009 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4010 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4011 	.reta_update = bnxt_reta_update_op,
4012 	.reta_query = bnxt_reta_query_op,
4013 	.rss_hash_update = bnxt_rss_hash_update_op,
4014 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4015 	.link_update = bnxt_link_update_op,
4016 	.promiscuous_enable = bnxt_promiscuous_enable_op,
4017 	.promiscuous_disable = bnxt_promiscuous_disable_op,
4018 	.allmulticast_enable = bnxt_allmulticast_enable_op,
4019 	.allmulticast_disable = bnxt_allmulticast_disable_op,
4020 	.mac_addr_add = bnxt_mac_addr_add_op,
4021 	.mac_addr_remove = bnxt_mac_addr_remove_op,
4022 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
4023 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
4024 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4025 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4026 	.vlan_filter_set = bnxt_vlan_filter_set_op,
4027 	.vlan_offload_set = bnxt_vlan_offload_set_op,
4028 	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
4029 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
4030 	.mtu_set = bnxt_mtu_set_op,
4031 	.mac_addr_set = bnxt_set_default_mac_addr_op,
4032 	.xstats_get = bnxt_dev_xstats_get_op,
4033 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
4034 	.xstats_reset = bnxt_dev_xstats_reset_op,
4035 	.fw_version_get = bnxt_fw_version_get,
4036 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4037 	.rxq_info_get = bnxt_rxq_info_get_op,
4038 	.txq_info_get = bnxt_txq_info_get_op,
4039 	.dev_led_on = bnxt_dev_led_on_op,
4040 	.dev_led_off = bnxt_dev_led_off_op,
4041 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4042 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4043 	.rx_queue_count = bnxt_rx_queue_count_op,
4044 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
4045 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
4046 	.rx_queue_start = bnxt_rx_queue_start,
4047 	.rx_queue_stop = bnxt_rx_queue_stop,
4048 	.tx_queue_start = bnxt_tx_queue_start,
4049 	.tx_queue_stop = bnxt_tx_queue_stop,
4050 	.filter_ctrl = bnxt_filter_ctrl_op,
4051 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4052 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
4053 	.get_eeprom           = bnxt_get_eeprom_op,
4054 	.set_eeprom           = bnxt_set_eeprom_op,
4055 	.timesync_enable      = bnxt_timesync_enable,
4056 	.timesync_disable     = bnxt_timesync_disable,
4057 	.timesync_read_time   = bnxt_timesync_read_time,
4058 	.timesync_write_time   = bnxt_timesync_write_time,
4059 	.timesync_adjust_time = bnxt_timesync_adjust_time,
4060 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4061 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4062 };
4063 
4064 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4065 {
4066 	uint32_t offset;
4067 
4068 	/* Only pre-map the reset GRC registers using window 3 */
4069 	rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4070 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4071 
4072 	offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4073 
4074 	return offset;
4075 }
4076 
4077 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4078 {
4079 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4080 	uint32_t reg_base = 0xffffffff;
4081 	int i;
4082 
4083 	/* Only pre-map the monitoring GRC registers using window 2 */
4084 	for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4085 		uint32_t reg = info->status_regs[i];
4086 
4087 		if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4088 			continue;
4089 
4090 		if (reg_base == 0xffffffff)
4091 			reg_base = reg & 0xfffff000;
4092 		if ((reg & 0xfffff000) != reg_base)
4093 			return -ERANGE;
4094 
4095 		/* Mask with 0xffc: the lower 2 bits encode the address
4096 		 * space location and are not part of the window offset
4097 		 */
4098 		info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4099 						(reg & 0xffc);
4100 	}
4101 
4102 	if (reg_base == 0xffffffff)
4103 		return 0;
4104 
4105 	rte_write32(reg_base, (uint8_t *)bp->bar0 +
4106 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4107 
4108 	return 0;
4109 }
4110 
4111 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4112 {
4113 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4114 	uint32_t delay = info->delay_after_reset[index];
4115 	uint32_t val = info->reset_reg_val[index];
4116 	uint32_t reg = info->reset_reg[index];
4117 	uint32_t type, offset;
4118 
4119 	type = BNXT_FW_STATUS_REG_TYPE(reg);
4120 	offset = BNXT_FW_STATUS_REG_OFF(reg);
4121 
4122 	switch (type) {
4123 	case BNXT_FW_STATUS_REG_TYPE_CFG:
4124 		rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4125 		break;
4126 	case BNXT_FW_STATUS_REG_TYPE_GRC:
4127 		offset = bnxt_map_reset_regs(bp, offset);
4128 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
4129 		break;
4130 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
4131 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
4132 		break;
4133 	}
4134 	/* Wait for the FW-specified delay for the core reset to complete */
4135 	if (delay)
4136 		rte_delay_ms(delay);
4137 }
4138 
4139 static void bnxt_dev_cleanup(struct bnxt *bp)
4140 {
4141 	bnxt_set_hwrm_link_config(bp, false);
4142 	bp->link_info.link_up = 0;
4143 	if (bp->eth_dev->data->dev_started)
4144 		bnxt_dev_stop_op(bp->eth_dev);
4145 
4146 	bnxt_uninit_resources(bp, true);
4147 }
4148 
4149 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4150 {
4151 	struct rte_eth_dev *dev = bp->eth_dev;
4152 	struct rte_vlan_filter_conf *vfc;
4153 	int vidx, vbit, rc;
4154 	uint16_t vlan_id;
4155 
4156 	vfc = &dev->data->vlan_filter_conf;
4157 	for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4158 		vidx = vlan_id / 64;
4159 		vbit = vlan_id % 64;
4160 
4161 		/* Each bit corresponds to a VLAN id */
4162 		if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4163 			rc = bnxt_add_vlan_filter(bp, vlan_id);
4164 			if (rc)
4165 				return rc;
4166 		}
4167 	}
4168 
4169 	return 0;
4170 }
4171 
4172 static int bnxt_restore_mac_filters(struct bnxt *bp)
4173 {
4174 	struct rte_eth_dev *dev = bp->eth_dev;
4175 	struct rte_eth_dev_info dev_info;
4176 	struct rte_ether_addr *addr;
4177 	uint64_t pool_mask;
4178 	uint32_t pool = 0;
4179 	uint16_t i;
4180 	int rc;
4181 
4182 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4183 		return 0;
4184 
4185 	rc = bnxt_dev_info_get_op(dev, &dev_info);
4186 	if (rc)
4187 		return rc;
4188 
4189 	/* replay MAC address configuration */
4190 	for (i = 1; i < dev_info.max_mac_addrs; i++) {
4191 		addr = &dev->data->mac_addrs[i];
4192 
4193 		/* skip zero address */
4194 		if (rte_is_zero_ether_addr(addr))
4195 			continue;
4196 
4197 		pool = 0;
4198 		pool_mask = dev->data->mac_pool_sel[i];
4199 
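		/*
		 * mac_pool_sel[i] is a bitmask of the pools this address was
		 * added to; replay the add once per set bit.
		 */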
4200 		do {
4201 			if (pool_mask & 1ULL) {
4202 				rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4203 				if (rc)
4204 					return rc;
4205 			}
4206 			pool_mask >>= 1;
4207 			pool++;
4208 		} while (pool_mask);
4209 	}
4210 
4211 	return 0;
4212 }
4213 
4214 static int bnxt_restore_filters(struct bnxt *bp)
4215 {
4216 	struct rte_eth_dev *dev = bp->eth_dev;
4217 	int ret = 0;
4218 
4219 	if (dev->data->all_multicast) {
4220 		ret = bnxt_allmulticast_enable_op(dev);
4221 		if (ret)
4222 			return ret;
4223 	}
4224 	if (dev->data->promiscuous) {
4225 		ret = bnxt_promiscuous_enable_op(dev);
4226 		if (ret)
4227 			return ret;
4228 	}
4229 
4230 	ret = bnxt_restore_mac_filters(bp);
4231 	if (ret)
4232 		return ret;
4233 
4234 	ret = bnxt_restore_vlan_filters(bp);
4235 	/* TODO restore other filters as well */
4236 	return ret;
4237 }
4238 
4239 static void bnxt_dev_recover(void *arg)
4240 {
4241 	struct bnxt *bp = arg;
4242 	int timeout = bp->fw_reset_max_msecs;
4243 	int rc = 0;
4244 
4245 	/* Clear the fatal error flag so that device re-init can proceed */
4246 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4247 
4248 	do {
4249 		rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4250 		if (rc == 0)
4251 			break;
4252 		rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4253 		timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4254 	} while (rc && timeout);
4255 
4256 	if (rc) {
4257 		PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
4258 		goto err;
4259 	}
4260 
4261 	rc = bnxt_init_resources(bp, true);
4262 	if (rc) {
4263 		PMD_DRV_LOG(ERR,
4264 			    "Failed to initialize resources after reset\n");
4265 		goto err;
4266 	}
4267 	/* clear reset flag as the device is initialized now */
4268 	bp->flags &= ~BNXT_FLAG_FW_RESET;
4269 
4270 	rc = bnxt_dev_start_op(bp->eth_dev);
4271 	if (rc) {
4272 		PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4273 		goto err_start;
4274 	}
4275 
4276 	rc = bnxt_restore_filters(bp);
4277 	if (rc)
4278 		goto err_start;
4279 
4280 	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4281 	return;
4282 err_start:
4283 	bnxt_dev_stop_op(bp->eth_dev);
4284 err:
4285 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4286 	bnxt_uninit_resources(bp, false);
4287 	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4288 }
4289 
4290 void bnxt_dev_reset_and_resume(void *arg)
4291 {
4292 	struct bnxt *bp = arg;
4293 	int rc;
4294 
4295 	bnxt_dev_cleanup(bp);
4296 
4297 	bnxt_wait_for_device_shutdown(bp);
4298 
4299 	rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4300 			       bnxt_dev_recover, (void *)bp);
4301 	if (rc)
4302 		PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4303 }
4304 
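/*
 * Status registers are encoded as (offset | space): the low two bits select
 * PCI config space, a pre-mapped GRC window, or a direct BAR0 offset, and
 * the remaining bits hold the register offset.
 */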
4305 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4306 {
4307 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4308 	uint32_t reg = info->status_regs[index];
4309 	uint32_t type, offset, val = 0;
4310 
4311 	type = BNXT_FW_STATUS_REG_TYPE(reg);
4312 	offset = BNXT_FW_STATUS_REG_OFF(reg);
4313 
4314 	switch (type) {
4315 	case BNXT_FW_STATUS_REG_TYPE_CFG:
4316 		rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4317 		break;
4318 	case BNXT_FW_STATUS_REG_TYPE_GRC:
4319 		offset = info->mapped_status_regs[index];
4320 		/* FALLTHROUGH */
4321 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
4322 		val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4323 				       offset));
4324 		break;
4325 	}
4326 
4327 	return val;
4328 }
4329 
4330 static int bnxt_fw_reset_all(struct bnxt *bp)
4331 {
4332 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4333 	uint32_t i;
4334 	int rc = 0;
4335 
4336 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4337 		/* Reset through master function driver */
4338 		for (i = 0; i < info->reg_array_cnt; i++)
4339 			bnxt_write_fw_reset_reg(bp, i);
4340 		/* Wait for time specified by FW after triggering reset */
4341 		rte_delay_ms(info->master_func_wait_period_after_reset);
4342 	} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4343 		/* Reset with the help of Kong processor */
4344 		rc = bnxt_hwrm_fw_reset(bp);
4345 		if (rc)
4346 			PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4347 	}
4348 
4349 	return rc;
4350 }
4351 
4352 static void bnxt_fw_reset_cb(void *arg)
4353 {
4354 	struct bnxt *bp = arg;
4355 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4356 	int rc = 0;
4357 
4358 	/* Only the master function can initiate a FW reset */
4359 	if (bnxt_is_master_func(bp) &&
4360 	    bnxt_is_recovery_enabled(bp)) {
4361 		rc = bnxt_fw_reset_all(bp);
4362 		if (rc) {
4363 			PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4364 			return;
4365 		}
4366 	}
4367 
4368 	/* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG sends the
4369 	 * EXCEPTION_FATAL_ASYNC event to all functions (including the
4370 	 * master function). On receiving this async event, every active
4371 	 * driver should treat it as FW-initiated recovery.
4372 	 */
4373 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4374 		bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4375 		bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4376 
4377 		/* To recover from error */
4378 		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4379 				  (void *)bp);
4380 	}
4381 }
4382 
4383 /* The driver should poll the FW heartbeat and reset_counter registers at
4384  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
4385  * When the driver detects a heartbeat stop or a change in reset_counter,
4386  * it has to trigger a reset to recover from the error condition.
4387  * The "master PF" is the function that has the privilege to initiate
4388  * the chimp reset. The master PF is elected by the firmware and is
4389  * notified through an async message.
4390  */
4391 static void bnxt_check_fw_health(void *arg)
4392 {
4393 	struct bnxt *bp = arg;
4394 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4395 	uint32_t val = 0, wait_msec;
4396 
4397 	if (!info || !bnxt_is_recovery_enabled(bp) ||
4398 	    is_bnxt_in_error(bp))
4399 		return;
4400 
4401 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4402 	if (val == info->last_heart_beat)
4403 		goto reset;
4404 
4405 	info->last_heart_beat = val;
4406 
4407 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4408 	if (val != info->last_reset_counter)
4409 		goto reset;
4410 
4411 	info->last_reset_counter = val;
4412 
4413 	rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4414 			  bnxt_check_fw_health, (void *)bp);
4415 
4416 	return;
4417 reset:
4418 	/* Stop DMA to/from device */
4419 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4420 	bp->flags |= BNXT_FLAG_FW_RESET;
4421 
4422 	PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4423 
4424 	if (bnxt_is_master_func(bp))
4425 		wait_msec = info->master_func_wait_period;
4426 	else
4427 		wait_msec = info->normal_func_wait_period;
4428 
4429 	rte_eal_alarm_set(US_PER_MS * wait_msec,
4430 			  bnxt_fw_reset_cb, (void *)bp);
4431 }
4432 
4433 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4434 {
4435 	uint32_t polling_freq;
4436 
4437 	if (!bnxt_is_recovery_enabled(bp))
4438 		return;
4439 
4440 	if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4441 		return;
4442 
4443 	polling_freq = bp->recovery_info->driver_polling_freq;
4444 
4445 	rte_eal_alarm_set(US_PER_MS * polling_freq,
4446 			  bnxt_check_fw_health, (void *)bp);
4447 	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4448 }
4449 
4450 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4451 {
4452 	if (!bnxt_is_recovery_enabled(bp))
4453 		return;
4454 
4455 	rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4456 	bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4457 }
4458 
4459 static bool bnxt_vf_pciid(uint16_t device_id)
4460 {
4461 	switch (device_id) {
4462 	case BROADCOM_DEV_ID_57304_VF:
4463 	case BROADCOM_DEV_ID_57406_VF:
4464 	case BROADCOM_DEV_ID_5731X_VF:
4465 	case BROADCOM_DEV_ID_5741X_VF:
4466 	case BROADCOM_DEV_ID_57414_VF:
4467 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4468 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4469 	case BROADCOM_DEV_ID_58802_VF:
4470 	case BROADCOM_DEV_ID_57500_VF1:
4471 	case BROADCOM_DEV_ID_57500_VF2:
4472 		/* FALLTHROUGH */
4473 		return true;
4474 	default:
4475 		return false;
4476 	}
4477 }
4478 
4479 static bool bnxt_thor_device(uint16_t device_id)
4480 {
4481 	switch (device_id) {
4482 	case BROADCOM_DEV_ID_57508:
4483 	case BROADCOM_DEV_ID_57504:
4484 	case BROADCOM_DEV_ID_57502:
4485 	case BROADCOM_DEV_ID_57508_MF1:
4486 	case BROADCOM_DEV_ID_57504_MF1:
4487 	case BROADCOM_DEV_ID_57502_MF1:
4488 	case BROADCOM_DEV_ID_57508_MF2:
4489 	case BROADCOM_DEV_ID_57504_MF2:
4490 	case BROADCOM_DEV_ID_57502_MF2:
4491 	case BROADCOM_DEV_ID_57500_VF1:
4492 	case BROADCOM_DEV_ID_57500_VF2:
4493 		/* FALLTHROUGH */
4494 		return true;
4495 	default:
4496 		return false;
4497 	}
4498 }
4499 
4500 bool bnxt_stratus_device(struct bnxt *bp)
4501 {
4502 	uint16_t device_id = bp->pdev->id.device_id;
4503 
4504 	switch (device_id) {
4505 	case BROADCOM_DEV_ID_STRATUS_NIC:
4506 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4507 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4508 		/* FALLTHROUGH */
4509 		return true;
4510 	default:
4511 		return false;
4512 	}
4513 }
4514 
4515 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4516 {
4517 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4518 	struct bnxt *bp = eth_dev->data->dev_private;
4519 
4520 	/* Map BAR0 (device registers) and BAR2 (doorbells) for access */
4521 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4522 	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4523 	if (!bp->bar0 || !bp->doorbell_base) {
4524 		PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4525 		return -ENODEV;
4526 	}
4527 
4528 	bp->eth_dev = eth_dev;
4529 	bp->pdev = pci_dev;
4530 
4531 	return 0;
4532 }
4533 
4534 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4535 				  struct bnxt_ctx_pg_info *ctx_pg,
4536 				  uint32_t mem_size,
4537 				  const char *suffix,
4538 				  uint16_t idx)
4539 {
4540 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4541 	const struct rte_memzone *mz = NULL;
4542 	char mz_name[RTE_MEMZONE_NAMESIZE];
4543 	rte_iova_t mz_phys_addr;
4544 	uint64_t valid_bits = 0;
4545 	uint32_t sz;
4546 	int i;
4547 
4548 	if (!mem_size)
4549 		return 0;
4550 
4551 	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4552 			 BNXT_PAGE_SIZE;
4553 	rmem->page_size = BNXT_PAGE_SIZE;
4554 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
4555 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
4556 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4557 
4558 	valid_bits = PTU_PTE_VALID;
4559 
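	/*
	 * A backing store spanning more than one page needs a page-table
	 * level: a small IOVA-contiguous memzone of PTEs (tagged with PTU
	 * valid bits) that the firmware walks to find the data pages. A
	 * single page is referenced directly.
	 */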
4560 	if (rmem->nr_pages > 1) {
4561 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4562 			 "bnxt_ctx_pg_tbl%s_%x_%d",
4563 			 suffix, idx, bp->eth_dev->data->port_id);
4564 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4565 		mz = rte_memzone_lookup(mz_name);
4566 		if (!mz) {
4567 			mz = rte_memzone_reserve_aligned(mz_name,
4568 						rmem->nr_pages * 8,
4569 						SOCKET_ID_ANY,
4570 						RTE_MEMZONE_2MB |
4571 						RTE_MEMZONE_SIZE_HINT_ONLY |
4572 						RTE_MEMZONE_IOVA_CONTIG,
4573 						BNXT_PAGE_SIZE);
4574 			if (mz == NULL)
4575 				return -ENOMEM;
4576 		}
4577 
4578 		memset(mz->addr, 0, mz->len);
4579 		mz_phys_addr = mz->iova;
4580 
4581 		rmem->pg_tbl = mz->addr;
4582 		rmem->pg_tbl_map = mz_phys_addr;
4583 		rmem->pg_tbl_mz = mz;
4584 	}
4585 
4586 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4587 		 suffix, idx, bp->eth_dev->data->port_id);
4588 	mz = rte_memzone_lookup(mz_name);
4589 	if (!mz) {
4590 		mz = rte_memzone_reserve_aligned(mz_name,
4591 						 mem_size,
4592 						 SOCKET_ID_ANY,
4593 						 RTE_MEMZONE_1GB |
4594 						 RTE_MEMZONE_SIZE_HINT_ONLY |
4595 						 RTE_MEMZONE_IOVA_CONTIG,
4596 						 BNXT_PAGE_SIZE);
4597 		if (mz == NULL)
4598 			return -ENOMEM;
4599 	}
4600 
4601 	memset(mz->addr, 0, mz->len);
4602 	mz_phys_addr = mz->iova;
4603 
4604 	for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4605 		rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4606 		rmem->dma_arr[i] = mz_phys_addr + sz;
4607 
4608 		if (rmem->nr_pages > 1) {
4609 			if (i == rmem->nr_pages - 2 &&
4610 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4611 				valid_bits |= PTU_PTE_NEXT_TO_LAST;
4612 			else if (i == rmem->nr_pages - 1 &&
4613 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4614 				valid_bits |= PTU_PTE_LAST;
4615 
4616 			rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4617 							   valid_bits);
4618 		}
4619 	}
4620 
4621 	rmem->mz = mz;
4622 	if (rmem->vmem_size)
4623 		rmem->vmem = (void **)mz->addr;
4624 	rmem->dma_arr[0] = mz_phys_addr;
4625 	return 0;
4626 }
4627 
4628 static void bnxt_free_ctx_mem(struct bnxt *bp)
4629 {
4630 	int i;
4631 
4632 	if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4633 		return;
4634 
4635 	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4636 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4637 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4638 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4639 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4640 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4641 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4642 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4643 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4644 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4645 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4646 
4647 	for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4648 		if (bp->ctx->tqm_mem[i])
4649 			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4650 	}
4651 
4652 	rte_free(bp->ctx);
4653 	bp->ctx = NULL;
4654 }
4655 
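/*
 * Linux-kernel-style arithmetic helpers: bnxt_roundup() rounds x up to the
 * next multiple of y, and min_t/max_t/clamp_t use GCC statement expressions
 * so each argument is evaluated exactly once.
 */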
4656 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4657 
4658 #define min_t(type, x, y) ({                    \
4659 	type __min1 = (x);                      \
4660 	type __min2 = (y);                      \
4661 	__min1 < __min2 ? __min1 : __min2; })
4662 
4663 #define max_t(type, x, y) ({                    \
4664 	type __max1 = (x);                      \
4665 	type __max2 = (y);                      \
4666 	__max1 > __max2 ? __max1 : __max2; })
4667 
4668 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4669 
4670 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4671 {
4672 	struct bnxt_ctx_pg_info *ctx_pg;
4673 	struct bnxt_ctx_mem_info *ctx;
4674 	uint32_t mem_size, ena, entries;
4675 	uint32_t entries_sp, min;
4676 	int i, rc;
4677 
4678 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4679 	if (rc) {
4680 		PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4681 		return rc;
4682 	}
4683 	ctx = bp->ctx;
4684 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4685 		return 0;
4686 
4687 	ctx_pg = &ctx->qp_mem;
4688 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4689 	mem_size = ctx->qp_entry_size * ctx_pg->entries;
4690 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4691 	if (rc)
4692 		return rc;
4693 
4694 	ctx_pg = &ctx->srq_mem;
4695 	ctx_pg->entries = ctx->srq_max_l2_entries;
4696 	mem_size = ctx->srq_entry_size * ctx_pg->entries;
4697 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4698 	if (rc)
4699 		return rc;
4700 
4701 	ctx_pg = &ctx->cq_mem;
4702 	ctx_pg->entries = ctx->cq_max_l2_entries;
4703 	mem_size = ctx->cq_entry_size * ctx_pg->entries;
4704 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4705 	if (rc)
4706 		return rc;
4707 
4708 	ctx_pg = &ctx->vnic_mem;
4709 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
4710 		ctx->vnic_max_ring_table_entries;
4711 	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4712 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4713 	if (rc)
4714 		return rc;
4715 
4716 	ctx_pg = &ctx->stat_mem;
4717 	ctx_pg->entries = ctx->stat_max_entries;
4718 	mem_size = ctx->stat_entry_size * ctx_pg->entries;
4719 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4720 	if (rc)
4721 		return rc;
4722 
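	/*
	 * TQM ring 0 (the slow-path ring) is sized for QP1 and VNIC work;
	 * the remaining fast-path rings get the L2 + QP1 entry count,
	 * rounded up to the FW-required multiple and clamped to FW limits.
	 */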
4723 	min = ctx->tqm_min_entries_per_ring;
4724 
4725 	entries_sp = ctx->qp_max_l2_entries +
4726 		     ctx->vnic_max_vnic_entries +
4727 		     2 * ctx->qp_min_qp1_entries + min;
4728 	entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4729 
4730 	entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4731 	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4732 	entries = clamp_t(uint32_t, entries, min,
4733 			  ctx->tqm_max_entries_per_ring);
4734 	for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4735 		ctx_pg = ctx->tqm_mem[i];
4736 		ctx_pg->entries = i ? entries : entries_sp;
4737 		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4738 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4739 		if (rc)
4740 			return rc;
4741 		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4742 	}
4743 
4744 	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4745 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4746 	if (rc)
4747 		PMD_DRV_LOG(ERR,
4748 			    "Failed to configure context mem: rc = %d\n", rc);
4749 	else
4750 		ctx->flags |= BNXT_CTX_FLAG_INITED;
4751 
4752 	return rc;
4753 }
4754 
4755 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4756 {
4757 	struct rte_pci_device *pci_dev = bp->pdev;
4758 	char mz_name[RTE_MEMZONE_NAMESIZE];
4759 	const struct rte_memzone *mz = NULL;
4760 	uint32_t total_alloc_len;
4761 	rte_iova_t mz_phys_addr;
4762 
4763 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4764 		return 0;
4765 
4766 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4767 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4768 		 pci_dev->addr.bus, pci_dev->addr.devid,
4769 		 pci_dev->addr.function, "rx_port_stats");
4770 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4771 	mz = rte_memzone_lookup(mz_name);
4772 	total_alloc_len =
4773 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4774 				       sizeof(struct rx_port_stats_ext) + 512);
4775 	if (!mz) {
4776 		mz = rte_memzone_reserve(mz_name, total_alloc_len,
4777 					 SOCKET_ID_ANY,
4778 					 RTE_MEMZONE_2MB |
4779 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4780 					 RTE_MEMZONE_IOVA_CONTIG);
4781 		if (mz == NULL)
4782 			return -ENOMEM;
4783 	}
4784 	memset(mz->addr, 0, mz->len);
4785 	mz_phys_addr = mz->iova;
4786 
4787 	bp->rx_mem_zone = (const void *)mz;
4788 	bp->hw_rx_port_stats = mz->addr;
4789 	bp->hw_rx_port_stats_map = mz_phys_addr;
4790 
4791 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4792 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4793 		 pci_dev->addr.bus, pci_dev->addr.devid,
4794 		 pci_dev->addr.function, "tx_port_stats");
4795 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4796 	mz = rte_memzone_lookup(mz_name);
4797 	total_alloc_len =
4798 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4799 				       sizeof(struct tx_port_stats_ext) + 512);
4800 	if (!mz) {
4801 		mz = rte_memzone_reserve(mz_name,
4802 					 total_alloc_len,
4803 					 SOCKET_ID_ANY,
4804 					 RTE_MEMZONE_2MB |
4805 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4806 					 RTE_MEMZONE_IOVA_CONTIG);
4807 		if (mz == NULL)
4808 			return -ENOMEM;
4809 	}
4810 	memset(mz->addr, 0, mz->len);
4811 	mz_phys_addr = mz->iova;
4812 
4813 	bp->tx_mem_zone = (const void *)mz;
4814 	bp->hw_tx_port_stats = mz->addr;
4815 	bp->hw_tx_port_stats_map = mz_phys_addr;
4816 	bp->flags |= BNXT_FLAG_PORT_STATS;
4817 
4818 	/* Display extended statistics if FW supports it */
4819 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4820 	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4821 	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4822 		return 0;
4823 
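	/* The extended stats are carved out of the same memzones, directly
	 * after the base stats structures (the extra 512 bytes reserved in
	 * total_alloc_len above).
	 */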
4824 	bp->hw_rx_port_stats_ext = (void *)
4825 		((uint8_t *)bp->hw_rx_port_stats +
4826 		 sizeof(struct rx_port_stats));
4827 	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4828 		sizeof(struct rx_port_stats);
4829 	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4830 
4831 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4832 	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4833 		bp->hw_tx_port_stats_ext = (void *)
4834 			((uint8_t *)bp->hw_tx_port_stats +
4835 			 sizeof(struct tx_port_stats));
4836 		bp->hw_tx_port_stats_ext_map =
4837 			bp->hw_tx_port_stats_map +
4838 			sizeof(struct tx_port_stats);
4839 		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4840 	}
4841 
4842 	return 0;
4843 }
4844 
4845 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4846 {
4847 	struct bnxt *bp = eth_dev->data->dev_private;
4848 	int rc = 0;
4849 
4850 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4851 					       RTE_ETHER_ADDR_LEN *
4852 					       bp->max_l2_ctx,
4853 					       0);
4854 	if (eth_dev->data->mac_addrs == NULL) {
4855 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4856 		return -ENOMEM;
4857 	}
4858 
4859 	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
4860 		if (BNXT_PF(bp))
4861 			return -EINVAL;
4862 
4863 		/* Generate a random MAC address if none was assigned by the PF */
4864 		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4865 		bnxt_eth_hw_addr_random(bp->mac_addr);
4866 		PMD_DRV_LOG(INFO,
4867 			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4868 			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4869 			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4870 
4871 		rc = bnxt_hwrm_set_mac(bp);
4872 		if (!rc)
4873 			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
4874 			       RTE_ETHER_ADDR_LEN);
4875 		return rc;
4876 	}
4877 
4878 	/* Copy the permanent MAC from the FUNC_QCAPS response */
4879 	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
4880 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
4881 
4882 	return rc;
4883 }
4884 
4885 static int bnxt_restore_dflt_mac(struct bnxt *bp)
4886 {
4887 	int rc = 0;
4888 
4889 	/* MAC is already configured in FW */
4890 	if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
4891 		return 0;
4892 
4893 	/* Restore the previously configured MAC */
4894 	rc = bnxt_hwrm_set_mac(bp);
4895 	if (rc)
4896 		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
4897 
4898 	return rc;
4899 }
4900 
4901 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
4902 {
4903 	if (!BNXT_PF(bp))
4904 		return;
4905 
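	/*
	 * bp->pf.vf_req_fwd[] is a bitmap indexed by HWRM command ID: word
	 * (id >> 5), bit (id & 0x1f). Clearing a command's bit allows the
	 * firmware to service that VF request directly instead of
	 * forwarding it to the PF driver for vetting.
	 */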
4906 #define ALLOW_FUNC(x)	\
4907 	{ \
4908 		uint32_t arg = (x); \
4909 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
4910 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
4911 	}
4912 
4913 	/*
	 * Forward all requests if the firmware is new enough. bp->fw_ver
	 * packs the version as (major << 24 | minor << 16 | update << 8),
	 * so the check below reads: 20.6.100 <= ver < 20.7.0, or
	 * ver >= 20.8.0.
	 */
4914 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
4915 	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
4916 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
4917 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
4918 	} else {
4919 		PMD_DRV_LOG(WARNING,
4920 			    "Firmware too old for VF mailbox functionality\n");
4921 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
4922 	}
4923 
4924 	/*
4925 	 * The following are used for driver cleanup. If we disallow these,
4926 	 * VF drivers can't clean up cleanly.
4927 	 */
4928 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
4929 	ALLOW_FUNC(HWRM_VNIC_FREE);
4930 	ALLOW_FUNC(HWRM_RING_FREE);
4931 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
4932 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
4933 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
4934 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
4935 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
4936 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
4937 }
4938 
4939 uint16_t
4940 bnxt_get_svif(uint16_t port_id, bool func_svif)
4941 {
4942 	struct rte_eth_dev *eth_dev;
4943 	struct bnxt *bp;
4944 
4945 	eth_dev = &rte_eth_devices[port_id];
4946 	bp = eth_dev->data->dev_private;
4947 
4948 	return func_svif ? bp->func_svif : bp->port_svif;
4949 }
4950 
4951 uint16_t
4952 bnxt_get_vnic_id(uint16_t port)
4953 {
4954 	struct rte_eth_dev *eth_dev;
4955 	struct bnxt_vnic_info *vnic;
4956 	struct bnxt *bp;
4957 
4958 	eth_dev = &rte_eth_devices[port];
4959 	bp = eth_dev->data->dev_private;
4960 
4961 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
4962 
4963 	return vnic->fw_vnic_id;
4964 }
4965 
4966 uint16_t
4967 bnxt_get_fw_func_id(uint16_t port)
4968 {
4969 	struct rte_eth_dev *eth_dev;
4970 	struct bnxt *bp;
4971 
4972 	eth_dev = &rte_eth_devices[port];
4973 	bp = eth_dev->data->dev_private;
4974 
4975 	return bp->fw_fid;
4976 }
4977 
4978 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
4979 {
4980 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4981 
4982 	if (info) {
4983 		if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
4984 			memset(info, 0, sizeof(*info));
4985 		return;
4986 	}
4987 
4988 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4989 		return;
4990 
4991 	info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4992 			   sizeof(*info), 0);
4993 	if (!info)
4994 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4995 
4996 	bp->recovery_info = info;
4997 }
4998 
4999 static void bnxt_check_fw_status(struct bnxt *bp)
5000 {
5001 	uint32_t fw_status;
5002 
5003 	if (!(bp->recovery_info &&
5004 	      (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5005 		return;
5006 
5007 	fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5008 	if (fw_status != BNXT_FW_STATUS_HEALTHY)
5009 		PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5010 			    fw_status);
5011 }
5012 
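/*
 * The hcomm status area lives at a well-known GRC location. Map GRC window 2
 * over it, verify the signature, and if valid pre-map the FW health status
 * register that the structure advertises.
 */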
5013 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5014 {
5015 	struct bnxt_error_recovery_info *info = bp->recovery_info;
5016 	uint32_t status_loc;
5017 	uint32_t sig_ver;
5018 
5019 	rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5020 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5021 	sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5022 				   BNXT_GRCP_WINDOW_2_BASE +
5023 				   offsetof(struct hcomm_status,
5024 					    sig_ver)));
5025 	/* If the signature is absent, then FW does not support this feature */
5026 	if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5027 	    HCOMM_STATUS_SIGNATURE_VAL)
5028 		return 0;
5029 
5030 	if (!info) {
5031 		info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5032 				   sizeof(*info), 0);
5033 		if (!info)
5034 			return -ENOMEM;
5035 		bp->recovery_info = info;
5036 	} else {
5037 		memset(info, 0, sizeof(*info));
5038 	}
5039 
5040 	status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5041 				      BNXT_GRCP_WINDOW_2_BASE +
5042 				      offsetof(struct hcomm_status,
5043 					       fw_status_loc)));
5044 
5045 	/* Only pre-map the FW health status GRC register */
5046 	if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5047 		return 0;
5048 
5049 	info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5050 	info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5051 		BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5052 
5053 	rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5054 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5055 
5056 	bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5057 
5058 	return 0;
5059 }
5060 
5061 static int bnxt_init_fw(struct bnxt *bp)
5062 {
5063 	uint16_t mtu;
5064 	int rc = 0;
5065 
5066 	bp->fw_cap = 0;
5067 
5068 	rc = bnxt_map_hcomm_fw_status_reg(bp);
5069 	if (rc)
5070 		return rc;
5071 
5072 	rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5073 	if (rc) {
5074 		bnxt_check_fw_status(bp);
5075 		return rc;
5076 	}
5077 
5078 	rc = bnxt_hwrm_func_reset(bp);
5079 	if (rc)
5080 		return -EIO;
5081 
5082 	rc = bnxt_hwrm_vnic_qcaps(bp);
5083 	if (rc)
5084 		return rc;
5085 
5086 	rc = bnxt_hwrm_queue_qportcfg(bp);
5087 	if (rc)
5088 		return rc;
5089 
5090 	/* Get the MAX capabilities for this function.
5091 	 * This function also allocates context memory for TQM rings and
5092 	 * informs the firmware about this allocated backing store memory.
5093 	 */
5094 	rc = bnxt_hwrm_func_qcaps(bp);
5095 	if (rc)
5096 		return rc;
5097 
5098 	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5099 	if (rc)
5100 		return rc;
5101 
5102 	bnxt_hwrm_port_mac_qcfg(bp);
5103 
5104 	rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
5105 	if (rc)
5106 		return rc;
5107 
5108 	bnxt_alloc_error_recovery_info(bp);
5109 	/* Get the adapter error recovery support info */
5110 	rc = bnxt_hwrm_error_recovery_qcfg(bp);
5111 	if (rc)
5112 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5113 
5114 	bnxt_hwrm_port_led_qcaps(bp);
5115 
5116 	return 0;
5117 }
5118 
5119 static int
5120 bnxt_init_locks(struct bnxt *bp)
5121 {
5122 	int err;
5123 
5124 	err = pthread_mutex_init(&bp->flow_lock, NULL);
5125 	if (err) {
5126 		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5127 		return err;
5128 	}
5129 
5130 	err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5131 	if (err)
5132 		PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5133 	return err;
5134 }
5135 
5136 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5137 {
5138 	int rc;
5139 
5140 	rc = bnxt_init_fw(bp);
5141 	if (rc)
5142 		return rc;
5143 
5144 	if (!reconfig_dev) {
5145 		rc = bnxt_setup_mac_addr(bp->eth_dev);
5146 		if (rc)
5147 			return rc;
5148 	} else {
5149 		rc = bnxt_restore_dflt_mac(bp);
5150 		if (rc)
5151 			return rc;
5152 	}
5153 
5154 	bnxt_config_vf_req_fwd(bp);
5155 
5156 	rc = bnxt_hwrm_func_driver_register(bp);
5157 	if (rc) {
5158 		PMD_DRV_LOG(ERR, "Failed to register driver\n");
5159 		return -EBUSY;
5160 	}
5161 
5162 	if (BNXT_PF(bp)) {
5163 		if (bp->pdev->max_vfs) {
5164 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5165 			if (rc) {
5166 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5167 				return rc;
5168 			}
5169 		} else {
5170 			rc = bnxt_hwrm_allocate_pf_only(bp);
5171 			if (rc) {
5172 				PMD_DRV_LOG(ERR,
5173 					    "Failed to allocate PF resources\n");
5174 				return rc;
5175 			}
5176 		}
5177 	}
5178 
5179 	rc = bnxt_alloc_mem(bp, reconfig_dev);
5180 	if (rc)
5181 		return rc;
5182 
5183 	rc = bnxt_setup_int(bp);
5184 	if (rc)
5185 		return rc;
5186 
5187 	rc = bnxt_request_int(bp);
5188 	if (rc)
5189 		return rc;
5190 
5191 	rc = bnxt_init_ctx_mem(bp);
5192 	if (rc) {
5193 		PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5194 		return rc;
5195 	}
5196 
5197 	rc = bnxt_init_locks(bp);
5198 	if (rc)
5199 		return rc;
5200 
5201 	return 0;
5202 }
5203 
5204 static int
5205 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5206 			  const char *value, void *opaque_arg)
5207 {
5208 	struct bnxt *bp = opaque_arg;
5209 	unsigned long truflow;
5210 	char *end = NULL;
5211 
5212 	if (!value || !opaque_arg) {
5213 		PMD_DRV_LOG(ERR,
5214 			    "Invalid parameter passed to truflow devargs.\n");
5215 		return -EINVAL;
5216 	}
5217 
5218 	truflow = strtoul(value, &end, 10);
5219 	if (end == NULL || *end != '\0' ||
5220 	    (truflow == ULONG_MAX && errno == ERANGE)) {
5221 		PMD_DRV_LOG(ERR,
5222 			    "Invalid parameter passed to truflow devargs.\n");
5223 		return -EINVAL;
5224 	}
5225 
5226 	if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5227 		PMD_DRV_LOG(ERR,
5228 			    "Invalid value passed to truflow devargs.\n");
5229 		return -EINVAL;
5230 	}
5231 
5232 	bp->truflow = truflow;
5233 	if (bp->truflow)
5234 		PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5235 
5236 	return 0;
5237 }
5238 
5239 static int
5240 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5241 			     const char *value, void *opaque_arg)
5242 {
5243 	struct bnxt *bp = opaque_arg;
5244 	unsigned long flow_xstat;
5245 	char *end = NULL;
5246 
5247 	if (!value || !opaque_arg) {
5248 		PMD_DRV_LOG(ERR,
5249 			    "Invalid parameter passed to flow_xstat devarg.\n");
5250 		return -EINVAL;
5251 	}
5252 
5253 	flow_xstat = strtoul(value, &end, 10);
5254 	if (end == NULL || *end != '\0' ||
5255 	    (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5256 		PMD_DRV_LOG(ERR,
5257 			    "Invalid parameter passed to flow_xstat devarg.\n");
5258 		return -EINVAL;
5259 	}
5260 
5261 	if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5262 		PMD_DRV_LOG(ERR,
5263 			    "Invalid value passed to flow_xstat devarg.\n");
5264 		return -EINVAL;
5265 	}
5266 
5267 	bp->flow_xstat = flow_xstat;
5268 	if (bp->flow_xstat)
5269 		PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5270 
5271 	return 0;
5272 }
5273 
5274 static void
5275 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5276 {
5277 	struct rte_kvargs *kvlist;
5278 
5279 	if (devargs == NULL)
5280 		return;
5281 
5282 	kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5283 	if (kvlist == NULL)
5284 		return;
5285 
5286 	/*
5287 	 * Handler for "truflow" devarg.
5288 	 * Example: "-w 0000:00:0d.0,host-based-truflow=1"
5289 	 */
5290 	rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5291 			   bnxt_parse_devarg_truflow, bp);
5292 
5293 	/*
5294 	 * Handler for "flow_xstat" devarg.
5295 	 * Example: "-w 0000:00:0d.0,flow_xstat=1"
5296 	 */
5297 	rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5298 			   bnxt_parse_devarg_flow_xstat, bp);
5299 
5300 	rte_kvargs_free(kvlist);
5301 }
5302 
5303 static int
5304 bnxt_dev_init(struct rte_eth_dev *eth_dev)
5305 {
5306 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5307 	static int version_printed;
5308 	struct bnxt *bp;
5309 	int rc;
5310 
5311 	if (version_printed++ == 0)
5312 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5313 
5314 	eth_dev->dev_ops = &bnxt_dev_ops;
5315 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5316 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5317 
5318 	/*
5319 	 * For secondary processes, we don't initialise any further
5320 	 * as primary has already done this work.
5321 	 */
5322 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5323 		return 0;
5324 
5325 	rte_eth_copy_pci_info(eth_dev, pci_dev);
5326 
5327 	bp = eth_dev->data->dev_private;
5328 
5329 	/* Parse dev arguments passed on when starting the DPDK application. */
5330 	bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5331 
5332 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5333 
5334 	if (bnxt_vf_pciid(pci_dev->id.device_id))
5335 		bp->flags |= BNXT_FLAG_VF;
5336 
5337 	if (bnxt_thor_device(pci_dev->id.device_id))
5338 		bp->flags |= BNXT_FLAG_THOR_CHIP;
5339 
5340 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5341 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5342 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5343 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5344 		bp->flags |= BNXT_FLAG_STINGRAY;
5345 
5346 	rc = bnxt_init_board(eth_dev);
5347 	if (rc) {
5348 		PMD_DRV_LOG(ERR,
5349 			    "Failed to initialize board rc: %x\n", rc);
5350 		return rc;
5351 	}
5352 
5353 	rc = bnxt_alloc_hwrm_resources(bp);
5354 	if (rc) {
5355 		PMD_DRV_LOG(ERR,
5356 			    "Failed to allocate hwrm resource rc: %x\n", rc);
5357 		goto error_free;
5358 	}
5359 	rc = bnxt_init_resources(bp, false);
5360 	if (rc)
5361 		goto error_free;
5362 
5363 	rc = bnxt_alloc_stats_mem(bp);
5364 	if (rc)
5365 		goto error_free;
5366 
5367 	/* Pass the information to the rte_eth_dev_close() that it should also
5368 	 * release the private port resources.
5369 	 */
5370 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
5371 
5372 	PMD_DRV_LOG(INFO,
5373 		    DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %pM\n",
5374 		    pci_dev->mem_resource[0].phys_addr,
5375 		    pci_dev->mem_resource[0].addr);
5376 
5377 	return 0;
5378 
5379 error_free:
5380 	bnxt_dev_uninit(eth_dev);
5381 	return rc;
5382 }
5383 
5384 
5385 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5386 {
5387 	if (!ctx)
5388 		return;
5389 
5390 	/* rte_free() accepts NULL, so no need to check ctx->va first */
5391 	rte_free(ctx->va);
5392 
5393 	ctx->va = NULL;
5394 	ctx->dma = RTE_BAD_IOVA;
5395 	ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5396 }
5397 
5398 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5399 {
5400 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5401 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5402 				  bp->rx_fc_out_tbl.ctx_id,
5403 				  bp->max_fc,
5404 				  false);
5405 
5406 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5407 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5408 				  bp->tx_fc_out_tbl.ctx_id,
5409 				  bp->max_fc,
5410 				  false);
5411 
5412 	if (bp->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5413 		bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_in_tbl.ctx_id);
5414 	bp->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5415 
5416 	if (bp->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5417 		bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_out_tbl.ctx_id);
5418 	bp->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5419 
5420 	if (bp->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5421 		bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_in_tbl.ctx_id);
5422 	bp->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5423 
5424 	if (bp->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5425 		bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_out_tbl.ctx_id);
5426 	bp->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5427 }
5428 
5429 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5430 {
5431 	bnxt_unregister_fc_ctx_mem(bp);
5432 
5433 	bnxt_free_ctx_mem_buf(&bp->rx_fc_in_tbl);
5434 	bnxt_free_ctx_mem_buf(&bp->rx_fc_out_tbl);
5435 	bnxt_free_ctx_mem_buf(&bp->tx_fc_in_tbl);
5436 	bnxt_free_ctx_mem_buf(&bp->tx_fc_out_tbl);
5437 }
5438 
5439 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5440 {
5441 	bnxt_uninit_fc_ctx_mem(bp);
5442 }
5443 
5444 static void
5445 bnxt_free_error_recovery_info(struct bnxt *bp)
5446 {
5447 	rte_free(bp->recovery_info);
5448 	bp->recovery_info = NULL;
5449 	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5450 }
5451 
5452 static void
5453 bnxt_uninit_locks(struct bnxt *bp)
5454 {
5455 	pthread_mutex_destroy(&bp->flow_lock);
5456 	pthread_mutex_destroy(&bp->def_cp_lock);
5457 }
5458 
5459 static int
5460 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
5461 {
5462 	int rc;
5463 
5464 	bnxt_free_int(bp);
5465 	bnxt_free_mem(bp, reconfig_dev);
5466 	bnxt_hwrm_func_buf_unrgtr(bp);
5467 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
5468 	bp->flags &= ~BNXT_FLAG_REGISTERED;
5469 	bnxt_free_ctx_mem(bp);
5470 	if (!reconfig_dev) {
5471 		bnxt_free_hwrm_resources(bp);
5472 		bnxt_free_error_recovery_info(bp);
5473 	}
5474 
5475 	bnxt_uninit_ctx_mem(bp);
5476 
5477 	bnxt_uninit_locks(bp);
5478 	rte_free(bp->ptp_cfg);
5479 	bp->ptp_cfg = NULL;
5480 	return rc;
5481 }
5482 
5483 static int
5484 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
5485 {
5486 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5487 		return -EPERM;
5488 
5489 	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
5490 
5491 	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
5492 		bnxt_dev_close_op(eth_dev);
5493 
5494 	return 0;
5495 }
5496 
5497 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5498 	struct rte_pci_device *pci_dev)
5499 {
5500 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
5501 		bnxt_dev_init);
5502 }
5503 
5504 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
5505 {
5506 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
5507 		return rte_eth_dev_pci_generic_remove(pci_dev,
5508 				bnxt_dev_uninit);
5509 	else
5510 		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
5511 }
5512 
5513 static struct rte_pci_driver bnxt_rte_pmd = {
5514 	.id_table = bnxt_pci_id_map,
5515 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5516 	.probe = bnxt_pci_probe,
5517 	.remove = bnxt_pci_remove,
5518 };
5519 
5520 static bool
5521 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5522 {
5523 	if (strcmp(dev->device->driver->name, drv->driver.name))
5524 		return false;
5525 
5526 	return true;
5527 }
5528 
5529 bool is_bnxt_supported(struct rte_eth_dev *dev)
5530 {
5531 	return is_device_supported(dev, &bnxt_rte_pmd);
5532 }
5533 
5534 RTE_INIT(bnxt_init_log)
5535 {
5536 	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
5537 	if (bnxt_logtype_driver >= 0)
5538 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
5539 }
5540 
5541 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
5542 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
5543 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
5544