xref: /dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision c2faa1d1969ec53cfa9aa7f079ea7f1372ffddcd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 #include <stdbool.h>
8 
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 
15 #include "bnxt.h"
16 #include "bnxt_cpr.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_irq.h"
20 #include "bnxt_ring.h"
21 #include "bnxt_rxq.h"
22 #include "bnxt_rxr.h"
23 #include "bnxt_stats.h"
24 #include "bnxt_txq.h"
25 #include "bnxt_txr.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29 
30 #define DRV_MODULE_NAME		"bnxt"
31 static const char bnxt_version[] =
32 	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
33 int bnxt_logtype_driver;
34 
35 #define PCI_VENDOR_ID_BROADCOM 0x14E4
36 
37 #define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
38 #define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
39 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
40 #define BROADCOM_DEV_ID_57414_VF 0x16c1
41 #define BROADCOM_DEV_ID_57301 0x16c8
42 #define BROADCOM_DEV_ID_57302 0x16c9
43 #define BROADCOM_DEV_ID_57304_PF 0x16ca
44 #define BROADCOM_DEV_ID_57304_VF 0x16cb
45 #define BROADCOM_DEV_ID_57417_MF 0x16cc
46 #define BROADCOM_DEV_ID_NS2 0x16cd
47 #define BROADCOM_DEV_ID_57311 0x16ce
48 #define BROADCOM_DEV_ID_57312 0x16cf
49 #define BROADCOM_DEV_ID_57402 0x16d0
50 #define BROADCOM_DEV_ID_57404 0x16d1
51 #define BROADCOM_DEV_ID_57406_PF 0x16d2
52 #define BROADCOM_DEV_ID_57406_VF 0x16d3
53 #define BROADCOM_DEV_ID_57402_MF 0x16d4
54 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
55 #define BROADCOM_DEV_ID_57412 0x16d6
56 #define BROADCOM_DEV_ID_57414 0x16d7
57 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8
58 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9
59 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
60 #define BROADCOM_DEV_ID_57412_MF 0x16de
61 #define BROADCOM_DEV_ID_57314 0x16df
62 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0
63 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
64 #define BROADCOM_DEV_ID_57417_SFP 0x16e2
65 #define BROADCOM_DEV_ID_57416_SFP 0x16e3
66 #define BROADCOM_DEV_ID_57317_SFP 0x16e4
67 #define BROADCOM_DEV_ID_57404_MF 0x16e7
68 #define BROADCOM_DEV_ID_57406_MF 0x16e8
69 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
70 #define BROADCOM_DEV_ID_57407_MF 0x16ea
71 #define BROADCOM_DEV_ID_57414_MF 0x16ec
72 #define BROADCOM_DEV_ID_57416_MF 0x16ee
73 #define BROADCOM_DEV_ID_58802 0xd802
74 #define BROADCOM_DEV_ID_58804 0xd804
75 #define BROADCOM_DEV_ID_58808 0x16f0
76 
77 static const struct rte_pci_id bnxt_pci_id_map[] = {
78 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
79 			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
80 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
81 			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
82 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
83 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
84 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
85 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
86 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
87 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
88 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
89 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
90 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
91 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
92 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
93 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
94 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
95 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
96 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
97 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
98 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
99 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
100 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
101 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
102 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
103 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
104 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
105 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
106 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
107 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
108 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
109 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
110 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
111 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
112 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
113 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
114 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
115 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
119 	{ .vendor_id = 0, /* sentinel */ },
120 };
121 
122 #define BNXT_ETH_RSS_SUPPORT (	\
123 	ETH_RSS_IPV4 |		\
124 	ETH_RSS_NONFRAG_IPV4_TCP |	\
125 	ETH_RSS_NONFRAG_IPV4_UDP |	\
126 	ETH_RSS_IPV6 |		\
127 	ETH_RSS_NONFRAG_IPV6_TCP |	\
128 	ETH_RSS_NONFRAG_IPV6_UDP)
129 
130 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
131 				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
132 				     DEV_TX_OFFLOAD_TCP_CKSUM | \
133 				     DEV_TX_OFFLOAD_UDP_CKSUM | \
134 				     DEV_TX_OFFLOAD_TCP_TSO | \
135 				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
136 				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
137 				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
138 				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
139 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
140 				     DEV_TX_OFFLOAD_MULTI_SEGS)
141 
142 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
143 				     DEV_RX_OFFLOAD_VLAN_STRIP | \
144 				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
145 				     DEV_RX_OFFLOAD_UDP_CKSUM | \
146 				     DEV_RX_OFFLOAD_TCP_CKSUM | \
147 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
148 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
149 				     DEV_RX_OFFLOAD_CRC_STRIP | \
150 				     DEV_RX_OFFLOAD_TCP_LRO)
151 
152 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
153 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
154 
155 /***********************/
156 
157 /*
158  * High level utility functions
159  */
160 
161 static void bnxt_free_mem(struct bnxt *bp)
162 {
163 	bnxt_free_filter_mem(bp);
164 	bnxt_free_vnic_attributes(bp);
165 	bnxt_free_vnic_mem(bp);
166 
167 	bnxt_free_stats(bp);
168 	bnxt_free_tx_rings(bp);
169 	bnxt_free_rx_rings(bp);
170 	bnxt_free_def_cp_ring(bp);
171 }
172 
173 static int bnxt_alloc_mem(struct bnxt *bp)
174 {
175 	int rc;
176 
177 	/* Default completion ring */
178 	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
179 	if (rc)
180 		goto alloc_mem_err;
181 
182 	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
183 			      bp->def_cp_ring, "def_cp");
184 	if (rc)
185 		goto alloc_mem_err;
186 
187 	rc = bnxt_alloc_vnic_mem(bp);
188 	if (rc)
189 		goto alloc_mem_err;
190 
191 	rc = bnxt_alloc_vnic_attributes(bp);
192 	if (rc)
193 		goto alloc_mem_err;
194 
195 	rc = bnxt_alloc_filter_mem(bp);
196 	if (rc)
197 		goto alloc_mem_err;
198 
199 	return 0;
200 
201 alloc_mem_err:
202 	bnxt_free_mem(bp);
203 	return rc;
204 }
205 
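/*
 * Bring the device to an operational state: allocate HWRM stat contexts,
 * rings and ring groups, configure each VNIC (filters, RSS, TPA), set up
 * the Rx queue interrupt-vector mapping and bring the link up if needed.
 */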
206 static int bnxt_init_chip(struct bnxt *bp)
207 {
208 	unsigned int i;
209 	struct rte_eth_link new;
210 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
211 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
212 	uint32_t intr_vector = 0;
213 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
214 	uint32_t vec = BNXT_MISC_VEC_ID;
215 	int rc;
216 
217 	/* disable uio/vfio intr/eventfd mapping */
218 	rte_intr_disable(intr_handle);
219 
220 	if (bp->eth_dev->data->mtu > ETHER_MTU) {
221 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
222 			DEV_RX_OFFLOAD_JUMBO_FRAME;
223 		bp->flags |= BNXT_FLAG_JUMBO;
224 	} else {
225 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
226 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
227 		bp->flags &= ~BNXT_FLAG_JUMBO;
228 	}
229 
230 	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
231 	if (rc) {
232 		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
233 		goto err_out;
234 	}
235 
236 	rc = bnxt_alloc_hwrm_rings(bp);
237 	if (rc) {
238 		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
239 		goto err_out;
240 	}
241 
242 	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
243 	if (rc) {
244 		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
245 		goto err_out;
246 	}
247 
248 	rc = bnxt_mq_rx_configure(bp);
249 	if (rc) {
250 		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
251 		goto err_out;
252 	}
253 
254 	/* VNIC configuration */
255 	for (i = 0; i < bp->nr_vnics; i++) {
256 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
257 
258 		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
259 		if (rc) {
260 			PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
261 				i, rc);
262 			goto err_out;
263 		}
264 
265 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
266 		if (rc) {
267 			PMD_DRV_LOG(ERR,
268 				"HWRM vnic %d ctx alloc failure rc: %x\n",
269 				i, rc);
270 			goto err_out;
271 		}
272 
273 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
274 		if (rc) {
275 			PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
276 				i, rc);
277 			goto err_out;
278 		}
279 
280 		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
281 		if (rc) {
282 			PMD_DRV_LOG(ERR,
283 				"HWRM vnic %d filter failure rc: %x\n",
284 				i, rc);
285 			goto err_out;
286 		}
287 
288 		rc = bnxt_vnic_rss_configure(bp, vnic);
289 		if (rc) {
290 			PMD_DRV_LOG(ERR,
291 				    "HWRM vnic set RSS failure rc: %x\n", rc);
292 			goto err_out;
293 		}
294 
295 		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
296 
297 		if (bp->eth_dev->data->dev_conf.rxmode.offloads &
298 		    DEV_RX_OFFLOAD_TCP_LRO)
299 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
300 		else
301 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
302 	}
303 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
304 	if (rc) {
305 		PMD_DRV_LOG(ERR,
306 			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
307 		goto err_out;
308 	}
309 
310 	/* check and configure queue intr-vector mapping */
311 	if ((rte_intr_cap_multiple(intr_handle) ||
312 	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
313 	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
314 		intr_vector = bp->eth_dev->data->nb_rx_queues;
315 		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
316 		if (intr_vector > bp->rx_cp_nr_rings) {
317 			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
318 					bp->rx_cp_nr_rings);
319 			return -ENOTSUP;
320 		}
321 		if (rte_intr_efd_enable(intr_handle, intr_vector))
322 			return -1;
323 	}
324 
325 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
326 		intr_handle->intr_vec =
327 			rte_zmalloc("intr_vec",
328 				    bp->eth_dev->data->nb_rx_queues *
329 				    sizeof(int), 0);
330 		if (intr_handle->intr_vec == NULL) {
331 			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
332 				" intr_vec", bp->eth_dev->data->nb_rx_queues);
333 			return -ENOMEM;
334 		}
335 		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
336 			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
337 			 intr_handle->intr_vec, intr_handle->nb_efd,
338 			intr_handle->max_intr);
339 	}
340 
341 	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
342 	     queue_id++) {
343 		intr_handle->intr_vec[queue_id] = vec;
344 		if (vec < base + intr_handle->nb_efd - 1)
345 			vec++;
346 	}
347 
348 	/* enable uio/vfio intr/eventfd mapping */
349 	rte_intr_enable(intr_handle);
350 
351 	rc = bnxt_get_hwrm_link_config(bp, &new);
352 	if (rc) {
353 		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
354 		goto err_out;
355 	}
356 
357 	if (!bp->link_info.link_up) {
358 		rc = bnxt_set_hwrm_link_config(bp, true);
359 		if (rc) {
360 			PMD_DRV_LOG(ERR,
361 				"HWRM link config failure rc: %x\n", rc);
362 			goto err_out;
363 		}
364 	}
365 	bnxt_print_link_info(bp->eth_dev);
366 
367 	return 0;
368 
369 err_out:
370 	bnxt_free_all_hwrm_resources(bp);
371 
372 	/* Some of the error status returned by FW may not be from errno.h */
373 	if (rc > 0)
374 		rc = -EIO;
375 
376 	return rc;
377 }
378 
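/* Tear down the data path: free all HWRM resources, filters and VNICs. */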
379 static int bnxt_shutdown_nic(struct bnxt *bp)
380 {
381 	bnxt_free_all_hwrm_resources(bp);
382 	bnxt_free_all_filters(bp);
383 	bnxt_free_all_vnics(bp);
384 	return 0;
385 }
386 
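/* Initialize software state for ring groups, VNICs and filters. */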
387 static int bnxt_init_nic(struct bnxt *bp)
388 {
389 	int rc;
390 
391 	rc = bnxt_init_ring_grps(bp);
392 	if (rc)
393 		return rc;
394 
395 	bnxt_init_vnics(bp);
396 	bnxt_init_filters(bp);
397 
398 	return 0;
399 }
400 
401 /*
402  * Device configuration and status function
403  */
404 
405 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
406 				  struct rte_eth_dev_info *dev_info)
407 {
408 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
409 	uint16_t max_vnics, i, j, vpool, vrxq;
410 	unsigned int max_rx_rings;
411 
412 	/* MAC Specifics */
413 	dev_info->max_mac_addrs = bp->max_l2_ctx;
414 	dev_info->max_hash_mac_addrs = 0;
415 
416 	/* PF/VF specifics */
417 	if (BNXT_PF(bp))
418 		dev_info->max_vfs = bp->pdev->max_vfs;
419 	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
420 						RTE_MIN(bp->max_rsscos_ctx,
421 						bp->max_stat_ctx)));
422 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
423 	dev_info->max_rx_queues = max_rx_rings;
424 	dev_info->max_tx_queues = max_rx_rings;
425 	dev_info->reta_size = bp->max_rsscos_ctx;
426 	dev_info->hash_key_size = 40;
427 	max_vnics = bp->max_vnics;
428 
429 	/* Fast path specifics */
430 	dev_info->min_rx_bufsize = 1;
431 	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
432 				  + VLAN_TAG_SIZE;
433 
434 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
435 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
436 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
437 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
438 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
439 
440 	/* *INDENT-OFF* */
441 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
442 		.rx_thresh = {
443 			.pthresh = 8,
444 			.hthresh = 8,
445 			.wthresh = 0,
446 		},
447 		.rx_free_thresh = 32,
448 		/* If no descriptors are available, packets are dropped by default */
449 		.rx_drop_en = 1,
450 	};
451 
452 	dev_info->default_txconf = (struct rte_eth_txconf) {
453 		.tx_thresh = {
454 			.pthresh = 32,
455 			.hthresh = 0,
456 			.wthresh = 0,
457 		},
458 		.tx_free_thresh = 32,
459 		.tx_rs_thresh = 32,
460 	};
461 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
462 
463 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
464 
465 	/* *INDENT-ON* */
466 
467 	/*
468 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
469 	 *       need further investigation.
470 	 */
471 
472 	/* VMDq resources */
473 	vpool = 64; /* ETH_64_POOLS */
474 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
475 	for (i = 0; i < 4; vpool >>= 1, i++) {
476 		if (max_vnics > vpool) {
477 			for (j = 0; j < 5; vrxq >>= 1, j++) {
478 				if (dev_info->max_rx_queues > vrxq) {
479 					if (vpool > vrxq)
480 						vpool = vrxq;
481 					goto found;
482 				}
483 			}
484 			/* Not enough resources to support VMDq */
485 			break;
486 		}
487 	}
488 	/* Not enough resources to support VMDq */
489 	vpool = 0;
490 	vrxq = 0;
491 found:
492 	dev_info->max_vmdq_pools = vpool;
493 	dev_info->vmdq_queue_num = vrxq;
494 
495 	dev_info->vmdq_pool_base = 0;
496 	dev_info->vmdq_queue_base = 0;
497 }
498 
499 /* Configure the device based on the configuration provided */
500 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
501 {
502 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
503 	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
504 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
505 
506 	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
507 		PMD_DRV_LOG
508 			(ERR,
509 			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
510 			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
511 		return -ENOTSUP;
512 	}
513 
514 	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
515 		PMD_DRV_LOG
516 			(ERR,
517 			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
518 			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
519 		return -ENOTSUP;
520 	}
521 
522 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
523 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
524 
525 	/* Inherit new configurations */
526 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
527 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
528 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
529 	    bp->max_cp_rings ||
530 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
531 	    bp->max_stat_ctx ||
532 	    (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
533 		PMD_DRV_LOG(ERR,
534 			"Insufficient resources to support requested config\n");
535 		PMD_DRV_LOG(ERR,
536 			"Num Queues Requested: Tx %d, Rx %d\n",
537 			eth_dev->data->nb_tx_queues,
538 			eth_dev->data->nb_rx_queues);
539 		PMD_DRV_LOG(ERR,
540 			"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
541 			bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
542 			bp->max_stat_ctx, bp->max_ring_grps);
543 		return -ENOSPC;
544 	}
545 
546 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
547 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
548 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
549 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
550 
551 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
552 		eth_dev->data->mtu =
553 				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
554 				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
555 	return 0;
556 }
557 
558 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
559 {
560 	struct rte_eth_link *link = &eth_dev->data->dev_link;
561 
562 	if (link->link_status)
563 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
564 			eth_dev->data->port_id,
565 			(uint32_t)link->link_speed,
566 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
567 			("full-duplex") : ("half-duplex"));
568 	else
569 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
570 			eth_dev->data->port_id);
571 }
572 
573 static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
574 {
575 	bnxt_print_link_info(eth_dev);
576 	return 0;
577 }
578 
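/*
 * Start the device: initialize the chip, update the link status and apply
 * the requested VLAN filter/strip offloads.
 */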
579 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
580 {
581 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
582 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
583 	int vlan_mask = 0;
584 	int rc;
585 
586 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
587 		PMD_DRV_LOG(ERR,
588 			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
589 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
590 	}
591 	bp->dev_stopped = 0;
592 
593 	rc = bnxt_init_chip(bp);
594 	if (rc)
595 		goto error;
596 
597 	bnxt_link_update_op(eth_dev, 1);
598 
599 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
600 		vlan_mask |= ETH_VLAN_FILTER_MASK;
601 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
602 		vlan_mask |= ETH_VLAN_STRIP_MASK;
603 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
604 	if (rc)
605 		goto error;
606 
607 	bp->flags |= BNXT_FLAG_INIT_DONE;
608 	return 0;
609 
610 error:
611 	bnxt_shutdown_nic(bp);
612 	bnxt_free_tx_mbufs(bp);
613 	bnxt_free_rx_mbufs(bp);
614 	return rc;
615 }
616 
617 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
618 {
619 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
620 	int rc = 0;
621 
622 	if (!bp->link_info.link_up)
623 		rc = bnxt_set_hwrm_link_config(bp, true);
624 	if (!rc)
625 		eth_dev->data->dev_link.link_status = 1;
626 
627 	bnxt_print_link_info(eth_dev);
628 	return 0;
629 }
630 
631 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
632 {
633 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
634 
635 	eth_dev->data->dev_link.link_status = 0;
636 	bnxt_set_hwrm_link_config(bp, false);
637 	bp->link_info.link_up = 0;
638 
639 	return 0;
640 }
641 
642 /* Unload the driver, release resources */
643 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
644 {
645 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
646 
647 	if (bp->eth_dev->data->dev_started) {
648 		/* TBD: STOP HW queues DMA */
649 		eth_dev->data->dev_link.link_status = 0;
650 	}
651 	bnxt_set_hwrm_link_config(bp, false);
652 	bnxt_hwrm_port_clr_stats(bp);
653 	bp->flags &= ~BNXT_FLAG_INIT_DONE;
654 	bnxt_shutdown_nic(bp);
655 	bp->dev_stopped = 1;
656 }
657 
658 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
659 {
660 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
661 
662 	if (bp->dev_stopped == 0)
663 		bnxt_dev_stop_op(eth_dev);
664 
665 	bnxt_free_tx_mbufs(bp);
666 	bnxt_free_rx_mbufs(bp);
667 	bnxt_free_mem(bp);
668 	if (eth_dev->data->mac_addrs != NULL) {
669 		rte_free(eth_dev->data->mac_addrs);
670 		eth_dev->data->mac_addrs = NULL;
671 	}
672 	if (bp->grp_info != NULL) {
673 		rte_free(bp->grp_info);
674 		bp->grp_info = NULL;
675 	}
676 }
677 
678 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
679 				    uint32_t index)
680 {
681 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
682 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
683 	struct bnxt_vnic_info *vnic;
684 	struct bnxt_filter_info *filter, *temp_filter;
685 	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
686 	uint32_t i;
687 
688 	/*
689 	 * Loop through all VNICs from the specified filter flow pools to
690 	 * remove the corresponding MAC addr filter
691 	 */
692 	for (i = 0; i < pool; i++) {
693 		if (!(pool_mask & (1ULL << i)))
694 			continue;
695 
696 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
697 			filter = STAILQ_FIRST(&vnic->filter);
698 			while (filter) {
699 				temp_filter = STAILQ_NEXT(filter, next);
700 				if (filter->mac_index == index) {
701 					STAILQ_REMOVE(&vnic->filter, filter,
702 						      bnxt_filter_info, next);
703 					bnxt_hwrm_clear_l2_filter(bp, filter);
704 					filter->mac_index = INVALID_MAC_INDEX;
705 					memset(&filter->l2_addr, 0,
706 					       ETHER_ADDR_LEN);
707 					STAILQ_INSERT_TAIL(
708 							&bp->free_filter_list,
709 							filter, next);
710 				}
711 				filter = temp_filter;
712 			}
713 		}
714 	}
715 }
716 
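/*
 * Add an L2 filter for the given MAC address on the VNIC backing the
 * specified pool (not supported on VFs).
 */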
717 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
718 				struct ether_addr *mac_addr,
719 				uint32_t index, uint32_t pool)
720 {
721 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
722 	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
723 	struct bnxt_filter_info *filter;
724 
725 	if (BNXT_VF(bp)) {
726 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
727 		return -ENOTSUP;
728 	}
729 
730 	if (!vnic) {
731 		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
732 		return -EINVAL;
733 	}
734 	/* Attach requested MAC address to the new l2_filter */
735 	STAILQ_FOREACH(filter, &vnic->filter, next) {
736 		if (filter->mac_index == index) {
737 			PMD_DRV_LOG(ERR,
738 				"MAC addr already exists for pool %d\n", pool);
739 			return 0;
740 		}
741 	}
742 	filter = bnxt_alloc_filter(bp);
743 	if (!filter) {
744 		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
745 		return -ENODEV;
746 	}
747 	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
748 	filter->mac_index = index;
749 	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
750 	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
751 }
752 
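/*
 * Query the current link state from firmware, optionally polling until the
 * link comes up, and report a change to the application via the LSC callback.
 */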
753 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
754 {
755 	int rc = 0;
756 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
757 	struct rte_eth_link new;
758 	unsigned int cnt = BNXT_LINK_WAIT_CNT;
759 
760 	memset(&new, 0, sizeof(new));
761 	do {
762 		/* Retrieve link info from hardware */
763 		rc = bnxt_get_hwrm_link_config(bp, &new);
764 		if (rc) {
765 			new.link_speed = ETH_LINK_SPEED_100M;
766 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
767 			PMD_DRV_LOG(ERR,
768 				"Failed to retrieve link rc = 0x%x!\n", rc);
769 			goto out;
770 		}
771 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
772 
773 		if (!wait_to_complete)
774 			break;
775 	} while (!new.link_status && cnt--);
776 
777 out:
778 	/* Timed out or success */
779 	if (new.link_status != eth_dev->data->dev_link.link_status ||
780 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
781 		memcpy(&eth_dev->data->dev_link, &new,
782 			sizeof(struct rte_eth_link));
783 
784 		_rte_eth_dev_callback_process(eth_dev,
785 					      RTE_ETH_EVENT_INTR_LSC,
786 					      NULL);
787 
788 		bnxt_print_link_info(eth_dev);
789 	}
790 
791 	return rc;
792 }
793 
794 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
795 {
796 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
797 	struct bnxt_vnic_info *vnic;
798 
799 	if (bp->vnic_info == NULL)
800 		return;
801 
802 	vnic = &bp->vnic_info[0];
803 
804 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
805 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
806 }
807 
808 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
809 {
810 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
811 	struct bnxt_vnic_info *vnic;
812 
813 	if (bp->vnic_info == NULL)
814 		return;
815 
816 	vnic = &bp->vnic_info[0];
817 
818 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
819 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
820 }
821 
822 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
823 {
824 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
825 	struct bnxt_vnic_info *vnic;
826 
827 	if (bp->vnic_info == NULL)
828 		return;
829 
830 	vnic = &bp->vnic_info[0];
831 
832 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
833 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
834 }
835 
836 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
837 {
838 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
839 	struct bnxt_vnic_info *vnic;
840 
841 	if (bp->vnic_info == NULL)
842 		return;
843 
844 	vnic = &bp->vnic_info[0];
845 
846 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
847 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
848 }
849 
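/* Program the RSS redirection table into every VNIC in the filter-flow pools. */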
850 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
851 			    struct rte_eth_rss_reta_entry64 *reta_conf,
852 			    uint16_t reta_size)
853 {
854 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
855 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
856 	struct bnxt_vnic_info *vnic;
857 	int i;
858 
859 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
860 		return -EINVAL;
861 
862 	if (reta_size != HW_HASH_INDEX_SIZE) {
863 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
864 			"(%d) must equal the size supported by the hardware "
865 			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
866 		return -EINVAL;
867 	}
868 	/* Update the RSS VNIC(s) */
869 	for (i = 0; i < MAX_FF_POOLS; i++) {
870 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
871 			memcpy(vnic->rss_table, reta_conf, reta_size);
872 
873 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
874 		}
875 	}
876 	return 0;
877 }
878 
879 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
880 			      struct rte_eth_rss_reta_entry64 *reta_conf,
881 			      uint16_t reta_size)
882 {
883 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
884 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
885 	struct rte_intr_handle *intr_handle
886 		= &bp->pdev->intr_handle;
887 
888 	/* Retrieve from the default VNIC */
889 	if (!vnic)
890 		return -EINVAL;
891 	if (!vnic->rss_table)
892 		return -EINVAL;
893 
894 	if (reta_size != HW_HASH_INDEX_SIZE) {
895 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
896 			"(%d) must equal the size supported by the hardware "
897 			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
898 		return -EINVAL;
899 	}
900 	/* EW - need to revisit here copying from uint64_t to uint16_t */
901 	memcpy(reta_conf, vnic->rss_table, reta_size);
902 
903 	if (rte_intr_allow_others(intr_handle)) {
904 		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
905 			bnxt_dev_lsc_intr_setup(eth_dev);
906 	}
907 
908 	return 0;
909 }
910 
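/*
 * Translate the requested RSS hash types into HWRM hash types and program
 * them, along with the optional hash key, into every VNIC.
 */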
911 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
912 				   struct rte_eth_rss_conf *rss_conf)
913 {
914 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
915 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
916 	struct bnxt_vnic_info *vnic;
917 	uint16_t hash_type = 0;
918 	int i;
919 
920 	/*
921 	 * If the requested RSS enablement differs from what was set in
922 	 * dev_configure, return -EINVAL
923 	 */
924 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
925 		if (!rss_conf->rss_hf)
926 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
927 	} else {
928 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
929 			return -EINVAL;
930 	}
931 
932 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
933 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
934 
935 	if (rss_conf->rss_hf & ETH_RSS_IPV4)
936 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
937 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
938 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
939 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
940 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
941 	if (rss_conf->rss_hf & ETH_RSS_IPV6)
942 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
943 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
944 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
945 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
946 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
947 
948 	/* Update the RSS VNIC(s) */
949 	for (i = 0; i < MAX_FF_POOLS; i++) {
950 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
951 			vnic->hash_type = hash_type;
952 
953 			/*
954 			 * Use the supplied key if the key length is
955 			 * acceptable and the rss_key is not NULL
956 			 */
957 			if (rss_conf->rss_key &&
958 			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
959 				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
960 				       rss_conf->rss_key_len);
961 
962 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
963 		}
964 	}
965 	return 0;
966 }
967 
968 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
969 				     struct rte_eth_rss_conf *rss_conf)
970 {
971 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
972 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
973 	int len;
974 	uint32_t hash_types;
975 
976 	/* RSS configuration is the same for all VNICs */
977 	if (vnic && vnic->rss_hash_key) {
978 		if (rss_conf->rss_key) {
979 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
980 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
981 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
982 		}
983 
984 		hash_types = vnic->hash_type;
985 		rss_conf->rss_hf = 0;
986 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
987 			rss_conf->rss_hf |= ETH_RSS_IPV4;
988 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
989 		}
990 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
991 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
992 			hash_types &=
993 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
994 		}
995 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
996 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
997 			hash_types &=
998 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
999 		}
1000 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1001 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1002 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1003 		}
1004 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1005 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1006 			hash_types &=
1007 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1008 		}
1009 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1010 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1011 			hash_types &=
1012 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1013 		}
1014 		if (hash_types) {
1015 			PMD_DRV_LOG(ERR,
1016 				"Unknown RSS config from firmware (%08x), RSS disabled",
1017 				vnic->hash_type);
1018 			return -ENOTSUP;
1019 		}
1020 	} else {
1021 		rss_conf->rss_hf = 0;
1022 	}
1023 	return 0;
1024 }
1025 
1026 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1027 			       struct rte_eth_fc_conf *fc_conf)
1028 {
1029 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1030 	struct rte_eth_link link_info;
1031 	int rc;
1032 
1033 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1034 	if (rc)
1035 		return rc;
1036 
1037 	memset(fc_conf, 0, sizeof(*fc_conf));
1038 	if (bp->link_info.auto_pause)
1039 		fc_conf->autoneg = 1;
1040 	switch (bp->link_info.pause) {
1041 	case 0:
1042 		fc_conf->mode = RTE_FC_NONE;
1043 		break;
1044 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1045 		fc_conf->mode = RTE_FC_TX_PAUSE;
1046 		break;
1047 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1048 		fc_conf->mode = RTE_FC_RX_PAUSE;
1049 		break;
1050 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1051 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1052 		fc_conf->mode = RTE_FC_FULL;
1053 		break;
1054 	}
1055 	return 0;
1056 }
1057 
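/*
 * Apply the requested flow-control mode (none/rx/tx/full, autoneg or forced)
 * and push it to firmware via the link configuration.
 */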
1058 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1059 			       struct rte_eth_fc_conf *fc_conf)
1060 {
1061 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1062 
1063 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1064 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1065 		return -ENOTSUP;
1066 	}
1067 
1068 	switch (fc_conf->mode) {
1069 	case RTE_FC_NONE:
1070 		bp->link_info.auto_pause = 0;
1071 		bp->link_info.force_pause = 0;
1072 		break;
1073 	case RTE_FC_RX_PAUSE:
1074 		if (fc_conf->autoneg) {
1075 			bp->link_info.auto_pause =
1076 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1077 			bp->link_info.force_pause = 0;
1078 		} else {
1079 			bp->link_info.auto_pause = 0;
1080 			bp->link_info.force_pause =
1081 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1082 		}
1083 		break;
1084 	case RTE_FC_TX_PAUSE:
1085 		if (fc_conf->autoneg) {
1086 			bp->link_info.auto_pause =
1087 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1088 			bp->link_info.force_pause = 0;
1089 		} else {
1090 			bp->link_info.auto_pause = 0;
1091 			bp->link_info.force_pause =
1092 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1093 		}
1094 		break;
1095 	case RTE_FC_FULL:
1096 		if (fc_conf->autoneg) {
1097 			bp->link_info.auto_pause =
1098 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1099 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1100 			bp->link_info.force_pause = 0;
1101 		} else {
1102 			bp->link_info.auto_pause = 0;
1103 			bp->link_info.force_pause =
1104 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1105 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1106 		}
1107 		break;
1108 	}
1109 	return bnxt_set_hwrm_link_config(bp, true);
1110 }
1111 
1112 /* Add UDP tunneling port */
1113 static int
1114 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1115 			 struct rte_eth_udp_tunnel *udp_tunnel)
1116 {
1117 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1118 	uint16_t tunnel_type = 0;
1119 	int rc = 0;
1120 
1121 	switch (udp_tunnel->prot_type) {
1122 	case RTE_TUNNEL_TYPE_VXLAN:
1123 		if (bp->vxlan_port_cnt) {
1124 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1125 				udp_tunnel->udp_port);
1126 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1127 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1128 				return -ENOSPC;
1129 			}
1130 			bp->vxlan_port_cnt++;
1131 			return 0;
1132 		}
1133 		tunnel_type =
1134 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1135 		bp->vxlan_port_cnt++;
1136 		break;
1137 	case RTE_TUNNEL_TYPE_GENEVE:
1138 		if (bp->geneve_port_cnt) {
1139 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1140 				udp_tunnel->udp_port);
1141 			if (bp->geneve_port != udp_tunnel->udp_port) {
1142 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1143 				return -ENOSPC;
1144 			}
1145 			bp->geneve_port_cnt++;
1146 			return 0;
1147 		}
1148 		tunnel_type =
1149 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1150 		bp->geneve_port_cnt++;
1151 		break;
1152 	default:
1153 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1154 		return -ENOTSUP;
1155 	}
1156 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1157 					     tunnel_type);
1158 	return rc;
1159 }
1160 
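/* Delete UDP tunneling port */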
1161 static int
1162 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1163 			 struct rte_eth_udp_tunnel *udp_tunnel)
1164 {
1165 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1166 	uint16_t tunnel_type = 0;
1167 	uint16_t port = 0;
1168 	int rc = 0;
1169 
1170 	switch (udp_tunnel->prot_type) {
1171 	case RTE_TUNNEL_TYPE_VXLAN:
1172 		if (!bp->vxlan_port_cnt) {
1173 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1174 			return -EINVAL;
1175 		}
1176 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1177 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1178 				udp_tunnel->udp_port, bp->vxlan_port);
1179 			return -EINVAL;
1180 		}
1181 		if (--bp->vxlan_port_cnt)
1182 			return 0;
1183 
1184 		tunnel_type =
1185 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1186 		port = bp->vxlan_fw_dst_port_id;
1187 		break;
1188 	case RTE_TUNNEL_TYPE_GENEVE:
1189 		if (!bp->geneve_port_cnt) {
1190 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1191 			return -EINVAL;
1192 		}
1193 		if (bp->geneve_port != udp_tunnel->udp_port) {
1194 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1195 				udp_tunnel->udp_port, bp->geneve_port);
1196 			return -EINVAL;
1197 		}
1198 		if (--bp->geneve_port_cnt)
1199 			return 0;
1200 
1201 		tunnel_type =
1202 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1203 		port = bp->geneve_fw_dst_port_id;
1204 		break;
1205 	default:
1206 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1207 		return -ENOTSUP;
1208 	}
1209 
1210 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1211 	if (!rc) {
1212 		if (tunnel_type ==
1213 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1214 			bp->vxlan_port = 0;
1215 		if (tunnel_type ==
1216 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1217 			bp->geneve_port = 0;
1218 	}
1219 	return rc;
1220 }
1221 
1222 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1223 {
1224 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1225 	struct bnxt_vnic_info *vnic;
1226 	unsigned int i;
1227 	int rc = 0;
1228 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1229 
1230 	/* Cycle through all VNICs */
1231 	for (i = 0; i < bp->nr_vnics; i++) {
1232 		/*
1233 		 * For each VNIC and each associated filter(s)
1234 		 * if VLAN exists && VLAN matches vlan_id
1235 		 *      remove the MAC+VLAN filter
1236 		 *      add a new MAC only filter
1237 		 * else
1238 		 *      VLAN filter doesn't exist, just skip and continue
1239 		 */
1240 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1241 			filter = STAILQ_FIRST(&vnic->filter);
1242 			while (filter) {
1243 				temp_filter = STAILQ_NEXT(filter, next);
1244 
1245 				if (filter->enables & chk &&
1246 				    filter->l2_ovlan == vlan_id) {
1247 					/* Must delete the filter */
1248 					STAILQ_REMOVE(&vnic->filter, filter,
1249 						      bnxt_filter_info, next);
1250 					bnxt_hwrm_clear_l2_filter(bp, filter);
1251 					STAILQ_INSERT_TAIL(
1252 							&bp->free_filter_list,
1253 							filter, next);
1254 
1255 					/*
1256 					 * Need to examine to see if the MAC
1257 					 * filter already existed or not before
1258 					 * allocating a new one
1259 					 */
1260 
1261 					new_filter = bnxt_alloc_filter(bp);
1262 					if (!new_filter) {
1263 						PMD_DRV_LOG(ERR,
1264 							"MAC/VLAN filter alloc failed\n");
1265 						rc = -ENOMEM;
1266 						goto exit;
1267 					}
1268 					STAILQ_INSERT_TAIL(&vnic->filter,
1269 							   new_filter, next);
1270 					/* Inherit MAC from previous filter */
1271 					new_filter->mac_index =
1272 							filter->mac_index;
1273 					memcpy(new_filter->l2_addr,
1274 					       filter->l2_addr, ETHER_ADDR_LEN);
1275 					/* MAC only filter */
1276 					rc = bnxt_hwrm_set_l2_filter(bp,
1277 							vnic->fw_vnic_id,
1278 							new_filter);
1279 					if (rc)
1280 						goto exit;
1281 					PMD_DRV_LOG(INFO,
1282 						"Deleted VLAN filter for %d\n",
1283 						vlan_id);
1284 				}
1285 				filter = temp_filter;
1286 			}
1287 		}
1288 	}
1289 exit:
1290 	return rc;
1291 }
1292 
1293 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1294 {
1295 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1296 	struct bnxt_vnic_info *vnic;
1297 	unsigned int i;
1298 	int rc = 0;
1299 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
1300 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
1301 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1302 
1303 	/* Cycle through all VNICs */
1304 	for (i = 0; i < bp->nr_vnics; i++) {
1305 		/*
1306 		 * For each VNIC and each associated filter(s)
1307 		 * if VLAN exists:
1308 		 *   if VLAN matches vlan_id
1309 		 *      VLAN filter already exists, just skip and continue
1310 		 *   else
1311 		 *      add a new MAC+VLAN filter
1312 		 * else
1313 		 *   Remove the old MAC only filter
1314 		 *    Add a new MAC+VLAN filter
1315 		 */
1316 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1317 			filter = STAILQ_FIRST(&vnic->filter);
1318 			while (filter) {
1319 				temp_filter = STAILQ_NEXT(filter, next);
1320 
1321 				if (filter->enables & chk) {
1322 					if (filter->l2_ovlan == vlan_id)
1323 						goto cont;
1324 				} else {
1325 					/* Must delete the MAC filter */
1326 					STAILQ_REMOVE(&vnic->filter, filter,
1327 						      bnxt_filter_info, next);
1328 					bnxt_hwrm_clear_l2_filter(bp, filter);
1329 					filter->l2_ovlan = 0;
1330 					STAILQ_INSERT_TAIL(
1331 							&bp->free_filter_list,
1332 							filter, next);
1333 				}
1334 				new_filter = bnxt_alloc_filter(bp);
1335 				if (!new_filter) {
1336 					PMD_DRV_LOG(ERR,
1337 						"MAC/VLAN filter alloc failed\n");
1338 					rc = -ENOMEM;
1339 					goto exit;
1340 				}
1341 				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
1342 						   next);
1343 				/* Inherit MAC from the previous filter */
1344 				new_filter->mac_index = filter->mac_index;
1345 				memcpy(new_filter->l2_addr, filter->l2_addr,
1346 				       ETHER_ADDR_LEN);
1347 				/* MAC + VLAN ID filter */
1348 				new_filter->l2_ovlan = vlan_id;
1349 				new_filter->l2_ovlan_mask = 0xF000;
1350 				new_filter->enables |= en;
1351 				rc = bnxt_hwrm_set_l2_filter(bp,
1352 							     vnic->fw_vnic_id,
1353 							     new_filter);
1354 				if (rc)
1355 					goto exit;
1356 				PMD_DRV_LOG(INFO,
1357 					"Added VLAN filter for %d\n", vlan_id);
1358 cont:
1359 				filter = temp_filter;
1360 			}
1361 		}
1362 	}
1363 exit:
1364 	return rc;
1365 }
1366 
1367 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1368 				   uint16_t vlan_id, int on)
1369 {
1370 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1371 
1372 	/* These operations apply to ALL existing MAC/VLAN filters */
1373 	if (on)
1374 		return bnxt_add_vlan_filter(bp, vlan_id);
1375 	else
1376 		return bnxt_del_vlan_filter(bp, vlan_id);
1377 }
1378 
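/*
 * Apply VLAN offload changes: drop all programmed VLAN filters when
 * filtering is disabled and enable/disable VLAN stripping on every VNIC.
 */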
1379 static int
1380 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1381 {
1382 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1383 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1384 	unsigned int i;
1385 
1386 	if (mask & ETH_VLAN_FILTER_MASK) {
1387 		if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
1388 			/* Remove any VLAN filters programmed */
1389 			for (i = 0; i < 4095; i++)
1390 				bnxt_del_vlan_filter(bp, i);
1391 		}
1392 		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
1393 			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
1394 	}
1395 
1396 	if (mask & ETH_VLAN_STRIP_MASK) {
1397 		/* Enable or disable VLAN stripping */
1398 		for (i = 0; i < bp->nr_vnics; i++) {
1399 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1400 			if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1401 				vnic->vlan_strip = true;
1402 			else
1403 				vnic->vlan_strip = false;
1404 			bnxt_hwrm_vnic_cfg(bp, vnic);
1405 		}
1406 		PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
1407 			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
1408 	}
1409 
1410 	if (mask & ETH_VLAN_EXTEND_MASK)
1411 		PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
1412 
1413 	return 0;
1414 }
1415 
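/* Replace the default (index 0) MAC filter on VNIC 0 with the new address. */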
1416 static int
1417 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1418 {
1419 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1420 	/* Default Filter is tied to VNIC 0 */
1421 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1422 	struct bnxt_filter_info *filter;
1423 	int rc;
1424 
1425 	if (BNXT_VF(bp))
1426 		return -EPERM;
1427 
1428 	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
1429 
1430 	STAILQ_FOREACH(filter, &vnic->filter, next) {
1431 		/* Default Filter is at Index 0 */
1432 		if (filter->mac_index != 0)
1433 			continue;
1434 		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1435 		if (rc)
1436 			return rc;
1437 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1438 		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1439 		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1440 		filter->enables |=
1441 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1442 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1443 		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1444 		if (rc)
1445 			return rc;
1446 		filter->mac_index = 0;
1447 		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
1448 	}
1449 
1450 	return 0;
1451 }
1452 
1453 static int
1454 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1455 			  struct ether_addr *mc_addr_set,
1456 			  uint32_t nb_mc_addr)
1457 {
1458 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1459 	char *mc_addr_list = (char *)mc_addr_set;
1460 	struct bnxt_vnic_info *vnic;
1461 	uint32_t off = 0, i = 0;
1462 
1463 	vnic = &bp->vnic_info[0];
1464 
1465 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
1466 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1467 		goto allmulti;
1468 	}
1469 
1470 	/* TODO Check for Duplicate mcast addresses */
1471 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1472 	for (i = 0; i < nb_mc_addr; i++) {
1473 		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
1474 		off += ETHER_ADDR_LEN;
1475 	}
1476 
1477 	vnic->mc_addr_cnt = i;
1478 
1479 allmulti:
1480 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1481 }
1482 
1483 static int
1484 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1485 {
1486 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1487 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
1488 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
1489 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
1490 	int ret;
1491 
1492 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
1493 			fw_major, fw_minor, fw_updt);
1494 
1495 	ret += 1; /* add the size of '\0' */
1496 	if (fw_size < (uint32_t)ret)
1497 		return ret;
1498 	else
1499 		return 0;
1500 }
1501 
1502 static void
1503 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1504 	struct rte_eth_rxq_info *qinfo)
1505 {
1506 	struct bnxt_rx_queue *rxq;
1507 
1508 	rxq = dev->data->rx_queues[queue_id];
1509 
1510 	qinfo->mp = rxq->mb_pool;
1511 	qinfo->scattered_rx = dev->data->scattered_rx;
1512 	qinfo->nb_desc = rxq->nb_rx_desc;
1513 
1514 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1515 	qinfo->conf.rx_drop_en = 0;
1516 	qinfo->conf.rx_deferred_start = 0;
1517 }
1518 
1519 static void
1520 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1521 	struct rte_eth_txq_info *qinfo)
1522 {
1523 	struct bnxt_tx_queue *txq;
1524 
1525 	txq = dev->data->tx_queues[queue_id];
1526 
1527 	qinfo->nb_desc = txq->nb_tx_desc;
1528 
1529 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1530 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1531 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1532 
1533 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1534 	qinfo->conf.tx_rs_thresh = 0;
1535 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1536 }
1537 
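/*
 * Validate and apply a new MTU: toggle the jumbo-frame flag, update
 * max_rx_pkt_len and reconfigure the MRU/placement mode of every VNIC.
 */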
1538 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
1539 {
1540 	struct bnxt *bp = eth_dev->data->dev_private;
1541 	struct rte_eth_dev_info dev_info;
1542 	uint32_t max_dev_mtu;
1543 	uint32_t rc = 0;
1544 	uint32_t i;
1545 
1546 	bnxt_dev_info_get_op(eth_dev, &dev_info);
1547 	max_dev_mtu = dev_info.max_rx_pktlen -
1548 		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
1549 
1550 	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
1551 		PMD_DRV_LOG(ERR, "Requested MTU must be between %d and %d\n",
1552 			ETHER_MIN_MTU, max_dev_mtu);
1553 		return -EINVAL;
1554 	}
1555 
1556 
1557 	if (new_mtu > ETHER_MTU) {
1558 		bp->flags |= BNXT_FLAG_JUMBO;
1559 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
1560 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1561 	} else {
1562 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
1563 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1564 		bp->flags &= ~BNXT_FLAG_JUMBO;
1565 	}
1566 
1567 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
1568 		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1569 
1570 	eth_dev->data->mtu = new_mtu;
1571 	PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
1572 
1573 	for (i = 0; i < bp->nr_vnics; i++) {
1574 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1575 
1576 		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1577 					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1578 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1579 		if (rc)
1580 			break;
1581 
1582 		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1583 		if (rc)
1584 			return rc;
1585 	}
1586 
1587 	return rc;
1588 }
1589 
1590 static int
1591 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
1592 {
1593 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1594 	uint16_t vlan = bp->vlan;
1595 	int rc;
1596 
1597 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1598 		PMD_DRV_LOG(ERR,
1599 			"PVID cannot be modified for this function\n");
1600 		return -ENOTSUP;
1601 	}
1602 	bp->vlan = on ? pvid : 0;
1603 
1604 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
1605 	if (rc)
1606 		bp->vlan = vlan;
1607 	return rc;
1608 }
1609 
1610 static int
1611 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
1612 {
1613 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1614 
1615 	return bnxt_hwrm_port_led_cfg(bp, true);
1616 }
1617 
1618 static int
1619 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
1620 {
1621 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1622 
1623 	return bnxt_hwrm_port_led_cfg(bp, false);
1624 }
1625 
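/*
 * Return the number of used Rx descriptors by scanning the completion ring
 * for valid Rx (and TPA end) completions.
 */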
1626 static uint32_t
1627 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1628 {
1629 	uint32_t desc = 0, raw_cons = 0, cons;
1630 	struct bnxt_cp_ring_info *cpr;
1631 	struct bnxt_rx_queue *rxq;
1632 	struct rx_pkt_cmpl *rxcmp;
1633 	uint16_t cmp_type;
1634 	uint8_t cmp = 1;
1635 	bool valid;
1636 
1637 	rxq = dev->data->rx_queues[rx_queue_id];
1638 	cpr = rxq->cp_ring;
1639 	valid = cpr->valid;
1640 
1641 	while (raw_cons < rxq->nb_rx_desc) {
1642 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1643 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1644 
1645 		if (!CMPL_VALID(rxcmp, valid))
1646 			goto nothing_to_do;
1647 		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1648 		cmp_type = CMP_TYPE(rxcmp);
1649 		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1650 			cmp = (rte_le_to_cpu_32(
1651 					((struct rx_tpa_end_cmpl *)
1652 					 (rxcmp))->agg_bufs_v1) &
1653 			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1654 				RX_TPA_END_CMPL_AGG_BUFS_SFT;
1655 			desc++;
1656 		} else if (cmp_type == 0x11) {
1657 			desc++;
1658 			cmp = (rxcmp->agg_bufs_v1 &
1659 				   RX_PKT_CMPL_AGG_BUFS_MASK) >>
1660 				RX_PKT_CMPL_AGG_BUFS_SFT;
1661 		} else {
1662 			cmp = 1;
1663 		}
1664 nothing_to_do:
1665 		raw_cons += cmp ? cmp : 2;
1666 	}
1667 
1668 	return desc;
1669 }
1670 
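/*
 * Report the status of the Rx descriptor at the given offset: DONE if its
 * completion is valid, UNAVAIL if the buffer slot is empty, AVAIL otherwise.
 */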
1671 static int
1672 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
1673 {
1674 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
1675 	struct bnxt_rx_ring_info *rxr;
1676 	struct bnxt_cp_ring_info *cpr;
1677 	struct bnxt_sw_rx_bd *rx_buf;
1678 	struct rx_pkt_cmpl *rxcmp;
1679 	uint32_t cons, cp_cons;
1680 
1681 	if (!rxq)
1682 		return -EINVAL;
1683 
1684 	cpr = rxq->cp_ring;
1685 	rxr = rxq->rx_ring;
1686 
1687 	if (offset >= rxq->nb_rx_desc)
1688 		return -EINVAL;
1689 
1690 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1691 	cp_cons = cpr->cp_raw_cons;
1692 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1693 
1694 	if (cons > cp_cons) {
1695 		if (CMPL_VALID(rxcmp, cpr->valid))
1696 			return RTE_ETH_RX_DESC_DONE;
1697 	} else {
1698 		if (CMPL_VALID(rxcmp, !cpr->valid))
1699 			return RTE_ETH_RX_DESC_DONE;
1700 	}
1701 	rx_buf = &rxr->rx_buf_ring[cons];
1702 	if (rx_buf->mbuf == NULL)
1703 		return RTE_ETH_RX_DESC_UNAVAIL;
1704 
1705 
1706 	return RTE_ETH_RX_DESC_AVAIL;
1707 }
1708 
1709 static int
1710 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1711 {
1712 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1713 	struct bnxt_tx_ring_info *txr;
1714 	struct bnxt_cp_ring_info *cpr;
1715 	struct bnxt_sw_tx_bd *tx_buf;
1716 	struct tx_pkt_cmpl *txcmp;
1717 	uint32_t cons, cp_cons;
1718 
1719 	if (!txq)
1720 		return -EINVAL;
1721 
1722 	cpr = txq->cp_ring;
1723 	txr = txq->tx_ring;
1724 
1725 	if (offset >= txq->nb_tx_desc)
1726 		return -EINVAL;
1727 
1728 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1729 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1730 	cp_cons = cpr->cp_raw_cons;
1731 
1732 	if (cons > cp_cons) {
1733 		if (CMPL_VALID(txcmp, cpr->valid))
1734 			return RTE_ETH_TX_DESC_UNAVAIL;
1735 	} else {
1736 		if (CMPL_VALID(txcmp, !cpr->valid))
1737 			return RTE_ETH_TX_DESC_UNAVAIL;
1738 	}
1739 	tx_buf = &txr->tx_buf_ring[cons];
1740 	if (tx_buf->mbuf == NULL)
1741 		return RTE_ETH_TX_DESC_DONE;
1742 
1743 	return RTE_ETH_TX_DESC_FULL;
1744 }
1745 
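/*
 * Validate an ethertype filter request and look for an already-programmed
 * matching filter; *ret is set to -EINVAL on a bad request, -EEXIST when a
 * match is found and 0 otherwise.
 */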
1746 static struct bnxt_filter_info *
1747 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1748 				struct rte_eth_ethertype_filter *efilter,
1749 				struct bnxt_vnic_info *vnic0,
1750 				struct bnxt_vnic_info *vnic,
1751 				int *ret)
1752 {
1753 	struct bnxt_filter_info *mfilter = NULL;
1754 	int match = 0;
1755 	*ret = 0;
1756 
1757 	if (efilter->ether_type == ETHER_TYPE_IPv4 ||
1758 		efilter->ether_type == ETHER_TYPE_IPv6) {
1759 		PMD_DRV_LOG(ERR, "invalid ether_type (0x%04x) in"
1760 			" ethertype filter.\n", efilter->ether_type);
1761 		*ret = -EINVAL;
1762 		goto exit;
1763 	}
1764 	if (efilter->queue >= bp->rx_nr_rings) {
1765 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1766 		*ret = -EINVAL;
1767 		goto exit;
1768 	}
1769 
1770 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1771 	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1772 	if (vnic == NULL) {
1773 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1774 		*ret = -EINVAL;
1775 		goto exit;
1776 	}
1777 
1778 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1779 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1780 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1781 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1782 			     mfilter->flags ==
1783 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1784 			     mfilter->ethertype == efilter->ether_type)) {
1785 				match = 1;
1786 				break;
1787 			}
1788 		}
1789 	} else {
1790 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
1791 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1792 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1793 			     mfilter->ethertype == efilter->ether_type &&
1794 			     mfilter->flags ==
1795 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1796 				match = 1;
1797 				break;
1798 			}
1799 	}
1800 
1801 	if (match)
1802 		*ret = -EEXIST;
1803 
1804 exit:
1805 	return mfilter;
1806 }
1807 
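/*
 * RTE_ETH_FILTER_ETHERTYPE handler: ADD programs an HWRM ntuple filter keyed
 * on destination MAC address and ethertype (honouring the DROP flag) against
 * the destination queue's VNIC; DELETE clears the matching filter and returns
 * it to the free pool.
 */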
1808 static int
1809 bnxt_ethertype_filter(struct rte_eth_dev *dev,
1810 			enum rte_filter_op filter_op,
1811 			void *arg)
1812 {
1813 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1814 	struct rte_eth_ethertype_filter *efilter =
1815 			(struct rte_eth_ethertype_filter *)arg;
1816 	struct bnxt_filter_info *bfilter, *filter1;
1817 	struct bnxt_vnic_info *vnic, *vnic0;
1818 	int ret;
1819 
1820 	if (filter_op == RTE_ETH_FILTER_NOP)
1821 		return 0;
1822 
1823 	if (arg == NULL) {
1824 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
1825 			    filter_op);
1826 		return -EINVAL;
1827 	}
1828 
1829 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1830 	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1831 
1832 	switch (filter_op) {
1833 	case RTE_ETH_FILTER_ADD:
1834 		bnxt_match_and_validate_ether_filter(bp, efilter,
1835 							vnic0, vnic, &ret);
1836 		if (ret < 0)
1837 			return ret;
1838 
1839 		bfilter = bnxt_get_unused_filter(bp);
1840 		if (bfilter == NULL) {
1841 			PMD_DRV_LOG(ERR,
1842 				"Not enough resources for a new filter.\n");
1843 			return -ENOMEM;
1844 		}
1845 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
1846 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
1847 		       ETHER_ADDR_LEN);
1848 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
1849 		       ETHER_ADDR_LEN);
1850 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
1851 		bfilter->ethertype = efilter->ether_type;
1852 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
1853 
1854 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
1855 		if (filter1 == NULL) {
1856 			ret = -1;
1857 			goto cleanup;
1858 		}
1859 		bfilter->enables |=
1860 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1861 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1862 
1863 		bfilter->dst_id = vnic->fw_vnic_id;
1864 
1865 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1866 			bfilter->flags =
1867 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1868 		}
1869 
1870 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
1871 		if (ret)
1872 			goto cleanup;
1873 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
1874 		break;
1875 	case RTE_ETH_FILTER_DELETE:
1876 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
1877 							vnic0, vnic, &ret);
1878 		if (ret == -EEXIST) {
1879 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
1880 
1881 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
1882 				      next);
1883 			bnxt_free_filter(bp, filter1);
1884 		} else if (ret == 0) {
1885 			PMD_DRV_LOG(ERR, "No matching filter found\n");
1886 		}
1887 		break;
1888 	default:
1889 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
1890 		ret = -EINVAL;
1891 		goto error;
1892 	}
1893 	return ret;
1894 cleanup:
1895 	bnxt_free_filter(bp, bfilter);
1896 error:
1897 	return ret;
1898 }
1899 
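/*
 * Translate an rte_eth_ntuple_filter into bnxt_filter_info fields.  Only
 * fully-masked (all-ones) matches are supported for the 5-tuple, the address
 * type is forced to IPv4 and the protocol must be TCP (6) or UDP (17); the
 * corresponding HWRM "enables" bits are accumulated in bfilter->enables.
 */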
1900 static inline int
1901 parse_ntuple_filter(struct bnxt *bp,
1902 		    struct rte_eth_ntuple_filter *nfilter,
1903 		    struct bnxt_filter_info *bfilter)
1904 {
1905 	uint32_t en = 0;
1906 
1907 	if (nfilter->queue >= bp->rx_nr_rings) {
1908 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
1909 		return -EINVAL;
1910 	}
1911 
1912 	switch (nfilter->dst_port_mask) {
1913 	case UINT16_MAX:
1914 		bfilter->dst_port_mask = -1;
1915 		bfilter->dst_port = nfilter->dst_port;
1916 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
1917 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
1918 		break;
1919 	default:
1920 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
1921 		return -EINVAL;
1922 	}
1923 
1924 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
1925 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1926 
1927 	switch (nfilter->proto_mask) {
1928 	case UINT8_MAX:
1929 		if (nfilter->proto == 17) /* IPPROTO_UDP */
1930 			bfilter->ip_protocol = 17;
1931 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
1932 			bfilter->ip_protocol = 6;
1933 		else
1934 			return -EINVAL;
1935 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1936 		break;
1937 	default:
1938 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
1939 		return -EINVAL;
1940 	}
1941 
1942 	switch (nfilter->dst_ip_mask) {
1943 	case UINT32_MAX:
1944 		bfilter->dst_ipaddr_mask[0] = -1;
1945 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
1946 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
1947 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
1948 		break;
1949 	default:
1950 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
1951 		return -EINVAL;
1952 	}
1953 
1954 	switch (nfilter->src_ip_mask) {
1955 	case UINT32_MAX:
1956 		bfilter->src_ipaddr_mask[0] = -1;
1957 		bfilter->src_ipaddr[0] = nfilter->src_ip;
1958 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
1959 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
1960 		break;
1961 	default:
1962 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
1963 		return -EINVAL;
1964 	}
1965 
1966 	switch (nfilter->src_port_mask) {
1967 	case UINT16_MAX:
1968 		bfilter->src_port_mask = -1;
1969 		bfilter->src_port = nfilter->src_port;
1970 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
1971 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
1972 		break;
1973 	default:
1974 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
1975 		return -EINVAL;
1976 	}
1977 
1978 	/* TODO: handle filter priority */
1979 	/* nfilter->priority = (uint8_t)filter->priority; */
1980 
1981 	bfilter->enables = en;
1982 	return 0;
1983 }
1984 
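/*
 * Scan every VNIC's filter list for an ntuple filter whose 5-tuple, masks,
 * flags and enables match bfilter; the owning VNIC is returned through mvnic
 * when a match is found.
 */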
1985 static struct bnxt_filter_info*
1986 bnxt_match_ntuple_filter(struct bnxt *bp,
1987 			 struct bnxt_filter_info *bfilter,
1988 			 struct bnxt_vnic_info **mvnic)
1989 {
1990 	struct bnxt_filter_info *mfilter = NULL;
1991 	int i;
1992 
1993 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
1994 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1995 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
1996 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
1997 			    bfilter->src_ipaddr_mask[0] ==
1998 			    mfilter->src_ipaddr_mask[0] &&
1999 			    bfilter->src_port == mfilter->src_port &&
2000 			    bfilter->src_port_mask == mfilter->src_port_mask &&
2001 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
2002 			    bfilter->dst_ipaddr_mask[0] ==
2003 			    mfilter->dst_ipaddr_mask[0] &&
2004 			    bfilter->dst_port == mfilter->dst_port &&
2005 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
2006 			    bfilter->flags == mfilter->flags &&
2007 			    bfilter->enables == mfilter->enables) {
2008 				if (mvnic)
2009 					*mvnic = vnic;
2010 				return mfilter;
2011 			}
2012 		}
2013 	}
2014 	return NULL;
2015 }
2016 
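/*
 * Common add/delete path for ntuple filters.  The request is parsed, tied to
 * the default L2 filter of VNIC 0 and matched against existing filters: an
 * identical filter on the same destination fails with -EEXIST, a matching
 * pattern on a different destination is simply re-pointed at the new queue,
 * and deletes clear the HWRM filter and return it to the free pool.
 */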
2017 static int
2018 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2019 		       struct rte_eth_ntuple_filter *nfilter,
2020 		       enum rte_filter_op filter_op)
2021 {
2022 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2023 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2024 	int ret;
2025 
2026 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2027 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2028 		return -EINVAL;
2029 	}
2030 
2031 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2032 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2033 		return -EINVAL;
2034 	}
2035 
2036 	bfilter = bnxt_get_unused_filter(bp);
2037 	if (bfilter == NULL) {
2038 		PMD_DRV_LOG(ERR,
2039 			"Not enough resources for a new filter.\n");
2040 		return -ENOMEM;
2041 	}
2042 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2043 	if (ret < 0)
2044 		goto free_filter;
2045 
2046 	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
2047 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2048 	filter1 = STAILQ_FIRST(&vnic0->filter);
2049 	if (filter1 == NULL) {
2050 		ret = -1;
2051 		goto free_filter;
2052 	}
2053 
2054 	bfilter->dst_id = vnic->fw_vnic_id;
2055 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2056 	bfilter->enables |=
2057 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2058 	bfilter->ethertype = 0x800;
2059 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2060 
2061 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2062 
2063 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2064 	    bfilter->dst_id == mfilter->dst_id) {
2065 		PMD_DRV_LOG(ERR, "filter exists.\n");
2066 		ret = -EEXIST;
2067 		goto free_filter;
2068 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2069 		   bfilter->dst_id != mfilter->dst_id) {
2070 		mfilter->dst_id = vnic->fw_vnic_id;
2071 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2072 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2073 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2074 		PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2075 		PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2076 		goto free_filter;
2077 	}
2078 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2079 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
2080 		ret = -ENOENT;
2081 		goto free_filter;
2082 	}
2083 
2084 	if (filter_op == RTE_ETH_FILTER_ADD) {
2085 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2086 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2087 		if (ret)
2088 			goto free_filter;
2089 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2090 	} else {
2091 		if (mfilter == NULL) {
2092 			/* This should not happen. But for Coverity! */
2093 			ret = -ENOENT;
2094 			goto free_filter;
2095 		}
2096 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2097 
2098 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2099 		mfilter->fw_l2_filter_id = -1;
2100 		bnxt_free_filter(bp, mfilter);
2101 		bfilter->fw_l2_filter_id = -1;
2102 		bnxt_free_filter(bp, bfilter);
2103 	}
2104 
2105 	return 0;
2106 free_filter:
2107 	bfilter->fw_l2_filter_id = -1;
2108 	bnxt_free_filter(bp, bfilter);
2109 	return ret;
2110 }
2111 
2112 static int
2113 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2114 			enum rte_filter_op filter_op,
2115 			void *arg)
2116 {
2117 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2118 	int ret;
2119 
2120 	if (filter_op == RTE_ETH_FILTER_NOP)
2121 		return 0;
2122 
2123 	if (arg == NULL) {
2124 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2125 			    filter_op);
2126 		return -EINVAL;
2127 	}
2128 
2129 	switch (filter_op) {
2130 	case RTE_ETH_FILTER_ADD:
2131 		ret = bnxt_cfg_ntuple_filter(bp,
2132 			(struct rte_eth_ntuple_filter *)arg,
2133 			filter_op);
2134 		break;
2135 	case RTE_ETH_FILTER_DELETE:
2136 		ret = bnxt_cfg_ntuple_filter(bp,
2137 			(struct rte_eth_ntuple_filter *)arg,
2138 			filter_op);
2139 		break;
2140 	default:
2141 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2142 		ret = -EINVAL;
2143 		break;
2144 	}
2145 	return ret;
2146 }
2147 
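/*
 * Convert a flow director filter into HWRM ntuple filter fields.  Each
 * supported flow type fills in the relevant addresses, ports and protocol
 * with full masks; VXLAN/NVGRE flows set the tunnel type and VNI, and
 * L2_PAYLOAD matches on the ethertype alone.  REJECT actions use the drop
 * flag together with the default VNIC's L2 filter.
 */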
2148 static int
2149 bnxt_parse_fdir_filter(struct bnxt *bp,
2150 		       struct rte_eth_fdir_filter *fdir,
2151 		       struct bnxt_filter_info *filter)
2152 {
2153 	enum rte_fdir_mode fdir_mode =
2154 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2155 	struct bnxt_vnic_info *vnic0, *vnic;
2156 	struct bnxt_filter_info *filter1;
2157 	uint32_t en = 0;
2158 	int i;
2159 
2160 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2161 		return -EINVAL;
2162 
2163 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2164 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2165 
2166 	switch (fdir->input.flow_type) {
2167 	case RTE_ETH_FLOW_IPV4:
2168 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2169 		/* FALLTHROUGH */
2170 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2171 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2172 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2173 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2174 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2175 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2176 		filter->ip_addr_type =
2177 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2178 		filter->src_ipaddr_mask[0] = 0xffffffff;
2179 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2180 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2181 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2182 		filter->ethertype = 0x800;
2183 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2184 		break;
2185 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2186 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2187 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2188 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2189 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2190 		filter->dst_port_mask = 0xffff;
2191 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2192 		filter->src_port_mask = 0xffff;
2193 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2194 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2195 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2196 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2197 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2198 		filter->ip_protocol = 6;
2199 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2200 		filter->ip_addr_type =
2201 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2202 		filter->src_ipaddr_mask[0] = 0xffffffff;
2203 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2204 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2205 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2206 		filter->ethertype = 0x800;
2207 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2208 		break;
2209 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2210 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2211 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2212 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2213 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2214 		filter->dst_port_mask = 0xffff;
2215 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2216 		filter->src_port_mask = 0xffff;
2217 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2218 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2219 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2220 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2221 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2222 		filter->ip_protocol = 17;
2223 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2224 		filter->ip_addr_type =
2225 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2226 		filter->src_ipaddr_mask[0] = 0xffffffff;
2227 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2228 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2229 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2230 		filter->ethertype = 0x800;
2231 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2232 		break;
2233 	case RTE_ETH_FLOW_IPV6:
2234 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2235 		/* FALLTHROUGH */
2236 		filter->ip_addr_type =
2237 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2238 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2239 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2240 		rte_memcpy(filter->src_ipaddr,
2241 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2242 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2243 		rte_memcpy(filter->dst_ipaddr,
2244 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2245 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2246 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2247 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2248 		memset(filter->src_ipaddr_mask, 0xff, 16);
2249 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2250 		filter->ethertype = 0x86dd;
2251 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2252 		break;
2253 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2254 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2255 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2256 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2257 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2258 		filter->dst_port_mask = 0xffff;
2259 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2260 		filter->src_port_mask = 0xffff;
2261 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2262 		filter->ip_addr_type =
2263 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2264 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2265 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2266 		rte_memcpy(filter->src_ipaddr,
2267 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2268 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2269 		rte_memcpy(filter->dst_ipaddr,
2270 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2271 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2272 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2273 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2274 		memset(filter->src_ipaddr_mask, 0xff, 16);
2275 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2276 		filter->ethertype = 0x86dd;
2277 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2278 		break;
2279 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2280 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2281 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2282 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2283 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2284 		filter->dst_port_mask = 0xffff;
2285 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2286 		filter->src_port_mask = 0xffff;
2287 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2288 		filter->ip_addr_type =
2289 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2290 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2291 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2292 		rte_memcpy(filter->src_ipaddr,
2293 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
2294 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2295 		rte_memcpy(filter->dst_ipaddr,
2296 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2297 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2298 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2299 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2300 		memset(filter->src_ipaddr_mask, 0xff, 16);
2301 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2302 		filter->ethertype = 0x86dd;
2303 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2304 		break;
2305 	case RTE_ETH_FLOW_L2_PAYLOAD:
2306 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2307 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2308 		break;
2309 	case RTE_ETH_FLOW_VXLAN:
2310 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2311 			return -EINVAL;
2312 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2313 		filter->tunnel_type =
2314 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2315 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2316 		break;
2317 	case RTE_ETH_FLOW_NVGRE:
2318 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2319 			return -EINVAL;
2320 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2321 		filter->tunnel_type =
2322 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2323 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2324 		break;
2325 	case RTE_ETH_FLOW_UNKNOWN:
2326 	case RTE_ETH_FLOW_RAW:
2327 	case RTE_ETH_FLOW_FRAG_IPV4:
2328 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2329 	case RTE_ETH_FLOW_FRAG_IPV6:
2330 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2331 	case RTE_ETH_FLOW_IPV6_EX:
2332 	case RTE_ETH_FLOW_IPV6_TCP_EX:
2333 	case RTE_ETH_FLOW_IPV6_UDP_EX:
2334 	case RTE_ETH_FLOW_GENEVE:
2335 		/* FALLTHROUGH */
2336 	default:
2337 		return -EINVAL;
2338 	}
2339 
2340 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2341 	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2342 	if (vnic == NULL) {
2343 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
2344 		return -EINVAL;
2345 	}
2346 
2348 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2349 		rte_memcpy(filter->dst_macaddr,
2350 			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
2351 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2352 	}
2353 
2354 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2355 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2356 		filter1 = STAILQ_FIRST(&vnic0->filter);
2357 		/* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
2358 	} else {
2359 		filter->dst_id = vnic->fw_vnic_id;
2360 		for (i = 0; i < ETHER_ADDR_LEN; i++)
2361 			if (filter->dst_macaddr[i] == 0x00)
2362 				filter1 = STAILQ_FIRST(&vnic0->filter);
2363 			else
2364 				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
2365 	}
2366 
2367 	if (filter1 == NULL)
2368 		return -EINVAL;
2369 
2370 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2371 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2372 
2373 	filter->enables = en;
2374 
2375 	return 0;
2376 }
2377 
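/*
 * Search all VNIC filter lists for a filter whose match fields are identical
 * to nf, returning the owning VNIC through mvnic when found.
 */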
2378 static struct bnxt_filter_info *
2379 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
2380 		struct bnxt_vnic_info **mvnic)
2381 {
2382 	struct bnxt_filter_info *mf = NULL;
2383 	int i;
2384 
2385 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2386 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2387 
2388 		STAILQ_FOREACH(mf, &vnic->filter, next) {
2389 			if (mf->filter_type == nf->filter_type &&
2390 			    mf->flags == nf->flags &&
2391 			    mf->src_port == nf->src_port &&
2392 			    mf->src_port_mask == nf->src_port_mask &&
2393 			    mf->dst_port == nf->dst_port &&
2394 			    mf->dst_port_mask == nf->dst_port_mask &&
2395 			    mf->ip_protocol == nf->ip_protocol &&
2396 			    mf->ip_addr_type == nf->ip_addr_type &&
2397 			    mf->ethertype == nf->ethertype &&
2398 			    mf->vni == nf->vni &&
2399 			    mf->tunnel_type == nf->tunnel_type &&
2400 			    mf->l2_ovlan == nf->l2_ovlan &&
2401 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2402 			    mf->l2_ivlan == nf->l2_ivlan &&
2403 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2404 			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2405 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2406 				    ETHER_ADDR_LEN) &&
2407 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
2408 				    ETHER_ADDR_LEN) &&
2409 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2410 				    ETHER_ADDR_LEN) &&
2411 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2412 				    sizeof(nf->src_ipaddr)) &&
2413 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2414 				    sizeof(nf->src_ipaddr_mask)) &&
2415 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2416 				    sizeof(nf->dst_ipaddr)) &&
2417 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2418 				    sizeof(nf->dst_ipaddr_mask))) {
2419 				if (mvnic)
2420 					*mvnic = vnic;
2421 				return mf;
2422 			}
2423 		}
2424 	}
2425 	return NULL;
2426 }
2427 
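/*
 * RTE_ETH_FILTER_FDIR handler: ADD/DELETE parse the request into an HWRM
 * ntuple filter and program or clear it (re-targeting an existing filter
 * when only the destination queue differs); FLUSH walks every VNIC and
 * clears all ntuple filters.
 */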
2428 static int
2429 bnxt_fdir_filter(struct rte_eth_dev *dev,
2430 		 enum rte_filter_op filter_op,
2431 		 void *arg)
2432 {
2433 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2434 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
2435 	struct bnxt_filter_info *filter, *match;
2436 	struct bnxt_vnic_info *vnic, *mvnic;
2437 	int ret = 0, i;
2438 
2439 	if (filter_op == RTE_ETH_FILTER_NOP)
2440 		return 0;
2441 
2442 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2443 		return -EINVAL;
2444 
2445 	switch (filter_op) {
2446 	case RTE_ETH_FILTER_ADD:
2447 	case RTE_ETH_FILTER_DELETE:
2448 		filter = bnxt_get_unused_filter(bp);
2449 		if (filter == NULL) {
2450 			PMD_DRV_LOG(ERR,
2451 				"Not enough resources for a new flow.\n");
2452 			return -ENOMEM;
2453 		}
2454 
2455 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2456 		if (ret != 0)
2457 			goto free_filter;
2458 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2459 
2460 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2461 			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
2462 		else
2463 			vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2464 
2465 		match = bnxt_match_fdir(bp, filter, &mvnic);
2466 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2467 			if (match->dst_id == vnic->fw_vnic_id) {
2468 				PMD_DRV_LOG(ERR, "Flow already exists.\n");
2469 				ret = -EEXIST;
2470 				goto free_filter;
2471 			} else {
2472 				match->dst_id = vnic->fw_vnic_id;
2473 				ret = bnxt_hwrm_set_ntuple_filter(bp,
2474 								  match->dst_id,
2475 								  match);
2476 				STAILQ_REMOVE(&mvnic->filter, match,
2477 					      bnxt_filter_info, next);
2478 				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
2479 				PMD_DRV_LOG(ERR,
2480 					"Filter with matching pattern exists\n");
2481 				PMD_DRV_LOG(ERR,
2482 					"Updated it to new destination queue\n");
2483 				goto free_filter;
2484 			}
2485 		}
2486 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2487 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
2488 			ret = -ENOENT;
2489 			goto free_filter;
2490 		}
2491 
2492 		if (filter_op == RTE_ETH_FILTER_ADD) {
2493 			ret = bnxt_hwrm_set_ntuple_filter(bp,
2494 							  filter->dst_id,
2495 							  filter);
2496 			if (ret)
2497 				goto free_filter;
2498 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2499 		} else {
2500 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2501 			STAILQ_REMOVE(&vnic->filter, match,
2502 				      bnxt_filter_info, next);
2503 			bnxt_free_filter(bp, match);
2504 			filter->fw_l2_filter_id = -1;
2505 			bnxt_free_filter(bp, filter);
2506 		}
2507 		break;
2508 	case RTE_ETH_FILTER_FLUSH:
2509 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
2510 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2511 
2512 			STAILQ_FOREACH(filter, &vnic->filter, next) {
2513 				if (filter->filter_type ==
2514 				    HWRM_CFA_NTUPLE_FILTER) {
2515 					ret =
2516 					bnxt_hwrm_clear_ntuple_filter(bp,
2517 								      filter);
2518 					STAILQ_REMOVE(&vnic->filter, filter,
2519 						      bnxt_filter_info, next);
2520 				}
2521 			}
2522 		}
2523 		return ret;
2524 	case RTE_ETH_FILTER_UPDATE:
2525 	case RTE_ETH_FILTER_STATS:
2526 	case RTE_ETH_FILTER_INFO:
2527 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
2528 		break;
2529 	default:
2530 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2531 		ret = -EINVAL;
2532 		break;
2533 	}
2534 	return ret;
2535 
2536 free_filter:
2537 	filter->fw_l2_filter_id = -1;
2538 	bnxt_free_filter(bp, filter);
2539 	return ret;
2540 }
2541 
2542 static int
2543 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
2544 		    enum rte_filter_type filter_type,
2545 		    enum rte_filter_op filter_op, void *arg)
2546 {
2547 	int ret = 0;
2548 
2549 	switch (filter_type) {
2550 	case RTE_ETH_FILTER_TUNNEL:
2551 		PMD_DRV_LOG(ERR,
2552 			"filter type: %d: To be implemented\n", filter_type);
2553 		break;
2554 	case RTE_ETH_FILTER_FDIR:
2555 		ret = bnxt_fdir_filter(dev, filter_op, arg);
2556 		break;
2557 	case RTE_ETH_FILTER_NTUPLE:
2558 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
2559 		break;
2560 	case RTE_ETH_FILTER_ETHERTYPE:
2561 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
2562 		break;
2563 	case RTE_ETH_FILTER_GENERIC:
2564 		if (filter_op != RTE_ETH_FILTER_GET)
2565 			return -EINVAL;
2566 		*(const void **)arg = &bnxt_flow_ops;
2567 		break;
2568 	default:
2569 		PMD_DRV_LOG(ERR,
2570 			"Filter type (%d) not supported", filter_type);
2571 		ret = -EINVAL;
2572 		break;
2573 	}
2574 	return ret;
2575 }
2576 
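/*
 * The advertised packet types are only valid for the default (non-vector)
 * receive burst function, so NULL is returned when a different rx_pkt_burst
 * handler is installed.
 */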
2577 static const uint32_t *
2578 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2579 {
2580 	static const uint32_t ptypes[] = {
2581 		RTE_PTYPE_L2_ETHER_VLAN,
2582 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2583 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2584 		RTE_PTYPE_L4_ICMP,
2585 		RTE_PTYPE_L4_TCP,
2586 		RTE_PTYPE_L4_UDP,
2587 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2588 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2589 		RTE_PTYPE_INNER_L4_ICMP,
2590 		RTE_PTYPE_INNER_L4_TCP,
2591 		RTE_PTYPE_INNER_L4_UDP,
2592 		RTE_PTYPE_UNKNOWN
2593 	};
2594 
2595 	if (dev->rx_pkt_burst == bnxt_recv_pkts)
2596 		return ptypes;
2597 	return NULL;
2598 }
2599 
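/*
 * Map a group of GRC register addresses through PCI BAR0 window 'reg_win'
 * (1-based).  All addresses must share the same 4KB page, otherwise -ERANGE
 * is returned; the page base is programmed into the window register at
 * BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4.
 */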
2600 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
2601 			 int reg_win)
2602 {
2603 	uint32_t reg_base = *reg_arr & 0xfffff000;
2604 	uint32_t win_off;
2605 	int i;
2606 
2607 	for (i = 0; i < count; i++) {
2608 		if ((reg_arr[i] & 0xfffff000) != reg_base)
2609 			return -ERANGE;
2610 	}
2611 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
2612 	rte_write32(rte_cpu_to_le_32(reg_base), (uint8_t *)bp->bar0 + win_off);
2613 	return 0;
2614 }
2615 
2616 static int bnxt_map_ptp_regs(struct bnxt *bp)
2617 {
2618 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2619 	uint32_t *reg_arr;
2620 	int rc, i;
2621 
2622 	reg_arr = ptp->rx_regs;
2623 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
2624 	if (rc)
2625 		return rc;
2626 
2627 	reg_arr = ptp->tx_regs;
2628 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
2629 	if (rc)
2630 		return rc;
2631 
2632 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
2633 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
2634 
2635 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
2636 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
2637 
2638 	return 0;
2639 }
2640 
2641 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
2642 {
2643 	rte_write32(0, (uint8_t *)bp->bar0 +
2644 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
2645 	rte_write32(0, (uint8_t *)bp->bar0 +
2646 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
2647 }
2648 
2649 static uint64_t bnxt_cc_read(struct bnxt *bp)
2650 {
2651 	uint64_t ns;
2652 
2653 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2654 			      BNXT_GRCPF_REG_SYNC_TIME));
2655 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2656 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
2657 	return ns;
2658 }
2659 
2660 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
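/*
 * Read the latest TX PTP timestamp from the mapped FIFO registers: bail out
 * with -EAGAIN while the FIFO reports empty, otherwise assemble the 64-bit
 * timestamp from the TS_L/TS_H register pair.
 */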
2661 {
2662 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2663 	uint32_t fifo;
2664 
2665 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2666 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2667 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
2668 		return -EAGAIN;
2669 
2670 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2671 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2672 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2673 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
2674 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2675 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
2676 
2677 	return 0;
2678 }
2679 
2680 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
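/*
 * Read the pending RX PTP timestamp for this port: return -EAGAIN when
 * nothing is pending, advance the per-port FIFO, and report -EBUSY if an
 * entry is still pending after the advance; otherwise assemble the 64-bit
 * timestamp from the TS_L/TS_H register pair.
 */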
2681 {
2682 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2683 	struct bnxt_pf_info *pf = &bp->pf;
2684 	uint16_t port_id;
2685 	uint32_t fifo;
2686 
2687 	if (!ptp)
2688 		return -ENODEV;
2689 
2690 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2691 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2692 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
2693 		return -EAGAIN;
2694 
2695 	port_id = pf->port_id;
2696 	rte_write32(rte_cpu_to_le_32(1 << port_id), (uint8_t *)bp->bar0 +
2697 		    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
2698 
2699 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2700 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2701 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
2702 /*		bnxt_clr_rx_ts(bp);	  TBD  */
2703 		return -EBUSY;
2704 	}
2705 
2706 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2707 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
2708 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2709 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
2710 
2711 	return 0;
2712 }
2713 
2714 static int
2715 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2716 {
2717 	uint64_t ns;
2718 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2719 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2720 
2721 	if (!ptp)
2722 		return 0;
2723 
2724 	ns = rte_timespec_to_ns(ts);
2725 	/* Set the timecounters to a new value. */
2726 	ptp->tc.nsec = ns;
2727 
2728 	return 0;
2729 }
2730 
2731 static int
2732 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2733 {
2734 	uint64_t ns, systime_cycles;
2735 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2736 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2737 
2738 	if (!ptp)
2739 		return 0;
2740 
2741 	systime_cycles = bnxt_cc_read(bp);
2742 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
2743 	*ts = rte_ns_to_timespec(ns);
2744 
2745 	return 0;
2746 }
2747 }

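/*
 * Enable PTP timestamping: request RX event filtering and TX timestamps from
 * firmware via bnxt_hwrm_ptp_cfg(), map the PTP GRC registers on success and
 * reset the free-running, RX and TX timecounters using BNXT_CYCLECOUNTER_MASK
 * with a zero shift.
 */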
2748 bnxt_timesync_enable(struct rte_eth_dev *dev)
2749 {
2750 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2751 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2752 	uint32_t shift = 0;
2753 
2754 	if (!ptp)
2755 		return 0;
2756 
2757 	ptp->rx_filter = 1;
2758 	ptp->tx_tstamp_en = 1;
2759 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
2760 
2761 	if (!bnxt_hwrm_ptp_cfg(bp))
2762 		bnxt_map_ptp_regs(bp);
2763 
2764 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
2765 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2766 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2767 
2768 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2769 	ptp->tc.cc_shift = shift;
2770 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
2771 
2772 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2773 	ptp->rx_tstamp_tc.cc_shift = shift;
2774 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2775 
2776 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2777 	ptp->tx_tstamp_tc.cc_shift = shift;
2778 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2779 
2780 	return 0;
2781 }
2782 
2783 static int
2784 bnxt_timesync_disable(struct rte_eth_dev *dev)
2785 {
2786 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2787 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2788 
2789 	if (!ptp)
2790 		return 0;
2791 
2792 	ptp->rx_filter = 0;
2793 	ptp->tx_tstamp_en = 0;
2794 	ptp->rxctl = 0;
2795 
2796 	bnxt_hwrm_ptp_cfg(bp);
2797 
2798 	bnxt_unmap_ptp_regs(bp);
2799 
2800 	return 0;
2801 }
2802 
2803 static int
2804 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2805 				 struct timespec *timestamp,
2806 				 uint32_t flags __rte_unused)
2807 {
2808 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2809 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2810 	uint64_t rx_tstamp_cycles = 0;
2811 	uint64_t ns;
2812 
2813 	if (!ptp)
2814 		return 0;
2815 
2816 	bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
2817 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
2818 	*timestamp = rte_ns_to_timespec(ns);
2819 	return  0;
2820 }
2821 
2822 static int
2823 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2824 				 struct timespec *timestamp)
2825 {
2826 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2827 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2828 	uint64_t tx_tstamp_cycles = 0;
2829 	uint64_t ns;
2830 
2831 	if (!ptp)
2832 		return 0;
2833 
2834 	bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
2835 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
2836 	*timestamp = rte_ns_to_timespec(ns);
2837 
2838 	return 0;
2839 }
2840 
2841 static int
2842 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2843 {
2844 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2845 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2846 
2847 	if (!ptp)
2848 		return 0;
2849 
2850 	ptp->tc.nsec += delta;
2851 
2852 	return 0;
2853 }
2854 
2855 static int
2856 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2857 {
2858 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2859 	int rc;
2860 	uint32_t dir_entries;
2861 	uint32_t entry_length;
2862 
2863 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
2864 		bp->pdev->addr.domain, bp->pdev->addr.bus,
2865 		bp->pdev->addr.devid, bp->pdev->addr.function);
2866 
2867 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2868 	if (rc != 0)
2869 		return rc;
2870 
2871 	return dir_entries * entry_length;
2872 }
2873 
2874 static int
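/*
 * Read NVM contents through the eeprom API.  An offset of zero returns the
 * NVM directory itself; otherwise the upper byte of the offset is treated as
 * a one-based directory index and the low 24 bits as the byte offset within
 * that item.
 */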
2875 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2876 		struct rte_dev_eeprom_info *in_eeprom)
2877 {
2878 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2879 	uint32_t index;
2880 	uint32_t offset;
2881 
2882 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2883 		"len = %d\n", bp->pdev->addr.domain,
2884 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2885 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2886 
2887 	if (in_eeprom->offset == 0) /* special offset value to get directory */
2888 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
2889 						in_eeprom->data);
2890 
2891 	index = in_eeprom->offset >> 24;
2892 	offset = in_eeprom->offset & 0xffffff;
2893 
2894 	if (index != 0)
2895 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
2896 					   in_eeprom->length, in_eeprom->data);
2897 
2898 	return 0;
2899 }
2900 
2901 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
2902 {
2903 	switch (dir_type) {
2904 	case BNX_DIR_TYPE_CHIMP_PATCH:
2905 	case BNX_DIR_TYPE_BOOTCODE:
2906 	case BNX_DIR_TYPE_BOOTCODE_2:
2907 	case BNX_DIR_TYPE_APE_FW:
2908 	case BNX_DIR_TYPE_APE_PATCH:
2909 	case BNX_DIR_TYPE_KONG_FW:
2910 	case BNX_DIR_TYPE_KONG_PATCH:
2911 	case BNX_DIR_TYPE_BONO_FW:
2912 	case BNX_DIR_TYPE_BONO_PATCH:
2913 		return true;
2914 	}
2915 
2916 	return false;
2917 }
2918 
2919 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
2920 {
2921 	switch (dir_type) {
2922 	case BNX_DIR_TYPE_AVS:
2923 	case BNX_DIR_TYPE_EXP_ROM_MBA:
2924 	case BNX_DIR_TYPE_PCIE:
2925 	case BNX_DIR_TYPE_TSCF_UCODE:
2926 	case BNX_DIR_TYPE_EXT_PHY:
2927 	case BNX_DIR_TYPE_CCM:
2928 	case BNX_DIR_TYPE_ISCSI_BOOT:
2929 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2930 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2931 		return true;
2932 	}
2933 
2934 	return false;
2935 }
2936 
2937 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
2938 {
2939 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2940 		bnxt_dir_type_is_other_exec_format(dir_type);
2941 }
2942 
2943 static int
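/*
 * Write NVM contents through the eeprom API (PF only).  The upper 16 bits of
 * 'magic' select the NVM item type; the special value 0xffff requests a
 * directory operation where the low byte of 'magic' is a one-based index and
 * the next byte the operation code (0x0e = erase, which also requires
 * offset == ~magic).  For ordinary items the ordinal and attributes are taken
 * from the offset field and executable item types are refused.
 */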
2944 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
2945 		struct rte_dev_eeprom_info *in_eeprom)
2946 {
2947 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2948 	uint8_t index, dir_op;
2949 	uint16_t type, ext, ordinal, attr;
2950 
2951 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2952 		"len = %d\n", bp->pdev->addr.domain,
2953 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2954 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2955 
2956 	if (!BNXT_PF(bp)) {
2957 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
2958 		return -EINVAL;
2959 	}
2960 
2961 	type = in_eeprom->magic >> 16;
2962 
2963 	if (type == 0xffff) { /* special value for directory operations */
2964 		index = in_eeprom->magic & 0xff;
2965 		dir_op = in_eeprom->magic >> 8;
2966 		if (index == 0)
2967 			return -EINVAL;
2968 		switch (dir_op) {
2969 		case 0x0e: /* erase */
2970 			if (in_eeprom->offset != ~in_eeprom->magic)
2971 				return -EINVAL;
2972 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
2973 		default:
2974 			return -EINVAL;
2975 		}
2976 	}
2977 
2978 	/* Create or re-write an NVM item: */
2979 	if (bnxt_dir_type_is_executable(type))
2980 		return -EOPNOTSUPP;
2981 	ext = in_eeprom->magic & 0xffff;
2982 	ordinal = in_eeprom->offset >> 16;
2983 	attr = in_eeprom->offset & 0xffff;
2984 
2985 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
2986 				     in_eeprom->data, in_eeprom->length);
2988 }
2989 
2990 /*
2991  * Initialization
2992  */
2993 
2994 static const struct eth_dev_ops bnxt_dev_ops = {
2995 	.dev_infos_get = bnxt_dev_info_get_op,
2996 	.dev_close = bnxt_dev_close_op,
2997 	.dev_configure = bnxt_dev_configure_op,
2998 	.dev_start = bnxt_dev_start_op,
2999 	.dev_stop = bnxt_dev_stop_op,
3000 	.dev_set_link_up = bnxt_dev_set_link_up_op,
3001 	.dev_set_link_down = bnxt_dev_set_link_down_op,
3002 	.stats_get = bnxt_stats_get_op,
3003 	.stats_reset = bnxt_stats_reset_op,
3004 	.rx_queue_setup = bnxt_rx_queue_setup_op,
3005 	.rx_queue_release = bnxt_rx_queue_release_op,
3006 	.tx_queue_setup = bnxt_tx_queue_setup_op,
3007 	.tx_queue_release = bnxt_tx_queue_release_op,
3008 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3009 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3010 	.reta_update = bnxt_reta_update_op,
3011 	.reta_query = bnxt_reta_query_op,
3012 	.rss_hash_update = bnxt_rss_hash_update_op,
3013 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3014 	.link_update = bnxt_link_update_op,
3015 	.promiscuous_enable = bnxt_promiscuous_enable_op,
3016 	.promiscuous_disable = bnxt_promiscuous_disable_op,
3017 	.allmulticast_enable = bnxt_allmulticast_enable_op,
3018 	.allmulticast_disable = bnxt_allmulticast_disable_op,
3019 	.mac_addr_add = bnxt_mac_addr_add_op,
3020 	.mac_addr_remove = bnxt_mac_addr_remove_op,
3021 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
3022 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
3023 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3024 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3025 	.vlan_filter_set = bnxt_vlan_filter_set_op,
3026 	.vlan_offload_set = bnxt_vlan_offload_set_op,
3027 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
3028 	.mtu_set = bnxt_mtu_set_op,
3029 	.mac_addr_set = bnxt_set_default_mac_addr_op,
3030 	.xstats_get = bnxt_dev_xstats_get_op,
3031 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
3032 	.xstats_reset = bnxt_dev_xstats_reset_op,
3033 	.fw_version_get = bnxt_fw_version_get,
3034 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3035 	.rxq_info_get = bnxt_rxq_info_get_op,
3036 	.txq_info_get = bnxt_txq_info_get_op,
3037 	.dev_led_on = bnxt_dev_led_on_op,
3038 	.dev_led_off = bnxt_dev_led_off_op,
3039 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3040 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3041 	.rx_queue_count = bnxt_rx_queue_count_op,
3042 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
3043 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
3044 	.rx_queue_start = bnxt_rx_queue_start,
3045 	.rx_queue_stop = bnxt_rx_queue_stop,
3046 	.tx_queue_start = bnxt_tx_queue_start,
3047 	.tx_queue_stop = bnxt_tx_queue_stop,
3048 	.filter_ctrl = bnxt_filter_ctrl_op,
3049 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3050 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
3051 	.get_eeprom           = bnxt_get_eeprom_op,
3052 	.set_eeprom           = bnxt_set_eeprom_op,
3053 	.timesync_enable      = bnxt_timesync_enable,
3054 	.timesync_disable     = bnxt_timesync_disable,
3055 	.timesync_read_time   = bnxt_timesync_read_time,
3056 	.timesync_write_time   = bnxt_timesync_write_time,
3057 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3058 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3059 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3060 };
3061 
3062 static bool bnxt_vf_pciid(uint16_t id)
3063 {
3064 	if (id == BROADCOM_DEV_ID_57304_VF ||
3065 	    id == BROADCOM_DEV_ID_57406_VF ||
3066 	    id == BROADCOM_DEV_ID_5731X_VF ||
3067 	    id == BROADCOM_DEV_ID_5741X_VF ||
3068 	    id == BROADCOM_DEV_ID_57414_VF ||
3069 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
3070 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
3071 		return true;
3072 	return false;
3073 }
3074 
3075 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
3076 {
3077 	struct bnxt *bp = eth_dev->data->dev_private;
3078 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3079 	int rc;
3080 
3081 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
3082 	if (!pci_dev->mem_resource[0].addr) {
3083 		PMD_DRV_LOG(ERR,
3084 			"Cannot find PCI device base address, aborting\n");
3085 		rc = -ENODEV;
3086 		goto init_err_disable;
3087 	}
3088 
3089 	bp->eth_dev = eth_dev;
3090 	bp->pdev = pci_dev;
3091 
3092 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
3093 	if (!bp->bar0) {
3094 		PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
3095 		rc = -ENOMEM;
3096 		goto init_err_release;
3097 	}
3098 
3099 	if (!pci_dev->mem_resource[2].addr) {
3100 		PMD_DRV_LOG(ERR,
3101 			    "Cannot find PCI device BAR 2 address, aborting\n");
3102 		rc = -ENODEV;
3103 		goto init_err_release;
3104 	} else {
3105 		bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
3106 	}
3107 
3108 	return 0;
3109 
3110 init_err_release:
3111 	if (bp->bar0)
3112 		bp->bar0 = NULL;
3113 	if (bp->doorbell_base)
3114 		bp->doorbell_base = NULL;
3115 
3116 init_err_disable:
3117 
3118 	return rc;
3119 }
3120 
3121 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
3122 
3123 #define ALLOW_FUNC(x)	\
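/*
 * Clear the forwarding bit for HWRM command 'x' in the PF's vf_req_fwd
 * bitmap (32 command IDs per little-endian word), so that VF requests for
 * that command are handled directly instead of being forwarded to the PF
 * driver.
 */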
3124 	{ \
3125 		typeof(x) arg = (x); \
3126 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
3127 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
3128 	}
3129 static int
3130 bnxt_dev_init(struct rte_eth_dev *eth_dev)
3131 {
3132 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3133 	char mz_name[RTE_MEMZONE_NAMESIZE];
3134 	const struct rte_memzone *mz = NULL;
3135 	static int version_printed;
3136 	uint32_t total_alloc_len;
3137 	rte_iova_t mz_phys_addr;
3138 	struct bnxt *bp;
3139 	int rc;
3140 
3141 	if (version_printed++ == 0)
3142 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
3143 
3144 	rte_eth_copy_pci_info(eth_dev, pci_dev);
3145 
3146 	bp = eth_dev->data->dev_private;
3147 
3148 	bp->dev_stopped = 1;
3149 
3150 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3151 		goto skip_init;
3152 
3153 	if (bnxt_vf_pciid(pci_dev->id.device_id))
3154 		bp->flags |= BNXT_FLAG_VF;
3155 
3156 	rc = bnxt_init_board(eth_dev);
3157 	if (rc) {
3158 		PMD_DRV_LOG(ERR,
3159 			"Board initialization failed rc: %x\n", rc);
3160 		goto error;
3161 	}
3162 skip_init:
3163 	eth_dev->dev_ops = &bnxt_dev_ops;
3164 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3165 		return 0;
3166 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
3167 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
3168 
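	/*
	 * For PFs (other than the NS2 device) look up or reserve
	 * IOVA-contiguous memzones that hold the per-port RX and TX
	 * statistics blocks filled in by firmware, falling back to
	 * rte_mem_virt2iova() when the memzone IOVA looks untranslated.
	 */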
3169 	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
3170 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3171 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3172 			 pci_dev->addr.bus, pci_dev->addr.devid,
3173 			 pci_dev->addr.function, "rx_port_stats");
3174 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3175 		mz = rte_memzone_lookup(mz_name);
3176 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3177 				sizeof(struct rx_port_stats) + 512);
3178 		if (!mz) {
3179 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
3180 					SOCKET_ID_ANY,
3181 					RTE_MEMZONE_2MB |
3182 					RTE_MEMZONE_SIZE_HINT_ONLY |
3183 					RTE_MEMZONE_IOVA_CONTIG);
3184 			if (mz == NULL)
3185 				return -ENOMEM;
3186 		}
3187 		memset(mz->addr, 0, mz->len);
3188 		mz_phys_addr = mz->iova;
3189 		if ((unsigned long)mz->addr == mz_phys_addr) {
3190 			PMD_DRV_LOG(WARNING,
3191 				"Memzone physical address same as virtual.\n");
3192 			PMD_DRV_LOG(WARNING,
3193 				"Using rte_mem_virt2iova()\n");
3194 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3195 			if (mz_phys_addr == 0) {
3196 				PMD_DRV_LOG(ERR,
3197 				"unable to map address to physical memory\n");
3198 				return -ENOMEM;
3199 			}
3200 		}
3201 
3202 		bp->rx_mem_zone = (const void *)mz;
3203 		bp->hw_rx_port_stats = mz->addr;
3204 		bp->hw_rx_port_stats_map = mz_phys_addr;
3205 
3206 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3207 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3208 			 pci_dev->addr.bus, pci_dev->addr.devid,
3209 			 pci_dev->addr.function, "tx_port_stats");
3210 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3211 		mz = rte_memzone_lookup(mz_name);
3212 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3213 				sizeof(struct tx_port_stats) + 512);
3214 		if (!mz) {
3215 			mz = rte_memzone_reserve(mz_name,
3216 					total_alloc_len,
3217 					SOCKET_ID_ANY,
3218 					RTE_MEMZONE_2MB |
3219 					RTE_MEMZONE_SIZE_HINT_ONLY |
3220 					RTE_MEMZONE_IOVA_CONTIG);
3221 			if (mz == NULL)
3222 				return -ENOMEM;
3223 		}
3224 		memset(mz->addr, 0, mz->len);
3225 		mz_phys_addr = mz->iova;
3226 		if ((unsigned long)mz->addr == mz_phys_addr) {
3227 			PMD_DRV_LOG(WARNING,
3228 				"Memzone physical address same as virtual.\n");
3229 			PMD_DRV_LOG(WARNING,
3230 				"Using rte_mem_virt2iova()\n");
3231 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3232 			if (mz_phys_addr == 0) {
3233 				PMD_DRV_LOG(ERR,
3234 				"unable to map address to physical memory\n");
3235 				return -ENOMEM;
3236 			}
3237 		}
3238 
3239 		bp->tx_mem_zone = (const void *)mz;
3240 		bp->hw_tx_port_stats = mz->addr;
3241 		bp->hw_tx_port_stats_map = mz_phys_addr;
3242 
3243 		bp->flags |= BNXT_FLAG_PORT_STATS;
3244 	}
3245 
3246 	rc = bnxt_alloc_hwrm_resources(bp);
3247 	if (rc) {
3248 		PMD_DRV_LOG(ERR,
3249 			"hwrm resource allocation failure rc: %x\n", rc);
3250 		goto error_free;
3251 	}
3252 	rc = bnxt_hwrm_ver_get(bp);
3253 	if (rc)
3254 		goto error_free;
3255 	rc = bnxt_hwrm_queue_qportcfg(bp);
3256 	if (rc) {
3257 		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
3258 		goto error_free;
3259 	}
3260 
3261 	rc = bnxt_hwrm_func_qcfg(bp);
3262 	if (rc) {
3263 		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
3264 		goto error_free;
3265 	}
3266 
3267 	/* Get the MAX capabilities for this function */
3268 	rc = bnxt_hwrm_func_qcaps(bp);
3269 	if (rc) {
3270 		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
3271 		goto error_free;
3272 	}
3273 	if (bp->max_tx_rings == 0) {
3274 		PMD_DRV_LOG(ERR, "No TX rings available!\n");
3275 		rc = -EBUSY;
3276 		goto error_free;
3277 	}
3278 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
3279 					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
3280 	if (eth_dev->data->mac_addrs == NULL) {
3281 		PMD_DRV_LOG(ERR,
3282 			"Failed to alloc %u bytes needed to store MAC addr tbl",
3283 			ETHER_ADDR_LEN * bp->max_l2_ctx);
3284 		rc = -ENOMEM;
3285 		goto error_free;
3286 	}
3287 
3288 	if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
3289 		PMD_DRV_LOG(ERR,
3290 			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
3291 			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
3292 			    bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
3293 			    bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
3294 		rc = -EINVAL;
3295 		goto error_free;
3296 	}
3297 	/* Copy the permanent MAC from the qcap response address now. */
3298 	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
3299 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
3300 
3301 	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
3302 		/* 1 ring is for default completion ring */
3303 		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
3304 		rc = -ENOSPC;
3305 		goto error_free;
3306 	}
3307 
3308 	bp->grp_info = rte_zmalloc("bnxt_grp_info",
3309 				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
3310 	if (!bp->grp_info) {
3311 		PMD_DRV_LOG(ERR,
3312 			"Failed to alloc %zu bytes to store group info table\n",
3313 			sizeof(*bp->grp_info) * bp->max_ring_grps);
3314 		rc = -ENOMEM;
3315 		goto error_free;
3316 	}
3317 
3318 	/* Forward all requests if firmware is new enough */
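	/*
	 * The packed firmware version appears to be
	 * (major << 24) | (minor << 16) | (build << 8): forwarding is enabled
	 * for 20.6.100 <= version < 20.7.0 and for 20.8.0 or newer.
	 */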
3319 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
3320 	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
3321 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
3322 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
3323 	} else {
3324 		PMD_DRV_LOG(WARNING,
3325 			"Firmware too old for VF mailbox functionality\n");
3326 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
3327 	}
3328 
3329 	/*
3330 	 * The following are used for driver cleanup.  If we disallow these,
3331 	 * VF drivers can't clean up cleanly.
3332 	 */
3333 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
3334 	ALLOW_FUNC(HWRM_VNIC_FREE);
3335 	ALLOW_FUNC(HWRM_RING_FREE);
3336 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
3337 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
3338 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
3339 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
3340 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
3341 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
3342 	rc = bnxt_hwrm_func_driver_register(bp);
3343 	if (rc) {
3344 		PMD_DRV_LOG(ERR,
3345 			"Failed to register driver");
3346 		rc = -EBUSY;
3347 		goto error_free;
3348 	}
3349 
3350 	PMD_DRV_LOG(INFO,
3351 		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
3352 		pci_dev->mem_resource[0].phys_addr,
3353 		pci_dev->mem_resource[0].addr);
3354 
3355 	rc = bnxt_hwrm_func_reset(bp);
3356 	if (rc) {
3357 		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
3358 		rc = -EIO;
3359 		goto error_free;
3360 	}
3361 
3362 	if (BNXT_PF(bp)) {
3363 		//if (bp->pf.active_vfs) {
3364 			// TODO: Deallocate VF resources?
3365 		//}
3366 		if (bp->pdev->max_vfs) {
3367 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
3368 			if (rc) {
3369 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
3370 				goto error_free;
3371 			}
3372 		} else {
3373 			rc = bnxt_hwrm_allocate_pf_only(bp);
3374 			if (rc) {
3375 				PMD_DRV_LOG(ERR,
3376 					"Failed to allocate PF resources\n");
3377 				goto error_free;
3378 			}
3379 		}
3380 	}
3381 
3382 	bnxt_hwrm_port_led_qcaps(bp);
3383 
3384 	rc = bnxt_setup_int(bp);
3385 	if (rc)
3386 		goto error_free;
3387 
3388 	rc = bnxt_alloc_mem(bp);
3389 	if (rc)
3390 		goto error_free_int;
3391 
3392 	rc = bnxt_request_int(bp);
3393 	if (rc)
3394 		goto error_free_int;
3395 
3396 	rc = bnxt_alloc_def_cp_ring(bp);
3397 	if (rc)
3398 		goto error_free_int;
3399 
3400 	bnxt_enable_int(bp);
3401 	bnxt_init_nic(bp);
3402 
3403 	return 0;
3404 
3405 error_free_int:
3406 	bnxt_disable_int(bp);
3407 	bnxt_free_def_cp_ring(bp);
3408 	bnxt_hwrm_func_buf_unrgtr(bp);
3409 	bnxt_free_int(bp);
3410 	bnxt_free_mem(bp);
3411 error_free:
3412 	bnxt_dev_uninit(eth_dev);
3413 error:
3414 	return rc;
3415 }
3416 
3417 static int
3418 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
3419 	struct bnxt *bp = eth_dev->data->dev_private;
3420 	int rc;
3421 
3422 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3423 		return -EPERM;
3424 
3425 	bnxt_disable_int(bp);
3426 	bnxt_free_int(bp);
3427 	bnxt_free_mem(bp);
3428 	if (eth_dev->data->mac_addrs != NULL) {
3429 		rte_free(eth_dev->data->mac_addrs);
3430 		eth_dev->data->mac_addrs = NULL;
3431 	}
3432 	if (bp->grp_info != NULL) {
3433 		rte_free(bp->grp_info);
3434 		bp->grp_info = NULL;
3435 	}
3436 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3437 	bnxt_free_hwrm_resources(bp);
3438 	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3439 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3440 	if (bp->dev_stopped == 0)
3441 		bnxt_dev_close_op(eth_dev);
3442 	if (bp->pf.vf_info)
3443 		rte_free(bp->pf.vf_info);
3444 	eth_dev->dev_ops = NULL;
3445 	eth_dev->rx_pkt_burst = NULL;
3446 	eth_dev->tx_pkt_burst = NULL;
3447 
3448 	return rc;
3449 }
3450 
3451 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3452 	struct rte_pci_device *pci_dev)
3453 {
3454 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3455 		bnxt_dev_init);
3456 }
3457 
3458 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3459 {
3460 	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
3461 }
3462 
3463 static struct rte_pci_driver bnxt_rte_pmd = {
3464 	.id_table = bnxt_pci_id_map,
3465 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3466 		RTE_PCI_DRV_INTR_LSC,
3467 	.probe = bnxt_pci_probe,
3468 	.remove = bnxt_pci_remove,
3469 };
3470 
3471 static bool
3472 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3473 {
3474 	if (strcmp(dev->device->driver->name, drv->driver.name))
3475 		return false;
3476 
3477 	return true;
3478 }
3479 
3480 bool is_bnxt_supported(struct rte_eth_dev *dev)
3481 {
3482 	return is_device_supported(dev, &bnxt_rte_pmd);
3483 }
3484 
3485 RTE_INIT(bnxt_init_log);
3486 static void
3487 bnxt_init_log(void)
3488 {
3489 	bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
3490 	if (bnxt_logtype_driver >= 0)
3491 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
3492 }
3493 
3494 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3495 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3496 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
3497