/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i;
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	int rc;

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_vnic_rss_configure(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic set RSS failure rc: %x\n", rc);
			goto err_out;
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
					bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec\n",
				bp->eth_dev->data->nb_rx_queues);
			return -ENOMEM;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
	}

	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
	     queue_id++) {
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	rc = bnxt_init_ring_grps(bp);
	if (rc)
		return rc;

	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
						RTE_MIN(bp->max_rsscos_ctx,
						bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
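	/* The bnxt RSS hash key is a fixed 40 bytes (HW_HASH_KEY_SIZE) */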
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
					DEV_RX_OFFLOAD_IPV4_CKSUM |
					DEV_RX_OFFLOAD_UDP_CKSUM |
					DEV_RX_OFFLOAD_TCP_CKSUM |
					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
					DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
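	/*
	 * Search for the largest VMDq pool/queue combination the device can
	 * back: vpool steps down through 64/32/16/8 and vrxq through
	 * 128/64/32/16/8 until both fit within max_vnics and max_rx_queues.
	 */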
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
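	/*
	 * The "+ 1" terms below reserve the default completion ring and the
	 * default ring group.
	 */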
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
	    bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx ||
	    (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
		PMD_DRV_LOG(ERR,
			"Insufficient resources to support requested config\n");
		PMD_DRV_LOG(ERR,
			"Num Queues Requested: Tx %d, Rx %d\n",
			eth_dev->data->nb_tx_queues,
			eth_dev->data->nb_rx_queues);
		PMD_DRV_LOG(ERR,
			"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
			bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
			bp->max_stat_ctx, bp->max_ring_grps);
		return -ENOSPC;
	}

	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}
	bp->dev_stopped = 0;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 1);

	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	bp->flags |= BNXT_FLAG_INIT_DONE;
	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info.link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bp->flags &= ~BNXT_FLAG_INIT_DONE;
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < pool; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(ERR,
				"MAC address already exists for pool %d\n",
				pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
			sizeof(struct rte_eth_link));
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from uint64_t to uint16_t */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If the requested RSS settings do not match the RSS mode selected
	 * at dev_configure time, return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			PMD_DRV_LOG(ERR, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

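	/*
	 * Cache the requested RSS configuration so that it can be reapplied
	 * when the VNICs are reconfigured.
	 */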
	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			PMD_DRV_LOG(ERR,
				"Unknown RSS config from firmware (%08x), RSS disabled\n",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
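		/*
		 * Only one VXLAN destination port can be offloaded at a
		 * time; a repeat request for the same port just bumps the
		 * reference count.
		 */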
		if (bp->vxlan_port_cnt) {
			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				PMD_DRV_LOG(ERR, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				PMD_DRV_LOG(ERR, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}

static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk &&
				    filter->l2_ovlan == vlan_id) {
					/* Must delete the filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);

					/*
					 * Need to examine to see if the MAC
					 * filter already existed or not before
					 * allocating a new one
					 */

					new_filter = bnxt_alloc_filter(bp);
					if (!new_filter) {
						PMD_DRV_LOG(ERR,
							"MAC/VLAN filter alloc failed\n");
						rc = -ENOMEM;
						goto exit;
					}
					STAILQ_INSERT_TAIL(&vnic->filter,
							   new_filter, next);
					/* Inherit MAC from previous filter */
					new_filter->mac_index =
							filter->mac_index;
					memcpy(new_filter->l2_addr,
					       filter->l2_addr, ETHER_ADDR_LEN);
					/* MAC only filter */
					rc = bnxt_hwrm_set_l2_filter(bp,
							vnic->fw_vnic_id,
							new_filter);
					if (rc)
						goto exit;
					PMD_DRV_LOG(INFO,
						"Deleted VLAN filter for %d\n",
						vlan_id);
				}
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *   Remove the old MAC only filter
		 *    Add a new MAC+VLAN filter
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk) {
					if (filter->l2_ovlan == vlan_id)
						goto cont;
				} else {
					/* Must delete the MAC filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->l2_ovlan = 0;
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					PMD_DRV_LOG(ERR,
						"MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
						   next);
				/* Inherit MAC from the previous filter */
				new_filter->mac_index = filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				new_filter->l2_ovlan_mask = 0xF000;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				PMD_DRV_LOG(INFO,
					"Added VLAN filter for %d\n", vlan_id);
cont:
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}

static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_filter);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_strip);
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");

	return 0;
}

static void
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

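	/* The default MAC address cannot be changed on a VF */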
	if (BNXT_VF(bp))
		return;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		if (rc)
			break;
		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
		filter->mac_index = 0;
		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
	}
}

static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			  struct ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
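	/* fw_ver packs major.minor.update into its top three bytes */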
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}

static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev_info dev_info;
	uint32_t max_dev_mtu;
	uint32_t rc = 0;
	uint32_t i;

	bnxt_dev_info_get_op(eth_dev, &dev_info);
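	/* Leave headroom for the Ethernet header, CRC, and two VLAN tags */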
	max_dev_mtu = dev_info.max_rx_pktlen -
		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
			ETHER_MIN_MTU, max_dev_mtu);
		return -EINVAL;
	}

	if (new_mtu > ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	} else {
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	eth_dev->data->mtu = new_mtu;
	PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		if (rc)
			return rc;
	}

	return rc;
}

static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR,
			"PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}

static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}

static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint32_t desc = 0, raw_cons = 0, cons;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_queue *rxq;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t cmp_type;
	uint8_t cmp = 1;
	bool valid;

	rxq = dev->data->rx_queues[rx_queue_id];
	cpr = rxq->cp_ring;
	valid = cpr->valid;

	while (raw_cons < rxq->nb_rx_desc) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMPL_VALID(rxcmp, valid))
			goto nothing_to_do;
		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
		cmp_type = CMP_TYPE(rxcmp);
		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
			cmp = (rte_le_to_cpu_32(
					((struct rx_tpa_end_cmpl *)
					 (rxcmp))->agg_bufs_v1) &
			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
				RX_TPA_END_CMPL_AGG_BUFS_SFT;
			desc++;
		} else if (cmp_type == 0x11) {
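			/*
			 * 0x11 is an RX L2 completion; cmp is the number of
			 * aggregation buffer entries that follow it.
			 */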
			desc++;
			cmp = (rxcmp->agg_bufs_v1 &
				   RX_PKT_CMPL_AGG_BUFS_MASK) >>
				RX_PKT_CMPL_AGG_BUFS_SFT;
		} else {
			cmp = 1;
		}
nothing_to_do:
		raw_cons += cmp ? cmp : 2;
	}

	return desc;
}

static int
bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t cons, cp_cons;

	if (!rxq)
		return -EINVAL;

	cpr = rxq->cp_ring;
	rxr = rxq->rx_ring;

	if (offset >= rxq->nb_rx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	cp_cons = cpr->cp_raw_cons;
	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

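	/*
	 * The completion ring valid bit inverts on each ring wrap, so an
	 * entry past the current raw consumer index is checked against the
	 * opposite phase.
	 */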
	if (cons > cp_cons) {
		if (CMPL_VALID(rxcmp, cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	} else {
		if (CMPL_VALID(rxcmp, !cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	if (rx_buf->mbuf == NULL)
		return RTE_ETH_RX_DESC_UNAVAIL;

	return RTE_ETH_RX_DESC_AVAIL;
}

1692 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1693 {
1694 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1695 	struct bnxt_tx_ring_info *txr;
1696 	struct bnxt_cp_ring_info *cpr;
1697 	struct bnxt_sw_tx_bd *tx_buf;
1698 	struct tx_pkt_cmpl *txcmp;
1699 	uint32_t cons, cp_cons;
1700 
1701 	if (!txq)
1702 		return -EINVAL;
1703 
1704 	cpr = txq->cp_ring;
1705 	txr = txq->tx_ring;
1706 
1707 	if (offset >= txq->nb_tx_desc)
1708 		return -EINVAL;
1709 
1710 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1711 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1712 	cp_cons = cpr->cp_raw_cons;
1713 
1714 	if (cons > cp_cons) {
1715 		if (CMPL_VALID(txcmp, cpr->valid))
1716 			return RTE_ETH_TX_DESC_UNAVAIL;
1717 	} else {
1718 		if (CMPL_VALID(txcmp, !cpr->valid))
1719 			return RTE_ETH_TX_DESC_UNAVAIL;
1720 	}
1721 	tx_buf = &txr->tx_buf_ring[cons];
1722 	if (tx_buf->mbuf == NULL)
1723 		return RTE_ETH_TX_DESC_DONE;
1724 
1725 	return RTE_ETH_TX_DESC_FULL;
1726 }
1727 
1728 static struct bnxt_filter_info *
1729 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1730 				struct rte_eth_ethertype_filter *efilter,
1731 				struct bnxt_vnic_info *vnic0,
1732 				struct bnxt_vnic_info *vnic,
1733 				int *ret)
1734 {
1735 	struct bnxt_filter_info *mfilter = NULL;
1736 	int match = 0;
1737 	*ret = 0;
1738 
1739 	if (efilter->ether_type == ETHER_TYPE_IPv4 ||
1740 		efilter->ether_type == ETHER_TYPE_IPv6) {
1741 		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
1742 			" ethertype filter.", efilter->ether_type);
1743 		*ret = -EINVAL;
1744 		goto exit;
1745 	}
1746 	if (efilter->queue >= bp->rx_nr_rings) {
1747 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1748 		*ret = -EINVAL;
1749 		goto exit;
1750 	}
1751 
1752 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1753 	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1754 	if (vnic == NULL) {
1755 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1756 		*ret = -EINVAL;
1757 		goto exit;
1758 	}
1759 
1760 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1761 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1762 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1763 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1764 			     mfilter->flags ==
1765 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1766 			     mfilter->ethertype == efilter->ether_type)) {
1767 				match = 1;
1768 				break;
1769 			}
1770 		}
1771 	} else {
1772 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
1773 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1774 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1775 			     mfilter->ethertype == efilter->ether_type &&
1776 			     mfilter->flags ==
1777 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1778 				match = 1;
1779 				break;
1780 			}
1781 	}
1782 
1783 	if (match)
1784 		*ret = -EEXIST;
1785 
1786 exit:
1787 	return mfilter;
1788 }
1789 
static int
bnxt_ethertype_filter(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_ethertype_filter *efilter =
			(struct rte_eth_ethertype_filter *)arg;
	struct bnxt_filter_info *bfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.\n",
			    filter_op);
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		bnxt_match_and_validate_ether_filter(bp, efilter,
							vnic0, vnic, &ret);
		if (ret < 0)
			return ret;

		bfilter = bnxt_get_unused_filter(bp);
		if (bfilter == NULL) {
			PMD_DRV_LOG(ERR,
				"Not enough resources for a new filter.\n");
			return -ENOMEM;
		}
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
		bfilter->ethertype = efilter->ether_type;
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
		if (filter1 == NULL) {
			ret = -1;
			goto cleanup;
		}
		bfilter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;

		bfilter->dst_id = vnic->fw_vnic_id;

		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
			bfilter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		}

		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto cleanup;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
		break;
	case RTE_ETH_FILTER_DELETE:
		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
							vnic0, vnic, &ret);
		if (ret == -EEXIST) {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);

			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter1);
		} else if (ret == 0) {
			PMD_DRV_LOG(ERR, "No matching filter found\n");
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.\n", filter_op);
		ret = -EINVAL;
		goto error;
	}
	return ret;
cleanup:
	bnxt_free_filter(bp, bfilter);
error:
	return ret;
}

1882 static inline int
1883 parse_ntuple_filter(struct bnxt *bp,
1884 		    struct rte_eth_ntuple_filter *nfilter,
1885 		    struct bnxt_filter_info *bfilter)
1886 {
1887 	uint32_t en = 0;
1888 
1889 	if (nfilter->queue >= bp->rx_nr_rings) {
1890 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
1891 		return -EINVAL;
1892 	}
1893 
1894 	switch (nfilter->dst_port_mask) {
1895 	case UINT16_MAX:
1896 		bfilter->dst_port_mask = -1;
1897 		bfilter->dst_port = nfilter->dst_port;
1898 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
1899 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
1900 		break;
1901 	default:
1902 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
1903 		return -EINVAL;
1904 	}
1905 
1906 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
1907 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1908 
1909 	switch (nfilter->proto_mask) {
1910 	case UINT8_MAX:
1911 		if (nfilter->proto == 17) /* IPPROTO_UDP */
1912 			bfilter->ip_protocol = 17;
1913 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
1914 			bfilter->ip_protocol = 6;
1915 		else
1916 			return -EINVAL;
1917 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1918 		break;
1919 	default:
1920 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
1921 		return -EINVAL;
1922 	}
1923 
1924 	switch (nfilter->dst_ip_mask) {
1925 	case UINT32_MAX:
1926 		bfilter->dst_ipaddr_mask[0] = -1;
1927 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
1928 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
1929 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
1930 		break;
1931 	default:
1932 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
1933 		return -EINVAL;
1934 	}
1935 
1936 	switch (nfilter->src_ip_mask) {
1937 	case UINT32_MAX:
1938 		bfilter->src_ipaddr_mask[0] = -1;
1939 		bfilter->src_ipaddr[0] = nfilter->src_ip;
1940 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
1941 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
1942 		break;
1943 	default:
1944 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
1945 		return -EINVAL;
1946 	}
1947 
1948 	switch (nfilter->src_port_mask) {
1949 	case UINT16_MAX:
1950 		bfilter->src_port_mask = -1;
1951 		bfilter->src_port = nfilter->src_port;
1952 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
1953 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
1954 		break;
1955 	default:
1956 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
1957 		return -EINVAL;
1958 	}
1959 
	/* TODO: priority */
	/* nfilter->priority = (uint8_t)filter->priority; */
1962 
1963 	bfilter->enables = en;
1964 	return 0;
1965 }
1966 
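/*
 * Scan all VNICs, newest first, for a filter whose 5-tuple, flags and
 * enables exactly match @bfilter.  The owning VNIC is reported through
 * @mvnic so a caller can re-target a duplicate filter to a new queue.
 */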
1967 static struct bnxt_filter_info*
1968 bnxt_match_ntuple_filter(struct bnxt *bp,
1969 			 struct bnxt_filter_info *bfilter,
1970 			 struct bnxt_vnic_info **mvnic)
1971 {
1972 	struct bnxt_filter_info *mfilter = NULL;
1973 	int i;
1974 
1975 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
1976 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1977 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
1978 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
1979 			    bfilter->src_ipaddr_mask[0] ==
1980 			    mfilter->src_ipaddr_mask[0] &&
1981 			    bfilter->src_port == mfilter->src_port &&
1982 			    bfilter->src_port_mask == mfilter->src_port_mask &&
1983 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
1984 			    bfilter->dst_ipaddr_mask[0] ==
1985 			    mfilter->dst_ipaddr_mask[0] &&
1986 			    bfilter->dst_port == mfilter->dst_port &&
1987 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
1988 			    bfilter->flags == mfilter->flags &&
1989 			    bfilter->enables == mfilter->enables) {
1990 				if (mvnic)
1991 					*mvnic = vnic;
1992 				return mfilter;
1993 			}
1994 		}
1995 	}
1996 	return NULL;
1997 }
1998 
1999 static int
2000 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2001 		       struct rte_eth_ntuple_filter *nfilter,
2002 		       enum rte_filter_op filter_op)
2003 {
2004 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2005 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2006 	int ret;
2007 
2008 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2009 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2010 		return -EINVAL;
2011 	}
2012 
2013 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2014 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2015 		return -EINVAL;
2016 	}
2017 
2018 	bfilter = bnxt_get_unused_filter(bp);
2019 	if (bfilter == NULL) {
2020 		PMD_DRV_LOG(ERR,
2021 			"Not enough resources for a new filter.\n");
2022 		return -ENOMEM;
2023 	}
2024 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2025 	if (ret < 0)
2026 		goto free_filter;
2027 
2028 	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
2029 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2030 	filter1 = STAILQ_FIRST(&vnic0->filter);
2031 	if (filter1 == NULL) {
2032 		ret = -1;
2033 		goto free_filter;
2034 	}
2035 
2036 	bfilter->dst_id = vnic->fw_vnic_id;
2037 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2038 	bfilter->enables |=
2039 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2040 	bfilter->ethertype = 0x800;
2041 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2042 
2043 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2044 
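	/*
	 * Three cases are handled below: an identical filter on the same
	 * destination ring is rejected with -EEXIST; a matching pattern
	 * aimed at a different ring re-targets the existing filter
	 * rather than allocating a new one; and a delete request with no
	 * match fails with -ENOENT.
	 */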
2045 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2046 	    bfilter->dst_id == mfilter->dst_id) {
2047 		PMD_DRV_LOG(ERR, "filter exists.\n");
2048 		ret = -EEXIST;
2049 		goto free_filter;
2050 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2051 		   bfilter->dst_id != mfilter->dst_id) {
2052 		mfilter->dst_id = vnic->fw_vnic_id;
2053 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2054 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2055 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2056 		PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2057 		PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2058 		goto free_filter;
2059 	}
2060 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2061 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
2062 		ret = -ENOENT;
2063 		goto free_filter;
2064 	}
2065 
2066 	if (filter_op == RTE_ETH_FILTER_ADD) {
2067 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2068 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2069 		if (ret)
2070 			goto free_filter;
2071 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2072 	} else {
2073 		if (mfilter == NULL) {
2074 			/* This should not happen. But for Coverity! */
2075 			ret = -ENOENT;
2076 			goto free_filter;
2077 		}
		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);

		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
		mfilter->fw_l2_filter_id = -1;
		bnxt_free_filter(bp, mfilter);
		bfilter->fw_l2_filter_id = -1;
		bnxt_free_filter(bp, bfilter);
2085 	}
2086 
2087 	return 0;
2088 free_filter:
2089 	bfilter->fw_l2_filter_id = -1;
2090 	bnxt_free_filter(bp, bfilter);
2091 	return ret;
2092 }
2093 
2094 static int
2095 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2096 			enum rte_filter_op filter_op,
2097 			void *arg)
2098 {
2099 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2100 	int ret;
2101 
2102 	if (filter_op == RTE_ETH_FILTER_NOP)
2103 		return 0;
2104 
2105 	if (arg == NULL) {
2106 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2107 			    filter_op);
2108 		return -EINVAL;
2109 	}
2110 
2111 	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH */
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
2122 	default:
2123 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2124 		ret = -EINVAL;
2125 		break;
2126 	}
2127 	return ret;
2128 }
2129 
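/*
 * Translate an rte_eth_fdir_filter into the HWRM ntuple filter layout,
 * accumulating the "enables" bitmap as each field is populated.  Flow
 * director rules reuse the same HWRM ntuple filter primitive as the
 * ntuple filter ops above.
 */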
2130 static int
2131 bnxt_parse_fdir_filter(struct bnxt *bp,
2132 		       struct rte_eth_fdir_filter *fdir,
2133 		       struct bnxt_filter_info *filter)
2134 {
2135 	enum rte_fdir_mode fdir_mode =
2136 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2137 	struct bnxt_vnic_info *vnic0, *vnic;
2138 	struct bnxt_filter_info *filter1;
2139 	uint32_t en = 0;
2140 	int i;
2141 
2142 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2143 		return -EINVAL;
2144 
2145 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2146 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2147 
2148 	switch (fdir->input.flow_type) {
2149 	case RTE_ETH_FLOW_IPV4:
2150 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2151 		/* FALLTHROUGH */
2152 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2153 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2154 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2155 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2156 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2157 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2158 		filter->ip_addr_type =
2159 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2160 		filter->src_ipaddr_mask[0] = 0xffffffff;
2161 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2162 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2163 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2164 		filter->ethertype = 0x800;
2165 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2166 		break;
2167 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2168 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2169 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2170 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2171 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2172 		filter->dst_port_mask = 0xffff;
2173 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2174 		filter->src_port_mask = 0xffff;
2175 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2176 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2177 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2178 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2179 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2180 		filter->ip_protocol = 6;
2181 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2182 		filter->ip_addr_type =
2183 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2184 		filter->src_ipaddr_mask[0] = 0xffffffff;
2185 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2186 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2187 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2188 		filter->ethertype = 0x800;
2189 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2190 		break;
2191 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2192 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2193 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2194 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2195 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2196 		filter->dst_port_mask = 0xffff;
2197 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2198 		filter->src_port_mask = 0xffff;
2199 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2200 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2201 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2202 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2203 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2204 		filter->ip_protocol = 17;
2205 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2206 		filter->ip_addr_type =
2207 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2208 		filter->src_ipaddr_mask[0] = 0xffffffff;
2209 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2210 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2211 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2212 		filter->ethertype = 0x800;
2213 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2214 		break;
2215 	case RTE_ETH_FLOW_IPV6:
2216 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2217 		/* FALLTHROUGH */
2218 		filter->ip_addr_type =
2219 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2220 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2221 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2222 		rte_memcpy(filter->src_ipaddr,
2223 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2224 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2225 		rte_memcpy(filter->dst_ipaddr,
2226 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2227 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2228 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2229 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2230 		memset(filter->src_ipaddr_mask, 0xff, 16);
2231 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2232 		filter->ethertype = 0x86dd;
2233 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2234 		break;
2235 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2236 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2237 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2238 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2239 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2240 		filter->dst_port_mask = 0xffff;
2241 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2242 		filter->src_port_mask = 0xffff;
2243 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2244 		filter->ip_addr_type =
2245 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2246 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2247 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2248 		rte_memcpy(filter->src_ipaddr,
2249 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2250 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2251 		rte_memcpy(filter->dst_ipaddr,
2252 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2253 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2254 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2255 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2256 		memset(filter->src_ipaddr_mask, 0xff, 16);
2257 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2258 		filter->ethertype = 0x86dd;
2259 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2260 		break;
2261 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2262 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2263 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2264 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2265 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2266 		filter->dst_port_mask = 0xffff;
2267 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2268 		filter->src_port_mask = 0xffff;
2269 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2270 		filter->ip_addr_type =
2271 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2272 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2273 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2274 		rte_memcpy(filter->src_ipaddr,
2275 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
2276 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2277 		rte_memcpy(filter->dst_ipaddr,
2278 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2279 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2280 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2281 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2282 		memset(filter->src_ipaddr_mask, 0xff, 16);
2283 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2284 		filter->ethertype = 0x86dd;
2285 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2286 		break;
2287 	case RTE_ETH_FLOW_L2_PAYLOAD:
2288 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2289 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2290 		break;
2291 	case RTE_ETH_FLOW_VXLAN:
2292 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2293 			return -EINVAL;
2294 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2295 		filter->tunnel_type =
2296 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2297 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2298 		break;
2299 	case RTE_ETH_FLOW_NVGRE:
2300 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2301 			return -EINVAL;
2302 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2303 		filter->tunnel_type =
2304 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2305 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2306 		break;
2307 	case RTE_ETH_FLOW_UNKNOWN:
2308 	case RTE_ETH_FLOW_RAW:
2309 	case RTE_ETH_FLOW_FRAG_IPV4:
2310 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2311 	case RTE_ETH_FLOW_FRAG_IPV6:
2312 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2313 	case RTE_ETH_FLOW_IPV6_EX:
2314 	case RTE_ETH_FLOW_IPV6_TCP_EX:
2315 	case RTE_ETH_FLOW_IPV6_UDP_EX:
2316 	case RTE_ETH_FLOW_GENEVE:
2317 		/* FALLTHROUGH */
2318 	default:
2319 		return -EINVAL;
2320 	}
2321 
2322 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2323 	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2324 	if (vnic == NULL) {
2325 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
2326 		return -EINVAL;
2327 	}

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}
2335 
2336 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2337 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2338 		filter1 = STAILQ_FIRST(&vnic0->filter);
		/* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		/* Use the default L2 filter unless a dest MAC was given. */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] != 0x00)
				break;
		if (i == ETHER_ADDR_LEN)
			filter1 = STAILQ_FIRST(&vnic0->filter);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}
2348 
2349 	if (filter1 == NULL)
2350 		return -EINVAL;
2351 
2352 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2353 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2354 
2355 	filter->enables = en;
2356 
2357 	return 0;
2358 }
2359 
2360 static struct bnxt_filter_info *
2361 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
2362 {
2363 	struct bnxt_filter_info *mf = NULL;
2364 	int i;
2365 
2366 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2367 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2368 
2369 		STAILQ_FOREACH(mf, &vnic->filter, next) {
2370 			if (mf->filter_type == nf->filter_type &&
2371 			    mf->flags == nf->flags &&
2372 			    mf->src_port == nf->src_port &&
2373 			    mf->src_port_mask == nf->src_port_mask &&
2374 			    mf->dst_port == nf->dst_port &&
2375 			    mf->dst_port_mask == nf->dst_port_mask &&
2376 			    mf->ip_protocol == nf->ip_protocol &&
2377 			    mf->ip_addr_type == nf->ip_addr_type &&
2378 			    mf->ethertype == nf->ethertype &&
2379 			    mf->vni == nf->vni &&
2380 			    mf->tunnel_type == nf->tunnel_type &&
2381 			    mf->l2_ovlan == nf->l2_ovlan &&
2382 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2383 			    mf->l2_ivlan == nf->l2_ivlan &&
2384 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2385 			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2386 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2387 				    ETHER_ADDR_LEN) &&
2388 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
2389 				    ETHER_ADDR_LEN) &&
2390 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2391 				    ETHER_ADDR_LEN) &&
2392 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2393 				    sizeof(nf->src_ipaddr)) &&
2394 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2395 				    sizeof(nf->src_ipaddr_mask)) &&
2396 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2397 				    sizeof(nf->dst_ipaddr)) &&
2398 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2399 				    sizeof(nf->dst_ipaddr_mask)))
2400 				return mf;
2401 		}
2402 	}
2403 	return NULL;
2404 }
2405 
2406 static int
2407 bnxt_fdir_filter(struct rte_eth_dev *dev,
2408 		 enum rte_filter_op filter_op,
2409 		 void *arg)
2410 {
2411 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2412 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
2413 	struct bnxt_filter_info *filter, *match;
2414 	struct bnxt_vnic_info *vnic;
2415 	int ret = 0, i;
2416 
2417 	if (filter_op == RTE_ETH_FILTER_NOP)
2418 		return 0;
2419 
2420 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2421 		return -EINVAL;
2422 
2423 	switch (filter_op) {
2424 	case RTE_ETH_FILTER_ADD:
2425 	case RTE_ETH_FILTER_DELETE:
2426 		/* FALLTHROUGH */
2427 		filter = bnxt_get_unused_filter(bp);
2428 		if (filter == NULL) {
2429 			PMD_DRV_LOG(ERR,
2430 				"Not enough resources for a new flow.\n");
2431 			return -ENOMEM;
2432 		}
2433 
2434 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2435 		if (ret != 0)
2436 			goto free_filter;
2437 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2438 
2439 		match = bnxt_match_fdir(bp, filter);
2440 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2441 			PMD_DRV_LOG(ERR, "Flow already exists.\n");
2442 			ret = -EEXIST;
2443 			goto free_filter;
2444 		}
2445 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2446 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
2447 			ret = -ENOENT;
2448 			goto free_filter;
2449 		}
2450 
2451 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2452 			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
2453 		else
2454 			vnic =
2455 			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2456 
2457 		if (filter_op == RTE_ETH_FILTER_ADD) {
2458 			ret = bnxt_hwrm_set_ntuple_filter(bp,
2459 							  filter->dst_id,
2460 							  filter);
2461 			if (ret)
2462 				goto free_filter;
2463 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2464 		} else {
2465 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2466 			STAILQ_REMOVE(&vnic->filter, match,
2467 				      bnxt_filter_info, next);
2468 			bnxt_free_filter(bp, match);
2469 			filter->fw_l2_filter_id = -1;
2470 			bnxt_free_filter(bp, filter);
2471 		}
2472 		break;
	case RTE_ETH_FILTER_FLUSH:
		for (i = bp->nr_vnics - 1; i >= 0; i--) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			struct bnxt_filter_info *next_flt;

			/*
			 * STAILQ_REMOVE invalidates the iterator, so
			 * fetch the next element before clearing the
			 * current one.
			 */
			for (filter = STAILQ_FIRST(&vnic->filter);
			     filter != NULL; filter = next_flt) {
				next_flt = STAILQ_NEXT(filter, next);
				if (filter->filter_type ==
				    HWRM_CFA_NTUPLE_FILTER) {
					ret =
					bnxt_hwrm_clear_ntuple_filter(bp,
								      filter);
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
				}
			}
		}
		return ret;
2489 	case RTE_ETH_FILTER_UPDATE:
2490 	case RTE_ETH_FILTER_STATS:
2491 	case RTE_ETH_FILTER_INFO:
2492 		/* FALLTHROUGH */
2493 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
2494 		break;
2495 	default:
2496 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2497 		ret = -EINVAL;
2498 		break;
2499 	}
2500 	return ret;
2501 
2502 free_filter:
2503 	filter->fw_l2_filter_id = -1;
2504 	bnxt_free_filter(bp, filter);
2505 	return ret;
2506 }
2507 
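/*
 * Top-level dispatch for the eth_dev filter_ctrl hook.  A minimal
 * application-side sketch (port and filter values hypothetical):
 *
 *	struct rte_eth_ntuple_filter nf = {
 *		.flags = RTE_5TUPLE_FLAGS,
 *		.dst_ip = dst_ip, .dst_ip_mask = UINT32_MAX,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				RTE_ETH_FILTER_ADD, &nf);
 *
 * RTE_ETH_FILTER_GENERIC does not configure anything here; it only
 * hands back the rte_flow ops table.
 */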
2508 static int
2509 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
2510 		    enum rte_filter_type filter_type,
2511 		    enum rte_filter_op filter_op, void *arg)
2512 {
2513 	int ret = 0;
2514 
2515 	switch (filter_type) {
2516 	case RTE_ETH_FILTER_TUNNEL:
2517 		PMD_DRV_LOG(ERR,
2518 			"filter type: %d: To be implemented\n", filter_type);
2519 		break;
2520 	case RTE_ETH_FILTER_FDIR:
2521 		ret = bnxt_fdir_filter(dev, filter_op, arg);
2522 		break;
2523 	case RTE_ETH_FILTER_NTUPLE:
2524 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
2525 		break;
2526 	case RTE_ETH_FILTER_ETHERTYPE:
2527 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
2528 		break;
2529 	case RTE_ETH_FILTER_GENERIC:
2530 		if (filter_op != RTE_ETH_FILTER_GET)
2531 			return -EINVAL;
2532 		*(const void **)arg = &bnxt_flow_ops;
2533 		break;
2534 	default:
2535 		PMD_DRV_LOG(ERR,
2536 			"Filter type (%d) not supported", filter_type);
2537 		ret = -EINVAL;
2538 		break;
2539 	}
2540 	return ret;
2541 }
2542 
2543 static const uint32_t *
2544 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2545 {
2546 	static const uint32_t ptypes[] = {
2547 		RTE_PTYPE_L2_ETHER_VLAN,
2548 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2549 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2550 		RTE_PTYPE_L4_ICMP,
2551 		RTE_PTYPE_L4_TCP,
2552 		RTE_PTYPE_L4_UDP,
2553 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2554 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2555 		RTE_PTYPE_INNER_L4_ICMP,
2556 		RTE_PTYPE_INNER_L4_TCP,
2557 		RTE_PTYPE_INNER_L4_UDP,
2558 		RTE_PTYPE_UNKNOWN
2559 	};
2560 
2561 	if (dev->rx_pkt_burst == bnxt_recv_pkts)
2562 		return ptypes;
2563 	return NULL;
2564 }
2565 
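/*
 * Map a group of GRC registers through a BAR0 register window.  All
 * registers in @reg_arr must share one 4KB page; that page base is
 * programmed into the window-base register, after which the page shows
 * up at BAR0 offset reg_win * 0x1000.  For instance, the PTP RX
 * registers below use window 5, so a register at GRC address
 * 0x12345678 would be accessible at BAR0 offset 0x5000 + 0x678.
 */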
2566 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
2567 			 int reg_win)
2568 {
2569 	uint32_t reg_base = *reg_arr & 0xfffff000;
2570 	uint32_t win_off;
2571 	int i;
2572 
2573 	for (i = 0; i < count; i++) {
2574 		if ((reg_arr[i] & 0xfffff000) != reg_base)
2575 			return -ERANGE;
2576 	}
2577 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
	rte_write32(rte_cpu_to_le_32(reg_base), (uint8_t *)bp->bar0 + win_off);
2579 	return 0;
2580 }
2581 
2582 static int bnxt_map_ptp_regs(struct bnxt *bp)
2583 {
2584 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2585 	uint32_t *reg_arr;
2586 	int rc, i;
2587 
2588 	reg_arr = ptp->rx_regs;
2589 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
2590 	if (rc)
2591 		return rc;
2592 
2593 	reg_arr = ptp->tx_regs;
2594 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
2595 	if (rc)
2596 		return rc;
2597 
2598 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
2599 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
2600 
2601 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
2602 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
2603 
2604 	return 0;
2605 }
2606 
2607 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
2608 {
	rte_write32(0, (uint8_t *)bp->bar0 +
		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
	rte_write32(0, (uint8_t *)bp->bar0 +
		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
2613 }
2614 
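/*
 * Read the free-running PHC as two little-endian 32-bit halves.  The
 * halves are not read atomically, so a low-word wrap between the two
 * reads could in principle produce a torn value.
 */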
2615 static uint64_t bnxt_cc_read(struct bnxt *bp)
2616 {
2617 	uint64_t ns;
2618 
2619 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2620 			      BNXT_GRCPF_REG_SYNC_TIME));
2621 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2622 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
2623 	return ns;
2624 }
2625 
2626 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
2627 {
2628 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2629 	uint32_t fifo;
2630 
2631 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2632 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2633 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
2634 		return -EAGAIN;
2635 
2636 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2637 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2638 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2639 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
2640 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2641 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
2642 
2643 	return 0;
2644 }
2645 
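/*
 * Pop one RX timestamp from the PTP FIFO.  Writing this port's bit to
 * the FIFO-advance register appears to discard the head entry; if the
 * FIFO still reports a pending entry afterwards, the head could not be
 * cleared and -EBUSY is returned (see the clearing TBD below).
 */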
2646 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
2647 {
2648 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2649 	struct bnxt_pf_info *pf = &bp->pf;
2650 	uint16_t port_id;
2651 	uint32_t fifo;
2652 
2653 	if (!ptp)
2654 		return -ENODEV;
2655 
2656 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2657 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2658 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
2659 		return -EAGAIN;
2660 
2661 	port_id = pf->port_id;
	rte_write32(rte_cpu_to_le_32(1 << port_id), (uint8_t *)bp->bar0 +
		    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
2664 
2665 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2666 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2667 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
2668 /*		bnxt_clr_rx_ts(bp);	  TBD  */
2669 		return -EBUSY;
2670 	}
2671 
2672 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2673 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
2674 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2675 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
2676 
2677 	return 0;
2678 }
2679 
2680 static int
2681 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2682 {
2683 	uint64_t ns;
2684 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2685 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2686 
2687 	if (!ptp)
2688 		return 0;
2689 
2690 	ns = rte_timespec_to_ns(ts);
2691 	/* Set the timecounters to a new value. */
2692 	ptp->tc.nsec = ns;
2693 
2694 	return 0;
2695 }
2696 
2697 static int
2698 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2699 {
2700 	uint64_t ns, systime_cycles;
2701 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2702 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2703 
2704 	if (!ptp)
2705 		return 0;
2706 
2707 	systime_cycles = bnxt_cc_read(bp);
2708 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
2709 	*ts = rte_ns_to_timespec(ns);
2710 
2711 	return 0;
2712 }

static int
2714 bnxt_timesync_enable(struct rte_eth_dev *dev)
2715 {
2716 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2717 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2718 	uint32_t shift = 0;
2719 
2720 	if (!ptp)
2721 		return 0;
2722 
2723 	ptp->rx_filter = 1;
2724 	ptp->tx_tstamp_en = 1;
2725 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
2726 
2727 	if (!bnxt_hwrm_ptp_cfg(bp))
2728 		bnxt_map_ptp_regs(bp);
2729 
2730 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
2731 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2732 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2733 
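	/*
	 * With cc_shift of zero the cycle counter is already in
	 * nanosecond units, so rte_timecounter_update() accumulates raw
	 * counter deltas directly as nanoseconds.
	 */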
2734 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2735 	ptp->tc.cc_shift = shift;
2736 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
2737 
2738 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2739 	ptp->rx_tstamp_tc.cc_shift = shift;
2740 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2741 
2742 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2743 	ptp->tx_tstamp_tc.cc_shift = shift;
2744 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2745 
2746 	return 0;
2747 }
2748 
2749 static int
2750 bnxt_timesync_disable(struct rte_eth_dev *dev)
2751 {
2752 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2753 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2754 
2755 	if (!ptp)
2756 		return 0;
2757 
2758 	ptp->rx_filter = 0;
2759 	ptp->tx_tstamp_en = 0;
2760 	ptp->rxctl = 0;
2761 
2762 	bnxt_hwrm_ptp_cfg(bp);
2763 
2764 	bnxt_unmap_ptp_regs(bp);
2765 
2766 	return 0;
2767 }
2768 
2769 static int
2770 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2771 				 struct timespec *timestamp,
2772 				 uint32_t flags __rte_unused)
2773 {
2774 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2775 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2776 	uint64_t rx_tstamp_cycles = 0;
2777 	uint64_t ns;
2778 
2779 	if (!ptp)
2780 		return 0;
2781 
2782 	bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
2783 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
2784 	*timestamp = rte_ns_to_timespec(ns);
2785 	return  0;
2786 }
2787 
2788 static int
2789 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2790 				 struct timespec *timestamp)
2791 {
2792 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2793 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2794 	uint64_t tx_tstamp_cycles = 0;
2795 	uint64_t ns;
2796 
2797 	if (!ptp)
2798 		return 0;
2799 
2800 	bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
2801 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
2802 	*timestamp = rte_ns_to_timespec(ns);
2803 
2804 	return 0;
2805 }
2806 
2807 static int
2808 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2809 {
2810 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2811 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2812 
2813 	if (!ptp)
2814 		return 0;
2815 
2816 	ptp->tc.nsec += delta;
2817 
2818 	return 0;
2819 }
2820 
2821 static int
2822 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2823 {
2824 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2825 	int rc;
2826 	uint32_t dir_entries;
2827 	uint32_t entry_length;
2828 
2829 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
2830 		bp->pdev->addr.domain, bp->pdev->addr.bus,
2831 		bp->pdev->addr.devid, bp->pdev->addr.function);
2832 
2833 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2834 	if (rc != 0)
2835 		return rc;
2836 
2837 	return dir_entries * entry_length;
2838 }
2839 
2840 static int
2841 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2842 		struct rte_dev_eeprom_info *in_eeprom)
2843 {
2844 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2845 	uint32_t index;
2846 	uint32_t offset;
2847 
2848 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2849 		"len = %d\n", bp->pdev->addr.domain,
2850 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2851 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2852 
2853 	if (in_eeprom->offset == 0) /* special offset value to get directory */
2854 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
2855 						in_eeprom->data);
2856 
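	/*
	 * Non-zero offsets encode a directory entry: the top byte holds
	 * the 1-based directory index and the low 24 bits the byte
	 * offset within that item.
	 */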
2857 	index = in_eeprom->offset >> 24;
2858 	offset = in_eeprom->offset & 0xffffff;
2859 
2860 	if (index != 0)
2861 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
2862 					   in_eeprom->length, in_eeprom->data);
2863 
2864 	return 0;
2865 }
2866 
2867 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
2868 {
2869 	switch (dir_type) {
2870 	case BNX_DIR_TYPE_CHIMP_PATCH:
2871 	case BNX_DIR_TYPE_BOOTCODE:
2872 	case BNX_DIR_TYPE_BOOTCODE_2:
2873 	case BNX_DIR_TYPE_APE_FW:
2874 	case BNX_DIR_TYPE_APE_PATCH:
2875 	case BNX_DIR_TYPE_KONG_FW:
2876 	case BNX_DIR_TYPE_KONG_PATCH:
2877 	case BNX_DIR_TYPE_BONO_FW:
2878 	case BNX_DIR_TYPE_BONO_PATCH:
2879 		return true;
2880 	}
2881 
2882 	return false;
2883 }
2884 
2885 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
2886 {
2887 	switch (dir_type) {
2888 	case BNX_DIR_TYPE_AVS:
2889 	case BNX_DIR_TYPE_EXP_ROM_MBA:
2890 	case BNX_DIR_TYPE_PCIE:
2891 	case BNX_DIR_TYPE_TSCF_UCODE:
2892 	case BNX_DIR_TYPE_EXT_PHY:
2893 	case BNX_DIR_TYPE_CCM:
2894 	case BNX_DIR_TYPE_ISCSI_BOOT:
2895 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2896 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2897 		return true;
2898 	}
2899 
2900 	return false;
2901 }
2902 
2903 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
2904 {
2905 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2906 		bnxt_dir_type_is_other_exec_format(dir_type);
2907 }
2908 
2909 static int
2910 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
2911 		struct rte_dev_eeprom_info *in_eeprom)
2912 {
2913 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2914 	uint8_t index, dir_op;
2915 	uint16_t type, ext, ordinal, attr;
2916 
2917 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2918 		"len = %d\n", bp->pdev->addr.domain,
2919 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2920 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2921 
2922 	if (!BNXT_PF(bp)) {
2923 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
2924 		return -EINVAL;
2925 	}
2926 
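	/*
	 * The NVM directory type is packed into the upper 16 bits of
	 * "magic".  The special type 0xffff selects directory
	 * maintenance operations, with the operation code and the entry
	 * index carried in the remaining two bytes.
	 */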
2927 	type = in_eeprom->magic >> 16;
2928 
2929 	if (type == 0xffff) { /* special value for directory operations */
2930 		index = in_eeprom->magic & 0xff;
2931 		dir_op = in_eeprom->magic >> 8;
2932 		if (index == 0)
2933 			return -EINVAL;
2934 		switch (dir_op) {
2935 		case 0x0e: /* erase */
2936 			if (in_eeprom->offset != ~in_eeprom->magic)
2937 				return -EINVAL;
2938 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
2939 		default:
2940 			return -EINVAL;
2941 		}
2942 	}
2943 
2944 	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
2946 		return -EOPNOTSUPP;
2947 	ext = in_eeprom->magic & 0xffff;
2948 	ordinal = in_eeprom->offset >> 16;
2949 	attr = in_eeprom->offset & 0xffff;
2950 
	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
				     in_eeprom->data, in_eeprom->length);
2954 }
2955 
2956 /*
2957  * Initialization
2958  */
2959 
2960 static const struct eth_dev_ops bnxt_dev_ops = {
2961 	.dev_infos_get = bnxt_dev_info_get_op,
2962 	.dev_close = bnxt_dev_close_op,
2963 	.dev_configure = bnxt_dev_configure_op,
2964 	.dev_start = bnxt_dev_start_op,
2965 	.dev_stop = bnxt_dev_stop_op,
2966 	.dev_set_link_up = bnxt_dev_set_link_up_op,
2967 	.dev_set_link_down = bnxt_dev_set_link_down_op,
2968 	.stats_get = bnxt_stats_get_op,
2969 	.stats_reset = bnxt_stats_reset_op,
2970 	.rx_queue_setup = bnxt_rx_queue_setup_op,
2971 	.rx_queue_release = bnxt_rx_queue_release_op,
2972 	.tx_queue_setup = bnxt_tx_queue_setup_op,
2973 	.tx_queue_release = bnxt_tx_queue_release_op,
2974 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
2975 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
2976 	.reta_update = bnxt_reta_update_op,
2977 	.reta_query = bnxt_reta_query_op,
2978 	.rss_hash_update = bnxt_rss_hash_update_op,
2979 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
2980 	.link_update = bnxt_link_update_op,
2981 	.promiscuous_enable = bnxt_promiscuous_enable_op,
2982 	.promiscuous_disable = bnxt_promiscuous_disable_op,
2983 	.allmulticast_enable = bnxt_allmulticast_enable_op,
2984 	.allmulticast_disable = bnxt_allmulticast_disable_op,
2985 	.mac_addr_add = bnxt_mac_addr_add_op,
2986 	.mac_addr_remove = bnxt_mac_addr_remove_op,
2987 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
2988 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
2989 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
2990 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
2991 	.vlan_filter_set = bnxt_vlan_filter_set_op,
2992 	.vlan_offload_set = bnxt_vlan_offload_set_op,
2993 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
2994 	.mtu_set = bnxt_mtu_set_op,
2995 	.mac_addr_set = bnxt_set_default_mac_addr_op,
2996 	.xstats_get = bnxt_dev_xstats_get_op,
2997 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
2998 	.xstats_reset = bnxt_dev_xstats_reset_op,
2999 	.fw_version_get = bnxt_fw_version_get,
3000 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3001 	.rxq_info_get = bnxt_rxq_info_get_op,
3002 	.txq_info_get = bnxt_txq_info_get_op,
3003 	.dev_led_on = bnxt_dev_led_on_op,
3004 	.dev_led_off = bnxt_dev_led_off_op,
3005 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3006 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3007 	.rx_queue_count = bnxt_rx_queue_count_op,
3008 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
3009 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
3010 	.rx_queue_start = bnxt_rx_queue_start,
3011 	.rx_queue_stop = bnxt_rx_queue_stop,
3012 	.tx_queue_start = bnxt_tx_queue_start,
3013 	.tx_queue_stop = bnxt_tx_queue_stop,
3014 	.filter_ctrl = bnxt_filter_ctrl_op,
3015 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3016 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
3017 	.get_eeprom           = bnxt_get_eeprom_op,
3018 	.set_eeprom           = bnxt_set_eeprom_op,
3019 	.timesync_enable      = bnxt_timesync_enable,
3020 	.timesync_disable     = bnxt_timesync_disable,
3021 	.timesync_read_time   = bnxt_timesync_read_time,
3022 	.timesync_write_time   = bnxt_timesync_write_time,
3023 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3024 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3025 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3026 };
3027 
3028 static bool bnxt_vf_pciid(uint16_t id)
3029 {
3030 	if (id == BROADCOM_DEV_ID_57304_VF ||
3031 	    id == BROADCOM_DEV_ID_57406_VF ||
3032 	    id == BROADCOM_DEV_ID_5731X_VF ||
3033 	    id == BROADCOM_DEV_ID_5741X_VF ||
3034 	    id == BROADCOM_DEV_ID_57414_VF ||
3035 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
3036 		return true;
3037 	return false;
3038 }
3039 
3040 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
3041 {
3042 	struct bnxt *bp = eth_dev->data->dev_private;
3043 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3044 	int rc;
3045 
	/* The EAL has already mapped BAR0; just validate and cache it. */
3047 	if (!pci_dev->mem_resource[0].addr) {
3048 		PMD_DRV_LOG(ERR,
3049 			"Cannot find PCI device base address, aborting\n");
3050 		rc = -ENODEV;
3051 		goto init_err_disable;
3052 	}
3053 
3054 	bp->eth_dev = eth_dev;
3055 	bp->pdev = pci_dev;
3056 
3057 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
3058 	if (!bp->bar0) {
3059 		PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
3060 		rc = -ENOMEM;
3061 		goto init_err_release;
3062 	}
3063 	return 0;
3064 
3065 init_err_release:
3066 	if (bp->bar0)
3067 		bp->bar0 = NULL;
3068 
3069 init_err_disable:
3070 
3071 	return rc;
3072 }
3073 
3074 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
3075 
3076 #define ALLOW_FUNC(x)	\
3077 	{ \
3078 		typeof(x) arg = (x); \
3079 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
3080 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
3081 	}
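/*
 * vf_req_fwd is a bitmap with one bit per HWRM command ID; a set bit
 * means VF-issued commands of that type are forwarded to the PF driver
 * for vetting.  ALLOW_FUNC clears the bit (word = cmd >> 5,
 * bit = cmd & 0x1f) so the firmware processes the command directly.
 */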
3082 static int
3083 bnxt_dev_init(struct rte_eth_dev *eth_dev)
3084 {
3085 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3086 	char mz_name[RTE_MEMZONE_NAMESIZE];
3087 	const struct rte_memzone *mz = NULL;
3088 	static int version_printed;
3089 	uint32_t total_alloc_len;
3090 	rte_iova_t mz_phys_addr;
3091 	struct bnxt *bp;
3092 	int rc;
3093 
3094 	if (version_printed++ == 0)
3095 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
3096 
3097 	rte_eth_copy_pci_info(eth_dev, pci_dev);
3098 
3099 	bp = eth_dev->data->dev_private;
3100 
3101 	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
3102 	bp->dev_stopped = 1;
3103 
3104 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3105 		goto skip_init;
3106 
3107 	if (bnxt_vf_pciid(pci_dev->id.device_id))
3108 		bp->flags |= BNXT_FLAG_VF;
3109 
3110 	rc = bnxt_init_board(eth_dev);
3111 	if (rc) {
3112 		PMD_DRV_LOG(ERR,
3113 			"Board initialization failed rc: %x\n", rc);
3114 		goto error;
3115 	}
3116 skip_init:
3117 	eth_dev->dev_ops = &bnxt_dev_ops;
3118 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3119 		return 0;
3120 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
3121 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
3122 
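	/*
	 * PF devices expose extended port statistics that the firmware
	 * DMAs into host memory.  The memzones below are looked up
	 * before being reserved so that primary and secondary processes
	 * share one buffer; the IOVA serves as the stats DMA target.
	 */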
3123 	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
3124 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3125 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3126 			 pci_dev->addr.bus, pci_dev->addr.devid,
3127 			 pci_dev->addr.function, "rx_port_stats");
3128 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3129 		mz = rte_memzone_lookup(mz_name);
3130 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3131 				sizeof(struct rx_port_stats) + 512);
3132 		if (!mz) {
3133 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
3134 						 SOCKET_ID_ANY,
3135 						 RTE_MEMZONE_2MB |
3136 						 RTE_MEMZONE_SIZE_HINT_ONLY);
3137 			if (mz == NULL)
3138 				return -ENOMEM;
3139 		}
3140 		memset(mz->addr, 0, mz->len);
3141 		mz_phys_addr = mz->iova;
3142 		if ((unsigned long)mz->addr == mz_phys_addr) {
3143 			PMD_DRV_LOG(WARNING,
3144 				"Memzone physical address same as virtual.\n");
3145 			PMD_DRV_LOG(WARNING,
3146 				"Using rte_mem_virt2iova()\n");
3147 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3148 			if (mz_phys_addr == 0) {
3149 				PMD_DRV_LOG(ERR,
3150 				"unable to map address to physical memory\n");
3151 				return -ENOMEM;
3152 			}
3153 		}
3154 
3155 		bp->rx_mem_zone = (const void *)mz;
3156 		bp->hw_rx_port_stats = mz->addr;
3157 		bp->hw_rx_port_stats_map = mz_phys_addr;
3158 
3159 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3160 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3161 			 pci_dev->addr.bus, pci_dev->addr.devid,
3162 			 pci_dev->addr.function, "tx_port_stats");
3163 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3164 		mz = rte_memzone_lookup(mz_name);
3165 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3166 				sizeof(struct tx_port_stats) + 512);
3167 		if (!mz) {
3168 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
3169 						 SOCKET_ID_ANY,
3170 						 RTE_MEMZONE_2MB |
3171 						 RTE_MEMZONE_SIZE_HINT_ONLY);
3172 			if (mz == NULL)
3173 				return -ENOMEM;
3174 		}
3175 		memset(mz->addr, 0, mz->len);
3176 		mz_phys_addr = mz->iova;
3177 		if ((unsigned long)mz->addr == mz_phys_addr) {
3178 			PMD_DRV_LOG(WARNING,
3179 				"Memzone physical address same as virtual.\n");
3180 			PMD_DRV_LOG(WARNING,
3181 				"Using rte_mem_virt2iova()\n");
3182 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3183 			if (mz_phys_addr == 0) {
3184 				PMD_DRV_LOG(ERR,
3185 				"unable to map address to physical memory\n");
3186 				return -ENOMEM;
3187 			}
3188 		}
3189 
3190 		bp->tx_mem_zone = (const void *)mz;
3191 		bp->hw_tx_port_stats = mz->addr;
3192 		bp->hw_tx_port_stats_map = mz_phys_addr;
3193 
3194 		bp->flags |= BNXT_FLAG_PORT_STATS;
3195 	}
3196 
3197 	rc = bnxt_alloc_hwrm_resources(bp);
3198 	if (rc) {
3199 		PMD_DRV_LOG(ERR,
3200 			"hwrm resource allocation failure rc: %x\n", rc);
3201 		goto error_free;
3202 	}
3203 	rc = bnxt_hwrm_ver_get(bp);
3204 	if (rc)
3205 		goto error_free;
3206 	rc = bnxt_hwrm_queue_qportcfg(bp);
3207 	if (rc) {
3208 		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
3209 		goto error_free;
3210 	}
3211 
3212 	rc = bnxt_hwrm_func_qcfg(bp);
3213 	if (rc) {
3214 		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
3215 		goto error_free;
3216 	}
3217 
3218 	/* Get the MAX capabilities for this function */
3219 	rc = bnxt_hwrm_func_qcaps(bp);
3220 	if (rc) {
3221 		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
3222 		goto error_free;
3223 	}
3224 	if (bp->max_tx_rings == 0) {
3225 		PMD_DRV_LOG(ERR, "No TX rings available!\n");
3226 		rc = -EBUSY;
3227 		goto error_free;
3228 	}
3229 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
3230 					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
3231 	if (eth_dev->data->mac_addrs == NULL) {
3232 		PMD_DRV_LOG(ERR,
3233 			"Failed to alloc %u bytes needed to store MAC addr tbl",
3234 			ETHER_ADDR_LEN * bp->max_l2_ctx);
3235 		rc = -ENOMEM;
3236 		goto error_free;
3237 	}
3238 
3239 	if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
3240 		PMD_DRV_LOG(ERR,
3241 			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
3242 			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
3243 			    bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
3244 			    bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
3245 		rc = -EINVAL;
3246 		goto error_free;
3247 	}
3248 	/* Copy the permanent MAC from the qcap response address now. */
3249 	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
3250 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
3251 
3252 	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
3253 		/* 1 ring is for default completion ring */
3254 		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
3255 		rc = -ENOSPC;
3256 		goto error_free;
3257 	}
3258 
3259 	bp->grp_info = rte_zmalloc("bnxt_grp_info",
3260 				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
3261 	if (!bp->grp_info) {
3262 		PMD_DRV_LOG(ERR,
3263 			"Failed to alloc %zu bytes to store group info table\n",
3264 			sizeof(*bp->grp_info) * bp->max_ring_grps);
3265 		rc = -ENOMEM;
3266 		goto error_free;
3267 	}
3268 
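	/*
	 * bp->fw_ver packs the HWRM firmware version as
	 * (major << 24) | (minor << 16) | (build << 8), so the test
	 * below accepts 20.6.100 up to (but excluding) 20.7, or
	 * anything from 20.8 onwards.
	 */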
3269 	/* Forward all requests if firmware is new enough */
3270 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
3271 	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
3272 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
3273 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
3274 	} else {
3275 		PMD_DRV_LOG(WARNING,
3276 			"Firmware too old for VF mailbox functionality\n");
3277 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
3278 	}
3279 
3280 	/*
3281 	 * The following are used for driver cleanup.  If we disallow these,
3282 	 * VF drivers can't clean up cleanly.
3283 	 */
3284 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
3285 	ALLOW_FUNC(HWRM_VNIC_FREE);
3286 	ALLOW_FUNC(HWRM_RING_FREE);
3287 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
3288 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
3289 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
3290 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
3291 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
3292 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
3293 	rc = bnxt_hwrm_func_driver_register(bp);
3294 	if (rc) {
3295 		PMD_DRV_LOG(ERR,
3296 			"Failed to register driver");
3297 		rc = -EBUSY;
3298 		goto error_free;
3299 	}
3300 
	PMD_DRV_LOG(INFO,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);
3305 
3306 	rc = bnxt_hwrm_func_reset(bp);
3307 	if (rc) {
3308 		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
3309 		rc = -EIO;
3310 		goto error_free;
3311 	}
3312 
3313 	if (BNXT_PF(bp)) {
		/*
		 * if (bp->pf.active_vfs) {
		 *	TODO: Deallocate VF resources?
		 * }
		 */
3317 		if (bp->pdev->max_vfs) {
3318 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
3319 			if (rc) {
3320 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
3321 				goto error_free;
3322 			}
3323 		} else {
3324 			rc = bnxt_hwrm_allocate_pf_only(bp);
3325 			if (rc) {
3326 				PMD_DRV_LOG(ERR,
3327 					"Failed to allocate PF resources\n");
3328 				goto error_free;
3329 			}
3330 		}
3331 	}
3332 
3333 	bnxt_hwrm_port_led_qcaps(bp);
3334 
3335 	rc = bnxt_setup_int(bp);
3336 	if (rc)
3337 		goto error_free;
3338 
3339 	rc = bnxt_alloc_mem(bp);
3340 	if (rc)
3341 		goto error_free_int;
3342 
3343 	rc = bnxt_request_int(bp);
3344 	if (rc)
3345 		goto error_free_int;
3346 
3347 	rc = bnxt_alloc_def_cp_ring(bp);
3348 	if (rc)
3349 		goto error_free_int;
3350 
3351 	bnxt_enable_int(bp);
3352 
3353 	return 0;
3354 
3355 error_free_int:
3356 	bnxt_disable_int(bp);
3357 	bnxt_free_def_cp_ring(bp);
3358 	bnxt_hwrm_func_buf_unrgtr(bp);
3359 	bnxt_free_int(bp);
3360 	bnxt_free_mem(bp);
3361 error_free:
3362 	bnxt_dev_uninit(eth_dev);
3363 error:
3364 	return rc;
3365 }
3366 
static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
3369 	struct bnxt *bp = eth_dev->data->dev_private;
3370 	int rc;
3371 
3372 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3373 		return -EPERM;
3374 
3375 	bnxt_disable_int(bp);
3376 	bnxt_free_int(bp);
3377 	bnxt_free_mem(bp);
3378 	if (eth_dev->data->mac_addrs != NULL) {
3379 		rte_free(eth_dev->data->mac_addrs);
3380 		eth_dev->data->mac_addrs = NULL;
3381 	}
3382 	if (bp->grp_info != NULL) {
3383 		rte_free(bp->grp_info);
3384 		bp->grp_info = NULL;
3385 	}
3386 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3387 	bnxt_free_hwrm_resources(bp);
3388 	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3389 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3390 	if (bp->dev_stopped == 0)
3391 		bnxt_dev_close_op(eth_dev);
3392 	if (bp->pf.vf_info)
3393 		rte_free(bp->pf.vf_info);
3394 	eth_dev->dev_ops = NULL;
3395 	eth_dev->rx_pkt_burst = NULL;
3396 	eth_dev->tx_pkt_burst = NULL;
3397 
3398 	return rc;
3399 }
3400 
3401 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3402 	struct rte_pci_device *pci_dev)
3403 {
3404 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3405 		bnxt_dev_init);
3406 }
3407 
3408 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3409 {
3410 	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
3411 }
3412 
3413 static struct rte_pci_driver bnxt_rte_pmd = {
3414 	.id_table = bnxt_pci_id_map,
3415 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3416 		RTE_PCI_DRV_INTR_LSC,
3417 	.probe = bnxt_pci_probe,
3418 	.remove = bnxt_pci_remove,
3419 };
3420 
3421 static bool
3422 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3423 {
3424 	if (strcmp(dev->device->driver->name, drv->driver.name))
3425 		return false;
3426 
3427 	return true;
3428 }
3429 
3430 bool is_bnxt_supported(struct rte_eth_dev *dev)
3431 {
3432 	return is_device_supported(dev, &bnxt_rte_pmd);
3433 }
3434 
3435 RTE_INIT(bnxt_init_log);
3436 static void
3437 bnxt_init_log(void)
3438 {
3439 	bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
3440 	if (bnxt_logtype_driver >= 0)
3441 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
3442 }
3443 
3444 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3445 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3446 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
3447