xref: /dpdk/drivers/net/bnxt/bnxt_hwrm.c (revision 7a535f301db655582fe44c26b908a40c9dc4983f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <unistd.h>
7 
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26 #include "bnxt_ulp_utils.h"
27 
28 struct bnxt_plcmodes_cfg {
29 	uint32_t	flags;
30 	uint16_t	jumbo_thresh;
31 	uint16_t	hds_offset;
32 	uint16_t	hds_threshold;
33 };
34 
35 const char *media_type[] = { "Unknown", "Twisted Pair",
36 	"Direct Attached Copper", "Fiber"
37 };
38 
39 #define MAX_MEDIA_TYPE (sizeof(media_type) / sizeof(const char *))
40 
41 const char *link_status_str[] = { "Down. No link or cable detected.",
42 	"Down. No link, but a cable has been detected.", "Up.",
43 };
44 
45 #define MAX_LINK_STR (sizeof(link_status_str) / sizeof(const char *))
46 
47 const char *fec_mode[] = {
48 	"No active FEC",
49 	"FEC CLAUSE 74 (Fire Code).",
50 	"FEC CLAUSE 91 RS(528,514).",
51 	"FEC RS544_1XN",
52 	"FEC RS(544,528)",
53 	"FEC RS272_1XN",
54 	"FEC RS(272,257)"
55 };
56 
57 #define MAX_FEC_MODE (sizeof(fec_mode) / sizeof(const char *))
58 
59 const char *signal_mode[] = {
60 	"NRZ", "PAM4", "PAM4_112"
61 };
62 
63 #define MAX_SIG_MODE (sizeof(signal_mode) / sizeof(const char *))
64 
65 /* multi-purpose multi-key table container.
66  * Add a unique entry for a new PHY attribs as per HW CAS.
67  * Query it using a helper functions.
68  */
69 struct link_speeds2_tbl {
70 	uint16_t force_val;
71 	uint16_t auto_val;
72 	uint32_t rte_speed;
73 	uint32_t rte_speed_num;
74 	uint16_t hwrm_speed;
75 	uint16_t sig_mode;
76 	uint16_t lanes;
77 	const char *desc;
78 } link_speeds2_tbl[] = {
79 	{
80 		10,
81 		0,
82 		RTE_ETH_LINK_SPEED_1G,
83 		RTE_ETH_SPEED_NUM_1G,
84 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB,
85 		BNXT_SIG_MODE_NRZ,
86 		1,
87 		"1Gb NRZ",
88 	}, {
89 		100,
90 		1,
91 		RTE_ETH_LINK_SPEED_10G,
92 		RTE_ETH_SPEED_NUM_10G,
93 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB,
94 		BNXT_SIG_MODE_NRZ,
95 		1,
96 		"10Gb NRZ",
97 	}, {
98 		250,
99 		2,
100 		RTE_ETH_LINK_SPEED_25G,
101 		RTE_ETH_SPEED_NUM_25G,
102 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB,
103 		BNXT_SIG_MODE_NRZ,
104 		1,
105 		"25Gb NRZ",
106 	}, {
107 		400,
108 		3,
109 		RTE_ETH_LINK_SPEED_40G,
110 		RTE_ETH_SPEED_NUM_40G,
111 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB,
112 		BNXT_SIG_MODE_NRZ,
113 		4,
114 		"40Gb NRZ",
115 	}, {
116 		500,
117 		4,
118 		RTE_ETH_LINK_SPEED_50G,
119 		RTE_ETH_SPEED_NUM_50G,
120 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB,
121 		BNXT_SIG_MODE_NRZ,
122 		2,
123 		"50Gb NRZ",
124 	}, {
125 		1000,
126 		5,
127 		RTE_ETH_LINK_SPEED_100G,
128 		RTE_ETH_SPEED_NUM_100G,
129 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB,
130 		BNXT_SIG_MODE_NRZ,
131 		4,
132 		"100Gb NRZ",
133 	}, {
134 		501,
135 		6,
136 		RTE_ETH_LINK_SPEED_50G,
137 		RTE_ETH_SPEED_NUM_50G,
138 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56,
139 		BNXT_SIG_MODE_PAM4,
140 		1,
141 		"50Gb (PAM4-56: 50G per lane)",
142 	}, {
143 		1001,
144 		7,
145 		RTE_ETH_LINK_SPEED_100G,
146 		RTE_ETH_SPEED_NUM_100G,
147 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56,
148 		BNXT_SIG_MODE_PAM4,
149 		2,
150 		"100Gb (PAM4-56: 50G per lane)",
151 	}, {
152 		2001,
153 		8,
154 		RTE_ETH_LINK_SPEED_200G,
155 		RTE_ETH_SPEED_NUM_200G,
156 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56,
157 		BNXT_SIG_MODE_PAM4,
158 		4,
159 		"200Gb (PAM4-56: 50G per lane)",
160 	}, {
161 		4001,
162 		9,
163 		RTE_ETH_LINK_SPEED_400G,
164 		RTE_ETH_SPEED_NUM_400G,
165 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56,
166 		BNXT_SIG_MODE_PAM4,
167 		8,
168 		"400Gb (PAM4-56: 50G per lane)",
169 	}, {
170 		1002,
171 		10,
172 		RTE_ETH_LINK_SPEED_100G,
173 		RTE_ETH_SPEED_NUM_100G,
174 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112,
175 		BNXT_SIG_MODE_PAM4_112,
176 		1,
177 		"100Gb (PAM4-112: 100G per lane)",
178 	}, {
179 		2002,
180 		11,
181 		RTE_ETH_LINK_SPEED_200G,
182 		RTE_ETH_SPEED_NUM_200G,
183 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112,
184 		BNXT_SIG_MODE_PAM4_112,
185 		2,
186 		"200Gb (PAM4-112: 100G per lane)",
187 	}, {
188 		4002,
189 		12,
190 		RTE_ETH_LINK_SPEED_400G,
191 		RTE_ETH_SPEED_NUM_400G,
192 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112,
193 		BNXT_SIG_MODE_PAM4_112,
194 		4,
195 		"400Gb (PAM4-112: 100G per lane)",
196 	}, {
197 		0,
198 		13,
199 		RTE_ETH_LINK_SPEED_AUTONEG, /* None matches, AN is default 0 */
200 		RTE_ETH_SPEED_NUM_NONE,	/* None matches, No speed */
201 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB, /* Placeholder for wrong HWRM */
202 		BNXT_SIG_MODE_NRZ, /* default sig */
203 		0,
204 		"Unknown",
205 	},
206 };
207 
208 #define BNXT_SPEEDS2_TBL_SZ (sizeof(link_speeds2_tbl) / sizeof(*link_speeds2_tbl))
209 
210 /* In hwrm_phy_qcfg reports trained up speeds in link_speed(offset:0x8[31:16]) */
211 struct link_speeds_tbl {
212 	uint16_t hwrm_speed;
213 	uint32_t rte_speed_num;
214 	const char *desc;
215 } link_speeds_tbl[] = {
216 	{
217 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB,
218 		RTE_ETH_SPEED_NUM_100M, "100 MB",
219 	}, {
220 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB,
221 		RTE_ETH_SPEED_NUM_1G, "1 GB",
222 	}, {
223 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB,
224 		RTE_ETH_SPEED_NUM_2_5G, "25 GB",
225 	}, {
226 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB,
227 		RTE_ETH_SPEED_NUM_10G, "10 GB",
228 	}, {
229 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB,
230 		RTE_ETH_SPEED_NUM_20G, "20 GB",
231 	}, {
232 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB,
233 		RTE_ETH_SPEED_NUM_40G, "40 GB",
234 	}, {
235 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB,
236 		RTE_ETH_SPEED_NUM_50G, "50 GB",
237 	}, {
238 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB,
239 		RTE_ETH_SPEED_NUM_100G, "100 GB",
240 	}, {
241 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB,
242 		RTE_ETH_SPEED_NUM_200G, "200 GB",
243 	}, {
244 		HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB,
245 		RTE_ETH_SPEED_NUM_400G, "400 GB",
246 	}, {
247 		0, RTE_ETH_SPEED_NUM_NONE, "None",
248 	},
249 };
250 
251 #define BNXT_SPEEDS_TBL_SZ (sizeof(link_speeds_tbl) / sizeof(*link_speeds_tbl))
252 
253 static const char *bnxt_get_xcvr_type(uint32_t xcvr_identifier_type_tx_lpi_timer)
254 {
255 	uint32_t xcvr_type = HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK &
256 		xcvr_identifier_type_tx_lpi_timer;
257 
258 	/* Addressing only known CMIS types */
259 	switch (xcvr_type) {
260 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP:
261 		return "SFP";
262 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP:
263 		return "QSFP";
264 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS:
265 		return "QSFP+";
266 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28:
267 		return "QSFP28";
268 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPDD:
269 		return "QSFP112";
270 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP112:
271 		return "QSFP-DD";
272 	case HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN:
273 		return "Unknown";
274 	default:
275 		/* All other/new CMIS variants belong here */
276 		return "QSFP-xx new CMIS variant";
277 	}
278 }
279 
280 /* Utility function to lookup speeds2 table and
281  * return a rte to hwrm speed matching row to the client
282  */
283 static struct link_speeds2_tbl *bnxt_get_rte_hwrm_speeds2_entry(struct bnxt *bp)
284 {
285 	int i, max;
286 	uint32_t speed, lanes;
287 	bool check_lanes;
288 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
289 
290 	speed = dev_conf->link_speeds;
291 	lanes = bp->link_info->pmd_speed_lanes;
292 
293 	max = BNXT_SPEEDS2_TBL_SZ - 1;
294 	speed &= ~RTE_ETH_LINK_SPEED_FIXED;
295 	check_lanes = !(lanes == 0);
296 
297 	for (i = 0; i < max; i++) {
298 		if (speed == link_speeds2_tbl[i].rte_speed &&
299 		    (lanes == link_speeds2_tbl[i].lanes || !check_lanes))
300 			break;
301 	}
302 
303 	if (!check_lanes)
304 		PMD_DRV_LOG_LINE(INFO, "Given lanes %d, Configuring default lanes %d %s",
305 			    lanes, link_speeds2_tbl[i].lanes, link_speeds2_tbl[i].desc);
306 	return (struct link_speeds2_tbl *)&link_speeds2_tbl[i];
307 }
308 
309 /* Utility function to lookup speeds2 table and
310  * return a hwrm to rte speed matching row to the client
311  */
312 static struct link_speeds2_tbl *bnxt_get_hwrm_to_rte_speeds2_entry(uint16_t speed)
313 {
314 	int i, max;
315 
316 	max = BNXT_SPEEDS2_TBL_SZ - 1;
317 	for (i = 0; i < max; i++) {
318 		if (speed == link_speeds2_tbl[i].hwrm_speed)
319 			break;
320 	}
321 	return (struct link_speeds2_tbl *)&link_speeds2_tbl[i];
322 }
323 
324 /* Helper function to lookup auto link_speed table */
325 static struct link_speeds_tbl *bnxt_get_hwrm_to_rte_speeds_entry(uint16_t speed)
326 {
327 	int i, max;
328 
329 	max = BNXT_SPEEDS_TBL_SZ - 1;
330 
331 	for (i = 0; i < max ; i++) {
332 		if (speed == link_speeds_tbl[i].hwrm_speed)
333 			break;
334 	}
335 	return (struct link_speeds_tbl *)&link_speeds_tbl[i];
336 }
337 
338 static int page_getenum(size_t size)
339 {
340 	if (size <= 1 << 4)
341 		return 4;
342 	if (size <= 1 << 12)
343 		return 12;
344 	if (size <= 1 << 13)
345 		return 13;
346 	if (size <= 1 << 16)
347 		return 16;
348 	if (size <= 1 << 21)
349 		return 21;
350 	if (size <= 1 << 22)
351 		return 22;
352 	if (size <= 1 << 30)
353 		return 30;
354 	PMD_DRV_LOG_LINE(ERR, "Page size %zu out of range", size);
355 	return sizeof(int) * 8 - 1;
356 }
357 
358 static int page_roundup(size_t size)
359 {
360 	return 1 << page_getenum(size);
361 }
362 
363 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
364 				  uint8_t *pg_attr,
365 				  uint64_t *pg_dir)
366 {
367 	if (rmem->nr_pages == 0)
368 		return;
369 
370 	if (rmem->nr_pages > 1) {
371 		*pg_attr = 1;
372 		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
373 	} else {
374 		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
375 	}
376 }
377 
378 static struct bnxt_cp_ring_info*
379 bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
380 {
381 	struct bnxt_cp_ring_info *cp_ring = NULL;
382 	uint16_t i;
383 
384 	switch (type) {
385 	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
386 	case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
387 		/* FALLTHROUGH */
388 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
389 			struct bnxt_rx_queue *rxq = bp->rx_queues[i];
390 
391 			if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
392 			    rte_cpu_to_le_16(rid)) {
393 				return rxq->cp_ring;
394 			}
395 		}
396 		break;
397 	case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
398 		for (i = 0; i < bp->tx_cp_nr_rings; i++) {
399 			struct bnxt_tx_queue *txq = bp->tx_queues[i];
400 
401 			if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
402 			    rte_cpu_to_le_16(rid)) {
403 				return txq->cp_ring;
404 			}
405 		}
406 
407 		/* MPC ring is of type TX. MPC is not allocated on Thor, Wh+. */
408 		if (bp->mpc == NULL)
409 			goto skip_mpc;
410 
411 		for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
412 			struct bnxt_mpc_txq *mpc_queue;
413 
414 			if (!(bp->mpc->mpc_chnls_en & (1 << i)))
415 				continue;
416 			mpc_queue = bp->mpc->mpc_txq[i];
417 			if (!mpc_queue)
418 				continue;
419 
420 			if (mpc_queue->cp_ring->cp_ring_struct->fw_ring_id ==
421 			    rte_cpu_to_le_16(rid))
422 				return mpc_queue->cp_ring;
423 		}
424 skip_mpc:
425 		break;
426 	default:
427 		return cp_ring;
428 	}
429 	return cp_ring;
430 }
431 
432 /* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
433  * If the CMPL_BASE_TYPE_HWRM_DONE is not encountered by the last pass,
434  * before timeout, we force the done bit for the cleanup to proceed.
435  * Also if cpr is null, do nothing.. The HWRM command is  not for a
436  * Tx/Rx/AGG ring cleanup.
437  */
438 static int
439 bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
440 			bool tx, bool rx, bool timeout)
441 {
442 	int done = 0;
443 
444 	if (cpr != NULL) {
445 		if (tx)
446 			done = bnxt_flush_tx_cmp(cpr);
447 
448 		if (rx)
449 			done = bnxt_flush_rx_cmp(cpr);
450 
451 		if (done)
452 			PMD_DRV_LOG_LINE(DEBUG, "HWRM DONE for %s ring",
453 				    rx ? "Rx" : "Tx");
454 
455 		/* We are about to timeout and still haven't seen the
456 		 * HWRM done for the Ring free. Force the cleanup.
457 		 */
458 		if (!done && timeout) {
459 			done = 1;
460 			PMD_DRV_LOG_LINE(ERR, "Timing out for %s ring",
461 					 rx ? "Rx" : "Tx");
462 		}
463 	} else {
464 		/* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
465 		 * Otherwise the cpr would have been valid. So do nothing.
466 		 */
467 		done = 1;
468 	}
469 
470 	return done;
471 }
472 
473 /*
474  * HWRM Functions (sent to HWRM)
475  * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
476  * HWRM command times out, or a negative error code if the HWRM
477  * command was failed by the FW.
478  */
479 
480 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
481 				  uint32_t msg_len, bool use_kong_mb)
482 {
483 	unsigned int i;
484 	struct input *req = msg;
485 	struct output *resp = bp->hwrm_cmd_resp_addr;
486 	uint32_t *data = msg;
487 	uint8_t *bar;
488 	uint8_t *valid;
489 	uint16_t max_req_len = bp->max_req_len;
490 	struct hwrm_short_input short_input = { 0 };
491 	uint16_t bar_offset = use_kong_mb ?
492 		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
493 	uint16_t mb_trigger_offset = use_kong_mb ?
494 		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
495 	struct bnxt_cp_ring_info *cpr = NULL;
496 	bool is_rx = false;
497 	bool is_tx = false;
498 	uint32_t timeout;
499 
500 	/* Do not send HWRM commands to firmware in error state */
501 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
502 		return 0;
503 
504 	/* If previous HWRM command timed out, do not send new HWRM command */
505 	if (bp->flags & BNXT_FLAG_FW_TIMEDOUT)
506 		return 0;
507 
508 	timeout = bp->hwrm_cmd_timeout;
509 
510 	/* Update the message length for backing store config for new FW. */
511 	if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
512 	    rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
513 		msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
514 
515 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
516 	    msg_len > bp->max_req_len) {
517 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
518 
519 		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
520 		memcpy(short_cmd_req, req, msg_len);
521 
522 		short_input.req_type = rte_cpu_to_le_16(req->req_type);
523 		short_input.signature = rte_cpu_to_le_16(
524 					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
525 		short_input.size = rte_cpu_to_le_16(msg_len);
526 		short_input.req_addr =
527 			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
528 
529 		data = (uint32_t *)&short_input;
530 		msg_len = sizeof(short_input);
531 
532 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
533 	}
534 
535 	/* Write request msg to hwrm channel */
536 	for (i = 0; i < msg_len; i += 4) {
537 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
538 		rte_write32(*data, bar);
539 		data++;
540 	}
541 
542 	/* Zero the rest of the request space */
543 	for (; i < max_req_len; i += 4) {
544 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
545 		rte_write32(0, bar);
546 	}
547 
548 	/* Ring channel doorbell */
549 	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
550 	rte_write32(1, bar);
551 	/*
552 	 * Make sure the channel doorbell ring command complete before
553 	 * reading the response to avoid getting stale or invalid
554 	 * responses.
555 	 */
556 	rte_io_mb();
557 
558 	/* Check ring flush is done.
559 	 * This is valid only for Tx and Rx rings (including AGG rings).
560 	 * The Tx and Rx rings should be freed once the HW confirms all
561 	 * the internal buffers and BDs associated with the rings are
562 	 * consumed and the corresponding DMA is handled.
563 	 */
564 	if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
565 		/* Check if the TxCQ matches. If that fails check if RxCQ
566 		 * matches. And if neither match, is_rx = false, is_tx = false.
567 		 */
568 		cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
569 					       HWRM_RING_FREE_INPUT_RING_TYPE_TX);
570 		if (cpr == NULL) {
571 			/* Not a TxCQ. Check if the RxCQ matches. */
572 			cpr =
573 			bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
574 						 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
575 			if (cpr != NULL)
576 				is_rx = true;
577 		} else {
578 			is_tx = true;
579 		}
580 	}
581 
582 	/* Poll for the valid bit */
583 	for (i = 0; i < timeout; i++) {
584 		int done;
585 
586 		done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
587 					       i == timeout - 1);
588 		/* Sanity check on the resp->resp_len */
589 		rte_io_rmb();
590 		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
591 			/* Last byte of resp contains the valid key */
592 			valid = (uint8_t *)resp + resp->resp_len - 1;
593 			if (*valid == HWRM_RESP_VALID_KEY && done)
594 				break;
595 		}
596 		rte_delay_us(1);
597 	}
598 
599 	if (i >= timeout) {
600 		/* Suppress VER_GET timeout messages during reset recovery */
601 		if (bp->flags & BNXT_FLAG_FW_RESET &&
602 		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
603 			return -ETIMEDOUT;
604 
605 		PMD_DRV_LOG_LINE(ERR,
606 			    "Error(timeout) sending msg 0x%04x, seq_id %d",
607 			    req->req_type, req->seq_id);
608 		bp->flags |= BNXT_FLAG_FW_TIMEDOUT;
609 		return -ETIMEDOUT;
610 	}
611 	return 0;
612 }
613 
614 /*
615  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
616  * spinlock, and does initial processing.
617  *
618  * HWRM_CHECK_RESULT() returns errors on failure and may not be used.  It
619  * releases the spinlock only if it returns. If the regular int return codes
620  * are not used by the function, HWRM_CHECK_RESULT() should not be used
621  * directly, rather it should be copied and modified to suit the function.
622  *
623  * HWRM_UNLOCK() must be called after all response processing is completed.
624  */
625 #define HWRM_PREP(req, type, kong) do {	\
626 	rte_spinlock_lock(&bp->hwrm_lock); \
627 	if (bp->hwrm_cmd_resp_addr == NULL) { \
628 		rte_spinlock_unlock(&bp->hwrm_lock); \
629 		return -EACCES; \
630 	} \
631 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
632 	(req)->req_type = rte_cpu_to_le_16(type); \
633 	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
634 	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
635 		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
636 	(req)->target_id = rte_cpu_to_le_16(0xffff); \
637 	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
638 } while (0)
639 
640 #define HWRM_CHECK_RESULT_SILENT() do {\
641 	if (rc) { \
642 		rte_spinlock_unlock(&bp->hwrm_lock); \
643 		return rc; \
644 	} \
645 	if (resp->error_code) { \
646 		rc = rte_le_to_cpu_16(resp->error_code); \
647 		rte_spinlock_unlock(&bp->hwrm_lock); \
648 		return rc; \
649 	} \
650 } while (0)
651 
652 #define HWRM_CHECK_RESULT() do {\
653 	if (rc) { \
654 		PMD_DRV_LOG_LINE(ERR, "failed rc:%d", rc); \
655 		rte_spinlock_unlock(&bp->hwrm_lock); \
656 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
657 			rc = -EACCES; \
658 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
659 			rc = -ENOSPC; \
660 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
661 			rc = -EINVAL; \
662 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
663 			rc = -ENOTSUP; \
664 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
665 			rc = -EAGAIN; \
666 		else if (rc > 0) \
667 			rc = -EIO; \
668 		return rc; \
669 	} \
670 	if (resp->error_code) { \
671 		rc = rte_le_to_cpu_16(resp->error_code); \
672 		if (resp->resp_len >= 16) { \
673 			struct hwrm_err_output *tmp_hwrm_err_op = \
674 						(void *)resp; \
675 			PMD_DRV_LOG_LINE(ERR, \
676 				"error %d:%d:%08x:%04x", \
677 				rc, tmp_hwrm_err_op->cmd_err, \
678 				rte_le_to_cpu_32(\
679 					tmp_hwrm_err_op->opaque_0), \
680 				rte_le_to_cpu_16(\
681 					tmp_hwrm_err_op->opaque_1)); \
682 		} else { \
683 			PMD_DRV_LOG_LINE(ERR, "error %d", rc); \
684 		} \
685 		rte_spinlock_unlock(&bp->hwrm_lock); \
686 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
687 			rc = -EACCES; \
688 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
689 			rc = -ENOSPC; \
690 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
691 			rc = -EINVAL; \
692 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
693 			rc = -ENOTSUP; \
694 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
695 			rc = -EAGAIN; \
696 		else if (rc > 0) \
697 			rc = -EIO; \
698 		return rc; \
699 	} \
700 } while (0)
701 
702 #define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
703 
704 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
705 				bool use_kong_mb,
706 				uint16_t msg_type,
707 				void *msg,
708 				uint32_t msg_len,
709 				void *resp_msg,
710 				uint32_t resp_len)
711 {
712 	int rc = 0;
713 	bool mailbox = BNXT_USE_CHIMP_MB;
714 	struct input *req = msg;
715 	struct output *resp = bp->hwrm_cmd_resp_addr;
716 
717 	if (use_kong_mb)
718 		mailbox = BNXT_USE_KONG(bp);
719 
720 	HWRM_PREP(req, msg_type, mailbox);
721 
722 	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
723 
724 	HWRM_CHECK_RESULT();
725 
726 	if (resp_msg)
727 		memcpy(resp_msg, resp, resp_len);
728 
729 	HWRM_UNLOCK();
730 
731 	return rc;
732 }
733 
734 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
735 {
736 	int rc = 0;
737 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
738 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
739 
740 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
741 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
742 	req.mask = 0;
743 
744 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
745 
746 	HWRM_CHECK_RESULT();
747 	HWRM_UNLOCK();
748 
749 	return rc;
750 }
751 
752 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
753 				 struct bnxt_vnic_info *vnic,
754 				 uint16_t vlan_count,
755 				 struct bnxt_vlan_table_entry *vlan_table)
756 {
757 	int rc = 0;
758 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
759 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
760 	uint32_t mask = 0;
761 
762 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
763 		return rc;
764 
765 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
766 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
767 
768 	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
769 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
770 	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
771 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
772 
773 	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
774 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
775 
776 	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
777 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
778 	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
779 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
780 		req.num_mc_entries = rte_cpu_to_le_32(bp->nb_mc_addr);
781 		req.mc_tbl_addr = rte_cpu_to_le_64(bp->mc_list_dma_addr);
782 	}
783 	if (vlan_table) {
784 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
785 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
786 		req.vlan_tag_tbl_addr =
787 			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
788 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
789 	}
790 	req.mask = rte_cpu_to_le_32(mask);
791 
792 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
793 
794 	HWRM_CHECK_RESULT();
795 	HWRM_UNLOCK();
796 
797 	return rc;
798 }
799 
800 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
801 			uint16_t vlan_count,
802 			struct bnxt_vlan_antispoof_table_entry *vlan_table)
803 {
804 	int rc = 0;
805 	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
806 	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
807 						bp->hwrm_cmd_resp_addr;
808 
809 	/*
810 	 * Older HWRM versions did not support this command, and the set_rx_mask
811 	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
812 	 * removed from set_rx_mask call, and this command was added.
813 	 *
814 	 * This command is also present from 1.7.8.11 and higher,
815 	 * as well as 1.7.8.0
816 	 */
817 	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
818 		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
819 			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
820 					(11)))
821 				return 0;
822 		}
823 	}
824 	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
825 	req.fid = rte_cpu_to_le_16(fid);
826 
827 	req.vlan_tag_mask_tbl_addr =
828 		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
829 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
830 
831 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
832 
833 	HWRM_CHECK_RESULT();
834 	HWRM_UNLOCK();
835 
836 	return rc;
837 }
838 
839 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
840 			     struct bnxt_filter_info *filter)
841 {
842 	int rc = 0;
843 	struct bnxt_filter_info *l2_filter = filter;
844 	struct bnxt_vnic_info *vnic = NULL;
845 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
846 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
847 
848 	if (filter->fw_l2_filter_id == UINT64_MAX)
849 		return 0;
850 
851 	if (filter->matching_l2_fltr_ptr)
852 		l2_filter = filter->matching_l2_fltr_ptr;
853 
854 	PMD_DRV_LOG_LINE(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d",
855 		    filter, l2_filter, l2_filter->l2_ref_cnt);
856 
857 	if (l2_filter->l2_ref_cnt == 0)
858 		return 0;
859 
860 	if (l2_filter->l2_ref_cnt > 0)
861 		l2_filter->l2_ref_cnt--;
862 
863 	if (l2_filter->l2_ref_cnt > 0)
864 		return 0;
865 
866 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
867 
868 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
869 
870 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
871 
872 	HWRM_CHECK_RESULT();
873 	HWRM_UNLOCK();
874 
875 	filter->fw_l2_filter_id = UINT64_MAX;
876 	if (l2_filter->l2_ref_cnt == 0) {
877 		vnic = l2_filter->vnic;
878 		if (vnic) {
879 			STAILQ_REMOVE(&vnic->filter, l2_filter,
880 				      bnxt_filter_info, next);
881 			bnxt_free_filter(bp, l2_filter);
882 		}
883 	}
884 
885 	return 0;
886 }
887 
888 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
889 			 uint16_t dst_id,
890 			 struct bnxt_filter_info *filter)
891 {
892 	int rc = 0;
893 	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
894 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
895 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
896 	const struct rte_eth_vmdq_rx_conf *conf =
897 		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
898 	uint32_t enables = 0;
899 	uint16_t j = dst_id - 1;
900 
901 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
902 	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
903 	    conf->pool_map[j].pools & (1UL << j)) {
904 		PMD_DRV_LOG_LINE(DEBUG,
905 			"Add vlan %u to vmdq pool %u",
906 			conf->pool_map[j].vlan_id, j);
907 
908 		filter->l2_ivlan = conf->pool_map[j].vlan_id;
909 		filter->enables |=
910 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
911 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
912 	}
913 
914 	if (filter->fw_l2_filter_id != UINT64_MAX)
915 		bnxt_hwrm_clear_l2_filter(bp, filter);
916 
917 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
918 
919 	/* PMD does not support XDP and RoCE */
920 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
921 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
922 	req.flags = rte_cpu_to_le_32(filter->flags);
923 
924 	enables = filter->enables |
925 	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
926 	req.dst_id = rte_cpu_to_le_16(dst_id);
927 
928 	if (enables &
929 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
930 		memcpy(req.l2_addr, filter->l2_addr,
931 		       RTE_ETHER_ADDR_LEN);
932 	if (enables &
933 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
934 		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
935 		       RTE_ETHER_ADDR_LEN);
936 	if (enables &
937 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
938 		req.l2_ovlan = filter->l2_ovlan;
939 	if (enables &
940 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
941 		req.l2_ivlan = filter->l2_ivlan;
942 	if (enables &
943 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
944 		req.l2_ovlan_mask = filter->l2_ovlan_mask;
945 	if (enables &
946 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
947 		req.l2_ivlan_mask = filter->l2_ivlan_mask;
948 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
949 		req.src_id = rte_cpu_to_le_32(filter->src_id);
950 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
951 		req.src_type = filter->src_type;
952 	if (filter->pri_hint) {
953 		req.pri_hint = filter->pri_hint;
954 		req.l2_filter_id_hint =
955 			rte_cpu_to_le_64(filter->l2_filter_id_hint);
956 	}
957 
958 	req.enables = rte_cpu_to_le_32(enables);
959 
960 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
961 
962 	HWRM_CHECK_RESULT();
963 
964 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
965 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
966 	HWRM_UNLOCK();
967 
968 	filter->l2_ref_cnt++;
969 
970 	return rc;
971 }
972 
973 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
974 {
975 	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
976 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
977 	uint32_t flags = 0;
978 	int rc;
979 
980 	if (!ptp)
981 		return 0;
982 
983 	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
984 
985 	if (ptp->rx_filter)
986 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
987 	else
988 		flags |=
989 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
990 	if (ptp->tx_tstamp_en)
991 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
992 	else
993 		flags |=
994 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
995 
996 	if (ptp->filter_all)
997 		flags |=  HWRM_PORT_MAC_CFG_INPUT_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
998 	else if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
999 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
1000 
1001 	req.flags = rte_cpu_to_le_32(flags);
1002 	req.enables = rte_cpu_to_le_32
1003 		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
1004 	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
1005 
1006 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1007 	HWRM_UNLOCK();
1008 
1009 	return rc;
1010 }
1011 
1012 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
1013 {
1014 	int rc = 0;
1015 	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
1016 	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1017 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1018 
1019 	if (ptp)
1020 		return 0;
1021 
1022 	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
1023 
1024 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
1025 
1026 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1027 
1028 	HWRM_CHECK_RESULT();
1029 
1030 	/* TODO Revisit for Thor 2 */
1031 	if (BNXT_CHIP_P5(bp)) {
1032 		if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS))
1033 			return 0;
1034 	} else {
1035 		if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
1036 			return 0;
1037 	}
1038 
1039 	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
1040 		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
1041 
1042 	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
1043 	if (!ptp)
1044 		return -ENOMEM;
1045 
1046 	if (!BNXT_CHIP_P5(bp)) {
1047 		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
1048 			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
1049 		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
1050 			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
1051 		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
1052 			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
1053 		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
1054 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
1055 		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
1056 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
1057 		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
1058 			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
1059 		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
1060 			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
1061 		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
1062 			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
1063 		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
1064 			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
1065 	}
1066 
1067 	ptp->bp = bp;
1068 	bp->ptp_cfg = ptp;
1069 
1070 	return 0;
1071 }
1072 
1073 void bnxt_free_vf_info(struct bnxt *bp)
1074 {
1075 	int i;
1076 
1077 	if (bp->pf == NULL)
1078 		return;
1079 
1080 	if (bp->pf->vf_info == NULL)
1081 		return;
1082 
1083 	for (i = 0; i < bp->pf->max_vfs; i++) {
1084 		rte_free(bp->pf->vf_info[i].vlan_table);
1085 		bp->pf->vf_info[i].vlan_table = NULL;
1086 		rte_free(bp->pf->vf_info[i].vlan_as_table);
1087 		bp->pf->vf_info[i].vlan_as_table = NULL;
1088 	}
1089 	rte_free(bp->pf->vf_info);
1090 	bp->pf->vf_info = NULL;
1091 }
1092 
1093 static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
1094 {
1095 	struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
1096 	int i;
1097 
1098 	if (vf_info)
1099 		bnxt_free_vf_info(bp);
1100 
1101 	vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
1102 	if (vf_info == NULL) {
1103 		PMD_DRV_LOG_LINE(ERR, "Failed to alloc vf info");
1104 		return -ENOMEM;
1105 	}
1106 
1107 	bp->pf->max_vfs = max_vfs;
1108 	for (i = 0; i < max_vfs; i++) {
1109 		vf_info[i].fid = bp->pf->first_vf_id + i;
1110 		vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
1111 						    getpagesize(), getpagesize());
1112 		if (vf_info[i].vlan_table == NULL) {
1113 			PMD_DRV_LOG_LINE(ERR, "Failed to alloc VLAN table for VF %d", i);
1114 			goto err;
1115 		}
1116 		rte_mem_lock_page(vf_info[i].vlan_table);
1117 
1118 		vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
1119 						       getpagesize(), getpagesize());
1120 		if (vf_info[i].vlan_as_table == NULL) {
1121 			PMD_DRV_LOG_LINE(ERR, "Failed to alloc VLAN AS table for VF %d", i);
1122 			goto err;
1123 		}
1124 		rte_mem_lock_page(vf_info[i].vlan_as_table);
1125 
1126 		STAILQ_INIT(&vf_info[i].filter);
1127 	}
1128 
1129 	bp->pf->vf_info = vf_info;
1130 
1131 	return 0;
1132 err:
1133 	bnxt_free_vf_info(bp);
1134 	return -ENOMEM;
1135 }
1136 
1137 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
1138 {
1139 	int rc = 0;
1140 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
1141 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1142 	uint32_t flags, flags_ext2, flags_ext3;
1143 	uint16_t new_max_vfs;
1144 
1145 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
1146 
1147 	req.fid = rte_cpu_to_le_16(0xffff);
1148 
1149 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1150 
1151 	HWRM_CHECK_RESULT();
1152 
1153 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1154 	flags = rte_le_to_cpu_32(resp->flags);
1155 	flags_ext2 = rte_le_to_cpu_32(resp->flags_ext2);
1156 	flags_ext3 = rte_le_to_cpu_32(resp->flags_ext3);
1157 
1158 	if (BNXT_PF(bp)) {
1159 		bp->pf->port_id = resp->port_id;
1160 		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
1161 		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
1162 		new_max_vfs = bp->pdev->max_vfs;
1163 		if (new_max_vfs != bp->pf->max_vfs) {
1164 			rc = bnxt_alloc_vf_info(bp, new_max_vfs);
1165 			if (rc)
1166 				goto unlock;
1167 		}
1168 	}
1169 
1170 	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
1171 	if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
1172 		bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
1173 		memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
1174 	} else {
1175 		bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
1176 	}
1177 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1178 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1179 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1180 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1181 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
1182 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
1183 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1184 	if (!BNXT_CHIP_P5_P7(bp) && !bp->pdev->max_vfs)
1185 		bp->max_l2_ctx += bp->max_rx_em_flows;
1186 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
1187 		bp->max_vnics = rte_le_to_cpu_16(BNXT_MAX_VNICS_COS_CLASSIFY);
1188 	else
1189 		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1190 	PMD_DRV_LOG_LINE(DEBUG, "Max l2_cntxts is %d vnics is %d",
1191 		    bp->max_l2_ctx, bp->max_vnics);
1192 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1193 	bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
1194 	if (!bp->max_mcast_addr)
1195 		bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR;
1196 	memcpy(bp->dsn, resp->device_serial_number, sizeof(bp->dsn));
1197 
1198 	if (BNXT_PF(bp))
1199 		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
1200 
1201 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
1202 		if (BNXT_CHIP_P5(bp) || BNXT_PF(bp)) {
1203 			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
1204 			PMD_DRV_LOG_LINE(DEBUG, "PTP SUPPORTED");
1205 			HWRM_UNLOCK();
1206 			bnxt_hwrm_ptp_qcfg(bp);
1207 		}
1208 	}
1209 
1210 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1211 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
1212 
1213 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
1214 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
1215 		PMD_DRV_LOG_LINE(DEBUG, "Adapter Error recovery SUPPORTED");
1216 	}
1217 
1218 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
1219 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1220 
1221 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
1222 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
1223 
1224 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
1225 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
1226 
1227 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED) {
1228 		PMD_DRV_LOG_LINE(DEBUG, "Backing store v2 supported");
1229 		if (BNXT_CHIP_P7(bp))
1230 			bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
1231 	}
1232 
1233 	/* only initialize the mpc capability one time */
1234 	if (resp->mpc_chnls_cap && !bp->mpc) {
1235 		struct bnxt_mpc *mpc;
1236 
1237 		mpc = rte_zmalloc("bnxt_mpc", sizeof(*mpc), 0);
1238 		if (!mpc) {
1239 			/* no impact to basic NIC functionalities. Truflow
1240 			 * will be disabled if mpc is not setup.
1241 			 */
1242 			PMD_DRV_LOG_LINE(ERR, "Fail allocate mpc memory");
1243 		} else {
1244 			mpc->mpc_chnls_cap = resp->mpc_chnls_cap;
1245 			bp->mpc = mpc;
1246 		}
1247 	}
1248 
1249 	if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
1250 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
1251 		PMD_DRV_LOG_LINE(DEBUG, "VLAN acceleration for TX is enabled");
1252 	}
1253 
1254 	bp->tunnel_disable_flag = rte_le_to_cpu_16(resp->tunnel_disable_flag);
1255 	if (bp->tunnel_disable_flag)
1256 		PMD_DRV_LOG_LINE(DEBUG, "Tunnel parsing capability is disabled, flags : %#x",
1257 			    bp->tunnel_disable_flag);
1258 
1259 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
1260 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
1261 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_UDP_GSO_SUPPORTED)
1262 		bp->fw_cap |= BNXT_FW_CAP_UDP_GSO;
1263 	if (flags_ext3 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED)
1264 		bp->fw_cap |= BNXT_FW_CAP_RX_RATE_PROFILE;
1265 
1266 unlock:
1267 	HWRM_UNLOCK();
1268 
1269 	return rc;
1270 }
1271 
1272 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
1273 {
1274 	int rc;
1275 
1276 	rc = __bnxt_hwrm_func_qcaps(bp);
1277 	if (rc == -ENOMEM)
1278 		return rc;
1279 
1280 	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
1281 		rc = bnxt_alloc_ctx_mem(bp);
1282 		if (rc)
1283 			return rc;
1284 
1285 		/* On older FW,
1286 		 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
1287 		 * But the error can be ignored. Return success.
1288 		 */
1289 		rc = bnxt_hwrm_func_resc_qcaps(bp);
1290 		if (!rc)
1291 			bp->flags |= BNXT_FLAG_NEW_RM;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 /* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
1298 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
1299 {
1300 	int rc = 0;
1301 	uint32_t flags;
1302 	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
1303 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1304 
1305 	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
1306 
1307 	req.target_id = rte_cpu_to_le_16(0xffff);
1308 
1309 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1310 
1311 	HWRM_CHECK_RESULT();
1312 
1313 	bp->vnic_cap_flags = 0;
1314 
1315 	flags = rte_le_to_cpu_32(resp->flags);
1316 
1317 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
1318 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
1319 		PMD_DRV_LOG_LINE(INFO, "CoS assignment capability enabled");
1320 	}
1321 
1322 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
1323 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
1324 
1325 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP) {
1326 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS_TRUSTED_VF;
1327 		PMD_DRV_LOG_LINE(DEBUG, "Trusted VF's outer RSS capability is enabled");
1328 	}
1329 
1330 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
1331 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
1332 
1333 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) {
1334 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP;
1335 		PMD_DRV_LOG_LINE(DEBUG, "Rx VLAN strip capability enabled");
1336 	}
1337 
1338 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_XOR_CAP)
1339 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_XOR_MODE;
1340 
1341 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP)
1342 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_CHKSM_MODE;
1343 
1344 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
1345 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_IPV6_FLOW_LABEL_MODE;
1346 
1347 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_L2_CQE_MODE_CAP)
1348 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_L2_CQE_MODE;
1349 
1350 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
1351 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_AH_SPI4_CAP;
1352 
1353 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
1354 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_AH_SPI6_CAP;
1355 
1356 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
1357 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_ESP_SPI4_CAP;
1358 
1359 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
1360 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_ESP_SPI6_CAP;
1361 
1362 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP)
1363 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_VNIC_TUNNEL_TPA;
1364 
1365 	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
1366 
1367 	HWRM_UNLOCK();
1368 
1369 	return rc;
1370 }
1371 
1372 int bnxt_hwrm_func_reset(struct bnxt *bp)
1373 {
1374 	int rc = 0;
1375 	struct hwrm_func_reset_input req = {.req_type = 0 };
1376 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
1377 
1378 	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
1379 
1380 	req.enables = rte_cpu_to_le_32(0);
1381 
1382 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1383 
1384 	HWRM_CHECK_RESULT();
1385 	HWRM_UNLOCK();
1386 
1387 	return rc;
1388 }
1389 
1390 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
1391 {
1392 	int rc;
1393 	uint32_t flags = 0;
1394 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
1395 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
1396 
1397 	if (bp->flags & BNXT_FLAG_REGISTERED)
1398 		return 0;
1399 
1400 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1401 		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1402 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1403 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
1404 
1405 	/* PFs and trusted VFs should indicate the support of the
1406 	 * Master capability on non Stingray platform
1407 	 */
1408 	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
1409 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1410 
1411 	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
1412 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1413 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1414 	req.ver_maj_8b = RTE_VER_YEAR;
1415 	req.ver_min_8b = RTE_VER_MONTH;
1416 	req.ver_upd_8b = RTE_VER_MINOR;
1417 
1418 	if (BNXT_PF(bp)) {
1419 		req.enables |= rte_cpu_to_le_32(
1420 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1421 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
1422 		       RTE_MIN(sizeof(req.vf_req_fwd),
1423 			       sizeof(bp->pf->vf_req_fwd)));
1424 	}
1425 
1426 	req.flags = rte_cpu_to_le_32(flags);
1427 
1428 	req.async_event_fwd[0] |=
1429 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
1430 				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
1431 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
1432 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
1433 				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
1434 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1435 		req.async_event_fwd[0] |=
1436 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
1437 	req.async_event_fwd[1] |=
1438 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
1439 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
1440 	if (BNXT_PF(bp))
1441 		req.async_event_fwd[1] |=
1442 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
1443 
1444 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
1445 		req.async_event_fwd[1] |=
1446 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE |
1447 				 ASYNC_CMPL_EVENT_ID_VF_FLR);
1448 	}
1449 
1450 	req.async_event_fwd[2] |=
1451 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST |
1452 				 ASYNC_CMPL_EVENT_ID_ERROR_REPORT |
1453 				 ASYNC_CMPL_EVENT_ID_RSS_CHANGE);
1454 
1455 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1456 
1457 	HWRM_CHECK_RESULT();
1458 
1459 	flags = rte_le_to_cpu_32(resp->flags);
1460 	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
1461 		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1462 
1463 	HWRM_UNLOCK();
1464 
1465 	bp->flags |= BNXT_FLAG_REGISTERED;
1466 
1467 	return rc;
1468 }
1469 
1470 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
1471 {
1472 	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
1473 		return 0;
1474 
1475 	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
1476 }
1477 
1478 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
1479 {
1480 	int rc;
1481 	uint32_t flags = 0;
1482 	uint32_t enables;
1483 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1484 	struct hwrm_func_vf_cfg_input req = {0};
1485 	uint8_t mpc_ring_cnt = bp->mpc ? BNXT_MPC_RINGS_SUPPORTED : 0;
1486 
1487 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
1488 
1489 	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
1490 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
1491 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
1492 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1493 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
1494 
1495 	if (BNXT_HAS_RING_GRPS(bp)) {
1496 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
1497 		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
1498 	}
1499 
1500 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings + mpc_ring_cnt);
1501 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
1502 					    AGG_RING_MULTIPLIER);
1503 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
1504 					     bp->tx_nr_rings +
1505 					     mpc_ring_cnt);
1506 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
1507 					      bp->tx_nr_rings +
1508 					      BNXT_NUM_ASYNC_CPR(bp) +
1509 					      mpc_ring_cnt);
1510 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
1511 		req.num_vnics = rte_cpu_to_le_16(RTE_MIN(BNXT_VNIC_MAX_SUPPORTED_ID,
1512 							 bp->max_vnics));
1513 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1514 		req.num_rsscos_ctxs = rte_cpu_to_le_16(RTE_MIN(BNXT_VNIC_MAX_SUPPORTED_ID,
1515 							       bp->max_rsscos_ctx));
1516 	} else {
1517 		req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
1518 	}
1519 
1520 	if (bp->vf_resv_strategy ==
1521 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
1522 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1523 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1524 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1525 		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1526 		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1527 		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1528 	} else if (bp->vf_resv_strategy ==
1529 		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1530 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1531 		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1532 	}
1533 
1534 	if (test)
1535 		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1536 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1537 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1538 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1539 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1540 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1541 
1542 	if (test && BNXT_HAS_RING_GRPS(bp))
1543 		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1544 
1545 	req.flags = rte_cpu_to_le_32(flags);
1546 	req.enables |= rte_cpu_to_le_32(enables);
1547 
1548 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1549 
1550 	if (test)
1551 		HWRM_CHECK_RESULT_SILENT();
1552 	else
1553 		HWRM_CHECK_RESULT();
1554 
1555 	HWRM_UNLOCK();
1556 	return rc;
1557 }
1558 
1559 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1560 {
1561 	int rc;
1562 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1563 	struct hwrm_func_resource_qcaps_input req = {0};
1564 
1565 	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1566 	req.fid = rte_cpu_to_le_16(0xffff);
1567 
1568 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1569 
1570 	HWRM_CHECK_RESULT_SILENT();
1571 
1572 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1573 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1574 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1575 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1576 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1577 	/* func_resource_qcaps does not return max_rx_em_flows.
1578 	 * So use the value provided by func_qcaps.
1579 	 */
1580 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1581 	if (!BNXT_CHIP_P5_P7(bp) && !bp->pdev->max_vfs)
1582 		bp->max_l2_ctx += bp->max_rx_em_flows;
1583 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
1584 		bp->max_vnics = rte_le_to_cpu_16(BNXT_MAX_VNICS_COS_CLASSIFY);
1585 	else
1586 		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1587 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1588 	if (BNXT_CHIP_P7(bp))
1589 		bp->max_nq_rings = BNXT_P7_MAX_NQ_RING_CNT;
1590 	else
1591 		bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1592 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1593 	if (bp->vf_resv_strategy >
1594 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1595 		bp->vf_resv_strategy =
1596 		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1597 
1598 	HWRM_UNLOCK();
1599 	return rc;
1600 }
1601 
1602 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1603 {
1604 	int rc = 0;
1605 	struct hwrm_ver_get_input req = {.req_type = 0 };
1606 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1607 	uint32_t fw_version;
1608 	uint16_t max_resp_len;
1609 	char type[RTE_MEMZONE_NAMESIZE];
1610 	uint32_t dev_caps_cfg;
1611 
1612 	bp->max_req_len = HWRM_MAX_REQ_LEN;
1613 	bp->hwrm_cmd_timeout = timeout;
1614 	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1615 
1616 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1617 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
1618 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1619 
1620 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1621 
1622 	if (bp->flags & BNXT_FLAG_FW_RESET)
1623 		HWRM_CHECK_RESULT_SILENT();
1624 	else
1625 		HWRM_CHECK_RESULT();
1626 
1627 	PMD_DRV_LOG_LINE(INFO, "%d.%d.%d:%d.%d.%d.%d",
1628 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1629 		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1630 		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
1631 		resp->hwrm_fw_rsvd_8b);
1632 	bp->fw_ver = ((uint32_t)resp->hwrm_fw_maj_8b << 24) |
1633 		     ((uint32_t)resp->hwrm_fw_min_8b << 16) |
1634 		     ((uint32_t)resp->hwrm_fw_bld_8b << 8) |
1635 		     resp->hwrm_fw_rsvd_8b;
1636 	PMD_DRV_LOG_LINE(INFO, "Driver HWRM version: %d.%d.%d",
1637 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1638 
1639 	fw_version = resp->hwrm_intf_maj_8b << 16;
1640 	fw_version |= resp->hwrm_intf_min_8b << 8;
1641 	fw_version |= resp->hwrm_intf_upd_8b;
1642 	bp->hwrm_spec_code = fw_version;
1643 
1644 	/* def_req_timeout value is in milliseconds */
1645 	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1646 	/* convert timeout to usec */
1647 	bp->hwrm_cmd_timeout *= 1000;
1648 	if (!bp->hwrm_cmd_timeout)
1649 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
1650 
1651 	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1652 		PMD_DRV_LOG_LINE(ERR, "Unsupported firmware API version");
1653 		rc = -EINVAL;
1654 		goto error;
1655 	}
1656 
1657 	if (bp->max_req_len > resp->max_req_win_len) {
1658 		PMD_DRV_LOG_LINE(ERR, "Unsupported request length");
1659 		rc = -EINVAL;
1660 		goto error;
1661 	}
1662 
1663 	bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
1664 
1665 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1666 	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1667 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1668 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1669 
1670 	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1671 	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1672 
1673 	RTE_VERIFY(max_resp_len <= bp->max_resp_len);
1674 	bp->max_resp_len = max_resp_len;
1675 	bp->chip_rev = resp->chip_rev;
1676 
1677 	if ((dev_caps_cfg &
1678 		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1679 	    (dev_caps_cfg &
1680 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1681 		PMD_DRV_LOG_LINE(DEBUG, "Short command supported");
1682 		bp->flags |= BNXT_FLAG_SHORT_CMD;
1683 	}
1684 
1685 	if (((dev_caps_cfg &
1686 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1687 	     (dev_caps_cfg &
1688 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1689 	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1690 		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1691 			bp->pdev->addr.domain, bp->pdev->addr.bus,
1692 			bp->pdev->addr.devid, bp->pdev->addr.function);
1693 
1694 		rte_free(bp->hwrm_short_cmd_req_addr);
1695 
1696 		bp->hwrm_short_cmd_req_addr =
1697 				rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1698 		if (bp->hwrm_short_cmd_req_addr == NULL) {
1699 			rc = -ENOMEM;
1700 			goto error;
1701 		}
1702 		bp->hwrm_short_cmd_req_dma_addr =
1703 			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1704 		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1705 			rte_free(bp->hwrm_short_cmd_req_addr);
1706 			PMD_DRV_LOG_LINE(ERR,
1707 				"Unable to map buffer to physical memory.");
1708 			rc = -ENOMEM;
1709 			goto error;
1710 		}
1711 	}
1712 	if (dev_caps_cfg &
1713 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1714 		bp->flags |= BNXT_FLAG_KONG_MB_EN;
1715 		PMD_DRV_LOG_LINE(DEBUG, "Kong mailbox channel enabled");
1716 	}
1717 	if (dev_caps_cfg &
1718 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1719 		PMD_DRV_LOG_LINE(DEBUG, "FW supports Trusted VFs");
1720 	if (dev_caps_cfg &
1721 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1722 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1723 		PMD_DRV_LOG_LINE(DEBUG, "FW supports advanced flow management");
1724 	}
1725 
1726 	if (dev_caps_cfg &
1727 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1728 		PMD_DRV_LOG_LINE(DEBUG, "FW supports advanced flow counters");
1729 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1730 	}
1731 
1732 	if (dev_caps_cfg &
1733 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) {
1734 		PMD_DRV_LOG_LINE(DEBUG, "Host-based truflow feature enabled.");
1735 		bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
1736 	}
1737 
1738 error:
1739 	HWRM_UNLOCK();
1740 	return rc;
1741 }
1742 
1743 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
1744 {
1745 	int rc;
1746 	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1747 	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1748 
1749 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
1750 		return 0;
1751 
1752 	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1753 
1754 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1755 
1756 	HWRM_CHECK_RESULT();
1757 	HWRM_UNLOCK();
1758 
1759 	PMD_DRV_LOG_LINE(DEBUG, "Port %u: Unregistered with fw",
1760 		    bp->eth_dev->data->port_id);
1761 
1762 	return rc;
1763 }
1764 
1765 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1766 {
1767 	int rc = 0;
1768 	struct hwrm_port_phy_cfg_input req = {0};
1769 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1770 	uint32_t enables = 0;
1771 
1772 	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1773 
1774 	if (conf->link_up) {
1775 		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1776 		if (bp->link_info->auto_mode && conf->link_speed) {
1777 			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1778 			PMD_DRV_LOG_LINE(DEBUG, "Disabling AutoNeg");
1779 		}
1780 
1781 		req.flags = rte_cpu_to_le_32(conf->phy_flags);
1782 		/*
1783 		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1784 		 * any auto mode, even "none".
1785 		 */
1786 		if (!conf->link_speed) {
1787 			/* No speeds specified. Enable AutoNeg - all speeds */
1788 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1789 			req.auto_mode =
1790 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1791 		} else {
1792 			if (bp->link_info->link_signal_mode) {
1793 				enables |=
1794 				HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1795 				req.force_pam4_link_speed =
1796 					rte_cpu_to_le_16(conf->link_speed);
1797 			} else {
1798 				req.force_link_speed =
1799 					rte_cpu_to_le_16(conf->link_speed);
1800 			}
1801 		}
1802 		/* AutoNeg - Advertise speeds specified. */
1803 		if ((conf->auto_link_speed_mask || conf->auto_pam4_link_speed_mask) &&
1804 		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1805 			req.auto_mode =
1806 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1807 			if (conf->auto_pam4_link_speed_mask) {
1808 				enables |=
1809 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1810 				req.auto_link_pam4_speed_mask =
1811 				rte_cpu_to_le_16(conf->auto_pam4_link_speed_mask);
1812 			}
1813 			if (conf->auto_link_speed_mask) {
1814 				enables |=
1815 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1816 				req.auto_link_speed_mask =
1817 				rte_cpu_to_le_16(conf->auto_link_speed_mask);
1818 			}
1819 		}
1820 		if (conf->auto_link_speed &&
1821 		!(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1822 			enables |=
1823 				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1824 
1825 		req.auto_duplex = conf->duplex;
1826 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1827 		req.auto_pause = conf->auto_pause;
1828 		req.force_pause = conf->force_pause;
1829 		/* Set force_pause if there is no auto or if there is a force */
1830 		if (req.auto_pause && !req.force_pause)
1831 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1832 		else
1833 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1834 
1835 		req.enables = rte_cpu_to_le_32(enables);
1836 	} else {
1837 		req.flags =
1838 		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1839 		PMD_DRV_LOG_LINE(INFO, "Force Link Down");
1840 	}
1841 
1842 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1843 
1844 	HWRM_CHECK_RESULT();
1845 	HWRM_UNLOCK();
1846 
1847 	return rc;
1848 }
1849 
1850 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1851 				   struct bnxt_link_info *link_info)
1852 {
1853 	int rc = 0;
1854 	struct hwrm_port_phy_qcfg_input req = {0};
1855 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1856 
1857 	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1858 
1859 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1860 
1861 	HWRM_CHECK_RESULT();
1862 
1863 	link_info->phy_link_status = resp->link;
1864 	link_info->link_up =
1865 		(link_info->phy_link_status ==
1866 		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1867 	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1868 	link_info->duplex = resp->duplex_cfg;
1869 	link_info->pause = resp->pause;
1870 	link_info->auto_pause = resp->auto_pause;
1871 	link_info->force_pause = resp->force_pause;
1872 	link_info->auto_mode = resp->auto_mode;
1873 	link_info->phy_type = resp->phy_type;
1874 	link_info->media_type = resp->media_type;
1875 
1876 	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1877 	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1878 	link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask);
1879 	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1880 	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1881 	link_info->phy_ver[0] = resp->phy_maj;
1882 	link_info->phy_ver[1] = resp->phy_min;
1883 	link_info->phy_ver[2] = resp->phy_bld;
1884 	link_info->link_signal_mode =
1885 		resp->active_fec_signal_mode & HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
1886 	link_info->option_flags = resp->option_flags;
1887 	link_info->force_pam4_link_speed =
1888 			rte_le_to_cpu_16(resp->force_pam4_link_speed);
1889 	link_info->support_pam4_speeds =
1890 			rte_le_to_cpu_16(resp->support_pam4_speeds);
1891 	link_info->auto_pam4_link_speed_mask =
1892 			rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1893 	/* P7 uses speeds2 fields */
1894 	if (BNXT_LINK_SPEEDS_V2(bp) && BNXT_LINK_SPEEDS_V2_OPTIONS(link_info->option_flags)) {
1895 		link_info->support_speeds2 = rte_le_to_cpu_16(resp->support_speeds2);
1896 		link_info->force_link_speeds2 = rte_le_to_cpu_16(resp->force_link_speeds2);
1897 		link_info->auto_link_speeds2 = rte_le_to_cpu_16(resp->auto_link_speeds2);
1898 		link_info->active_lanes = resp->active_lanes;
1899 		if (!link_info->auto_mode)
1900 			link_info->link_speed = link_info->force_link_speeds2;
1901 	}
1902 	link_info->module_status = resp->module_status;
1903 	HWRM_UNLOCK();
1904 
1905 	/* Display the captured P7 phy details */
1906 	if (BNXT_LINK_SPEEDS_V2(bp)) {
1907 		PMD_DRV_LOG_LINE(DEBUG, "Phytype:%d, Media_type:%d, Status: %d, Link Signal:%d",
1908 			    link_info->phy_type,
1909 			    link_info->media_type,
1910 			    link_info->phy_link_status,
1911 			    link_info->link_signal_mode);
1912 		PMD_DRV_LOG_LINE(DEBUG, "Active Fec: %d Support_speeds2:%x, Force_link_speedsv2:%x",
1913 			    (resp->active_fec_signal_mode &
1914 				HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_MASK) >> 4,
1915 			    link_info->support_speeds2, link_info->force_link_speeds2);
1916 		PMD_DRV_LOG_LINE(DEBUG, "Auto_link_speedsv2:%x, Active_lanes:%d",
1917 			    link_info->auto_link_speeds2,
1918 			    link_info->active_lanes);
1919 
1920 		const char *desc;
1921 
1922 		if (link_info->auto_mode)
1923 			desc = ((struct link_speeds_tbl *)
1924 				bnxt_get_hwrm_to_rte_speeds_entry(link_info->link_speed))->desc;
1925 		else
1926 			desc = ((struct link_speeds2_tbl *)
1927 				bnxt_get_hwrm_to_rte_speeds2_entry(link_info->link_speed))->desc;
1928 
1929 		PMD_DRV_LOG_LINE(INFO, "Link Speed: %s %s, Status: %s Signal-mode: %s",
1930 			    desc,
1931 			    !(link_info->auto_mode) ? "Forced" : "AutoNegotiated",
1932 			    link_status_str[link_info->phy_link_status % MAX_LINK_STR],
1933 			    signal_mode[link_info->link_signal_mode % MAX_SIG_MODE]);
1934 		PMD_DRV_LOG_LINE(INFO, "Media type: %s, Xcvr type: %s, Active FEC: %s Lanes: %d",
1935 			    media_type[link_info->media_type % MAX_MEDIA_TYPE],
1936 			    bnxt_get_xcvr_type(rte_le_to_cpu_32
1937 					       (resp->xcvr_identifier_type_tx_lpi_timer)),
1938 			    fec_mode[((resp->active_fec_signal_mode &
1939 				       HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_MASK) >> 4) %
1940 			    MAX_FEC_MODE], link_info->active_lanes);
1941 		return rc;
1942 	}
1943 
1944 	PMD_DRV_LOG_LINE(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x",
1945 		    link_info->link_speed, link_info->auto_mode,
1946 		    link_info->auto_link_speed, link_info->auto_link_speed_mask,
1947 		    link_info->support_speeds, link_info->force_link_speed);
1948 	PMD_DRV_LOG_LINE(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x",
1949 		    link_info->link_signal_mode,
1950 		    link_info->auto_pam4_link_speed_mask,
1951 		    link_info->support_pam4_speeds,
1952 		    link_info->force_pam4_link_speed);
1953 	return rc;
1954 }
1955 
1956 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1957 {
1958 	int rc = 0;
1959 	struct hwrm_port_phy_qcaps_input req = {0};
1960 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1961 	struct bnxt_link_info *link_info = bp->link_info;
1962 
1963 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1964 		return 0;
1965 
1966 	HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1967 
1968 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1969 
1970 	HWRM_CHECK_RESULT_SILENT();
1971 
1972 	bp->port_cnt = resp->port_cnt;
1973 	if (resp->supported_speeds_auto_mode)
1974 		link_info->support_auto_speeds =
1975 			rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1976 	if (resp->supported_pam4_speeds_auto_mode)
1977 		link_info->support_pam4_auto_speeds =
1978 			rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1979 	/* P7 chips now report all speeds here */
1980 	if (resp->flags2 & HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_SPEEDS2_SUPPORTED)
1981 		link_info->support_speeds_v2 = true;
1982 	if (link_info->support_speeds_v2) {
1983 		link_info->supported_speeds2_force_mode =
1984 			rte_le_to_cpu_16(resp->supported_speeds2_force_mode);
1985 		link_info->supported_speeds2_auto_mode =
1986 			rte_le_to_cpu_16(resp->supported_speeds2_auto_mode);
1987 	}
1988 
1989 	HWRM_UNLOCK();
1990 
1991 	/* Older firmware does not have supported_auto_speeds, so assume
1992 	 * that all supported speeds can be autonegotiated.
1993 	 */
1994 	if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds)
1995 		link_info->support_auto_speeds = link_info->support_speeds;
1996 
1997 	return 0;
1998 }
1999 
2000 static bool _bnxt_find_lossy_profile(struct bnxt *bp)
2001 {
2002 	int i = 0;
2003 
2004 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
2005 		if (bp->tx_cos_queue[i].profile ==
2006 		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
2007 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
2008 			return true;
2009 		}
2010 	}
2011 	return false;
2012 }
2013 
2014 static bool _bnxt_find_lossy_nic_profile(struct bnxt *bp)
2015 {
2016 	int i = 0, j = 0;
2017 
2018 	for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
2019 		for (j = 0; j < BNXT_COS_QUEUE_COUNT; j++) {
2020 			if (bp->tx_cos_queue[i].profile ==
2021 			    HWRM_QUEUE_SERVICE_PROFILE_LOSSY &&
2022 			    bp->tx_cos_queue[j].profile_type ==
2023 			    HWRM_QUEUE_SERVICE_PROFILE_TYPE_NIC) {
2024 				bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
2025 				return true;
2026 			}
2027 		}
2028 	}
2029 	return false;
2030 }
2031 
2032 static bool bnxt_find_lossy_profile(struct bnxt *bp, bool use_prof_type)
2033 {
2034 	int i;
2035 
2036 	for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
2037 		PMD_DRV_LOG_LINE(DEBUG, "profile %d, profile_id %d, type %d",
2038 			    bp->tx_cos_queue[i].profile,
2039 			    bp->tx_cos_queue[i].id,
2040 			    bp->tx_cos_queue[i].profile_type);
2041 	}
2042 
2043 	if (use_prof_type)
2044 		return _bnxt_find_lossy_nic_profile(bp);
2045 	else
2046 		return _bnxt_find_lossy_profile(bp);
2047 }
2048 
2049 static void bnxt_find_first_valid_profile(struct bnxt *bp)
2050 {
2051 	int i = 0;
2052 
2053 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
2054 		if (bp->tx_cos_queue[i].profile !=
2055 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
2056 		    bp->tx_cos_queue[i].id !=
2057 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
2058 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
2059 			break;
2060 		}
2061 	}
2062 }
2063 
2064 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
2065 {
2066 	int rc = 0;
2067 	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
2068 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
2069 	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
2070 	bool use_prof_type = false;
2071 	int i;
2072 
2073 get_rx_info:
2074 	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
2075 
2076 	req.flags = rte_cpu_to_le_32(dir);
2077 	/* HWRM Version >= 1.9.1 only if COS Classification is not required. */
2078 	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
2079 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
2080 		req.drv_qmap_cap =
2081 			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
2082 
2083 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2084 
2085 	HWRM_CHECK_RESULT();
2086 
2087 	if (resp->queue_cfg_info &
2088 	    HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_USE_PROFILE_TYPE)
2089 		use_prof_type = true;
2090 
2091 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
2092 		GET_TX_QUEUE_INFO(0);
2093 		GET_TX_QUEUE_INFO(1);
2094 		GET_TX_QUEUE_INFO(2);
2095 		GET_TX_QUEUE_INFO(3);
2096 		GET_TX_QUEUE_INFO(4);
2097 		GET_TX_QUEUE_INFO(5);
2098 		GET_TX_QUEUE_INFO(6);
2099 		GET_TX_QUEUE_INFO(7);
2100 		if (use_prof_type) {
2101 			GET_TX_QUEUE_TYPE_INFO(0);
2102 			GET_TX_QUEUE_TYPE_INFO(1);
2103 			GET_TX_QUEUE_TYPE_INFO(2);
2104 			GET_TX_QUEUE_TYPE_INFO(3);
2105 			GET_TX_QUEUE_TYPE_INFO(4);
2106 			GET_TX_QUEUE_TYPE_INFO(5);
2107 			GET_TX_QUEUE_TYPE_INFO(6);
2108 			GET_TX_QUEUE_TYPE_INFO(7);
2109 		}
2110 	} else  {
2111 		GET_RX_QUEUE_INFO(0);
2112 		GET_RX_QUEUE_INFO(1);
2113 		GET_RX_QUEUE_INFO(2);
2114 		GET_RX_QUEUE_INFO(3);
2115 		GET_RX_QUEUE_INFO(4);
2116 		GET_RX_QUEUE_INFO(5);
2117 		GET_RX_QUEUE_INFO(6);
2118 		GET_RX_QUEUE_INFO(7);
2119 	}
2120 
2121 	HWRM_UNLOCK();
2122 
2123 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
2124 		goto done;
2125 
2126 	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
2127 		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
2128 	} else {
2129 		int j;
2130 
2131 		/* iterate and find the COSq profile to use for Tx */
2132 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2133 			for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
2134 				if (bp->tx_cos_queue[i].id != 0xff)
2135 					bp->tx_cosq_id[j++] =
2136 						bp->tx_cos_queue[i].id;
2137 			}
2138 		} else {
2139 			/* When CoS classification is disabled, for normal NIC
2140 			 * operations, ideally we should look to use LOSSY.
2141 			 * If not found, fallback to the first valid profile
2142 			 */
2143 			if (!bnxt_find_lossy_profile(bp, use_prof_type))
2144 				bnxt_find_first_valid_profile(bp);
2145 
2146 		}
2147 	}
2148 	PMD_DRV_LOG_LINE(DEBUG, "Tx COS Queue ID %d", bp->tx_cosq_id[0]);
2149 
2150 	bp->max_tc = resp->max_configurable_queues;
2151 	bp->max_lltc = resp->max_configurable_lossless_queues;
2152 	if (bp->max_tc > BNXT_MAX_QUEUE)
2153 		bp->max_tc = BNXT_MAX_QUEUE;
2154 	bp->max_q = bp->max_tc;
2155 
2156 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
2157 		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
2158 		goto get_rx_info;
2159 	}
2160 
2161 done:
2162 	return rc;
2163 }
2164 
2165 static const uint8_t
2166 mpc_chnl_types[] = {HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TCE,
2167 		    HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RCE,
2168 		    HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA,
2169 		    HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA,
2170 		    HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_PRIMATE};
2171 
2172 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
2173 			 struct bnxt_ring *ring,
2174 			 uint32_t ring_type, uint32_t map_index,
2175 			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
2176 			 uint16_t tx_cosq_id)
2177 {
2178 	int rc = 0;
2179 	uint32_t enables = 0;
2180 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
2181 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2182 	struct rte_mempool *mb_pool;
2183 	uint16_t rx_buf_size;
2184 
2185 	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
2186 
2187 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
2188 	req.fbo = rte_cpu_to_le_32(0);
2189 	/* Association of ring index with doorbell index */
2190 	req.logical_id = rte_cpu_to_le_16(map_index);
2191 	req.length = rte_cpu_to_le_32(ring->ring_size);
2192 
2193 	switch (ring_type) {
2194 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
2195 		req.ring_type = ring_type;
2196 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
2197 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
2198 		if (bp->fw_cap & BNXT_FW_CAP_TX_COAL_CMPL)
2199 			req.cmpl_coal_cnt =
2200 				HWRM_RING_ALLOC_INPUT_CMPL_COAL_CNT_COAL_OFF;
2201 		if (tx_cosq_id != MPC_HW_COS_ID) {
2202 			req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
2203 		} else {
2204 			uint32_t mpc_chnl = BNXT_MPC_CHNL(map_index);
2205 
2206 			req.logical_id =
2207 				rte_cpu_to_le_16(BNXT_MPC_QIDX(map_index));
2208 			if (mpc_chnl >= BNXT_MPC_CHNL_MAX)
2209 				return -EINVAL;
2210 			enables |= HWRM_RING_ALLOC_INPUT_ENABLES_MPC_CHNLS_TYPE;
2211 			req.mpc_chnls_type = mpc_chnl_types[mpc_chnl];
2212 		}
2213 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
2214 			enables |=
2215 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
2216 		break;
2217 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
2218 		req.ring_type = ring_type;
2219 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
2220 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
2221 		if (BNXT_CHIP_P5_P7(bp)) {
2222 			mb_pool = bp->rx_queues[0]->mb_pool;
2223 			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
2224 				      RTE_PKTMBUF_HEADROOM;
2225 			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
2226 			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
2227 			enables |=
2228 				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
2229 		}
2230 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
2231 			enables |=
2232 				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
2233 		if (bp->fw_cap & BNXT_FW_CAP_RX_RATE_PROFILE) {
2234 			req.rx_rate_profile_sel =
2235 				HWRM_RING_ALLOC_INPUT_RX_RATE_PROFILE_SEL_POLL_MODE;
2236 			enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RATE_PROFILE_VALID;
2237 		}
2238 		break;
2239 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
2240 		req.ring_type = ring_type;
2241 		if (BNXT_HAS_NQ(bp)) {
2242 			/* Association of cp ring with nq */
2243 			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
2244 			enables |=
2245 				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
2246 		}
2247 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
2248 		break;
2249 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
2250 		req.ring_type = ring_type;
2251 		req.page_size = BNXT_PAGE_SHFT;
2252 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
2253 		break;
2254 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
2255 		req.ring_type = ring_type;
2256 		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
2257 
2258 		mb_pool = bp->rx_queues[0]->mb_pool;
2259 		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
2260 			      RTE_PKTMBUF_HEADROOM;
2261 		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
2262 		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
2263 
2264 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
2265 		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
2266 			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
2267 			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
2268 		if (bp->fw_cap & BNXT_FW_CAP_RX_RATE_PROFILE) {
2269 			req.rx_rate_profile_sel =
2270 				HWRM_RING_ALLOC_INPUT_RX_RATE_PROFILE_SEL_POLL_MODE;
2271 			enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RATE_PROFILE_VALID;
2272 		}
2273 		break;
2274 	default:
2275 		PMD_DRV_LOG_LINE(ERR, "hwrm alloc invalid ring type %d",
2276 			ring_type);
2277 		HWRM_UNLOCK();
2278 		return -EINVAL;
2279 	}
2280 	req.enables = rte_cpu_to_le_32(enables);
2281 
2282 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2283 
2284 	if (rc || resp->error_code) {
2285 		if (rc == 0 && resp->error_code)
2286 			rc = rte_le_to_cpu_16(resp->error_code);
2287 		switch (ring_type) {
2288 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
2289 			PMD_DRV_LOG_LINE(ERR,
2290 				"hwrm_ring_alloc cp failed. rc:%d", rc);
2291 			HWRM_UNLOCK();
2292 			return rc;
2293 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
2294 			PMD_DRV_LOG_LINE(ERR,
2295 				    "hwrm_ring_alloc rx failed. rc:%d", rc);
2296 			HWRM_UNLOCK();
2297 			return rc;
2298 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
2299 			PMD_DRV_LOG_LINE(ERR,
2300 				    "hwrm_ring_alloc rx agg failed. rc:%d",
2301 				    rc);
2302 			HWRM_UNLOCK();
2303 			return rc;
2304 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
2305 			PMD_DRV_LOG_LINE(ERR,
2306 				    "hwrm_ring_alloc tx failed. rc:%d", rc);
2307 			HWRM_UNLOCK();
2308 			return rc;
2309 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
2310 			PMD_DRV_LOG_LINE(ERR,
2311 				    "hwrm_ring_alloc nq failed. rc:%d", rc);
2312 			HWRM_UNLOCK();
2313 			return rc;
2314 		default:
2315 			PMD_DRV_LOG_LINE(ERR, "Invalid ring. rc:%d", rc);
2316 			HWRM_UNLOCK();
2317 			return rc;
2318 		}
2319 	}
2320 
2321 	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
2322 	HWRM_UNLOCK();
2323 	return rc;
2324 }
2325 
2326 int bnxt_hwrm_ring_free(struct bnxt *bp,
2327 			struct bnxt_ring *ring, uint32_t ring_type,
2328 			uint16_t cp_ring_id)
2329 {
2330 	int rc;
2331 	struct hwrm_ring_free_input req = {.req_type = 0 };
2332 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
2333 
2334 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
2335 		return -EINVAL;
2336 
2337 	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
2338 
2339 	req.ring_type = ring_type;
2340 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
2341 	req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
2342 
2343 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2344 	ring->fw_ring_id = INVALID_HW_RING_ID;
2345 
2346 	if (rc || resp->error_code) {
2347 		if (rc == 0 && resp->error_code)
2348 			rc = rte_le_to_cpu_16(resp->error_code);
2349 		HWRM_UNLOCK();
2350 
2351 		switch (ring_type) {
2352 		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
2353 			PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free cp failed. rc:%d",
2354 				rc);
2355 			return rc;
2356 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
2357 			PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free rx failed. rc:%d",
2358 				rc);
2359 			return rc;
2360 		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
2361 			PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free tx failed. rc:%d",
2362 				rc);
2363 			return rc;
2364 		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
2365 			PMD_DRV_LOG_LINE(ERR,
2366 				    "hwrm_ring_free nq failed. rc:%d", rc);
2367 			return rc;
2368 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
2369 			PMD_DRV_LOG_LINE(ERR,
2370 				    "hwrm_ring_free agg failed. rc:%d", rc);
2371 			return rc;
2372 		default:
2373 			PMD_DRV_LOG_LINE(ERR, "Invalid ring, rc:%d", rc);
2374 			return rc;
2375 		}
2376 	}
2377 	HWRM_UNLOCK();
2378 	return 0;
2379 }
2380 
2381 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
2382 {
2383 	int rc = 0;
2384 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
2385 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2386 
2387 	/* Don't attempt to re-create the ring group if it is already created */
2388 	if (bp->grp_info[idx].fw_grp_id != INVALID_HW_RING_ID)
2389 		return 0;
2390 
2391 	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
2392 
2393 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
2394 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
2395 	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
2396 	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
2397 
2398 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2399 
2400 	HWRM_CHECK_RESULT();
2401 
2402 	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
2403 
2404 	HWRM_UNLOCK();
2405 
2406 	return rc;
2407 }
2408 
2409 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
2410 {
2411 	int rc;
2412 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
2413 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
2414 
2415 	if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2416 		return 0;
2417 
2418 	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
2419 
2420 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
2421 
2422 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2423 
2424 	HWRM_CHECK_RESULT();
2425 	HWRM_UNLOCK();
2426 
2427 	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
2428 	return rc;
2429 }
2430 
2431 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2432 {
2433 	int rc = 0;
2434 	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
2435 	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2436 
2437 	if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
2438 		return rc;
2439 
2440 	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
2441 
2442 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
2443 
2444 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2445 
2446 	HWRM_CHECK_RESULT();
2447 	HWRM_UNLOCK();
2448 
2449 	return rc;
2450 }
2451 
2452 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2453 {
2454 	int rc;
2455 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
2456 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2457 
2458 	if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE)
2459 		return 0;
2460 
2461 	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2462 
2463 	req.stats_dma_length = rte_cpu_to_le_16(BNXT_HWRM_CTX_GET_SIZE(bp));
2464 
2465 	req.update_period_ms = rte_cpu_to_le_32(0);
2466 
2467 	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
2468 
2469 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2470 
2471 	HWRM_CHECK_RESULT();
2472 
2473 	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
2474 
2475 	HWRM_UNLOCK();
2476 
2477 	return rc;
2478 }
2479 
2480 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2481 {
2482 	int rc;
2483 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
2484 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
2485 
2486 	if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
2487 		return 0;
2488 
2489 	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
2490 
2491 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
2492 
2493 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2494 
2495 	HWRM_CHECK_RESULT();
2496 	HWRM_UNLOCK();
2497 
2498 	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2499 
2500 	return rc;
2501 }
2502 
2503 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2504 {
2505 	int rc = 0;
2506 	struct hwrm_vnic_alloc_input req = { 0 };
2507 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2508 
2509 	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
2510 	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
2511 
2512 	if (vnic->func_default)
2513 		req.flags =
2514 			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2515 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2516 
2517 	HWRM_CHECK_RESULT();
2518 
2519 	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
2520 	HWRM_UNLOCK();
2521 	PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
2522 	return rc;
2523 }
2524 
2525 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
2526 					struct bnxt_vnic_info *vnic,
2527 					struct bnxt_plcmodes_cfg *pmode)
2528 {
2529 	int rc = 0;
2530 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
2531 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2532 
2533 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
2534 
2535 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2536 
2537 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2538 
2539 	HWRM_CHECK_RESULT();
2540 
2541 	pmode->flags = rte_le_to_cpu_32(resp->flags);
2542 	/* dflt_vnic bit doesn't exist in the _cfg command */
2543 	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
2544 	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
2545 	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
2546 	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
2547 
2548 	HWRM_UNLOCK();
2549 
2550 	return rc;
2551 }
2552 
2553 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2554 				       struct bnxt_vnic_info *vnic,
2555 				       struct bnxt_plcmodes_cfg *pmode)
2556 {
2557 	int rc = 0;
2558 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2559 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2560 
2561 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2562 		PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
2563 		return rc;
2564 	}
2565 
2566 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2567 
2568 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2569 	req.flags = rte_cpu_to_le_32(pmode->flags);
2570 	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2571 	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2572 	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
2573 	req.enables = rte_cpu_to_le_32(
2574 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
2575 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
2576 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
2577 	);
2578 
2579 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2580 
2581 	HWRM_CHECK_RESULT();
2582 	HWRM_UNLOCK();
2583 
2584 	return rc;
2585 }
2586 
2587 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2588 {
2589 	int rc = 0;
2590 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
2591 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2592 	struct bnxt_plcmodes_cfg pmodes = { 0 };
2593 	uint32_t ctx_enable_flag = 0;
2594 	uint32_t enables = 0;
2595 
2596 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2597 		PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
2598 		return rc;
2599 	}
2600 
2601 	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
2602 	if (rc)
2603 		return rc;
2604 
2605 	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
2606 
2607 	if (BNXT_CHIP_P5_P7(bp)) {
2608 		int dflt_rxq = vnic->start_grp_id;
2609 		struct bnxt_rx_ring_info *rxr;
2610 		struct bnxt_cp_ring_info *cpr;
2611 		struct bnxt_rx_queue *rxq;
2612 		int i;
2613 
2614 		/*
2615 		 * The first active receive ring is used as the VNIC
2616 		 * default receive ring. If there are no active receive
2617 		 * rings (all corresponding receive queues are stopped),
2618 		 * the first receive ring is used.
2619 		 */
2620 		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
2621 			rxq = bp->eth_dev->data->rx_queues[i];
2622 			if (rxq->rx_started &&
2623 			    bnxt_vnic_queue_id_is_valid(vnic, i)) {
2624 				dflt_rxq = i;
2625 				break;
2626 			}
2627 		}
2628 
2629 		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
2630 		rxr = rxq->rx_ring;
2631 		cpr = rxq->cp_ring;
2632 
2633 		req.default_rx_ring_id =
2634 			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
2635 		req.default_cmpl_ring_id =
2636 			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
2637 		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
2638 			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
2639 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
2640 			enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
2641 			req.rx_csum_v2_mode =
2642 				HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
2643 		}
2644 		goto config_mru;
2645 	}
2646 
2647 	/* Only RSS support for now TBD: COS & LB */
2648 	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
2649 	if (vnic->lb_rule != 0xffff)
2650 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
2651 	if (vnic->cos_rule != 0xffff)
2652 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
2653 	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
2654 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
2655 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
2656 	}
2657 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2658 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
2659 		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
2660 	}
2661 
2662 	enables |= ctx_enable_flag;
2663 	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
2664 	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
2665 	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
2666 	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
2667 
2668 config_mru:
2669 	if (bnxt_compressed_rx_cqe_mode_enabled(bp)) {
2670 		req.l2_cqe_mode = HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_COMPRESSED;
2671 		enables |= HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE;
2672 		PMD_DRV_LOG_LINE(DEBUG, "Enabling compressed Rx CQE");
2673 	}
2674 
2675 	req.enables = rte_cpu_to_le_32(enables);
2676 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2677 	req.mru = rte_cpu_to_le_16(vnic->mru);
2678 	/* Configure default VNIC only once. */
2679 	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2680 		req.flags |=
2681 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2682 		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2683 	}
2684 	if (vnic->vlan_strip)
2685 		req.flags |=
2686 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2687 	if (vnic->bd_stall)
2688 		req.flags |=
2689 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2690 	if (vnic->rss_dflt_cr)
2691 		req.flags |= rte_cpu_to_le_32(
2692 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2693 
2694 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2695 
2696 	HWRM_CHECK_RESULT();
2697 	HWRM_UNLOCK();
2698 
2699 	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2700 
2701 	return rc;
2702 }
2703 
2704 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2705 		int16_t fw_vf_id)
2706 {
2707 	int rc = 0;
2708 	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2709 	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2710 
2711 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2712 		PMD_DRV_LOG_LINE(DEBUG, "VNIC QCFG ID %d", vnic->fw_vnic_id);
2713 		return rc;
2714 	}
2715 	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2716 
2717 	req.enables =
2718 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2719 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2720 	req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2721 
2722 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2723 
2724 	HWRM_CHECK_RESULT();
2725 
2726 	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2727 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2728 	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2729 	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2730 	vnic->mru = rte_le_to_cpu_16(resp->mru);
2731 	vnic->func_default = rte_le_to_cpu_32(
2732 			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2733 	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2734 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2735 	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2736 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2737 	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2738 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2739 
2740 	HWRM_UNLOCK();
2741 
2742 	return rc;
2743 }
2744 
2745 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2746 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2747 {
2748 	int rc = 0;
2749 	uint16_t ctx_id;
2750 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2751 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2752 						bp->hwrm_cmd_resp_addr;
2753 
2754 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2755 
2756 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2757 	HWRM_CHECK_RESULT();
2758 
2759 	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2760 	if (!BNXT_HAS_RING_GRPS(bp))
2761 		vnic->fw_grp_ids[ctx_idx] = ctx_id;
2762 	else if (ctx_idx == 0)
2763 		vnic->rss_rule = ctx_id;
2764 
2765 	HWRM_UNLOCK();
2766 
2767 	return rc;
2768 }
2769 
2770 static
2771 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2772 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2773 {
2774 	int rc = 0;
2775 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2776 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2777 						bp->hwrm_cmd_resp_addr;
2778 
2779 	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2780 		PMD_DRV_LOG_LINE(DEBUG, "VNIC RSS Rule %x", vnic->rss_rule);
2781 		return rc;
2782 	}
2783 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2784 
2785 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2786 
2787 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2788 
2789 	HWRM_CHECK_RESULT();
2790 	HWRM_UNLOCK();
2791 
2792 	return rc;
2793 }
2794 
2795 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2796 {
2797 	int rc = 0;
2798 
2799 	if (BNXT_CHIP_P5_P7(bp)) {
2800 		int j;
2801 
2802 		for (j = 0; j < vnic->num_lb_ctxts; j++) {
2803 			rc = _bnxt_hwrm_vnic_ctx_free(bp,
2804 						      vnic,
2805 						      vnic->fw_grp_ids[j]);
2806 			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2807 		}
2808 		vnic->num_lb_ctxts = 0;
2809 	} else {
2810 		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2811 		vnic->rss_rule = INVALID_HW_RING_ID;
2812 	}
2813 
2814 	return rc;
2815 }
2816 
2817 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2818 {
2819 	int rc = 0;
2820 	struct hwrm_vnic_free_input req = {.req_type = 0 };
2821 	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2822 
2823 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2824 		PMD_DRV_LOG_LINE(DEBUG, "VNIC FREE ID %x", vnic->fw_vnic_id);
2825 		return rc;
2826 	}
2827 
2828 	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2829 
2830 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2831 
2832 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2833 
2834 	HWRM_CHECK_RESULT();
2835 	HWRM_UNLOCK();
2836 
2837 	vnic->fw_vnic_id = INVALID_HW_RING_ID;
2838 	/* Configure default VNIC again if necessary. */
2839 	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2840 		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2841 
2842 	return rc;
2843 }
2844 
2845 static uint32_t bnxt_sanitize_rss_type(struct bnxt *bp, uint32_t types)
2846 {
2847 	uint32_t hwrm_type = types;
2848 
2849 	if (types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL &&
2850 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_IPV6_FLOW_LABEL_MODE))
2851 		hwrm_type &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL;
2852 
2853 	if (types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4 &&
2854 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_ESP_SPI4_CAP))
2855 		hwrm_type &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4;
2856 	if (types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6 &&
2857 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_ESP_SPI6_CAP))
2858 		hwrm_type &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6;
2859 
2860 	if (types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4 &&
2861 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_AH_SPI4_CAP))
2862 		hwrm_type &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4;
2863 
2864 	if (types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6 &&
2865 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_AH_SPI6_CAP))
2866 		hwrm_type &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6;
2867 
2868 	return hwrm_type;
2869 }
2870 
2871 #ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
2872 static int
2873 bnxt_hwrm_vnic_rss_qcfg_p5(struct bnxt *bp)
2874 {
2875 	struct hwrm_vnic_rss_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2876 	struct hwrm_vnic_rss_qcfg_input req = {0};
2877 	int rc;
2878 
2879 	HWRM_PREP(&req, HWRM_VNIC_RSS_QCFG, BNXT_USE_CHIMP_MB);
2880 	/* vnic_id and rss_ctx_idx must be set to INVALID to read the
2881 	 * global hash mode.
2882 	 */
2883 	req.vnic_id = rte_cpu_to_le_16(BNXT_DFLT_VNIC_ID_INVALID);
2884 	req.rss_ctx_idx = rte_cpu_to_le_16(BNXT_RSS_CTX_IDX_INVALID);
2885 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2886 				    BNXT_USE_CHIMP_MB);
2887 	HWRM_CHECK_RESULT();
2888 	HWRM_UNLOCK();
2889 	PMD_DRV_LOG_LINE(DEBUG, "RSS QCFG: Hash level %d", resp->hash_mode_flags);
2890 
2891 	return rc;
2892 }
2893 #endif
2894 
2895 static int
2896 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2897 {
2898 	int i;
2899 	int rc = 0;
2900 	int nr_ctxs = vnic->num_lb_ctxts;
2901 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2902 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2903 
2904 	for (i = 0; i < nr_ctxs; i++) {
2905 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2906 
2907 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2908 		req.hash_type = rte_cpu_to_le_32(bnxt_sanitize_rss_type(bp, vnic->hash_type));
2909 		/* Update req with vnic ring_select_mode for P7 */
2910 		if (BNXT_CHIP_P7(bp))
2911 			req.ring_select_mode = vnic->ring_select_mode;
2912 		/* When the vnic_id in the request field is a valid
2913 		 * one, the hash_mode_flags in the request field must
2914 		 * be set to DEFAULT. And any request to change the
2915 		 * default behavior must be done in a separate call
2916 		 * to HWRM_VNIC_RSS_CFG by exclusively setting hash
2917 		 * mode and vnic_id, rss_ctx_idx to INVALID.
2918 		 */
2919 		req.hash_mode_flags = BNXT_HASH_MODE_DEFAULT;
2920 
2921 		req.hash_key_tbl_addr =
2922 			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2923 
2924 		req.ring_grp_tbl_addr =
2925 			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2926 					 i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
2927 					 2 * sizeof(uint16_t));
2928 		req.ring_table_pair_index = i;
2929 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2930 
2931 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2932 					    BNXT_USE_CHIMP_MB);
2933 
2934 		HWRM_CHECK_RESULT();
2935 		HWRM_UNLOCK();
2936 		PMD_DRV_LOG_LINE(DEBUG, "RSS CFG: Hash level %d", req.hash_mode_flags);
2937 	}
2938 
2939 	return rc;
2940 }
2941 
2942 static int
2943 bnxt_hwrm_vnic_rss_cfg_hash_mode_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2944 {
2945 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2946 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2947 	int rc = 0;
2948 
2949 	/* The reason we are returning success here is that this
2950 	 * call is in the context of user/stack RSS configuration.
2951 	 * Even though OUTER RSS is not supported, the normal RSS
2952 	 * configuration should continue to work.
2953 	 */
2954 	if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
2955 	    (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)))
2956 		return 0;
2957 
2958 	/* TODO Revisit for Thor 2 */
2959 	/* if (BNXT_CHIP_P5_P7(bp))
2960 	 *	bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2961 	 */
2962 	/* Don't call RSS hash level configuration if the current
2963 	 * hash level is the same as the hash level that is requested.
2964 	 */
2965 	if (vnic->prev_hash_mode == vnic->hash_mode)
2966 		return 0;
2967 
2968 	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2969 
2970 	/* For FW, hash_mode == DEFAULT means that
2971 	 * the FW is capable of doing INNER & OUTER RSS as well.
2972 	 * DEFAULT doesn't mean that the FW is
2973 	 * going to change the hash_mode to INNER. However, for
2974 	 * the USER, DEFAULT means, change the hash mode to the
2975 	 * NIC's DEFAULT hash mode which is INNER.
2976 	 *
2977 	 * Hence, driver should make the translation of hash_mode
2978 	 * to INNERMOST when hash_mode from the dpdk stack is
2979 	 * DEFAULT.
2980 	 */
2981 	if (vnic->hash_mode == BNXT_HASH_MODE_DEFAULT)
2982 		req.hash_mode_flags = BNXT_HASH_MODE_INNERMOST;
2983 	else
2984 		req.hash_mode_flags = vnic->hash_mode;
2985 	req.vnic_id = rte_cpu_to_le_16(BNXT_DFLT_VNIC_ID_INVALID);
2986 	req.rss_ctx_idx = rte_cpu_to_le_16(BNXT_RSS_CTX_IDX_INVALID);
2987 
2988 	PMD_DRV_LOG_LINE(DEBUG, "RSS CFG: Hash level %d", req.hash_mode_flags);
2989 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2990 				    BNXT_USE_CHIMP_MB);
2991 
2992 	HWRM_CHECK_RESULT();
2993 	/* Store the programmed hash_mode in prev_hash_mode so that
2994 	 * it can checked against the next user requested hash mode.
2995 	 */
2996 	if (!rc)
2997 		vnic->prev_hash_mode = vnic->hash_mode;
2998 	HWRM_UNLOCK();
2999 	return rc;
3000 }
3001 
3002 static int
3003 bnxt_hwrm_vnic_rss_cfg_non_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3004 {
3005 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
3006 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3007 	int rc = 0;
3008 
3009 	if (vnic->num_lb_ctxts == 0)
3010 		return rc;
3011 
3012 	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
3013 
3014 	req.hash_type = rte_cpu_to_le_32(bnxt_sanitize_rss_type(bp, vnic->hash_type));
3015 	req.hash_mode_flags = vnic->hash_mode;
3016 
3017 	req.ring_grp_tbl_addr =
3018 	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
3019 	req.hash_key_tbl_addr =
3020 	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
3021 	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
3022 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
3023 
3024 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3025 
3026 	HWRM_CHECK_RESULT();
3027 	HWRM_UNLOCK();
3028 
3029 	return rc;
3030 }
3031 
3032 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
3033 			   struct bnxt_vnic_info *vnic)
3034 {
3035 	int rc = 0;
3036 
3037 	if (!vnic->rss_table)
3038 		return 0;
3039 
3040 	/* Handle all the non-thor skus rss here */
3041 	if (!BNXT_CHIP_P5_P7(bp))
3042 		return bnxt_hwrm_vnic_rss_cfg_non_p5(bp, vnic);
3043 
3044 	/* Handle Thor2 and Thor skus rss here */
3045 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
3046 
3047 	/* configure hash mode for Thor/Thor2 */
3048 	if (!rc)
3049 		return bnxt_hwrm_vnic_rss_cfg_hash_mode_p5(bp, vnic);
3050 
3051 	return rc;
3052 }
3053 
3054 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
3055 			struct bnxt_vnic_info *vnic)
3056 {
3057 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3058 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3059 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
3060 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
3061 	uint8_t rs = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
3062 	uint32_t flags, enables;
3063 	uint16_t size;
3064 	int rc = 0;
3065 
3066 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
3067 		PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
3068 		return rc;
3069 	}
3070 
3071 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
3072 	flags = HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT;
3073 	enables = HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID;
3074 
3075 	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
3076 	size -= RTE_PKTMBUF_HEADROOM;
3077 	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
3078 	req.jumbo_thresh = rte_cpu_to_le_16(size);
3079 
3080 	if (rs & vnic->hds_threshold) {
3081 		flags |=
3082 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
3083 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6;
3084 		req.hds_threshold = rte_cpu_to_le_16(vnic->hds_threshold);
3085 		enables |=
3086 		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID;
3087 	}
3088 
3089 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
3090 	req.flags = rte_cpu_to_le_32(flags);
3091 	req.enables = rte_cpu_to_le_32(enables);
3092 
3093 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3094 
3095 	HWRM_CHECK_RESULT();
3096 	HWRM_UNLOCK();
3097 
3098 	return rc;
3099 }
3100 
3101 #define BNXT_DFLT_TUNL_TPA_BMAP					\
3102 	(HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE |	\
3103 	 HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 |	\
3104 	 HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6)
3105 
3106 static void bnxt_vnic_update_tunl_tpa_bmap(struct bnxt *bp,
3107 					   struct hwrm_vnic_tpa_cfg_input *req)
3108 {
3109 	uint32_t tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
3110 
3111 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_VNIC_TUNNEL_TPA))
3112 		return;
3113 
3114 	if (bp->vxlan_port_cnt)
3115 		tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
3116 			HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE;
3117 
3118 	if (bp->geneve_port_cnt)
3119 		tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE;
3120 
3121 	req->enables |= rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
3122 	req->tnl_tpa_en_bitmap = rte_cpu_to_le_32(tunl_tpa_bmap);
3123 }
3124 
3125 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
3126 			struct bnxt_vnic_info *vnic, bool enable)
3127 {
3128 	int rc = 0;
3129 	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
3130 	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3131 
3132 	if (bnxt_compressed_rx_cqe_mode_enabled(bp)) {
3133 		/* Don't worry if disabling TPA */
3134 		if (!enable)
3135 			return 0;
3136 
3137 		/* Return an error if enabling TPA w/ compressed Rx CQE. */
3138 		PMD_DRV_LOG_LINE(ERR, "No HW support for LRO with compressed Rx");
3139 		return -ENOTSUP;
3140 	}
3141 
3142 	if ((BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp)) && !bp->max_tpa_v2) {
3143 		if (enable)
3144 			PMD_DRV_LOG_LINE(ERR, "No HW support for LRO");
3145 		return -ENOTSUP;
3146 	}
3147 
3148 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
3149 		PMD_DRV_LOG_LINE(DEBUG, "Invalid vNIC ID");
3150 		return 0;
3151 	}
3152 
3153 	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
3154 
3155 	if (enable) {
3156 		req.enables = rte_cpu_to_le_32(
3157 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
3158 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
3159 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
3160 		req.flags = rte_cpu_to_le_32(
3161 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
3162 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
3163 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
3164 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
3165 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
3166 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
3167 		req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
3168 		req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
3169 		req.min_agg_len = rte_cpu_to_le_32(512);
3170 
3171 		if (BNXT_CHIP_P5_P7(bp))
3172 			req.max_aggs = rte_cpu_to_le_16(bp->max_tpa_v2);
3173 
3174 		/* For tpa v2 handle as per spec mss and log2 units */
3175 		if (BNXT_CHIP_P7(bp)) {
3176 			uint32_t nsegs, n, segs = 0;
3177 			uint16_t mss = bp->eth_dev->data->mtu - 40;
3178 			size_t page_size = rte_mem_page_size();
3179 			uint32_t max_mbuf_frags =
3180 				BNXT_TPA_MAX_PAGES / (rte_mem_page_size() + 1);
3181 
3182 			/* Calculate the number of segs based on mss */
3183 			if (mss <= page_size) {
3184 				n = page_size / mss;
3185 				nsegs = (max_mbuf_frags - 1) * n;
3186 			} else {
3187 				n = mss / page_size;
3188 				if (mss & (page_size - 1))
3189 					n++;
3190 				nsegs = (max_mbuf_frags - n) / n;
3191 			}
3192 			segs = rte_log2_u32(nsegs);
3193 			req.max_agg_segs = rte_cpu_to_le_16(segs);
3194 		}
3195 		bnxt_vnic_update_tunl_tpa_bmap(bp, &req);
3196 	}
3197 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
3198 
3199 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3200 
3201 	HWRM_CHECK_RESULT();
3202 	HWRM_UNLOCK();
3203 
3204 	return rc;
3205 }
3206 
3207 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
3208 {
3209 	struct hwrm_func_cfg_input req = {0};
3210 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3211 	int rc;
3212 
3213 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3214 	req.enables = rte_cpu_to_le_32(
3215 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3216 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
3217 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3218 
3219 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3220 
3221 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3222 	HWRM_CHECK_RESULT();
3223 	HWRM_UNLOCK();
3224 
3225 	bp->pf->vf_info[vf].random_mac = false;
3226 
3227 	return rc;
3228 }
3229 
3230 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
3231 				  uint64_t *dropped)
3232 {
3233 	int rc = 0;
3234 	struct hwrm_func_qstats_input req = {.req_type = 0};
3235 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3236 
3237 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
3238 
3239 	req.fid = rte_cpu_to_le_16(fid);
3240 
3241 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3242 
3243 	HWRM_CHECK_RESULT();
3244 
3245 	if (dropped)
3246 		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
3247 
3248 	HWRM_UNLOCK();
3249 
3250 	return rc;
3251 }
3252 
3253 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
3254 			  struct rte_eth_stats *stats,
3255 			  struct hwrm_func_qstats_output *func_qstats)
3256 {
3257 	int rc = 0;
3258 	struct hwrm_func_qstats_input req = {.req_type = 0};
3259 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3260 
3261 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
3262 
3263 	req.fid = rte_cpu_to_le_16(fid);
3264 
3265 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3266 
3267 	HWRM_CHECK_RESULT();
3268 	if (func_qstats)
3269 		memcpy(func_qstats, resp,
3270 		       sizeof(struct hwrm_func_qstats_output));
3271 
3272 	if (!stats)
3273 		goto exit;
3274 
3275 	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3276 	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3277 	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3278 	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3279 	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3280 	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3281 
3282 	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3283 	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3284 	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3285 	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3286 	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3287 	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3288 
3289 	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
3290 	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
3291 	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
3292 
3293 exit:
3294 	HWRM_UNLOCK();
3295 
3296 	return rc;
3297 }
3298 
3299 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
3300 {
3301 	int rc = 0;
3302 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
3303 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3304 
3305 	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
3306 
3307 	req.fid = rte_cpu_to_le_16(fid);
3308 
3309 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3310 
3311 	HWRM_CHECK_RESULT();
3312 	HWRM_UNLOCK();
3313 
3314 	return rc;
3315 }
3316 
3317 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
3318 {
3319 	unsigned int i;
3320 	int rc = 0;
3321 
3322 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
3323 		struct bnxt_tx_queue *txq;
3324 		struct bnxt_rx_queue *rxq;
3325 		struct bnxt_cp_ring_info *cpr;
3326 
3327 		if (i >= bp->rx_cp_nr_rings) {
3328 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
3329 			cpr = txq->cp_ring;
3330 		} else {
3331 			rxq = bp->rx_queues[i];
3332 			cpr = rxq->cp_ring;
3333 		}
3334 
3335 		rc = bnxt_hwrm_stat_clear(bp, cpr);
3336 		if (rc)
3337 			return rc;
3338 	}
3339 	return 0;
3340 }
3341 
3342 static int
3343 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
3344 {
3345 	int rc;
3346 	unsigned int i;
3347 	struct bnxt_cp_ring_info *cpr;
3348 
3349 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3350 
3351 		cpr = bp->rx_queues[i]->cp_ring;
3352 		if (BNXT_HAS_RING_GRPS(bp))
3353 			bp->grp_info[i].fw_stats_ctx = -1;
3354 		if (cpr == NULL)
3355 			continue;
3356 		rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
3357 		if (rc)
3358 			return rc;
3359 	}
3360 
3361 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
3362 		cpr = bp->tx_queues[i]->cp_ring;
3363 		if (cpr == NULL)
3364 			continue;
3365 		rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
3366 		if (rc)
3367 			return rc;
3368 	}
3369 
3370 	return 0;
3371 }
3372 
3373 static int
3374 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
3375 {
3376 	uint16_t idx;
3377 	uint32_t rc = 0;
3378 
3379 	if (!BNXT_HAS_RING_GRPS(bp))
3380 		return 0;
3381 
3382 	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
3383 
3384 		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
3385 			continue;
3386 
3387 		rc = bnxt_hwrm_ring_grp_free(bp, idx);
3388 
3389 		if (rc)
3390 			return rc;
3391 	}
3392 	return rc;
3393 }
3394 
3395 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3396 {
3397 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
3398 
3399 	bnxt_hwrm_ring_free(bp,
3400 			    cp_ring,
3401 			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
3402 			    INVALID_HW_RING_ID);
3403 	memset(cpr->cp_desc_ring, 0,
3404 	       cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
3405 	cpr->cp_raw_cons = 0;
3406 }
3407 
3408 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3409 {
3410 	struct bnxt_ring *cp_ring;
3411 
3412 	cp_ring = cpr ? cpr->cp_ring_struct : NULL;
3413 
3414 	if (cp_ring == NULL || cpr->cp_desc_ring == NULL)
3415 		return;
3416 
3417 	bnxt_hwrm_ring_free(bp,
3418 			    cp_ring,
3419 			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
3420 			    INVALID_HW_RING_ID);
3421 	memset(cpr->cp_desc_ring, 0,
3422 	       cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
3423 	cpr->cp_raw_cons = 0;
3424 }
3425 
3426 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
3427 {
3428 	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
3429 	struct bnxt_rx_ring_info *rxr = rxq ? rxq->rx_ring : NULL;
3430 	struct bnxt_ring *ring = rxr ? rxr->rx_ring_struct : NULL;
3431 	struct bnxt_cp_ring_info *cpr = rxq ? rxq->cp_ring : NULL;
3432 
3433 	if (BNXT_HAS_RING_GRPS(bp))
3434 		bnxt_hwrm_ring_grp_free(bp, queue_index);
3435 
3436 	if (ring != NULL && cpr != NULL)
3437 		bnxt_hwrm_ring_free(bp, ring,
3438 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
3439 				    cpr->cp_ring_struct->fw_ring_id);
3440 	if (BNXT_HAS_RING_GRPS(bp))
3441 		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
3442 
3443 	/* Check agg ring struct explicitly.
3444 	 * bnxt_need_agg_ring() returns the current state of offload flags,
3445 	 * but we may have to deal with agg ring struct before the offload
3446 	 * flags are updated.
3447 	 */
3448 	if (!bnxt_need_agg_ring(bp->eth_dev) ||
3449 	    (rxr && rxr->ag_ring_struct == NULL))
3450 		goto no_agg;
3451 
3452 	ring = rxr ? rxr->ag_ring_struct : NULL;
3453 	if (ring != NULL && cpr != NULL) {
3454 		bnxt_hwrm_ring_free(bp, ring,
3455 				    BNXT_CHIP_P5_P7(bp) ?
3456 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
3457 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
3458 				    cpr->cp_ring_struct->fw_ring_id);
3459 	}
3460 	if (BNXT_HAS_RING_GRPS(bp))
3461 		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
3462 
3463 no_agg:
3464 	if (cpr != NULL) {
3465 		bnxt_hwrm_stat_ctx_free(bp, cpr);
3466 		bnxt_free_cp_ring(bp, cpr);
3467 	}
3468 
3469 	if (BNXT_HAS_RING_GRPS(bp))
3470 		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
3471 }
3472 
3473 int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index)
3474 {
3475 	int rc;
3476 	struct hwrm_ring_reset_input req = {.req_type = 0 };
3477 	struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr;
3478 
3479 	HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB);
3480 
3481 	req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP;
3482 	req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id);
3483 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3484 
3485 	HWRM_CHECK_RESULT();
3486 
3487 	HWRM_UNLOCK();
3488 
3489 	return rc;
3490 }
3491 
3492 static int
3493 bnxt_free_all_hwrm_rings(struct bnxt *bp)
3494 {
3495 	unsigned int i;
3496 
3497 	for (i = 0; i < bp->tx_cp_nr_rings; i++)
3498 		bnxt_free_hwrm_tx_ring(bp, i);
3499 
3500 	for (i = 0; i < bp->rx_cp_nr_rings; i++)
3501 		bnxt_free_hwrm_rx_ring(bp, i);
3502 
3503 	return 0;
3504 }
3505 
3506 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
3507 {
3508 	uint16_t i;
3509 	uint32_t rc = 0;
3510 
3511 	if (!BNXT_HAS_RING_GRPS(bp))
3512 		return 0;
3513 
3514 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3515 		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
3516 		if (rc)
3517 			return rc;
3518 	}
3519 	return rc;
3520 }
3521 
3522 /*
3523  * HWRM utility functions
3524  */
3525 
3526 void bnxt_free_hwrm_resources(struct bnxt *bp)
3527 {
3528 	/* Release memzone */
3529 	rte_free(bp->hwrm_cmd_resp_addr);
3530 	rte_free(bp->hwrm_short_cmd_req_addr);
3531 	bp->hwrm_cmd_resp_addr = NULL;
3532 	bp->hwrm_short_cmd_req_addr = NULL;
3533 	bp->hwrm_cmd_resp_dma_addr = 0;
3534 	bp->hwrm_short_cmd_req_dma_addr = 0;
3535 }
3536 
3537 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3538 {
3539 	struct rte_pci_device *pdev = bp->pdev;
3540 	char type[RTE_MEMZONE_NAMESIZE];
3541 
3542 	sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
3543 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
3544 	bp->max_resp_len = BNXT_PAGE_SIZE;
3545 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
3546 	if (bp->hwrm_cmd_resp_addr == NULL)
3547 		return -ENOMEM;
3548 	bp->hwrm_cmd_resp_dma_addr =
3549 		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
3550 	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
3551 		PMD_DRV_LOG_LINE(ERR,
3552 			"unable to map response address to physical memory");
3553 		return -ENOMEM;
3554 	}
3555 	rte_spinlock_init(&bp->hwrm_lock);
3556 
3557 	return 0;
3558 }
3559 
3560 int
3561 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3562 {
3563 	int rc = 0;
3564 
3565 	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
3566 		rc = bnxt_hwrm_clear_em_filter(bp, filter);
3567 		if (rc)
3568 			return rc;
3569 	} else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
3570 		rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
3571 		if (rc)
3572 			return rc;
3573 	}
3574 
3575 	rc = bnxt_hwrm_clear_l2_filter(bp, filter);
3576 	return rc;
3577 }
3578 
3579 static int
3580 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3581 {
3582 	struct bnxt_filter_info *filter;
3583 	int rc = 0;
3584 
3585 	STAILQ_FOREACH(filter, &vnic->filter, next) {
3586 		rc = bnxt_clear_one_vnic_filter(bp, filter);
3587 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
3588 		bnxt_free_filter(bp, filter);
3589 	}
3590 	return rc;
3591 }
3592 
3593 static int
3594 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3595 {
3596 	struct bnxt_filter_info *filter;
3597 	struct rte_flow *flow;
3598 	int rc = 0;
3599 
3600 	while (!STAILQ_EMPTY(&vnic->flow_list)) {
3601 		flow = STAILQ_FIRST(&vnic->flow_list);
3602 		filter = flow->filter;
3603 		PMD_DRV_LOG_LINE(DEBUG, "filter type %d", filter->filter_type);
3604 		rc = bnxt_clear_one_vnic_filter(bp, filter);
3605 
3606 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
3607 		rte_free(flow);
3608 	}
3609 	return rc;
3610 }
3611 
3612 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3613 {
3614 	struct bnxt_filter_info *filter;
3615 	int rc = 0;
3616 
3617 	STAILQ_FOREACH(filter, &vnic->filter, next) {
3618 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
3619 			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
3620 						     filter);
3621 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
3622 			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
3623 							 filter);
3624 		else
3625 			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
3626 						     filter);
3627 		if (rc)
3628 			break;
3629 	}
3630 	return rc;
3631 }
3632 
3633 static void
3634 bnxt_free_tunnel_ports(struct bnxt *bp)
3635 {
3636 	if (bp->vxlan_port_cnt)
3637 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
3638 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
3639 
3640 	if (bp->geneve_port_cnt)
3641 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
3642 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
3643 
3644 	if (bp->ecpri_port_cnt)
3645 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->ecpri_fw_dst_port_id,
3646 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI);
3647 
3648 	if (bp->l2_etype_tunnel_cnt)
3649 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->l2_etype_tunnel_id,
3650 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE);
3651 }
3652 
3653 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
3654 {
3655 	int i;
3656 
3657 	if (bp->vnic_info == NULL)
3658 		return;
3659 
3660 	/*
3661 	 * Cleanup VNICs in reverse order, to make sure the L2 filter
3662 	 * from vnic0 is last to be cleaned up.
3663 	 */
3664 	for (i = bp->max_vnics - 1; i >= 0; i--) {
3665 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3666 
3667 		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3668 			continue;
3669 
3670 		if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
3671 			bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
3672 		bnxt_clear_hwrm_vnic_flows(bp, vnic);
3673 
3674 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
3675 
3676 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
3677 
3678 		bnxt_hwrm_vnic_free(bp, vnic);
3679 
3680 		bnxt_hwrm_vnic_ctx_free(bp, vnic);
3681 
3682 		rte_free(vnic->fw_grp_ids);
3683 		vnic->fw_grp_ids = NULL;
3684 		if (vnic->ref_cnt && !vnic->rx_queue_cnt)
3685 			vnic->ref_cnt--;
3686 	}
3687 	/* Ring resources */
3688 	bnxt_free_all_hwrm_rings(bp);
3689 	bnxt_free_all_hwrm_ring_grps(bp);
3690 	bnxt_free_all_hwrm_stat_ctxs(bp);
3691 	bnxt_free_tunnel_ports(bp);
3692 }
3693 
3694 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
3695 {
3696 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
3697 
3698 	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
3699 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
3700 
3701 	switch (conf_link_speed) {
3702 	case RTE_ETH_LINK_SPEED_10M_HD:
3703 	case RTE_ETH_LINK_SPEED_100M_HD:
3704 		/* FALLTHROUGH */
3705 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
3706 	}
3707 	return hw_link_duplex;
3708 }
3709 
3710 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
3711 {
3712 	return !conf_link;
3713 }
3714 
3715 uint16_t bnxt_parse_eth_link_speed_v2(struct bnxt *bp)
3716 {
3717 	/* get bitmap value based on speed */
3718 	return ((struct link_speeds2_tbl *)
3719 		bnxt_get_rte_hwrm_speeds2_entry(bp))->force_val;
3720 }
3721 
3722 static uint16_t bnxt_parse_eth_link_speed(struct bnxt *bp, uint32_t conf_link_speed,
3723 					  struct bnxt_link_info *link_info)
3724 {
3725 	uint16_t support_pam4_speeds = link_info->support_pam4_speeds;
3726 	uint16_t support_speeds = link_info->support_speeds;
3727 	uint16_t eth_link_speed = 0;
3728 
3729 	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
3730 		return RTE_ETH_LINK_SPEED_AUTONEG;
3731 
3732 	/* Handle P7 chips saperately. It got enhanced phy attribs to choose from */
3733 	if (BNXT_LINK_SPEEDS_V2(bp))
3734 		return bnxt_parse_eth_link_speed_v2(bp);
3735 
3736 	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
3737 	case RTE_ETH_LINK_SPEED_100M:
3738 	case RTE_ETH_LINK_SPEED_100M_HD:
3739 		/* FALLTHROUGH */
3740 		eth_link_speed =
3741 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
3742 		break;
3743 	case RTE_ETH_LINK_SPEED_1G:
3744 		eth_link_speed =
3745 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
3746 		break;
3747 	case RTE_ETH_LINK_SPEED_2_5G:
3748 		eth_link_speed =
3749 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
3750 		break;
3751 	case RTE_ETH_LINK_SPEED_10G:
3752 		eth_link_speed =
3753 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
3754 		break;
3755 	case RTE_ETH_LINK_SPEED_20G:
3756 		eth_link_speed =
3757 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
3758 		break;
3759 	case RTE_ETH_LINK_SPEED_25G:
3760 		eth_link_speed =
3761 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
3762 		link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
3763 		break;
3764 	case RTE_ETH_LINK_SPEED_40G:
3765 		eth_link_speed =
3766 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
3767 		break;
3768 	case RTE_ETH_LINK_SPEED_50G:
3769 		if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
3770 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
3771 			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
3772 		} else if (support_pam4_speeds &
3773 			   HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
3774 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
3775 			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
3776 		}
3777 		break;
3778 	case RTE_ETH_LINK_SPEED_100G:
3779 		if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
3780 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
3781 			link_info->link_signal_mode = BNXT_SIG_MODE_NRZ;
3782 		} else if (support_pam4_speeds &
3783 			   HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
3784 			eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
3785 			link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
3786 		}
3787 		break;
3788 	case RTE_ETH_LINK_SPEED_200G:
3789 		eth_link_speed =
3790 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3791 		link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
3792 		break;
3793 	default:
3794 		PMD_DRV_LOG_LINE(ERR,
3795 			"Unsupported link speed %d; default to AUTO",
3796 			conf_link_speed);
3797 		break;
3798 	}
3799 	return eth_link_speed;
3800 }
3801 
3802 #define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
3803 		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
3804 		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
3805 		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
3806 		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
3807 #define BNXT_SUPPORTED_SPEEDS2 ((BNXT_SUPPORTED_SPEEDS | RTE_ETH_LINK_SPEED_400G) & \
3808 		~(RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
3809 		  RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_20G))
3810 
3811 static int bnxt_validate_link_speed(struct bnxt *bp)
3812 {
3813 	uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
3814 	uint16_t port_id = bp->eth_dev->data->port_id;
3815 	uint32_t link_speed_capa;
3816 	uint32_t one_speed;
3817 
3818 	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
3819 		return 0;
3820 
3821 	link_speed_capa = bnxt_get_speed_capabilities(bp);
3822 
3823 	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
3824 		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
3825 
3826 		if (one_speed & (one_speed - 1)) {
3827 			PMD_DRV_LOG_LINE(ERR,
3828 				"Invalid advertised speeds (%u) for port %u",
3829 				link_speed, port_id);
3830 			return -EINVAL;
3831 		}
3832 		if ((one_speed & link_speed_capa) != one_speed) {
3833 			PMD_DRV_LOG_LINE(ERR,
3834 				"Unsupported advertised speed (%u) for port %u",
3835 				link_speed, port_id);
3836 			return -EINVAL;
3837 		}
3838 	} else {
3839 		if (!(link_speed & link_speed_capa)) {
3840 			PMD_DRV_LOG_LINE(ERR,
3841 				"Unsupported advertised speeds (%u) for port %u",
3842 				link_speed, port_id);
3843 			return -EINVAL;
3844 		}
3845 	}
3846 	return 0;
3847 }
3848 
3849 static uint16_t
3850 bnxt_parse_eth_link_speed_mask_v2(struct bnxt *bp, uint32_t link_speed)
3851 {
3852 	uint16_t ret = 0;
3853 
3854 	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
3855 		return bp->link_info->supported_speeds2_auto_mode;
3856 
3857 	return ret;
3858 }
3859 
3860 static uint16_t
3861 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
3862 {
3863 	uint16_t ret = 0;
3864 
3865 	if (BNXT_LINK_SPEEDS_V2(bp))
3866 		return bnxt_parse_eth_link_speed_mask_v2(bp, link_speed);
3867 
3868 	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
3869 		if (bp->link_info->support_speeds)
3870 			return bp->link_info->support_speeds;
3871 		link_speed = BNXT_SUPPORTED_SPEEDS;
3872 	}
3873 
3874 	if (link_speed & RTE_ETH_LINK_SPEED_100M)
3875 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3876 	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
3877 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3878 	if (link_speed & RTE_ETH_LINK_SPEED_1G)
3879 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3880 	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
3881 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
3882 	if (link_speed & RTE_ETH_LINK_SPEED_10G)
3883 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3884 	if (link_speed & RTE_ETH_LINK_SPEED_20G)
3885 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
3886 	if (link_speed & RTE_ETH_LINK_SPEED_25G)
3887 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
3888 	if (link_speed & RTE_ETH_LINK_SPEED_40G)
3889 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
3890 	if (link_speed & RTE_ETH_LINK_SPEED_50G)
3891 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
3892 	if (link_speed & RTE_ETH_LINK_SPEED_100G)
3893 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
3894 	if (link_speed & RTE_ETH_LINK_SPEED_200G)
3895 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3896 	return ret;
3897 }
3898 
3899 static uint32_t bnxt_parse_hw_link_speed_v2(uint16_t hw_link_speed)
3900 {
3901 	return ((struct link_speeds2_tbl *)
3902 		bnxt_get_hwrm_to_rte_speeds2_entry(hw_link_speed))->rte_speed_num;
3903 }
3904 
3905 static uint32_t bnxt_parse_hw_link_speed(struct bnxt *bp, uint16_t hw_link_speed)
3906 {
3907 	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
3908 
3909 	/* query fixed speed2 table if not autoneg */
3910 	if (BNXT_LINK_SPEEDS_V2(bp) && !bp->link_info->auto_mode)
3911 		return bnxt_parse_hw_link_speed_v2(hw_link_speed);
3912 
3913 	/* for P7 and earlier nics link_speed carries AN'd speed */
3914 	switch (hw_link_speed) {
3915 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3916 		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
3917 		break;
3918 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3919 		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
3920 		break;
3921 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3922 		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
3923 		break;
3924 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3925 		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
3926 		break;
3927 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3928 		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
3929 		break;
3930 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3931 		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
3932 		break;
3933 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3934 		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
3935 		break;
3936 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3937 		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
3938 		break;
3939 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3940 		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
3941 		break;
3942 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3943 		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
3944 		break;
3945 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
3946 		eth_link_speed = RTE_ETH_SPEED_NUM_400G;
3947 		break;
3948 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3949 	default:
3950 		PMD_DRV_LOG_LINE(ERR, "HWRM link speed %d not defined",
3951 			hw_link_speed);
3952 		break;
3953 	}
3954 	return eth_link_speed;
3955 }
3956 
3957 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3958 {
3959 	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3960 
3961 	switch (hw_link_duplex) {
3962 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3963 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3964 		/* FALLTHROUGH */
3965 		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3966 		break;
3967 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3968 		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
3969 		break;
3970 	default:
3971 		PMD_DRV_LOG_LINE(ERR, "HWRM link duplex %d not defined",
3972 			hw_link_duplex);
3973 		break;
3974 	}
3975 	return eth_link_duplex;
3976 }
3977 
3978 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3979 {
3980 	int rc = 0;
3981 	struct bnxt_link_info *link_info = bp->link_info;
3982 
3983 	rc = bnxt_hwrm_port_phy_qcaps(bp);
3984 	if (rc)
3985 		PMD_DRV_LOG_LINE(ERR, "Get link config failed with rc %d", rc);
3986 
3987 	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3988 	if (rc) {
3989 		PMD_DRV_LOG_LINE(ERR, "Get link config failed with rc %d", rc);
3990 		goto exit;
3991 	}
3992 
3993 	if (link_info->link_speed)
3994 		link->link_speed = bnxt_parse_hw_link_speed(bp, link_info->link_speed);
3995 	else
3996 		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
3997 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3998 	link->link_status = link_info->link_up;
3999 	link->link_autoneg = link_info->auto_mode ==
4000 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
4001 		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
4002 exit:
4003 	return rc;
4004 }
4005 
4006 static int bnxt_hwrm_port_phy_cfg_v2(struct bnxt *bp, struct bnxt_link_info *conf)
4007 {
4008 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4009 	struct hwrm_port_phy_cfg_input req = {0};
4010 	uint32_t enables = 0;
4011 	int rc = 0;
4012 
4013 	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
4014 
4015 	if (!conf->link_up) {
4016 		req.flags =
4017 		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
4018 		PMD_DRV_LOG_LINE(ERR, "Force Link Down");
4019 		goto link_down;
4020 	}
4021 
4022 	/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
4023 	if (bp->link_info->auto_mode && conf->link_speed) {
4024 		req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
4025 		PMD_DRV_LOG_LINE(DEBUG, "Disabling AutoNeg");
4026 	}
4027 	req.flags = rte_cpu_to_le_32(conf->phy_flags);
4028 	if (!conf->link_speed) {
4029 		/* No speeds specified. Enable AutoNeg - all speeds */
4030 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEEDS2_MASK;
4031 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
4032 		req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
4033 		req.auto_link_speeds2_mask =
4034 			rte_cpu_to_le_16(bp->link_info->supported_speeds2_auto_mode);
4035 	} else {
4036 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2;
4037 		req.force_link_speeds2 = rte_cpu_to_le_16(conf->link_speed);
4038 		PMD_DRV_LOG_LINE(INFO, "Force speed %d", conf->link_speed);
4039 	}
4040 
4041 	/* Fill rest of the req message */
4042 	req.auto_duplex = conf->duplex;
4043 	if (req.auto_mode != HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK)
4044 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
4045 	req.auto_pause = conf->auto_pause;
4046 	req.force_pause = conf->force_pause;
4047 	if (req.auto_pause)
4048 		req.force_pause = 0;
4049 	/* Set force_pause if there is no auto or if there is a force */
4050 	if (req.auto_pause && !req.force_pause)
4051 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
4052 	else
4053 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
4054 	req.enables = rte_cpu_to_le_32(enables);
4055 
4056 link_down:
4057 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4058 
4059 	HWRM_CHECK_RESULT();
4060 	HWRM_UNLOCK();
4061 	return rc;
4062 }
4063 
4064 static int bnxt_set_hwrm_link_config_v2(struct bnxt *bp, bool link_up)
4065 {
4066 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
4067 	struct bnxt_link_info link_req;
4068 	uint16_t speed, autoneg;
4069 	int rc = 0;
4070 
4071 	memset(&link_req, 0, sizeof(link_req));
4072 	link_req.link_up = link_up;
4073 	if (!link_up)
4074 		goto port_phy_cfg;
4075 
4076 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
4077 	speed = bnxt_parse_eth_link_speed(bp, dev_conf->link_speeds,
4078 					  bp->link_info);
4079 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
4080 	if (autoneg == 1) {
4081 		link_req.phy_flags |=
4082 			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
4083 		link_req.cfg_auto_link_speeds2_mask =
4084 			bnxt_parse_eth_link_speed_mask(bp, dev_conf->link_speeds);
4085 	} else {
4086 		if (bp->link_info->phy_type ==
4087 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
4088 		    bp->link_info->phy_type ==
4089 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
4090 		    bp->link_info->media_type ==
4091 		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
4092 			PMD_DRV_LOG_LINE(ERR, "10GBase-T devices must autoneg");
4093 			return -EINVAL;
4094 		}
4095 
4096 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
4097 		/* If user wants a particular speed try that first. */
4098 		link_req.link_speed = speed;
4099 	}
4100 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
4101 	link_req.auto_pause = bp->link_info->auto_pause;
4102 	link_req.force_pause = bp->link_info->force_pause;
4103 
4104 port_phy_cfg:
4105 	rc = bnxt_hwrm_port_phy_cfg_v2(bp, &link_req);
4106 	if (rc)
4107 		PMD_DRV_LOG_LINE(ERR, "Set link config failed with rc %d", rc);
4108 
4109 	return rc;
4110 }
4111 
4112 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
4113 {
4114 	int rc = 0;
4115 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
4116 	struct bnxt_link_info link_req;
4117 	uint16_t speed, autoneg;
4118 
4119 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
4120 		return 0;
4121 
4122 	rc = bnxt_validate_link_speed(bp);
4123 	if (rc)
4124 		goto error;
4125 
4126 	if (BNXT_LINK_SPEEDS_V2(bp))
4127 		return bnxt_set_hwrm_link_config_v2(bp, link_up);
4128 
4129 	memset(&link_req, 0, sizeof(link_req));
4130 	link_req.link_up = link_up;
4131 	if (!link_up)
4132 		goto port_phy_cfg;
4133 
4134 	/* Get user requested autoneg setting */
4135 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
4136 	if (BNXT_CHIP_P5_P7(bp) &&
4137 	    dev_conf->link_speeds & RTE_ETH_LINK_SPEED_40G) {
4138 		/* 40G is not supported as part of media auto detect.
4139 		 * The speed should be forced and autoneg disabled
4140 		 * to configure 40G speed.
4141 		 */
4142 		PMD_DRV_LOG_LINE(INFO, "Disabling autoneg for 40G");
4143 		autoneg = 0;
4144 	}
4145 
4146 	/* Override based on current Autoneg setting in PHY for 200G */
4147 	if (autoneg == 1 && BNXT_CHIP_P5(bp) && bp->link_info->auto_mode == 0 &&
4148 	    bp->link_info->force_pam4_link_speed ==
4149 	    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB) {
4150 		autoneg = 0;
4151 		PMD_DRV_LOG_LINE(DEBUG, "Disabling autoneg for 200G");
4152 	}
4153 
4154 	speed = bnxt_parse_eth_link_speed(bp, dev_conf->link_speeds,
4155 					  bp->link_info);
4156 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
4157 	/* Autoneg can be done only when the FW allows. */
4158 	if (autoneg == 1 &&
4159 	    (bp->link_info->support_auto_speeds || bp->link_info->support_pam4_auto_speeds)) {
4160 		link_req.phy_flags |=
4161 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
4162 		link_req.auto_link_speed_mask =
4163 			bnxt_parse_eth_link_speed_mask(bp,
4164 						       dev_conf->link_speeds);
4165 		link_req.auto_pam4_link_speed_mask =
4166 			bp->link_info->auto_pam4_link_speed_mask;
4167 	} else {
4168 		if (bp->link_info->phy_type ==
4169 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
4170 		    bp->link_info->phy_type ==
4171 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
4172 		    bp->link_info->media_type ==
4173 		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
4174 			PMD_DRV_LOG_LINE(ERR, "10GBase-T devices must autoneg");
4175 			return -EINVAL;
4176 		}
4177 
4178 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
4179 		/* If user wants a particular speed try that first. */
4180 		if (speed)
4181 			link_req.link_speed = speed;
4182 		else if (bp->link_info->force_pam4_link_speed)
4183 			link_req.link_speed =
4184 				bp->link_info->force_pam4_link_speed;
4185 		else if (bp->link_info->force_link_speed)
4186 			link_req.link_speed = bp->link_info->force_link_speed;
4187 		else if (bp->link_info->auto_pam4_link_speed_mask)
4188 			link_req.link_speed =
4189 				bp->link_info->auto_pam4_link_speed_mask;
4190 		else if (bp->link_info->support_pam4_speeds)
4191 			link_req.link_speed =
4192 				bp->link_info->support_pam4_speeds;
4193 		else
4194 			link_req.link_speed = bp->link_info->auto_link_speed;
4195 		/* Auto PAM4 link speed is zero, but auto_link_speed is not
4196 		 * zero. Use the auto_link_speed.
4197 		 */
4198 		if (bp->link_info->auto_link_speed != 0 &&
4199 		    bp->link_info->auto_pam4_link_speed_mask == 0)
4200 			link_req.link_speed = bp->link_info->auto_link_speed;
4201 	}
4202 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
4203 	link_req.auto_pause = bp->link_info->auto_pause;
4204 	link_req.force_pause = bp->link_info->force_pause;
4205 
4206 port_phy_cfg:
4207 	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
4208 	if (rc) {
4209 		PMD_DRV_LOG_LINE(ERR,
4210 			"Set link config failed with rc %d", rc);
4211 	}
4212 
4213 error:
4214 	return rc;
4215 }
4216 
4217 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
4218 {
4219 	struct hwrm_func_qcfg_input req = {0};
4220 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4221 	uint16_t flags;
4222 	int rc = 0;
4223 	bp->func_svif = BNXT_SVIF_INVALID;
4224 	uint16_t svif_info;
4225 
4226 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4227 	req.fid = rte_cpu_to_le_16(0xffff);
4228 
4229 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4230 
4231 	HWRM_CHECK_RESULT();
4232 
4233 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
4234 
4235 	svif_info = rte_le_to_cpu_16(resp->svif_info);
4236 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
4237 		bp->func_svif =	svif_info &
4238 				     HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
4239 
4240 	flags = rte_le_to_cpu_16(resp->flags);
4241 	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
4242 		bp->flags |= BNXT_FLAG_MULTI_HOST;
4243 
4244 	if (BNXT_VF(bp) &&
4245 	    !BNXT_VF_IS_TRUSTED(bp) &&
4246 	    (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
4247 		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
4248 		PMD_DRV_LOG_LINE(INFO, "Trusted VF cap enabled");
4249 	} else if (BNXT_VF(bp) &&
4250 		   BNXT_VF_IS_TRUSTED(bp) &&
4251 		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
4252 		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
4253 		PMD_DRV_LOG_LINE(INFO, "Trusted VF cap disabled");
4254 	}
4255 
4256 	if (mtu)
4257 		*mtu = rte_le_to_cpu_16(resp->admin_mtu);
4258 
4259 	switch (resp->port_partition_type) {
4260 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
4261 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
4262 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
4263 		/* FALLTHROUGH */
4264 		bp->flags |= BNXT_FLAG_NPAR_PF;
4265 		break;
4266 	default:
4267 		bp->flags &= ~BNXT_FLAG_NPAR_PF;
4268 		break;
4269 	}
4270 
4271 	bp->legacy_db_size =
4272 		rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
4273 
4274 	HWRM_UNLOCK();
4275 
4276 	return rc;
4277 }
4278 
4279 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
4280 {
4281 	struct hwrm_func_qcfg_input req = {0};
4282 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4283 	uint16_t flags;
4284 	int rc;
4285 
4286 	if (!BNXT_VF_IS_TRUSTED(bp))
4287 		return 0;
4288 
4289 	if (!bp->parent)
4290 		return -EINVAL;
4291 
4292 	bp->parent->fid = BNXT_PF_FID_INVALID;
4293 
4294 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4295 
4296 	req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
4297 
4298 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4299 
4300 	HWRM_CHECK_RESULT_SILENT();
4301 
4302 	memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
4303 	bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
4304 	bp->parent->fid = rte_le_to_cpu_16(resp->fid);
4305 	bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
4306 
4307 	flags = rte_le_to_cpu_16(resp->flags);
4308 
4309 	/* check for the mulit-host support */
4310 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST) {
4311 		bp->flags |= BNXT_FLAG_MULTI_HOST;
4312 		bp->multi_host_pf_pci_id = resp->pci_id;
4313 		PMD_DRV_LOG_LINE(INFO, "Mult-Host system Parent PCI-ID: 0x%x", resp->pci_id);
4314 	}
4315 
4316 	/* check for the multi-root support */
4317 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT) {
4318 		bp->flags2 |= BNXT_FLAGS2_MULTIROOT_EN;
4319 		PMD_DRV_LOG_LINE(DEBUG, "PF enabled with multi root capability");
4320 	}
4321 
4322 	HWRM_UNLOCK();
4323 
4324 	return 0;
4325 }
4326 
4327 static int bnxt_hwrm_set_tpa(struct bnxt *bp)
4328 {
4329 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
4330 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
4331 	bool tpa_flags = 0;
4332 	int rc, i;
4333 
4334 	tpa_flags = (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?  true : false;
4335 	for (i = 0; i < bp->max_vnics; i++) {
4336 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4337 
4338 		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4339 			continue;
4340 
4341 		rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, tpa_flags);
4342 		if (rc)
4343 			return rc;
4344 	}
4345 	return 0;
4346 }
4347 
4348 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
4349 				 uint16_t *vnic_id, uint16_t *svif)
4350 {
4351 	struct hwrm_func_qcfg_input req = {0};
4352 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4353 	uint16_t svif_info;
4354 	int rc = 0;
4355 
4356 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4357 	req.fid = rte_cpu_to_le_16(fid);
4358 
4359 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4360 
4361 	HWRM_CHECK_RESULT();
4362 
4363 	svif_info = rte_le_to_cpu_16(resp->svif_info);
4364 	if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)) {
4365 		*svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
4366 		/* When the VF corresponding to the VFR is down at the time of
4367 		 * VFR conduit creation, the VFR rule will be programmed with
4368 		 * invalid vnic id because FW will return default vnic id as
4369 		 * INVALID when queried through FUNC_QCFG. As a result, when
4370 		 * the VF is brought up, VF won't receive packets because
4371 		 * INVALID vnic id is already programmed.
4372 		 *
4373 		 * Hence, use svif value as vnic id during VFR conduit creation
4374 		 * as both svif and default vnic id values are same and will
4375 		 * never change.
4376 		 */
4377 		if (vnic_id)
4378 			*vnic_id = *svif;
4379 	} else {
4380 		rc = -EINVAL;
4381 	}
4382 
4383 	HWRM_UNLOCK();
4384 
4385 	bnxt_hwrm_set_tpa(bp);
4386 
4387 	return rc;
4388 }
4389 
4390 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
4391 {
4392 	struct hwrm_port_mac_qcfg_input req = {0};
4393 	struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4394 	uint16_t port_svif_info;
4395 	int rc;
4396 
4397 	bp->port_svif = BNXT_SVIF_INVALID;
4398 
4399 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4400 		return 0;
4401 
4402 	HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
4403 
4404 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4405 
4406 	HWRM_CHECK_RESULT_SILENT();
4407 
4408 	port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
4409 	if (port_svif_info &
4410 	    HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
4411 		bp->port_svif = port_svif_info &
4412 			HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
4413 
4414 	HWRM_UNLOCK();
4415 
4416 	return 0;
4417 }
4418 
4419 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
4420 				 struct bnxt_pf_resource_info *pf_resc)
4421 {
4422 	struct hwrm_func_cfg_input req = {0};
4423 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4424 	uint32_t enables;
4425 	int rc;
4426 
4427 	enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
4428 		  HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU |
4429 		  HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
4430 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
4431 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
4432 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
4433 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
4434 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
4435 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
4436 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
4437 
4438 	if (BNXT_HAS_RING_GRPS(bp)) {
4439 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
4440 		req.num_hw_ring_grps =
4441 			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
4442 	} else if (BNXT_HAS_NQ(bp)) {
4443 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
4444 		req.num_msix = rte_cpu_to_le_16(pf_resc->num_nq_rings);
4445 	}
4446 
4447 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
4448 	req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
4449 	req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
4450 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
4451 	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
4452 	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
4453 	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
4454 	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
4455 	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
4456 	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
4457 	req.num_vnics = rte_cpu_to_le_16(pf_resc->num_vnics);
4458 	req.fid = rte_cpu_to_le_16(0xffff);
4459 	req.enables = rte_cpu_to_le_32(enables);
4460 
4461 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4462 
4463 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4464 
4465 	HWRM_CHECK_RESULT();
4466 	HWRM_UNLOCK();
4467 
4468 	return rc;
4469 }
4470 
4471 /* min values are the guaranteed resources and max values are subject
4472  * to availability. The strategy for now is to keep both min & max
4473  * values the same.
4474  */
4475 static void
4476 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
4477 			      struct hwrm_func_vf_resource_cfg_input *req,
4478 			      int num_vfs)
4479 {
4480 	req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
4481 					       (num_vfs + 1));
4482 	req->min_rsscos_ctx = req->max_rsscos_ctx;
4483 	req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
4484 	req->min_stat_ctx = req->max_stat_ctx;
4485 	req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
4486 					       (num_vfs + 1));
4487 	req->min_cmpl_rings = req->max_cmpl_rings;
4488 	req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
4489 	req->min_tx_rings = req->max_tx_rings;
4490 	req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
4491 	req->min_rx_rings = req->max_rx_rings;
4492 	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
4493 	req->min_l2_ctxs = req->max_l2_ctxs;
4494 	req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
4495 	req->min_vnics = req->max_vnics;
4496 	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
4497 						 (num_vfs + 1));
4498 	req->min_hw_ring_grps = req->max_hw_ring_grps;
4499 	req->max_msix = rte_cpu_to_le_16(bp->max_nq_rings / (num_vfs + 1));
4500 }
4501 
4502 static void
4503 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
4504 			      struct hwrm_func_cfg_input *req,
4505 			      int num_vfs)
4506 {
4507 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
4508 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
4509 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
4510 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
4511 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
4512 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
4513 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
4514 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
4515 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
4516 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
4517 
4518 	req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
4519 					  RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN *
4520 					  BNXT_NUM_VLANS);
4521 	req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
4522 	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
4523 						(num_vfs + 1));
4524 	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
4525 	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
4526 					       (num_vfs + 1));
4527 	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
4528 	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
4529 	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
4530 	/* TODO: For now, do not support VMDq/RFS on VFs. */
4531 	req->num_vnics = rte_cpu_to_le_16(1);
4532 	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
4533 						 (num_vfs + 1));
4534 }
4535 
4536 /* Update the port wide resource values based on how many resources
4537  * got allocated to the VF.
4538  */
4539 static int bnxt_update_max_resources(struct bnxt *bp,
4540 				     int vf)
4541 {
4542 	struct hwrm_func_qcfg_input req = {0};
4543 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4544 	int rc;
4545 
4546 	/* Get the actual allocated values now */
4547 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4548 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4549 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4550 	HWRM_CHECK_RESULT();
4551 
4552 	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
4553 	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
4554 	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
4555 	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
4556 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
4557 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
4558 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
4559 	bp->max_nq_rings -= rte_le_to_cpu_16(resp->alloc_msix);
4560 	bp->max_vnics -= rte_le_to_cpu_16(resp->alloc_vnics);
4561 
4562 	HWRM_UNLOCK();
4563 
4564 	return 0;
4565 }
4566 
4567 /* Update the PF resource values based on how many resources
4568  * got allocated to it.
4569  */
4570 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
4571 {
4572 	struct hwrm_func_qcfg_input req = {0};
4573 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4574 	int rc;
4575 
4576 	/* Get the actual allocated values now */
4577 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4578 	req.fid = rte_cpu_to_le_16(0xffff);
4579 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4580 	HWRM_CHECK_RESULT();
4581 
4582 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
4583 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
4584 	bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
4585 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
4586 	bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
4587 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
4588 	bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
4589 	bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
4590 
4591 	HWRM_UNLOCK();
4592 
4593 	return 0;
4594 }
4595 
4596 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
4597 {
4598 	struct hwrm_func_qcfg_input req = {0};
4599 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4600 	int rc;
4601 
4602 	/* Check for zero MAC address */
4603 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4604 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4605 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4606 	HWRM_CHECK_RESULT();
4607 	rc = rte_le_to_cpu_16(resp->vlan);
4608 
4609 	HWRM_UNLOCK();
4610 
4611 	return rc;
4612 }
4613 
4614 static int bnxt_query_pf_resources(struct bnxt *bp,
4615 				   struct bnxt_pf_resource_info *pf_resc)
4616 {
4617 	struct hwrm_func_qcfg_input req = {0};
4618 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4619 	int rc;
4620 
4621 	/* And copy the allocated numbers into the pf struct */
4622 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4623 	req.fid = rte_cpu_to_le_16(0xffff);
4624 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4625 	HWRM_CHECK_RESULT();
4626 
4627 	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
4628 	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
4629 	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
4630 	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
4631 	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
4632 	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
4633 	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
4634 	pf_resc->num_nq_rings = rte_le_to_cpu_32(resp->alloc_msix);
4635 	pf_resc->num_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
4636 	bp->pf->evb_mode = resp->evb_mode;
4637 
4638 	HWRM_UNLOCK();
4639 
4640 	return rc;
4641 }
4642 
4643 static void
4644 bnxt_calculate_pf_resources(struct bnxt *bp,
4645 			    struct bnxt_pf_resource_info *pf_resc,
4646 			    int num_vfs)
4647 {
4648 	if (!num_vfs) {
4649 		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
4650 		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
4651 		pf_resc->num_cp_rings = bp->max_cp_rings;
4652 		pf_resc->num_tx_rings = bp->max_tx_rings;
4653 		pf_resc->num_rx_rings = bp->max_rx_rings;
4654 		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
4655 		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
4656 		pf_resc->num_nq_rings = bp->max_nq_rings;
4657 		pf_resc->num_vnics = bp->max_vnics;
4658 
4659 		return;
4660 	}
4661 
4662 	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
4663 				   bp->max_rsscos_ctx % (num_vfs + 1);
4664 	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
4665 				 bp->max_stat_ctx % (num_vfs + 1);
4666 	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
4667 				bp->max_cp_rings % (num_vfs + 1);
4668 	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
4669 				bp->max_tx_rings % (num_vfs + 1);
4670 	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
4671 				bp->max_rx_rings % (num_vfs + 1);
4672 	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
4673 			       bp->max_l2_ctx % (num_vfs + 1);
4674 	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
4675 				    bp->max_ring_grps % (num_vfs + 1);
4676 	pf_resc->num_nq_rings = bp->max_nq_rings / (num_vfs + 1) +
4677 				bp->max_nq_rings % (num_vfs + 1);
4678 	pf_resc->num_vnics = bp->max_vnics / (num_vfs + 1) +
4679 				bp->max_vnics % (num_vfs + 1);
4680 }
4681 
4682 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
4683 {
4684 	struct bnxt_pf_resource_info pf_resc = { 0 };
4685 	int rc;
4686 
4687 	if (!BNXT_PF(bp)) {
4688 		PMD_DRV_LOG_LINE(ERR, "Attempt to allocate VFs on a VF!");
4689 		return -EINVAL;
4690 	}
4691 
4692 	rc = bnxt_hwrm_func_qcaps(bp);
4693 	if (rc)
4694 		return rc;
4695 
4696 	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
4697 
4698 	bp->pf->func_cfg_flags &=
4699 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
4700 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
4701 	bp->pf->func_cfg_flags |=
4702 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
4703 
4704 	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
4705 	if (rc)
4706 		return rc;
4707 
4708 	rc = bnxt_update_max_resources_pf_only(bp);
4709 
4710 	return rc;
4711 }
4712 
4713 static int
4714 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
4715 {
4716 	size_t req_buf_sz, sz;
4717 	int i, rc;
4718 
4719 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
4720 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
4721 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
4722 	if (bp->pf->vf_req_buf == NULL) {
4723 		return -ENOMEM;
4724 	}
4725 
4726 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
4727 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
4728 
4729 	for (i = 0; i < num_vfs; i++)
4730 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
4731 					     (i * HWRM_MAX_REQ_LEN);
4732 
4733 	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
4734 	if (rc)
4735 		rte_free(bp->pf->vf_req_buf);
4736 
4737 	return rc;
4738 }
4739 
4740 static int
4741 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
4742 {
4743 	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4744 	struct hwrm_func_vf_resource_cfg_input req = {0};
4745 	int i, rc = 0;
4746 
4747 	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
4748 	bp->pf->active_vfs = 0;
4749 	for (i = 0; i < num_vfs; i++) {
4750 		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
4751 		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
4752 		rc = bnxt_hwrm_send_message(bp,
4753 					    &req,
4754 					    sizeof(req),
4755 					    BNXT_USE_CHIMP_MB);
4756 		if (rc || resp->error_code) {
4757 			PMD_DRV_LOG_LINE(ERR,
4758 				"Failed to initialize VF %d", i);
4759 			PMD_DRV_LOG_LINE(ERR,
4760 				"Not all VFs available. (%d, %d)",
4761 				rc, resp->error_code);
4762 			HWRM_UNLOCK();
4763 
4764 			/* If the first VF configuration itself fails,
4765 			 * unregister the vf_fwd_request buffer.
4766 			 */
4767 			if (i == 0)
4768 				bnxt_hwrm_func_buf_unrgtr(bp);
4769 			break;
4770 		}
4771 		HWRM_UNLOCK();
4772 
4773 		/* Update the max resource values based on the resource values
4774 		 * allocated to the VF.
4775 		 */
4776 		bnxt_update_max_resources(bp, i);
4777 		bp->pf->active_vfs++;
4778 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
4779 	}
4780 
4781 	return 0;
4782 }
4783 
4784 static int
4785 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
4786 {
4787 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4788 	struct hwrm_func_cfg_input req = {0};
4789 	int i, rc;
4790 
4791 	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
4792 
4793 	bp->pf->active_vfs = 0;
4794 	for (i = 0; i < num_vfs; i++) {
4795 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4796 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
4797 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
4798 		rc = bnxt_hwrm_send_message(bp,
4799 					    &req,
4800 					    sizeof(req),
4801 					    BNXT_USE_CHIMP_MB);
4802 
4803 		/* Clear enable flag for next pass */
4804 		req.enables &= ~rte_cpu_to_le_32(
4805 				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4806 
4807 		if (rc || resp->error_code) {
4808 			PMD_DRV_LOG_LINE(ERR,
4809 				"Failed to initialize VF %d", i);
4810 			PMD_DRV_LOG_LINE(ERR,
4811 				"Not all VFs available. (%d, %d)",
4812 				rc, resp->error_code);
4813 			HWRM_UNLOCK();
4814 
4815 			/* If the first VF configuration itself fails,
4816 			 * unregister the vf_fwd_request buffer.
4817 			 */
4818 			if (i == 0)
4819 				bnxt_hwrm_func_buf_unrgtr(bp);
4820 			break;
4821 		}
4822 
4823 		HWRM_UNLOCK();
4824 
4825 		/* Update the max resource values based on the resource values
4826 		 * allocated to the VF.
4827 		 */
4828 		bnxt_update_max_resources(bp, i);
4829 		bp->pf->active_vfs++;
4830 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
4831 	}
4832 
4833 	return 0;
4834 }
4835 
4836 static void
4837 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
4838 {
4839 	if (bp->flags & BNXT_FLAG_NEW_RM)
4840 		bnxt_process_vf_resc_config_new(bp, num_vfs);
4841 	else
4842 		bnxt_process_vf_resc_config_old(bp, num_vfs);
4843 }
4844 
4845 static void
4846 bnxt_update_pf_resources(struct bnxt *bp,
4847 			 struct bnxt_pf_resource_info *pf_resc)
4848 {
4849 	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
4850 	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
4851 	bp->max_cp_rings = pf_resc->num_cp_rings;
4852 	bp->max_tx_rings = pf_resc->num_tx_rings;
4853 	bp->max_rx_rings = pf_resc->num_rx_rings;
4854 	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
4855 	bp->max_nq_rings = pf_resc->num_nq_rings;
4856 	bp->max_vnics = pf_resc->num_vnics;
4857 }
4858 
4859 static int32_t
4860 bnxt_configure_pf_resources(struct bnxt *bp,
4861 			    struct bnxt_pf_resource_info *pf_resc)
4862 {
4863 	/*
4864 	 * We're using STD_TX_RING_MODE here which will limit the TX
4865 	 * rings. This will allow QoS to function properly. Not setting this
4866 	 * will cause PF rings to break bandwidth settings.
4867 	 */
4868 	bp->pf->func_cfg_flags &=
4869 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
4870 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
4871 	bp->pf->func_cfg_flags |=
4872 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
4873 	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
4874 }
4875 
4876 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
4877 {
4878 	struct bnxt_pf_resource_info pf_resc = { 0 };
4879 	int rc;
4880 
4881 	if (!BNXT_PF(bp)) {
4882 		PMD_DRV_LOG_LINE(ERR, "Attempt to allocate VFs on a VF!");
4883 		return -EINVAL;
4884 	}
4885 
4886 	rc = bnxt_hwrm_func_qcaps(bp);
4887 	if (rc)
4888 		return rc;
4889 
4890 	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
4891 
4892 	rc = bnxt_configure_pf_resources(bp, &pf_resc);
4893 	if (rc)
4894 		return rc;
4895 
4896 	rc = bnxt_query_pf_resources(bp, &pf_resc);
4897 	if (rc)
4898 		return rc;
4899 
4900 	/*
4901 	 * Now, create and register a buffer to hold forwarded VF requests
4902 	 */
4903 	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
4904 	if (rc)
4905 		return rc;
4906 
4907 	bnxt_configure_vf_resources(bp, num_vfs);
4908 
4909 	bnxt_update_pf_resources(bp, &pf_resc);
4910 
4911 	return 0;
4912 }
4913 
4914 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
4915 {
4916 	struct hwrm_func_cfg_input req = {0};
4917 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4918 	int rc;
4919 
4920 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4921 
4922 	req.fid = rte_cpu_to_le_16(0xffff);
4923 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
4924 	req.evb_mode = bp->pf->evb_mode;
4925 
4926 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4927 	HWRM_CHECK_RESULT();
4928 	HWRM_UNLOCK();
4929 
4930 	return rc;
4931 }
4932 
4933 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
4934 				uint8_t tunnel_type)
4935 {
4936 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
4937 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4938 	int rc = 0;
4939 
4940 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
4941 	req.tunnel_type = tunnel_type;
4942 	req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
4943 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4944 	HWRM_CHECK_RESULT();
4945 
4946 	switch (tunnel_type) {
4947 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
4948 		bp->vxlan_fw_dst_port_id =
4949 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
4950 		bp->vxlan_port = port;
4951 		break;
4952 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
4953 		bp->geneve_fw_dst_port_id =
4954 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
4955 		bp->geneve_port = port;
4956 		break;
4957 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI:
4958 		bp->ecpri_fw_dst_port_id =
4959 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
4960 		bp->ecpri_port = port;
4961 		bp->ecpri_upar_in_use = resp->upar_in_use;
4962 		break;
4963 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE:
4964 		bp->l2_etype_tunnel_id = port;
4965 		bp->l2_etype_upar_in_use = resp->upar_in_use;
4966 		break;
4967 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4:
4968 		bp->vxlan_ip_upar_in_use = resp->upar_in_use;
4969 		bp->vxlan_ip_port = port;
4970 		PMD_DRV_LOG_LINE(DEBUG, "vxlan_ip_upar_in_use %x port %x",
4971 				 bp->vxlan_ip_upar_in_use, bp->vxlan_ip_port);
4972 		break;
4973 	default:
4974 		break;
4975 	}
4976 
4977 	HWRM_UNLOCK();
4978 
4979 	bnxt_hwrm_set_tpa(bp);
4980 
4981 	return rc;
4982 }
4983 
4984 int bnxt_hwrm_tunnel_upar_id_get(struct bnxt *bp, uint8_t *upar_id,
4985 				 uint8_t tunnel_type)
4986 {
4987 	struct hwrm_tunnel_dst_port_query_input req = {0};
4988 	struct hwrm_tunnel_dst_port_query_output *resp = bp->hwrm_cmd_resp_addr;
4989 	int rc = 0;
4990 
4991 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_QUERY, BNXT_USE_CHIMP_MB);
4992 	req.tunnel_type = tunnel_type;
4993 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4994 	HWRM_CHECK_RESULT();
4995 
4996 	switch (tunnel_type) {
4997 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI:
4998 		*upar_id = resp->upar_in_use;
4999 		break;
5000 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6:
5001 		*upar_id = resp->upar_in_use;
5002 		break;
5003 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE:
5004 		*upar_id = resp->upar_in_use;
5005 		break;
5006 	default:
5007 		/* INVALID UPAR Id if another tunnel type tries to retrieve */
5008 		*upar_id = 0xff;
5009 		break;
5010 	}
5011 
5012 	HWRM_UNLOCK();
5013 
5014 	return rc;
5015 }
5016 
5017 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
5018 				uint8_t tunnel_type)
5019 {
5020 	struct hwrm_tunnel_dst_port_free_input req = {0};
5021 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
5022 	int rc = 0;
5023 
5024 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
5025 
5026 	req.tunnel_type = tunnel_type;
5027 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
5028 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5029 
5030 	HWRM_CHECK_RESULT();
5031 	HWRM_UNLOCK();
5032 
5033 	if (tunnel_type ==
5034 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
5035 		bp->vxlan_port = 0;
5036 		bp->vxlan_port_cnt = 0;
5037 	}
5038 
5039 	if (tunnel_type ==
5040 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
5041 		bp->geneve_port = 0;
5042 		bp->geneve_port_cnt = 0;
5043 	}
5044 
5045 	if (tunnel_type ==
5046 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI) {
5047 		bp->ecpri_port = 0;
5048 		bp->ecpri_upar_in_use = 0;
5049 		bp->ecpri_port_cnt = 0;
5050 	}
5051 
5052 	if (tunnel_type ==
5053 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE) {
5054 		bp->l2_etype_tunnel_cnt = 0;
5055 		bp->l2_etype_tunnel_id = 0;
5056 		bp->l2_etype_upar_in_use = 0;
5057 	}
5058 
5059 	bnxt_hwrm_set_tpa(bp);
5060 	return rc;
5061 }
5062 
5063 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
5064 					uint32_t flags)
5065 {
5066 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5067 	struct hwrm_func_cfg_input req = {0};
5068 	int rc;
5069 
5070 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5071 
5072 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
5073 	req.flags = rte_cpu_to_le_32(flags);
5074 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5075 
5076 	HWRM_CHECK_RESULT();
5077 	HWRM_UNLOCK();
5078 
5079 	return rc;
5080 }
5081 
5082 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
5083 {
5084 	uint32_t *flag = flagp;
5085 
5086 	vnic->flags = *flag;
5087 }
5088 
5089 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5090 {
5091 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
5092 }
5093 
5094 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
5095 {
5096 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5097 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
5098 	int rc;
5099 
5100 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
5101 
5102 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
5103 	req.req_buf_page_size =
5104 		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
5105 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
5106 	req.req_buf_page_addr0 =
5107 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
5108 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
5109 		PMD_DRV_LOG_LINE(ERR,
5110 			"unable to map buffer address to physical memory");
5111 		HWRM_UNLOCK();
5112 		return -ENOMEM;
5113 	}
5114 
5115 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5116 
5117 	HWRM_CHECK_RESULT();
5118 	HWRM_UNLOCK();
5119 
5120 	return rc;
5121 }
5122 
5123 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
5124 {
5125 	int rc = 0;
5126 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
5127 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5128 
5129 	if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
5130 		return 0;
5131 
5132 	HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
5133 
5134 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5135 
5136 	HWRM_CHECK_RESULT();
5137 	HWRM_UNLOCK();
5138 
5139 	return rc;
5140 }
5141 
5142 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
5143 {
5144 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5145 	struct hwrm_func_cfg_input req = {0};
5146 	int rc;
5147 
5148 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5149 
5150 	req.fid = rte_cpu_to_le_16(0xffff);
5151 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
5152 	req.enables = rte_cpu_to_le_32(
5153 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
5154 	req.async_event_cr = rte_cpu_to_le_16(
5155 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
5156 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5157 
5158 	HWRM_CHECK_RESULT();
5159 	HWRM_UNLOCK();
5160 
5161 	return rc;
5162 }
5163 
5164 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
5165 {
5166 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5167 	struct hwrm_func_vf_cfg_input req = {0};
5168 	int rc;
5169 
5170 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5171 
5172 	req.enables = rte_cpu_to_le_32(
5173 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
5174 	req.async_event_cr = rte_cpu_to_le_16(
5175 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
5176 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5177 
5178 	HWRM_CHECK_RESULT();
5179 	HWRM_UNLOCK();
5180 
5181 	return rc;
5182 }
5183 
5184 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
5185 {
5186 	struct hwrm_func_cfg_input req = {0};
5187 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5188 	uint16_t dflt_vlan, fid;
5189 	uint32_t func_cfg_flags;
5190 	int rc = 0;
5191 
5192 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5193 
5194 	if (is_vf) {
5195 		dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
5196 		fid = bp->pf->vf_info[vf].fid;
5197 		func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
5198 	} else {
5199 		fid = rte_cpu_to_le_16(0xffff);
5200 		func_cfg_flags = bp->pf->func_cfg_flags;
5201 		dflt_vlan = bp->vlan;
5202 	}
5203 
5204 	req.flags = rte_cpu_to_le_32(func_cfg_flags);
5205 	req.fid = rte_cpu_to_le_16(fid);
5206 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
5207 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
5208 
5209 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5210 
5211 	HWRM_CHECK_RESULT();
5212 	HWRM_UNLOCK();
5213 
5214 	return rc;
5215 }
5216 
5217 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
5218 			uint16_t max_bw, uint16_t enables)
5219 {
5220 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5221 	struct hwrm_func_cfg_input req = {0};
5222 	int rc;
5223 
5224 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5225 
5226 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
5227 	req.enables |= rte_cpu_to_le_32(enables);
5228 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
5229 	req.max_bw = rte_cpu_to_le_32(max_bw);
5230 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5231 
5232 	HWRM_CHECK_RESULT();
5233 	HWRM_UNLOCK();
5234 
5235 	return rc;
5236 }
5237 
5238 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
5239 {
5240 	struct hwrm_func_cfg_input req = {0};
5241 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5242 	int rc = 0;
5243 
5244 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5245 
5246 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
5247 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
5248 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
5249 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
5250 
5251 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5252 
5253 	HWRM_CHECK_RESULT();
5254 	HWRM_UNLOCK();
5255 
5256 	return rc;
5257 }
5258 
5259 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
5260 {
5261 	int rc;
5262 
5263 	if (BNXT_PF(bp))
5264 		rc = bnxt_hwrm_func_cfg_def_cp(bp);
5265 	else
5266 		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
5267 
5268 	return rc;
5269 }
5270 
5271 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
5272 			      void *encaped, size_t ec_size)
5273 {
5274 	int rc = 0;
5275 	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
5276 	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
5277 
5278 	if (ec_size > sizeof(req.encap_request))
5279 		return -1;
5280 
5281 	HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
5282 
5283 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
5284 	memcpy(req.encap_request, encaped, ec_size);
5285 
5286 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5287 
5288 	HWRM_CHECK_RESULT();
5289 	HWRM_UNLOCK();
5290 
5291 	return rc;
5292 }
5293 
5294 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
5295 				       struct rte_ether_addr *mac)
5296 {
5297 	struct hwrm_func_qcfg_input req = {0};
5298 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5299 	int rc;
5300 
5301 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
5302 
5303 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
5304 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5305 
5306 	HWRM_CHECK_RESULT();
5307 
5308 	memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
5309 
5310 	HWRM_UNLOCK();
5311 
5312 	return rc;
5313 }
5314 
5315 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
5316 			    void *encaped, size_t ec_size)
5317 {
5318 	int rc = 0;
5319 	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
5320 	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
5321 
5322 	if (ec_size > sizeof(req.encap_request))
5323 		return -1;
5324 
5325 	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
5326 
5327 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
5328 	memcpy(req.encap_request, encaped, ec_size);
5329 
5330 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5331 
5332 	HWRM_CHECK_RESULT();
5333 	HWRM_UNLOCK();
5334 
5335 	return rc;
5336 }
5337 
5338 int bnxt_hwrm_fwd_resp(struct bnxt *bp, uint16_t target_id,
5339 		       void *encaped, size_t ec_size,
5340 		       uint64_t encap_resp_addr, uint16_t cmpl_ring)
5341 {
5342 	int rc = 0;
5343 	struct hwrm_fwd_resp_input req = {.req_type = 0};
5344 	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
5345 
5346 	if (ec_size > sizeof(req.encap_resp))
5347 		return -1;
5348 
5349 	HWRM_PREP(&req, HWRM_FWD_RESP, BNXT_USE_CHIMP_MB);
5350 
5351 	req.target_id = rte_cpu_to_le_16(target_id);
5352 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
5353 	req.encap_resp_len = rte_cpu_to_le_16(ec_size);
5354 	req.encap_resp_addr = encap_resp_addr;
5355 	req.encap_resp_cmpl_ring = cmpl_ring;
5356 	memcpy(req.encap_resp, encaped, ec_size);
5357 
5358 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5359 
5360 	HWRM_CHECK_RESULT();
5361 	HWRM_UNLOCK();
5362 
5363 	return rc;
5364 }
5365 
5366 static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
5367 {
5368 	/* One of the HW stat values that make up this counter was zero as
5369 	 * returned by HW in this iteration, so use the previous
5370 	 * iteration's counter value
5371 	 */
5372 	if (!cntr || !prev_cntr)
5373 		return;
5374 	if (*prev_cntr && *cntr == 0)
5375 		*cntr = *prev_cntr;
5376 	else
5377 		*prev_cntr = *cntr;
5378 }
5379 
5380 int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
5381 			 struct bnxt_ring_stats *ring_stats, bool rx)
5382 {
5383 	int rc = 0;
5384 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
5385 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
5386 
5387 	HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
5388 
5389 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
5390 
5391 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5392 
5393 	HWRM_CHECK_RESULT();
5394 
5395 	if (rx) {
5396 		struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
5397 
5398 		ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
5399 		bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
5400 				      &prev_stats->rx_ucast_pkts);
5401 
5402 		ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
5403 		bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
5404 				      &prev_stats->rx_mcast_pkts);
5405 
5406 		ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
5407 		bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
5408 				      &prev_stats->rx_bcast_pkts);
5409 
5410 		ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
5411 		bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
5412 				      &prev_stats->rx_ucast_bytes);
5413 
5414 		ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
5415 		bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
5416 				      &prev_stats->rx_mcast_bytes);
5417 
5418 		ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
5419 		bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
5420 				      &prev_stats->rx_bcast_bytes);
5421 
5422 		ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
5423 		bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
5424 				      &prev_stats->rx_discard_pkts);
5425 
5426 		ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
5427 		bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
5428 				      &prev_stats->rx_error_pkts);
5429 
5430 		ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
5431 		bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
5432 				      &prev_stats->rx_agg_pkts);
5433 
5434 		ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
5435 		bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
5436 				      &prev_stats->rx_agg_bytes);
5437 
5438 		ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
5439 		bnxt_update_prev_stat(&ring_stats->rx_agg_events,
5440 				      &prev_stats->rx_agg_events);
5441 
5442 		ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
5443 		bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
5444 				      &prev_stats->rx_agg_aborts);
5445 	} else {
5446 		struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
5447 
5448 		ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
5449 		bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
5450 				      &prev_stats->tx_ucast_pkts);
5451 
5452 		ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
5453 		bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
5454 				      &prev_stats->tx_mcast_pkts);
5455 
5456 		ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
5457 		bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
5458 				      &prev_stats->tx_bcast_pkts);
5459 
5460 		ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
5461 		bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
5462 				      &prev_stats->tx_ucast_bytes);
5463 
5464 		ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
5465 		bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
5466 				      &prev_stats->tx_mcast_bytes);
5467 
5468 		ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
5469 		bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
5470 				      &prev_stats->tx_bcast_bytes);
5471 
5472 		ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
5473 		bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
5474 				      &prev_stats->tx_discard_pkts);
5475 	}
5476 
5477 	HWRM_UNLOCK();
5478 
5479 	return rc;
5480 }
5481 
5482 int bnxt_hwrm_ring_stats_ext(struct bnxt *bp, uint32_t cid, int idx,
5483 			     struct bnxt_ring_stats_ext *ring_stats, bool rx)
5484 {
5485 	int rc = 0;
5486 	struct hwrm_stat_ext_ctx_query_input req = {.req_type = 0};
5487 	struct hwrm_stat_ext_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
5488 
5489 	HWRM_PREP(&req, HWRM_STAT_EXT_CTX_QUERY, BNXT_USE_CHIMP_MB);
5490 
5491 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
5492 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5493 
5494 	HWRM_CHECK_RESULT();
5495 
5496 	if (rx) {
5497 		struct bnxt_ring_stats_ext *prev_stats = &bp->prev_rx_ring_stats_ext[idx];
5498 
5499 		ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
5500 		bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
5501 				      &prev_stats->rx_ucast_pkts);
5502 
5503 		ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
5504 		bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
5505 				      &prev_stats->rx_mcast_pkts);
5506 
5507 		ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
5508 		bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
5509 				      &prev_stats->rx_bcast_pkts);
5510 
5511 		ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
5512 		bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
5513 				      &prev_stats->rx_ucast_bytes);
5514 
5515 		ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
5516 		bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
5517 				      &prev_stats->rx_mcast_bytes);
5518 
5519 		ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
5520 		bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
5521 				      &prev_stats->rx_bcast_bytes);
5522 
5523 		ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
5524 		bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
5525 				      &prev_stats->rx_discard_pkts);
5526 
5527 		ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
5528 		bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
5529 				      &prev_stats->rx_error_pkts);
5530 
5531 		ring_stats->rx_tpa_eligible_pkt = rte_le_to_cpu_64(resp->rx_tpa_eligible_pkt);
5532 		bnxt_update_prev_stat(&ring_stats->rx_tpa_eligible_pkt,
5533 				      &prev_stats->rx_tpa_eligible_pkt);
5534 
5535 		ring_stats->rx_tpa_eligible_bytes = rte_le_to_cpu_64(resp->rx_tpa_eligible_bytes);
5536 		bnxt_update_prev_stat(&ring_stats->rx_tpa_eligible_bytes,
5537 				      &prev_stats->rx_tpa_eligible_bytes);
5538 
5539 		ring_stats->rx_tpa_pkt = rte_le_to_cpu_64(resp->rx_tpa_pkt);
5540 		bnxt_update_prev_stat(&ring_stats->rx_tpa_pkt,
5541 				      &prev_stats->rx_tpa_pkt);
5542 
5543 		ring_stats->rx_tpa_bytes = rte_le_to_cpu_64(resp->rx_tpa_bytes);
5544 		bnxt_update_prev_stat(&ring_stats->rx_tpa_bytes,
5545 				      &prev_stats->rx_tpa_bytes);
5546 
5547 		ring_stats->rx_tpa_errors = rte_le_to_cpu_64(resp->rx_tpa_errors);
5548 		bnxt_update_prev_stat(&ring_stats->rx_tpa_errors,
5549 				      &prev_stats->rx_tpa_errors);
5550 
5551 		ring_stats->rx_tpa_events = rte_le_to_cpu_64(resp->rx_tpa_events);
5552 		bnxt_update_prev_stat(&ring_stats->rx_tpa_events,
5553 				      &prev_stats->rx_tpa_events);
5554 	} else {
5555 		struct bnxt_ring_stats_ext *prev_stats = &bp->prev_tx_ring_stats_ext[idx];
5556 
5557 		ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
5558 		bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
5559 				      &prev_stats->tx_ucast_pkts);
5560 
5561 		ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
5562 		bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
5563 				      &prev_stats->tx_mcast_pkts);
5564 
5565 		ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
5566 		bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
5567 				      &prev_stats->tx_bcast_pkts);
5568 
5569 		ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
5570 		bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
5571 				      &prev_stats->tx_ucast_bytes);
5572 
5573 		ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
5574 		bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
5575 				      &prev_stats->tx_mcast_bytes);
5576 
5577 		ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
5578 		bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
5579 				      &prev_stats->tx_bcast_bytes);
5580 
5581 		ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
5582 		bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
5583 				      &prev_stats->tx_discard_pkts);
5584 
5585 		ring_stats->tx_error_pkts = rte_le_to_cpu_64(resp->tx_error_pkts);
5586 		bnxt_update_prev_stat(&ring_stats->tx_error_pkts,
5587 				      &prev_stats->tx_error_pkts);
5588 	}
5589 
5590 	HWRM_UNLOCK();
5591 
5592 	return rc;
5593 }
5594 
5595 int bnxt_hwrm_port_qstats(struct bnxt *bp)
5596 {
5597 	struct hwrm_port_qstats_input req = {0};
5598 	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5599 	struct bnxt_pf_info *pf = bp->pf;
5600 	int rc;
5601 
5602 	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
5603 
5604 	req.port_id = rte_cpu_to_le_16(pf->port_id);
5605 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
5606 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
5607 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5608 
5609 	HWRM_CHECK_RESULT();
5610 	HWRM_UNLOCK();
5611 
5612 	return rc;
5613 }
5614 
5615 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
5616 {
5617 	struct hwrm_port_clr_stats_input req = {0};
5618 	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
5619 	struct bnxt_pf_info *pf = bp->pf;
5620 	int rc;
5621 
5622 	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
5623 	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
5624 	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
5625 		return 0;
5626 
5627 	HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
5628 
5629 	req.port_id = rte_cpu_to_le_16(pf->port_id);
5630 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5631 
5632 	HWRM_CHECK_RESULT();
5633 	HWRM_UNLOCK();
5634 
5635 	return rc;
5636 }
5637 
5638 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
5639 {
5640 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5641 	struct hwrm_port_led_qcaps_input req = {0};
5642 	int rc;
5643 
5644 	if (BNXT_VF(bp))
5645 		return 0;
5646 
5647 	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
5648 	req.port_id = bp->pf->port_id;
5649 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5650 
5651 	HWRM_CHECK_RESULT_SILENT();
5652 
5653 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
5654 		unsigned int i;
5655 
5656 		bp->leds->num_leds = resp->num_leds;
5657 		memcpy(bp->leds, &resp->led0_id,
5658 			sizeof(bp->leds[0]) * bp->leds->num_leds);
5659 		for (i = 0; i < bp->leds->num_leds; i++) {
5660 			struct bnxt_led_info *led = &bp->leds[i];
5661 
5662 			uint16_t caps = led->led_state_caps;
5663 
5664 			if (!led->led_group_id ||
5665 				!BNXT_LED_ALT_BLINK_CAP(caps)) {
5666 				bp->leds->num_leds = 0;
5667 				break;
5668 			}
5669 		}
5670 	}
5671 
5672 	HWRM_UNLOCK();
5673 
5674 	return rc;
5675 }
5676 
5677 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
5678 {
5679 	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5680 	struct hwrm_port_led_cfg_input req = {0};
5681 	struct bnxt_led_cfg *led_cfg;
5682 	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
5683 	uint16_t duration = 0;
5684 	int rc, i;
5685 
5686 	if (BNXT_VF(bp) || !bp->leds || !bp->leds->num_leds)
5687 		return -EOPNOTSUPP;
5688 
5689 	HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
5690 
5691 	if (led_on) {
5692 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
5693 		duration = rte_cpu_to_le_16(500);
5694 	}
5695 	req.port_id = bp->pf->port_id;
5696 	req.num_leds = bp->leds->num_leds;
5697 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
5698 	for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
5699 		req.enables |= BNXT_LED_DFLT_ENABLES(i);
5700 		led_cfg->led_id = bp->leds[i].led_id;
5701 		led_cfg->led_state = led_state;
5702 		led_cfg->led_blink_on = duration;
5703 		led_cfg->led_blink_off = duration;
5704 		led_cfg->led_group_id = bp->leds[i].led_group_id;
5705 	}
5706 
5707 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5708 
5709 	HWRM_CHECK_RESULT();
5710 	HWRM_UNLOCK();
5711 
5712 	return rc;
5713 }
5714 
5715 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
5716 			       uint32_t *length)
5717 {
5718 	int rc;
5719 	struct hwrm_nvm_get_dir_info_input req = {0};
5720 	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
5721 
5722 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
5723 
5724 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5725 
5726 	HWRM_CHECK_RESULT();
5727 
5728 	*entries = rte_le_to_cpu_32(resp->entries);
5729 	*length = rte_le_to_cpu_32(resp->entry_length);
5730 
5731 	HWRM_UNLOCK();
5732 	return rc;
5733 }
5734 
5735 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
5736 {
5737 	int rc;
5738 	uint32_t dir_entries;
5739 	uint32_t entry_length;
5740 	uint8_t *buf;
5741 	size_t buflen;
5742 	rte_iova_t dma_handle;
5743 	struct hwrm_nvm_get_dir_entries_input req = {0};
5744 	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
5745 
5746 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
5747 	if (rc != 0)
5748 		return rc;
5749 
5750 	*data++ = dir_entries;
5751 	*data++ = entry_length;
5752 	len -= 2;
5753 	memset(data, 0xff, len);
5754 
5755 	buflen = dir_entries * entry_length;
5756 	buf = rte_malloc("nvm_dir", buflen, 0);
5757 	if (buf == NULL)
5758 		return -ENOMEM;
5759 	dma_handle = rte_malloc_virt2iova(buf);
5760 	if (dma_handle == RTE_BAD_IOVA) {
5761 		rte_free(buf);
5762 		PMD_DRV_LOG_LINE(ERR,
5763 			"unable to map response address to physical memory");
5764 		return -ENOMEM;
5765 	}
5766 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
5767 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
5768 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5769 
5770 	if (rc == 0)
5771 		memcpy(data, buf, len > buflen ? buflen : len);
5772 
5773 	rte_free(buf);
5774 	HWRM_CHECK_RESULT();
5775 	HWRM_UNLOCK();
5776 
5777 	return rc;
5778 }
5779 
5780 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
5781 			     uint32_t offset, uint32_t length,
5782 			     uint8_t *data)
5783 {
5784 	int rc;
5785 	uint8_t *buf;
5786 	rte_iova_t dma_handle;
5787 	struct hwrm_nvm_read_input req = {0};
5788 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
5789 
5790 	buf = rte_malloc("nvm_item", length, 0);
5791 	if (!buf)
5792 		return -ENOMEM;
5793 
5794 	dma_handle = rte_malloc_virt2iova(buf);
5795 	if (dma_handle == RTE_BAD_IOVA) {
5796 		rte_free(buf);
5797 		PMD_DRV_LOG_LINE(ERR,
5798 			"unable to map response address to physical memory");
5799 		return -ENOMEM;
5800 	}
5801 	HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
5802 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
5803 	req.dir_idx = rte_cpu_to_le_16(index);
5804 	req.offset = rte_cpu_to_le_32(offset);
5805 	req.len = rte_cpu_to_le_32(length);
5806 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5807 	if (rc == 0)
5808 		memcpy(data, buf, length);
5809 
5810 	rte_free(buf);
5811 	HWRM_CHECK_RESULT();
5812 	HWRM_UNLOCK();
5813 
5814 	return rc;
5815 }
5816 
5817 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
5818 {
5819 	int rc;
5820 	struct hwrm_nvm_erase_dir_entry_input req = {0};
5821 	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
5822 
5823 	HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
5824 	req.dir_idx = rte_cpu_to_le_16(index);
5825 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5826 	HWRM_CHECK_RESULT();
5827 	HWRM_UNLOCK();
5828 
5829 	return rc;
5830 }
5831 
5832 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
5833 			  uint16_t dir_ordinal, uint16_t dir_ext,
5834 			  uint16_t dir_attr, const uint8_t *data,
5835 			  size_t data_len)
5836 {
5837 	int rc;
5838 	struct hwrm_nvm_write_input req = {0};
5839 	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
5840 	rte_iova_t dma_handle;
5841 	uint8_t *buf;
5842 
5843 	buf = rte_malloc("nvm_write", data_len, 0);
5844 	if (!buf)
5845 		return -ENOMEM;
5846 
5847 	dma_handle = rte_malloc_virt2iova(buf);
5848 	if (dma_handle == RTE_BAD_IOVA) {
5849 		rte_free(buf);
5850 		PMD_DRV_LOG_LINE(ERR,
5851 			"unable to map response address to physical memory");
5852 		return -ENOMEM;
5853 	}
5854 	memcpy(buf, data, data_len);
5855 
5856 	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
5857 
5858 	req.dir_type = rte_cpu_to_le_16(dir_type);
5859 	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
5860 	req.dir_ext = rte_cpu_to_le_16(dir_ext);
5861 	req.dir_attr = rte_cpu_to_le_16(dir_attr);
5862 	req.dir_data_length = rte_cpu_to_le_32(data_len);
5863 	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
5864 
5865 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5866 
5867 	rte_free(buf);
5868 	HWRM_CHECK_RESULT();
5869 	HWRM_UNLOCK();
5870 
5871 	return rc;
5872 }
5873 
5874 static void
5875 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
5876 {
5877 	uint32_t *count = cbdata;
5878 
5879 	*count = *count + 1;
5880 }
5881 
5882 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
5883 				     struct bnxt_vnic_info *vnic __rte_unused)
5884 {
5885 	return 0;
5886 }
5887 
5888 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
5889 {
5890 	uint32_t count = 0;
5891 
5892 	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
5893 	    &count, bnxt_vnic_count_hwrm_stub);
5894 
5895 	return count;
5896 }
5897 
5898 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
5899 					uint16_t *vnic_ids)
5900 {
5901 	struct hwrm_func_vf_vnic_ids_query_input req = {0};
5902 	struct hwrm_func_vf_vnic_ids_query_output *resp =
5903 						bp->hwrm_cmd_resp_addr;
5904 	int rc;
5905 
5906 	/* First query all VNIC ids */
5907 	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
5908 
5909 	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
5910 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
5911 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
5912 
5913 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
5914 		HWRM_UNLOCK();
5915 		PMD_DRV_LOG_LINE(ERR,
5916 		"unable to map VNIC ID table address to physical memory");
5917 		return -ENOMEM;
5918 	}
5919 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5920 	HWRM_CHECK_RESULT();
5921 	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
5922 
5923 	HWRM_UNLOCK();
5924 
5925 	return rc;
5926 }
5927 
5928 /*
5929  * This function queries the VNIC IDs  for a specified VF. It then calls
5930  * the vnic_cb to update the necessary field in vnic_info with cbdata.
5931  * Then it calls the hwrm_cb function to program this new vnic configuration.
5932  */
5933 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
5934 	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
5935 	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
5936 {
5937 	struct bnxt_vnic_info vnic;
5938 	int rc = 0;
5939 	int i, num_vnic_ids;
5940 	uint16_t *vnic_ids;
5941 	size_t vnic_id_sz;
5942 	size_t sz;
5943 
5944 	/* First query all VNIC ids */
5945 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
5946 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
5947 			RTE_CACHE_LINE_SIZE);
5948 	if (vnic_ids == NULL)
5949 		return -ENOMEM;
5950 
5951 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
5952 		rte_mem_lock_page(((char *)vnic_ids) + sz);
5953 
5954 	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
5955 
5956 	if (num_vnic_ids < 0)
5957 		return num_vnic_ids;
5958 
5959 	/* Retrieve VNIC, update bd_stall then update */
5960 
5961 	for (i = 0; i < num_vnic_ids; i++) {
5962 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
5963 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
5964 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
5965 		if (rc)
5966 			break;
5967 		if (vnic.mru <= 4)	/* Indicates unallocated */
5968 			continue;
5969 
5970 		vnic_cb(&vnic, cbdata);
5971 
5972 		rc = hwrm_cb(bp, &vnic);
5973 		if (rc)
5974 			break;
5975 	}
5976 
5977 	rte_free(vnic_ids);
5978 
5979 	return rc;
5980 }
5981 
5982 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
5983 					      bool on)
5984 {
5985 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5986 	struct hwrm_func_cfg_input req = {0};
5987 	int rc;
5988 
5989 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
5990 
5991 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
5992 	req.enables |= rte_cpu_to_le_32(
5993 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
5994 	req.vlan_antispoof_mode = on ?
5995 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
5996 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
5997 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5998 
5999 	HWRM_CHECK_RESULT();
6000 	HWRM_UNLOCK();
6001 
6002 	return rc;
6003 }
6004 
6005 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
6006 {
6007 	struct bnxt_vnic_info vnic;
6008 	uint16_t *vnic_ids;
6009 	size_t vnic_id_sz;
6010 	int num_vnic_ids, i;
6011 	size_t sz;
6012 	int rc;
6013 
6014 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
6015 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
6016 			RTE_CACHE_LINE_SIZE);
6017 	if (vnic_ids == NULL)
6018 		return -ENOMEM;
6019 
6020 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
6021 		rte_mem_lock_page(((char *)vnic_ids) + sz);
6022 
6023 	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
6024 	if (rc <= 0)
6025 		goto exit;
6026 	num_vnic_ids = rc;
6027 
6028 	/*
6029 	 * Loop through to find the default VNIC ID.
6030 	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
6031 	 * by sending the hwrm_func_qcfg command to the firmware.
6032 	 */
6033 	for (i = 0; i < num_vnic_ids; i++) {
6034 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
6035 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
6036 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
6037 					bp->pf->first_vf_id + vf);
6038 		if (rc)
6039 			goto exit;
6040 		if (vnic.func_default) {
6041 			rte_free(vnic_ids);
6042 			return vnic.fw_vnic_id;
6043 		}
6044 	}
6045 	/* Could not find a default VNIC. */
6046 	PMD_DRV_LOG_LINE(ERR, "No default VNIC");
6047 exit:
6048 	rte_free(vnic_ids);
6049 	return rc;
6050 }
6051 
6052 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
6053 			 uint16_t dst_id,
6054 			 struct bnxt_filter_info *filter)
6055 {
6056 	int rc = 0;
6057 	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
6058 	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6059 	uint32_t enables = 0;
6060 
6061 	if (filter->fw_em_filter_id != UINT64_MAX)
6062 		bnxt_hwrm_clear_em_filter(bp, filter);
6063 
6064 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
6065 
6066 	req.flags = rte_cpu_to_le_32(filter->flags);
6067 
6068 	enables = filter->enables |
6069 	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
6070 	req.dst_id = rte_cpu_to_le_16(dst_id);
6071 
6072 	if (filter->ip_addr_type) {
6073 		req.ip_addr_type = filter->ip_addr_type;
6074 		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
6075 	}
6076 	if (enables &
6077 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
6078 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
6079 	if (enables &
6080 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
6081 		memcpy(req.src_macaddr, filter->src_macaddr,
6082 		       RTE_ETHER_ADDR_LEN);
6083 	if (enables &
6084 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
6085 		memcpy(req.dst_macaddr, filter->dst_macaddr,
6086 		       RTE_ETHER_ADDR_LEN);
6087 	if (enables &
6088 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
6089 		req.ovlan_vid = filter->l2_ovlan;
6090 	if (enables &
6091 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
6092 		req.ivlan_vid = filter->l2_ivlan;
6093 	if (enables &
6094 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
6095 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
6096 	if (enables &
6097 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
6098 		req.ip_protocol = filter->ip_protocol;
6099 	if (enables &
6100 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
6101 		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
6102 	if (enables &
6103 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
6104 		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
6105 	if (enables &
6106 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
6107 		req.src_port = rte_cpu_to_be_16(filter->src_port);
6108 	if (enables &
6109 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
6110 		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
6111 	if (enables &
6112 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
6113 		req.mirror_vnic_id = filter->mirror_vnic_id;
6114 
6115 	req.enables = rte_cpu_to_le_32(enables);
6116 
6117 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
6118 
6119 	HWRM_CHECK_RESULT();
6120 
6121 	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
6122 	HWRM_UNLOCK();
6123 
6124 	return rc;
6125 }
6126 
6127 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
6128 {
6129 	int rc = 0;
6130 	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
6131 	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
6132 
6133 	if (filter->fw_em_filter_id == UINT64_MAX)
6134 		return 0;
6135 
6136 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
6137 
6138 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
6139 
6140 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
6141 
6142 	HWRM_CHECK_RESULT();
6143 	HWRM_UNLOCK();
6144 
6145 	filter->fw_em_filter_id = UINT64_MAX;
6146 	filter->fw_l2_filter_id = UINT64_MAX;
6147 
6148 	return 0;
6149 }
6150 
6151 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
6152 			 uint16_t dst_id,
6153 			 struct bnxt_filter_info *filter)
6154 {
6155 	int rc = 0;
6156 	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
6157 	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
6158 						bp->hwrm_cmd_resp_addr;
6159 	uint32_t enables = 0;
6160 
6161 	if (filter->fw_ntuple_filter_id != UINT64_MAX)
6162 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
6163 
6164 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
6165 
6166 	req.flags = rte_cpu_to_le_32(filter->flags);
6167 
6168 	enables = filter->enables |
6169 	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
6170 	req.dst_id = rte_cpu_to_le_16(dst_id);
6171 
6172 	if (filter->ip_addr_type) {
6173 		req.ip_addr_type = filter->ip_addr_type;
6174 		enables |=
6175 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
6176 	}
6177 	if (enables &
6178 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
6179 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
6180 	if (enables &
6181 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
6182 		memcpy(req.src_macaddr, filter->src_macaddr,
6183 		       RTE_ETHER_ADDR_LEN);
6184 	if (enables &
6185 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
6186 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
6187 	if (enables &
6188 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
6189 		req.ip_protocol = filter->ip_protocol;
6190 	if (enables &
6191 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
6192 		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
6193 	if (enables &
6194 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
6195 		req.src_ipaddr_mask[0] =
6196 			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
6197 	if (enables &
6198 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
6199 		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
6200 	if (enables &
6201 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
6202 		req.dst_ipaddr_mask[0] =
6203 			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
6204 	if (enables &
6205 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
6206 		req.src_port = rte_cpu_to_le_16(filter->src_port);
6207 	if (enables &
6208 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
6209 		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
6210 	if (enables &
6211 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
6212 		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
6213 	if (enables &
6214 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
6215 		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
6216 
6217 	req.enables = rte_cpu_to_le_32(enables);
6218 
6219 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6220 
6221 	HWRM_CHECK_RESULT();
6222 
6223 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
6224 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
6225 	HWRM_UNLOCK();
6226 
6227 	return rc;
6228 }
6229 
6230 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
6231 				struct bnxt_filter_info *filter)
6232 {
6233 	int rc = 0;
6234 	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
6235 	struct hwrm_cfa_ntuple_filter_free_output *resp =
6236 						bp->hwrm_cmd_resp_addr;
6237 
6238 	if (filter->fw_ntuple_filter_id == UINT64_MAX)
6239 		return 0;
6240 
6241 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
6242 
6243 	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
6244 
6245 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6246 
6247 	HWRM_CHECK_RESULT();
6248 	HWRM_UNLOCK();
6249 
6250 	filter->fw_ntuple_filter_id = UINT64_MAX;
6251 
6252 	return 0;
6253 }
6254 
6255 int
6256 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6257 {
6258 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
6259 	uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state;
6260 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
6261 	struct bnxt_rx_queue **rxqs = bp->rx_queues;
6262 	uint16_t *ring_tbl = vnic->rss_table;
6263 	int nr_ctxs = vnic->num_lb_ctxts;
6264 	int max_rings = bp->rx_nr_rings;
6265 	int i, j, k, cnt;
6266 	int rc = 0;
6267 
6268 	for (i = 0, k = 0; i < nr_ctxs; i++) {
6269 		struct bnxt_rx_ring_info *rxr;
6270 		struct bnxt_cp_ring_info *cpr;
6271 
6272 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
6273 
6274 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
6275 		req.hash_type = rte_cpu_to_le_32(bnxt_sanitize_rss_type(bp, vnic->hash_type));
6276 		req.hash_mode_flags = vnic->hash_mode;
6277 		req.ring_select_mode = vnic->ring_select_mode;
6278 
6279 		req.ring_grp_tbl_addr =
6280 		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
6281 				     i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
6282 				     2 * sizeof(*ring_tbl));
6283 		req.hash_key_tbl_addr =
6284 		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
6285 
6286 		req.ring_table_pair_index = i;
6287 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
6288 
6289 		for (j = 0; j < 64; j++) {
6290 			uint16_t ring_id;
6291 
6292 			/* Find next active ring. */
6293 			for (cnt = 0; cnt < max_rings; cnt++) {
6294 				if (rxq_state[k] != RTE_ETH_QUEUE_STATE_STOPPED)
6295 					break;
6296 				if (++k == max_rings)
6297 					k = 0;
6298 			}
6299 
6300 			/* Return if no rings are active. */
6301 			if (cnt == max_rings) {
6302 				HWRM_UNLOCK();
6303 				return 0;
6304 			}
6305 
6306 			/* Add rx/cp ring pair to RSS table. */
6307 			rxr = rxqs[k]->rx_ring;
6308 			cpr = rxqs[k]->cp_ring;
6309 
6310 			ring_id = rxr->rx_ring_struct->fw_ring_id;
6311 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
6312 			ring_id = cpr->cp_ring_struct->fw_ring_id;
6313 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
6314 
6315 			if (++k == max_rings)
6316 				k = 0;
6317 		}
6318 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
6319 					    BNXT_USE_CHIMP_MB);
6320 
6321 		HWRM_CHECK_RESULT();
6322 		HWRM_UNLOCK();
6323 	}
6324 
6325 	return rc;
6326 }
6327 
6328 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6329 {
6330 	unsigned int rss_idx, fw_idx, i;
6331 
6332 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6333 		return 0;
6334 
6335 	if (!(vnic->rss_table && vnic->hash_type))
6336 		return 0;
6337 
6338 	if (BNXT_CHIP_P5_P7(bp))
6339 		return bnxt_vnic_rss_configure_p5(bp, vnic);
6340 
6341 	/*
6342 	 * Fill the RSS hash & redirection table with
6343 	 * ring group ids for all VNICs
6344 	 */
6345 	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
6346 	     rss_idx++, fw_idx++) {
6347 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
6348 			fw_idx %= bp->rx_cp_nr_rings;
6349 			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
6350 				break;
6351 			fw_idx++;
6352 		}
6353 
6354 		if (i == bp->rx_cp_nr_rings)
6355 			return 0;
6356 
6357 		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
6358 	}
6359 
6360 	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
6361 }
6362 
6363 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
6364 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6365 {
6366 	uint16_t flags;
6367 
6368 	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
6369 
6370 	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
6371 	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
6372 
6373 	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
6374 	req->num_cmpl_dma_aggr_during_int =
6375 		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
6376 
6377 	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
6378 
6379 	/* min timer set to 1/2 of interrupt timer */
6380 	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
6381 
6382 	/* buf timer set to 1/4 of interrupt timer */
6383 	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
6384 
6385 	req->cmpl_aggr_dma_tmr_during_int =
6386 		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
6387 
6388 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
6389 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
6390 	req->flags = rte_cpu_to_le_16(flags);
6391 }
6392 
6393 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
6394 		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
6395 {
6396 	struct hwrm_ring_aggint_qcaps_input req = {0};
6397 	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6398 	uint32_t enables;
6399 	uint16_t flags;
6400 	int rc;
6401 
6402 	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
6403 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6404 	HWRM_CHECK_RESULT();
6405 
6406 	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
6407 	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
6408 
6409 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
6410 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
6411 	agg_req->flags = rte_cpu_to_le_16(flags);
6412 	enables =
6413 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
6414 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
6415 	agg_req->enables = rte_cpu_to_le_32(enables);
6416 
6417 	HWRM_UNLOCK();
6418 	return rc;
6419 }
6420 
6421 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
6422 			struct bnxt_coal *coal, uint16_t ring_id)
6423 {
6424 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6425 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
6426 						bp->hwrm_cmd_resp_addr;
6427 	int rc;
6428 
6429 	/* Set ring coalesce parameters only for 100G NICs */
6430 	if (BNXT_CHIP_P5_P7(bp)) {
6431 		if (bnxt_hwrm_set_coal_params_p5(bp, &req))
6432 			return -1;
6433 	} else if (bnxt_stratus_device(bp)) {
6434 		bnxt_hwrm_set_coal_params(coal, &req);
6435 	} else {
6436 		return 0;
6437 	}
6438 
6439 	HWRM_PREP(&req,
6440 		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6441 		  BNXT_USE_CHIMP_MB);
6442 	req.ring_id = rte_cpu_to_le_16(ring_id);
6443 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6444 	HWRM_CHECK_RESULT();
6445 	HWRM_UNLOCK();
6446 	return 0;
6447 }
6448 
6449 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem *ctxm,
6450 				      uint8_t init_val,
6451 				      uint8_t init_offset,
6452 				      bool init_mask_set)
6453 {
6454 	ctxm->init_value = init_val;
6455 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
6456 	if (init_mask_set)
6457 		ctxm->init_offset = init_offset * 4;
6458 	else
6459 		ctxm->init_value = 0;
6460 }
6461 
6462 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp)
6463 {
6464 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6465 	char name[RTE_MEMZONE_NAMESIZE];
6466 	uint16_t type;
6467 
6468 	for (type = 0; type < ctx->types; type++) {
6469 		struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[type];
6470 		int n = 1;
6471 
6472 		if (!ctxm->max_entries || ctxm->pg_info)
6473 			continue;
6474 
6475 		if (ctxm->instance_bmap)
6476 			n = hweight32(ctxm->instance_bmap);
6477 
6478 		sprintf(name, "bnxt_ctx_pgmem_%d_%d",
6479 			bp->eth_dev->data->port_id, type);
6480 		ctxm->pg_info = rte_malloc(name, sizeof(*ctxm->pg_info) * n,
6481 					   RTE_CACHE_LINE_SIZE);
6482 		if (!ctxm->pg_info)
6483 			return -ENOMEM;
6484 	}
6485 	return 0;
6486 }
6487 
6488 static void bnxt_init_ctx_v2_driver_managed(struct bnxt *bp __rte_unused,
6489 					    struct bnxt_ctx_mem *ctxm)
6490 {
6491 	switch (ctxm->type) {
6492 	case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SQ_DB_SHADOW:
6493 	case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RQ_DB_SHADOW:
6494 	case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ_DB_SHADOW:
6495 	case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ_DB_SHADOW:
6496 		/* FALLTHROUGH */
6497 		ctxm->entry_size = 0;
6498 		ctxm->min_entries = 1;
6499 		ctxm->max_entries = 1;
6500 		break;
6501 	}
6502 }
6503 
6504 int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
6505 {
6506 	struct hwrm_func_backing_store_qcaps_v2_input req = {0};
6507 	struct hwrm_func_backing_store_qcaps_v2_output *resp =
6508 		bp->hwrm_cmd_resp_addr;
6509 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6510 	uint16_t last_valid_type = BNXT_CTX_INV;
6511 	uint16_t last_valid_idx = 0;
6512 	uint16_t types, type;
6513 	int rc;
6514 
6515 	types = 0;
6516 	type = 0;
6517 	do {
6518 		struct bnxt_ctx_mem *ctxm;
6519 		uint8_t init_val, init_off, i;
6520 		uint32_t *p;
6521 		uint32_t flags;
6522 		bool cnt = true;
6523 
6524 		HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS_V2, BNXT_USE_CHIMP_MB);
6525 		req.type = rte_cpu_to_le_16(type);
6526 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6527 		HWRM_CHECK_RESULT();
6528 
6529 		flags = rte_le_to_cpu_32(resp->flags);
6530 		type = rte_le_to_cpu_16(resp->next_valid_type);
6531 		if (!(flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID)) {
6532 			cnt = false;
6533 			goto next;
6534 		}
6535 
6536 		ctxm = &bp->ctx->ctx_arr[types];
6537 		ctxm->type = rte_le_to_cpu_16(resp->type);
6538 
6539 		ctxm->flags = flags;
6540 		if (flags &
6541 		    HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_DRIVER_MANAGED_MEMORY) {
6542 			bnxt_init_ctx_v2_driver_managed(bp, ctxm);
6543 			goto next;
6544 		}
6545 		ctxm->entry_size = rte_le_to_cpu_16(resp->entry_size);
6546 
6547 		if (ctxm->entry_size == 0)
6548 			goto next;
6549 
6550 		ctxm->instance_bmap = rte_le_to_cpu_32(resp->instance_bit_map);
6551 		ctxm->entry_multiple = resp->entry_multiple;
6552 		ctxm->max_entries = rte_le_to_cpu_32(resp->max_num_entries);
6553 		ctxm->min_entries = rte_le_to_cpu_32(resp->min_num_entries);
6554 		init_val = resp->ctx_init_value;
6555 		init_off = resp->ctx_init_offset;
6556 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
6557 					  BNXT_CTX_INIT_VALID(flags));
6558 		ctxm->split_entry_cnt = RTE_MIN(resp->subtype_valid_cnt,
6559 						BNXT_MAX_SPLIT_ENTRY);
6560 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
6561 		     i++, p++)
6562 			ctxm->split[i] = rte_le_to_cpu_32(*p);
6563 
6564 		PMD_DRV_LOG_LINE(DEBUG,
6565 			    "type:0x%x size:%d multiple:%d max:%d min:%d split:%d init_val:%d init_off:%d init:%d bmap:0x%x",
6566 			    ctxm->type, ctxm->entry_size,
6567 			    ctxm->entry_multiple, ctxm->max_entries, ctxm->min_entries,
6568 			    ctxm->split_entry_cnt, init_val, init_off,
6569 			    BNXT_CTX_INIT_VALID(flags), ctxm->instance_bmap);
6570 		last_valid_type = ctxm->type;
6571 		last_valid_idx = types;
6572 next:
6573 		if (cnt)
6574 			types++;
6575 		HWRM_UNLOCK();
6576 	} while (types < bp->ctx->types && type != BNXT_CTX_INV);
6577 	ctx->ctx_arr[last_valid_idx].last = true;
6578 	PMD_DRV_LOG_LINE(DEBUG, "Last valid type 0x%x", last_valid_type);
6579 
6580 	rc = bnxt_alloc_all_ctx_pg_info(bp);
6581 	if (rc == 0)
6582 		rc = bnxt_alloc_ctx_pg_tbls(bp);
6583 	return rc;
6584 }
6585 
6586 int bnxt_hwrm_func_backing_store_types_count(struct bnxt *bp)
6587 {
6588 	struct hwrm_func_backing_store_qcaps_v2_input req = {0};
6589 	struct hwrm_func_backing_store_qcaps_v2_output *resp =
6590 		bp->hwrm_cmd_resp_addr;
6591 	uint16_t type = 0;
6592 	int types = 0;
6593 	int rc;
6594 
6595 	/* Calculate number of valid context types */
6596 	do {
6597 		uint32_t flags;
6598 
6599 		HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS_V2, BNXT_USE_CHIMP_MB);
6600 		req.type = rte_cpu_to_le_16(type);
6601 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6602 		HWRM_CHECK_RESULT();
6603 
6604 		flags = rte_le_to_cpu_32(resp->flags);
6605 		type = rte_le_to_cpu_16(resp->next_valid_type);
6606 		HWRM_UNLOCK();
6607 
6608 		if (flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID) {
6609 			PMD_DRV_LOG_LINE(DEBUG, "Valid types 0x%x", req.type);
6610 			types++;
6611 		}
6612 	} while (type != HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID);
6613 	PMD_DRV_LOG_LINE(DEBUG, "Number of valid types %d", types);
6614 
6615 	return types;
6616 }
6617 
6618 int bnxt_hwrm_func_backing_store_ctx_alloc(struct bnxt *bp, uint16_t types)
6619 {
6620 	int alloc_len = sizeof(struct bnxt_ctx_mem_info);
6621 
6622 	if (!BNXT_CHIP_P5_P7(bp) ||
6623 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
6624 	    BNXT_VF(bp) ||
6625 	    bp->ctx)
6626 		return 0;
6627 
6628 	bp->ctx = rte_zmalloc("bnxt_ctx_mem", alloc_len,
6629 			      RTE_CACHE_LINE_SIZE);
6630 	if (bp->ctx == NULL)
6631 		return -ENOMEM;
6632 
6633 	alloc_len = sizeof(struct bnxt_ctx_mem) * types;
6634 	bp->ctx->ctx_arr = rte_zmalloc("bnxt_ctx_mem_arr",
6635 				       alloc_len,
6636 				       RTE_CACHE_LINE_SIZE);
6637 	if (bp->ctx->ctx_arr == NULL)
6638 		return -ENOMEM;
6639 
6640 	bp->ctx->types = types;
6641 	return 0;
6642 }
6643 
6644 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6645 {
6646 	struct hwrm_func_backing_store_qcaps_input req = {0};
6647 	struct hwrm_func_backing_store_qcaps_output *resp =
6648 		bp->hwrm_cmd_resp_addr;
6649 	struct bnxt_ctx_pg_info *ctx_pg;
6650 	struct bnxt_ctx_mem_info *ctx;
6651 	int rc, i, tqm_rings;
6652 
6653 	if (!BNXT_CHIP_P5_P7(bp) ||
6654 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
6655 	    BNXT_VF(bp) ||
6656 	    bp->ctx->flags & BNXT_CTX_FLAG_INITED)
6657 		return 0;
6658 
6659 	ctx = bp->ctx;
6660 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
6661 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6662 	HWRM_CHECK_RESULT_SILENT();
6663 
6664 	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
6665 	ctx->qp_min_qp1_entries =
6666 		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
6667 	ctx->qp_max_l2_entries =
6668 		rte_le_to_cpu_16(resp->qp_max_l2_entries);
6669 	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
6670 	ctx->srq_max_l2_entries =
6671 		rte_le_to_cpu_16(resp->srq_max_l2_entries);
6672 	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
6673 	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
6674 	if (BNXT_CHIP_P7(bp))
6675 		ctx->cq_max_l2_entries =
6676 			RTE_MIN(BNXT_P7_CQ_MAX_L2_ENT,
6677 				rte_le_to_cpu_16(resp->cq_max_l2_entries));
6678 	else
6679 		ctx->cq_max_l2_entries =
6680 			rte_le_to_cpu_16(resp->cq_max_l2_entries);
6681 	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
6682 	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
6683 	ctx->vnic_max_vnic_entries =
6684 		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
6685 	ctx->vnic_max_ring_table_entries =
6686 		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
6687 	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
6688 	ctx->stat_max_entries =
6689 		rte_le_to_cpu_32(resp->stat_max_entries);
6690 	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
6691 	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
6692 	ctx->tqm_min_entries_per_ring =
6693 		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
6694 	ctx->tqm_max_entries_per_ring =
6695 		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
6696 	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6697 	if (!ctx->tqm_entries_multiple)
6698 		ctx->tqm_entries_multiple = 1;
6699 	ctx->mrav_max_entries =
6700 		rte_le_to_cpu_32(resp->mrav_max_entries);
6701 	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
6702 	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
6703 	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
6704 	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6705 
6706 	ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
6707 				  RTE_MIN(ctx->tqm_fp_rings_count,
6708 					  BNXT_MAX_TQM_FP_LEGACY_RINGS) :
6709 				  bp->max_q;
6710 
6711 	/* Check if the ext ring count needs to be counted.
6712 	 * Ext ring count is available only with new FW so we should not
6713 	 * look at the field on older FW.
6714 	 */
6715 	if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
6716 	    bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
6717 		ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
6718 		ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
6719 						  ctx->tqm_fp_rings_count);
6720 	}
6721 
6722 	tqm_rings = ctx->tqm_fp_rings_count + 1;
6723 
6724 	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
6725 			    sizeof(*ctx_pg) * tqm_rings,
6726 			    RTE_CACHE_LINE_SIZE);
6727 	if (!ctx_pg) {
6728 		rc = -ENOMEM;
6729 		goto ctx_err;
6730 	}
6731 	for (i = 0; i < tqm_rings; i++, ctx_pg++)
6732 		ctx->tqm_mem[i] = ctx_pg;
6733 
6734 ctx_err:
6735 	HWRM_UNLOCK();
6736 	return rc;
6737 }
6738 
6739 int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
6740 					struct bnxt_ctx_mem *ctxm)
6741 {
6742 	struct hwrm_func_backing_store_cfg_v2_input req = {0};
6743 	struct hwrm_func_backing_store_cfg_v2_output *resp =
6744 		bp->hwrm_cmd_resp_addr;
6745 	struct bnxt_ctx_pg_info *ctx_pg;
6746 	int i, j, k;
6747 	uint32_t *p;
6748 	int rc = 0;
6749 	int w = 1;
6750 	int b = 1;
6751 
6752 	if (!BNXT_PF(bp)) {
6753 		PMD_DRV_LOG_LINE(INFO,
6754 			    "Backing store config V2 can be issued on PF only");
6755 		return 0;
6756 	}
6757 
6758 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
6759 		return 0;
6760 
6761 	if (ctxm->instance_bmap)
6762 		b = ctxm->instance_bmap;
6763 
6764 	w = hweight32(b);
6765 
6766 	for (i = 0, j = 0; i < w && rc == 0; i++) {
6767 		if (!(b & (1 << i)))
6768 			continue;
6769 
6770 		HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG_V2, BNXT_USE_CHIMP_MB);
6771 		req.type = rte_cpu_to_le_16(ctxm->type);
6772 		req.entry_size = rte_cpu_to_le_16(ctxm->entry_size);
6773 		req.subtype_valid_cnt = ctxm->split_entry_cnt;
6774 		for (k = 0, p = &req.split_entry_0; k < ctxm->split_entry_cnt; k++)
6775 			p[k] = rte_cpu_to_le_32(ctxm->split[k]);
6776 
6777 		req.instance = rte_cpu_to_le_16(i);
6778 		ctx_pg = &ctxm->pg_info[j++];
6779 		if (!ctx_pg->entries)
6780 			goto unlock;
6781 
6782 		req.num_entries = rte_cpu_to_le_32(ctx_pg->entries);
6783 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6784 				      &req.page_size_pbl_level,
6785 				      &req.page_dir);
6786 		PMD_DRV_LOG_LINE(DEBUG,
6787 			    "Backing store config V2 type:0x%x last %d, instance %d, hw %d",
6788 			    req.type, ctxm->last, j, w);
6789 		if (ctxm->last && i == (w - 1))
6790 			req.flags =
6791 			rte_cpu_to_le_32(BACKING_STORE_CFG_V2_IN_FLG_CFG_ALL_DONE);
6792 
6793 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6794 		HWRM_CHECK_RESULT();
6795 unlock:
6796 		HWRM_UNLOCK();
6797 	}
6798 	return rc;
6799 }
6800 
6801 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
6802 {
6803 	struct hwrm_func_backing_store_cfg_input req = {0};
6804 	struct hwrm_func_backing_store_cfg_output *resp =
6805 		bp->hwrm_cmd_resp_addr;
6806 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6807 	struct bnxt_ctx_pg_info *ctx_pg;
6808 	uint32_t *num_entries;
6809 	uint64_t *pg_dir;
6810 	uint8_t *pg_attr;
6811 	uint32_t ena;
6812 	int i, rc;
6813 
6814 	if (!ctx)
6815 		return 0;
6816 
6817 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
6818 	req.enables = rte_cpu_to_le_32(enables);
6819 
6820 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
6821 		ctx_pg = &ctx->qp_mem;
6822 		req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
6823 		req.qp_num_qp1_entries =
6824 			rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
6825 		req.qp_num_l2_entries =
6826 			rte_cpu_to_le_16(ctx->qp_max_l2_entries);
6827 		req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
6828 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6829 				      &req.qpc_pg_size_qpc_lvl,
6830 				      &req.qpc_page_dir);
6831 	}
6832 
6833 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
6834 		ctx_pg = &ctx->srq_mem;
6835 		req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
6836 		req.srq_num_l2_entries =
6837 				 rte_cpu_to_le_16(ctx->srq_max_l2_entries);
6838 		req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
6839 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6840 				      &req.srq_pg_size_srq_lvl,
6841 				      &req.srq_page_dir);
6842 	}
6843 
6844 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
6845 		ctx_pg = &ctx->cq_mem;
6846 		req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
6847 		req.cq_num_l2_entries =
6848 				rte_cpu_to_le_16(ctx->cq_max_l2_entries);
6849 		req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
6850 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6851 				      &req.cq_pg_size_cq_lvl,
6852 				      &req.cq_page_dir);
6853 	}
6854 
6855 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
6856 		ctx_pg = &ctx->vnic_mem;
6857 		req.vnic_num_vnic_entries =
6858 			rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
6859 		req.vnic_num_ring_table_entries =
6860 			rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
6861 		req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
6862 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6863 				      &req.vnic_pg_size_vnic_lvl,
6864 				      &req.vnic_page_dir);
6865 	}
6866 
6867 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
6868 		ctx_pg = &ctx->stat_mem;
6869 		req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
6870 		req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
6871 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6872 				      &req.stat_pg_size_stat_lvl,
6873 				      &req.stat_page_dir);
6874 	}
6875 
6876 	req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
6877 	num_entries = &req.tqm_sp_num_entries;
6878 	pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
6879 	pg_dir = &req.tqm_sp_page_dir;
6880 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
6881 	for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6882 		if (!(enables & ena))
6883 			continue;
6884 
6885 		req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
6886 
6887 		ctx_pg = ctx->tqm_mem[i];
6888 		*num_entries = rte_cpu_to_le_16(ctx_pg->entries);
6889 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6890 	}
6891 
6892 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
6893 		/* DPDK does not need to configure MRAV and TIM type.
6894 		 * So we are skipping over MRAV and TIM. Skip to configure
6895 		 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8.
6896 		 */
6897 		ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
6898 		req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
6899 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6900 				      &req.tqm_ring8_pg_size_tqm_ring_lvl,
6901 				      &req.tqm_ring8_page_dir);
6902 	}
6903 
6904 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6905 	HWRM_CHECK_RESULT();
6906 	HWRM_UNLOCK();
6907 
6908 	return rc;
6909 }
6910 
6911 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
6912 {
6913 	struct hwrm_port_qstats_ext_input req = {0};
6914 	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
6915 	struct bnxt_pf_info *pf = bp->pf;
6916 	int rc;
6917 
6918 	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
6919 	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
6920 		return 0;
6921 
6922 	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
6923 
6924 	req.port_id = rte_cpu_to_le_16(pf->port_id);
6925 	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
6926 		req.tx_stat_host_addr =
6927 			rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
6928 		req.tx_stat_size =
6929 			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
6930 	}
6931 	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
6932 		req.rx_stat_host_addr =
6933 			rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
6934 		req.rx_stat_size =
6935 			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
6936 	}
6937 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6938 
6939 	if (rc) {
6940 		bp->fw_rx_port_stats_ext_size = 0;
6941 		bp->fw_tx_port_stats_ext_size = 0;
6942 	} else {
6943 		bp->fw_rx_port_stats_ext_size =
6944 			rte_le_to_cpu_16(resp->rx_stat_size);
6945 		bp->fw_tx_port_stats_ext_size =
6946 			rte_le_to_cpu_16(resp->tx_stat_size);
6947 	}
6948 
6949 	HWRM_CHECK_RESULT();
6950 	HWRM_UNLOCK();
6951 
6952 	return rc;
6953 }
6954 
6955 int
6956 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
6957 {
6958 	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
6959 	struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
6960 		bp->hwrm_cmd_resp_addr;
6961 	int rc = 0;
6962 
6963 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
6964 	req.tunnel_type = type;
6965 	req.dest_fid = bp->fw_fid;
6966 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6967 	HWRM_CHECK_RESULT();
6968 
6969 	HWRM_UNLOCK();
6970 
6971 	return rc;
6972 }
6973 
6974 int
6975 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
6976 {
6977 	struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
6978 	struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
6979 		bp->hwrm_cmd_resp_addr;
6980 	int rc = 0;
6981 
6982 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
6983 	req.tunnel_type = type;
6984 	req.dest_fid = bp->fw_fid;
6985 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6986 	HWRM_CHECK_RESULT();
6987 
6988 	HWRM_UNLOCK();
6989 
6990 	return rc;
6991 }
6992 
6993 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
6994 {
6995 	struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
6996 	struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
6997 		bp->hwrm_cmd_resp_addr;
6998 	int rc = 0;
6999 
7000 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
7001 	req.src_fid = bp->fw_fid;
7002 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7003 	HWRM_CHECK_RESULT();
7004 
7005 	if (type)
7006 		*type = rte_le_to_cpu_32(resp->tunnel_mask);
7007 
7008 	HWRM_UNLOCK();
7009 
7010 	return rc;
7011 }
7012 
7013 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
7014 				   uint16_t *dst_fid)
7015 {
7016 	struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
7017 	struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
7018 		bp->hwrm_cmd_resp_addr;
7019 	int rc = 0;
7020 
7021 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
7022 	req.src_fid = bp->fw_fid;
7023 	req.tunnel_type = tun_type;
7024 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7025 	HWRM_CHECK_RESULT();
7026 
7027 	if (dst_fid)
7028 		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);
7029 
7030 	PMD_DRV_LOG_LINE(DEBUG, "dst_fid: %x", resp->dest_fid);
7031 
7032 	HWRM_UNLOCK();
7033 
7034 	return rc;
7035 }
7036 
7037 int bnxt_hwrm_set_mac(struct bnxt *bp)
7038 {
7039 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
7040 	struct hwrm_func_vf_cfg_input req = {0};
7041 	int rc = 0;
7042 
7043 	if (!BNXT_VF(bp))
7044 		return 0;
7045 
7046 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
7047 
7048 	req.enables =
7049 		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
7050 	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
7051 
7052 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7053 
7054 	HWRM_CHECK_RESULT();
7055 
7056 	HWRM_UNLOCK();
7057 
7058 	return rc;
7059 }
7060 
7061 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
7062 {
7063 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
7064 	struct hwrm_func_drv_if_change_input req = {0};
7065 	uint32_t flags;
7066 	int rc;
7067 
7068 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
7069 		return 0;
7070 
7071 	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
7072 	 * If we issue FUNC_DRV_IF_CHANGE with flags down before
7073 	 * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR
7074 	 */
7075 	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
7076 		return 0;
7077 
7078 	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
7079 
7080 	if (up)
7081 		req.flags =
7082 		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
7083 
7084 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7085 
7086 	HWRM_CHECK_RESULT();
7087 	flags = rte_le_to_cpu_32(resp->flags);
7088 	HWRM_UNLOCK();
7089 
7090 	if (!up)
7091 		return 0;
7092 
7093 	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
7094 		PMD_DRV_LOG_LINE(INFO, "FW reset happened while port was down");
7095 		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
7096 	}
7097 
7098 	return 0;
7099 }
7100 
7101 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7102 {
7103 	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7104 	struct bnxt_error_recovery_info *info = bp->recovery_info;
7105 	struct hwrm_error_recovery_qcfg_input req = {0};
7106 	uint32_t flags = 0;
7107 	unsigned int i;
7108 	int rc;
7109 
7110 	/* Older FW does not have error recovery support */
7111 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7112 		return 0;
7113 
7114 	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
7115 
7116 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7117 
7118 	HWRM_CHECK_RESULT();
7119 
7120 	flags = rte_le_to_cpu_32(resp->flags);
7121 	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
7122 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
7123 	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
7124 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
7125 
7126 	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
7127 	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
7128 		rc = -EINVAL;
7129 		goto err;
7130 	}
7131 
7132 	/* FW returned values are in units of 100msec */
7133 	info->driver_polling_freq =
7134 		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
7135 	info->primary_func_wait_period =
7136 		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
7137 	info->normal_func_wait_period =
7138 		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
7139 	info->primary_func_wait_period_after_reset =
7140 		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
7141 	info->max_bailout_time_after_reset =
7142 		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
7143 	info->status_regs[BNXT_FW_STATUS_REG] =
7144 		rte_le_to_cpu_32(resp->fw_health_status_reg);
7145 	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
7146 		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
7147 	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
7148 		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
7149 	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
7150 		rte_le_to_cpu_32(resp->reset_inprogress_reg);
7151 	info->reg_array_cnt =
7152 		rte_le_to_cpu_32(resp->reg_array_cnt);
7153 
7154 	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
7155 		rc = -EINVAL;
7156 		goto err;
7157 	}
7158 
7159 	for (i = 0; i < info->reg_array_cnt; i++) {
7160 		info->reset_reg[i] =
7161 			rte_le_to_cpu_32(resp->reset_reg[i]);
7162 		info->reset_reg_val[i] =
7163 			rte_le_to_cpu_32(resp->reset_reg_val[i]);
7164 		info->delay_after_reset[i] =
7165 			resp->delay_after_reset[i];
7166 	}
7167 err:
7168 	HWRM_UNLOCK();
7169 
7170 	/* Map the FW status registers */
7171 	if (!rc)
7172 		rc = bnxt_map_fw_health_status_regs(bp);
7173 
7174 	if (rc) {
7175 		rte_free(bp->recovery_info);
7176 		bp->recovery_info = NULL;
7177 	}
7178 	return rc;
7179 }
7180 
7181 int bnxt_hwrm_fw_reset(struct bnxt *bp)
7182 {
7183 	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
7184 	struct hwrm_fw_reset_input req = {0};
7185 	int rc;
7186 
7187 	if (!BNXT_PF(bp))
7188 		return -EOPNOTSUPP;
7189 
7190 	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
7191 
7192 	req.embedded_proc_type =
7193 		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
7194 	req.selfrst_status =
7195 		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
7196 	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
7197 
7198 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
7199 				    BNXT_USE_KONG(bp));
7200 
7201 	HWRM_CHECK_RESULT();
7202 	HWRM_UNLOCK();
7203 
7204 	return rc;
7205 }
7206 
7207 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
7208 {
7209 	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
7210 	struct hwrm_port_ts_query_input req = {0};
7211 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7212 	uint32_t flags = 0;
7213 	int rc;
7214 
7215 	if (!ptp)
7216 		return 0;
7217 
7218 	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
7219 
7220 	switch (path) {
7221 	case BNXT_PTP_FLAGS_PATH_TX:
7222 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
7223 		break;
7224 	case BNXT_PTP_FLAGS_PATH_RX:
7225 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
7226 		break;
7227 	case BNXT_PTP_FLAGS_CURRENT_TIME:
7228 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
7229 		break;
7230 	}
7231 
7232 	req.flags = rte_cpu_to_le_32(flags);
7233 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
7234 
7235 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7236 
7237 	HWRM_CHECK_RESULT();
7238 
7239 	if (timestamp) {
7240 		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
7241 		*timestamp |=
7242 			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
7243 	}
7244 	HWRM_UNLOCK();
7245 
7246 	return rc;
7247 }
7248 
7249 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
7250 {
7251 	int rc = 0;
7252 
7253 	struct hwrm_cfa_counter_qcaps_input req = {0};
7254 	struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7255 
7256 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7257 		PMD_DRV_LOG_LINE(DEBUG,
7258 			    "Not a PF or trusted VF. Command not supported");
7259 		return 0;
7260 	}
7261 
7262 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
7263 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
7264 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
7265 
7266 	HWRM_CHECK_RESULT();
7267 	if (max_fc)
7268 		*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
7269 	HWRM_UNLOCK();
7270 
7271 	return 0;
7272 }
7273 
7274 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
7275 {
7276 	int rc = 0;
7277 	struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
7278 	struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
7279 
7280 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7281 		PMD_DRV_LOG_LINE(DEBUG,
7282 			    "Not a PF or trusted VF. Command not supported");
7283 		return 0;
7284 	}
7285 
7286 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
7287 
7288 	req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
7289 	req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
7290 	req.page_dir = rte_cpu_to_le_64(dma_addr);
7291 
7292 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
7293 
7294 	HWRM_CHECK_RESULT();
7295 	if (ctx_id) {
7296 		*ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
7297 		PMD_DRV_LOG_LINE(DEBUG, "ctx_id = %d", *ctx_id);
7298 	}
7299 	HWRM_UNLOCK();
7300 
7301 	return 0;
7302 }
7303 
7304 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
7305 {
7306 	int rc = 0;
7307 	struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
7308 	struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
7309 
7310 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7311 		PMD_DRV_LOG_LINE(DEBUG,
7312 			    "Not a PF or trusted VF. Command not supported");
7313 		return 0;
7314 	}
7315 
7316 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
7317 
7318 	req.ctx_id = rte_cpu_to_le_16(ctx_id);
7319 
7320 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
7321 
7322 	HWRM_CHECK_RESULT();
7323 	HWRM_UNLOCK();
7324 
7325 	return rc;
7326 }
7327 
7328 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
7329 			      uint16_t cntr, uint16_t ctx_id,
7330 			      uint32_t num_entries, bool enable)
7331 {
7332 	struct hwrm_cfa_counter_cfg_input req = {0};
7333 	struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
7334 	uint16_t flags = 0;
7335 	int rc;
7336 
7337 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7338 		PMD_DRV_LOG_LINE(DEBUG,
7339 			    "Not a PF or trusted VF. Command not supported");
7340 		return 0;
7341 	}
7342 
7343 	HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
7344 
7345 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
7346 	req.counter_type = rte_cpu_to_le_16(cntr);
7347 	flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
7348 		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
7349 	flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
7350 	if (dir == BNXT_DIR_RX)
7351 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
7352 	else if (dir == BNXT_DIR_TX)
7353 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
7354 	req.flags = rte_cpu_to_le_16(flags);
7355 	req.ctx_id =  rte_cpu_to_le_16(ctx_id);
7356 	req.num_entries = rte_cpu_to_le_32(num_entries);
7357 
7358 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
7359 	HWRM_CHECK_RESULT();
7360 	HWRM_UNLOCK();
7361 
7362 	return 0;
7363 }
7364 
7365 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
7366 				 enum bnxt_flow_dir dir,
7367 				 uint16_t cntr,
7368 				 uint16_t num_entries)
7369 {
7370 	struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
7371 	struct hwrm_cfa_counter_qstats_input req = {0};
7372 	uint16_t flow_ctx_id = 0;
7373 	uint16_t flags = 0;
7374 	int rc = 0;
7375 
7376 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7377 		PMD_DRV_LOG_LINE(DEBUG,
7378 			    "Not a PF or trusted VF. Command not supported");
7379 		return 0;
7380 	}
7381 
7382 	if (dir == BNXT_DIR_RX) {
7383 		flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
7384 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
7385 	} else if (dir == BNXT_DIR_TX) {
7386 		flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
7387 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
7388 	}
7389 
7390 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
7391 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
7392 	req.counter_type = rte_cpu_to_le_16(cntr);
7393 	req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
7394 	req.num_entries = rte_cpu_to_le_16(num_entries);
7395 	req.flags = rte_cpu_to_le_16(flags);
7396 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
7397 
7398 	HWRM_CHECK_RESULT();
7399 	HWRM_UNLOCK();
7400 
7401 	return 0;
7402 }
7403 
7404 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
7405 				uint16_t *first_vf_id)
7406 {
7407 	int rc = 0;
7408 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
7409 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7410 
7411 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
7412 
7413 	req.fid = rte_cpu_to_le_16(fid);
7414 
7415 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7416 
7417 	HWRM_CHECK_RESULT();
7418 
7419 	if (first_vf_id)
7420 		*first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
7421 
7422 	HWRM_UNLOCK();
7423 
7424 	return rc;
7425 }
7426 
7427 int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
7428 {
7429 	struct hwrm_cfa_pair_info_output *resp = bp->hwrm_cmd_resp_addr;
7430 	struct hwrm_cfa_pair_info_input req = {0};
7431 	int rc = 0;
7432 
7433 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7434 		PMD_DRV_LOG_LINE(DEBUG,
7435 			    "Not a PF or trusted VF. Command not supported");
7436 		return 0;
7437 	}
7438 
7439 	HWRM_PREP(&req, HWRM_CFA_PAIR_INFO, BNXT_USE_CHIMP_MB);
7440 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
7441 		 bp->eth_dev->data->name, rep_bp->vf_id);
7442 	req.flags =
7443 		rte_cpu_to_le_32(HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE);
7444 
7445 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7446 	HWRM_CHECK_RESULT();
7447 	if (rc == HWRM_ERR_CODE_SUCCESS && strlen(resp->pair_name)) {
7448 		HWRM_UNLOCK();
7449 		return !rc;
7450 	}
7451 	HWRM_UNLOCK();
7452 	return rc;
7453 }
7454 
7455 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
7456 {
7457 	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
7458 	struct hwrm_cfa_pair_alloc_input req = {0};
7459 	int rc;
7460 
7461 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7462 		PMD_DRV_LOG_LINE(DEBUG,
7463 			    "Not a PF or trusted VF. Command not supported");
7464 		return 0;
7465 	}
7466 
7467 	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
7468 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
7469 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
7470 		 bp->eth_dev->data->name, rep_bp->vf_id);
7471 
7472 	req.pf_b_id = rep_bp->parent_pf_idx;
7473 	req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
7474 						rte_cpu_to_le_16(rep_bp->vf_id);
7475 	req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
7476 	req.host_b_id = 1; /* TBD - Confirm if this is OK */
7477 
7478 	req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
7479 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
7480 	req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
7481 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
7482 	req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
7483 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
7484 	req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
7485 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
7486 
7487 	req.q_ab = rep_bp->rep_q_r2f;
7488 	req.q_ba = rep_bp->rep_q_f2r;
7489 	req.fc_ab = rep_bp->rep_fc_r2f;
7490 	req.fc_ba = rep_bp->rep_fc_f2r;
7491 
7492 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7493 	HWRM_CHECK_RESULT();
7494 
7495 	HWRM_UNLOCK();
7496 	PMD_DRV_LOG_LINE(DEBUG, "%s %d allocated",
7497 		    BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
7498 	return rc;
7499 }
7500 
7501 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
7502 {
7503 	struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
7504 	struct hwrm_cfa_pair_free_input req = {0};
7505 	int rc;
7506 
7507 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
7508 		PMD_DRV_LOG_LINE(DEBUG,
7509 			    "Not a PF or trusted VF. Command not supported");
7510 		return 0;
7511 	}
7512 
7513 	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
7514 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
7515 		 bp->eth_dev->data->name, rep_bp->vf_id);
7516 	req.pf_b_id = rep_bp->parent_pf_idx;
7517 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
7518 	req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
7519 						rte_cpu_to_le_16(rep_bp->vf_id);
7520 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7521 	HWRM_CHECK_RESULT();
7522 	HWRM_UNLOCK();
7523 	PMD_DRV_LOG_LINE(DEBUG, "%s %d freed", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
7524 		    rep_bp->vf_id);
7525 	return rc;
7526 }
7527 
7528 int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
7529 			    uint32_t echo_req_data2)
7530 {
7531 	struct hwrm_func_echo_response_input req = {0};
7532 	struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
7533 	int rc;
7534 
7535 	HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
7536 	req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
7537 	req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
7538 
7539 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7540 
7541 	HWRM_CHECK_RESULT();
7542 	HWRM_UNLOCK();
7543 
7544 	return rc;
7545 }
7546 
7547 int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
7548 {
7549 	struct hwrm_ver_get_input req = {.req_type = 0 };
7550 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7551 	int rc = 0;
7552 
7553 	bp->max_req_len = HWRM_MAX_REQ_LEN;
7554 	bp->max_resp_len = BNXT_PAGE_SIZE;
7555 	bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
7556 
7557 	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
7558 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7559 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
7560 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7561 
7562 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7563 
7564 	HWRM_CHECK_RESULT_SILENT();
7565 	HWRM_UNLOCK();
7566 
7567 	return rc;
7568 }
7569 
7570 int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
7571 					  uint16_t page_number, uint16_t start_addr,
7572 					  uint16_t data_length, uint8_t *buf)
7573 {
7574 	struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr;
7575 	struct hwrm_port_phy_i2c_read_input req = {0};
7576 	uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET;
7577 	int rc, byte_offset = 0;
7578 
7579 	do {
7580 		uint16_t xfer_size;
7581 
7582 		HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB);
7583 		req.i2c_slave_addr = i2c_addr;
7584 		req.page_number = rte_cpu_to_le_16(page_number);
7585 		req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
7586 
7587 		xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
7588 		req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset);
7589 		req.data_length = xfer_size;
7590 		req.enables = rte_cpu_to_le_32(start_addr + byte_offset ? enables : 0);
7591 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7592 		HWRM_CHECK_RESULT();
7593 
7594 		memcpy(buf + byte_offset, resp->data, xfer_size);
7595 
7596 		data_length -= xfer_size;
7597 		byte_offset += xfer_size;
7598 
7599 		HWRM_UNLOCK();
7600 	} while (data_length > 0);
7601 
7602 	return rc;
7603 }
7604 
7605 void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
7606 {
7607 	struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
7608 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
7609 	struct bnxt_ring *ring = txr->tx_ring_struct;
7610 	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
7611 
7612 	bnxt_hwrm_ring_free(bp,
7613 			    ring,
7614 			    HWRM_RING_FREE_INPUT_RING_TYPE_TX,
7615 			    cpr->cp_ring_struct->fw_ring_id);
7616 	txr->tx_raw_prod = 0;
7617 	txr->tx_raw_cons = 0;
7618 	memset(txr->tx_desc_ring, 0,
7619 		txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring));
7620 	memset(txr->tx_buf_ring, 0,
7621 		txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring));
7622 
7623 	bnxt_hwrm_stat_ctx_free(bp, cpr);
7624 
7625 	bnxt_free_cp_ring(bp, cpr);
7626 }
7627 
7628 int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
7629 {
7630 	struct hwrm_func_cfg_input req = {0};
7631 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
7632 	int rc;
7633 
7634 	if (!BNXT_PF(bp))
7635 		return 0;
7636 
7637 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
7638 
7639 	req.fid = rte_cpu_to_le_16(0xffff);
7640 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU);
7641 	req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
7642 
7643 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7644 	HWRM_CHECK_RESULT();
7645 	HWRM_UNLOCK();
7646 
7647 	return rc;
7648 }
7649 
7650 int bnxt_hwrm_func_cfg_mpc(struct bnxt *bp, uint8_t mpc_chnls_msk, bool enable)
7651 {
7652 	struct hwrm_func_cfg_input req = {0};
7653 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
7654 	int rc;
7655 	uint16_t mpc_chnls = 0;
7656 
7657 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
7658 	req.fid = rte_cpu_to_le_16(0xffff);
7659 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MPC_CHNLS);
7660 	if (enable) {
7661 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TCE))
7662 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_ENABLE;
7663 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RCE))
7664 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_ENABLE;
7665 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TE_CFA))
7666 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_ENABLE;
7667 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RE_CFA))
7668 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_ENABLE;
7669 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_PRIMATE))
7670 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_ENABLE;
7671 	} else {
7672 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TCE))
7673 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_DISABLE;
7674 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RCE))
7675 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_DISABLE;
7676 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TE_CFA))
7677 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_DISABLE;
7678 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RE_CFA))
7679 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_DISABLE;
7680 		if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_PRIMATE))
7681 			mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_DISABLE;
7682 	}
7683 	req.mpc_chnls = rte_cpu_to_le_16(mpc_chnls);
7684 
7685 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7686 
7687 	HWRM_CHECK_RESULT();
7688 	HWRM_UNLOCK();
7689 
7690 	return rc;
7691 }
7692 
7693 int
7694 bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
7695 {
7696 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
7697 	struct hwrm_vnic_rss_cfg_input req = {0};
7698 	int nr_ctxs = vnic->num_lb_ctxts;
7699 	int i, rc = 0;
7700 
7701 	for (i = 0; i < nr_ctxs; i++) {
7702 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
7703 
7704 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
7705 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
7706 
7707 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7708 
7709 		HWRM_CHECK_RESULT();
7710 		HWRM_UNLOCK();
7711 	}
7712 
7713 	return rc;
7714 }
7715 
7716 int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp,
7717 			 uint32_t *in,
7718 			 uint16_t in_len,
7719 			 uint32_t *out,
7720 			 uint16_t out_len)
7721 {
7722 	struct hwrm_oem_cmd_output *resp = bp->hwrm_cmd_resp_addr;
7723 	struct hwrm_oem_cmd_input req = {0};
7724 	int rc = 0;
7725 
7726 	if (!BNXT_VF(bp)) {
7727 		PMD_DRV_LOG_LINE(DEBUG, "Not a VF. Command not supported");
7728 		return -ENOTSUP;
7729 	}
7730 
7731 	HWRM_PREP(&req, HWRM_OEM_CMD, BNXT_USE_CHIMP_MB);
7732 
7733 	req.oem_id = rte_cpu_to_le_32(0x14e4);
7734 	req.naming_authority =
7735 		HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG;
7736 	req.message_family =
7737 		HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW;
7738 	memcpy(req.oem_data, in, in_len);
7739 
7740 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7741 
7742 	HWRM_CHECK_RESULT();
7743 	if (resp->oem_id == 0x14e4 &&
7744 	    resp->naming_authority ==
7745 		HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG &&
7746 	    resp->message_family ==
7747 		HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW)
7748 		memcpy(out, resp->oem_data, out_len);
7749 	HWRM_UNLOCK();
7750 
7751 	return rc;
7752 }
7753 
7754 int
7755 bnxt_hwrm_vnic_update(struct bnxt *bp,
7756 		      struct bnxt_vnic_info *vnic,
7757 		      uint8_t valid)
7758 {
7759 	struct hwrm_vnic_update_input req = {0};
7760 	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7761 	int rc;
7762 
7763 	HWRM_PREP(&req, HWRM_VNIC_UPDATE, BNXT_USE_CHIMP_MB);
7764 
7765 	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
7766 
7767 	if (valid & HWRM_VNIC_UPDATE_INPUT_ENABLES_METADATA_FORMAT_TYPE_VALID)
7768 		req.metadata_format_type = vnic->metadata_format;
7769 	if (valid & HWRM_VNIC_UPDATE_INPUT_ENABLES_VNIC_STATE_VALID)
7770 		req.vnic_state = vnic->state;
7771 	if (valid & HWRM_VNIC_UPDATE_INPUT_ENABLES_MRU_VALID)
7772 		req.mru = rte_cpu_to_le_16(vnic->mru);
7773 
7774 	req.enables = rte_cpu_to_le_32(valid);
7775 
7776 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7777 
7778 	HWRM_CHECK_RESULT();
7779 	HWRM_UNLOCK();
7780 
7781 	return rc;
7782 }
7783 
7784 int
7785 bnxt_hwrm_release_afm_func(struct bnxt *bp, uint16_t fid, uint16_t rfid,
7786 			   uint8_t type, uint32_t flags)
7787 {
7788 	int rc = 0;
7789 	struct hwrm_cfa_release_afm_func_input req = { 0 };
7790 	struct hwrm_cfa_release_afm_func_output *resp = bp->hwrm_cmd_resp_addr;
7791 
7792 	HWRM_PREP(&req, HWRM_CFA_RELEASE_AFM_FUNC, BNXT_USE_CHIMP_MB);
7793 
7794 	req.fid = rte_le_to_cpu_16(fid);
7795 	req.rfid = rte_le_to_cpu_16(rfid);
7796 	req.flags = rte_le_to_cpu_32(flags);
7797 	req.type = type;
7798 
7799 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
7800 
7801 	HWRM_CHECK_RESULT();
7802 	HWRM_UNLOCK();
7803 
7804 	return rc;
7805 }
7806