xref: /dpdk/drivers/net/bnxt/bnxt_hwrm.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <unistd.h>
7 
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26 
27 #define HWRM_SPEC_CODE_1_8_3		0x10803
28 #define HWRM_VERSION_1_9_1		0x10901
29 #define HWRM_VERSION_1_9_2		0x10903
30 #define HWRM_VERSION_1_10_2_13		0x10a020d
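
/*
 * A note on the encodings above, inferred from bnxt_hwrm_ver_get() below:
 * the HWRM_SPEC_CODE_* and HWRM_VERSION_1_9_* values are compared against
 * bp->hwrm_spec_code, which is packed as
 * (intf_maj << 16) | (intf_min << 8) | intf_upd, so 0x10803 is interface
 * version 1.8.3. HWRM_VERSION_1_10_2_13 is compared against bp->fw_ver,
 * which is packed as (fw_maj << 24) | (fw_min << 16) | (fw_bld << 8) |
 * fw_rsvd, so 0x10a020d is FW version 1.10.2.13.
 */
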
31 struct bnxt_plcmodes_cfg {
32 	uint32_t	flags;
33 	uint16_t	jumbo_thresh;
34 	uint16_t	hds_offset;
35 	uint16_t	hds_threshold;
36 };
37 
38 static int page_getenum(size_t size)
39 {
40 	if (size <= 1 << 4)
41 		return 4;
42 	if (size <= 1 << 12)
43 		return 12;
44 	if (size <= 1 << 13)
45 		return 13;
46 	if (size <= 1 << 16)
47 		return 16;
48 	if (size <= 1 << 21)
49 		return 21;
50 	if (size <= 1 << 22)
51 		return 22;
52 	if (size <= 1 << 30)
53 		return 30;
54 	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55 	return sizeof(int) * 8 - 1;
56 }
57 
58 static int page_roundup(size_t size)
59 {
60 	return 1 << page_getenum(size);
61 }
62 
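/*
 * Illustrative examples, not driver code: page_getenum(4096) == 12 and
 * page_getenum(4097) == 13, so page_roundup(4097) == 8192. Sizes are
 * rounded up to the next page size the hardware supports, and anything
 * above 1 << 30 trips the error log above.
 */
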
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
64 				  uint8_t *pg_attr,
65 				  uint64_t *pg_dir)
66 {
67 	if (rmem->nr_pages == 0)
68 		return;
69 
70 	if (rmem->nr_pages > 1) {
71 		*pg_attr = 1;
72 		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
73 	} else {
74 		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
75 	}
76 }
77 
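/*
 * A descriptive note on the helper above: when the ring memory spans more
 * than one page, *pg_attr is set to 1 to select one level of page-table
 * indirection and *pg_dir carries the DMA address of the page table; for
 * a single page, *pg_dir carries the DMA address of the page itself.
 */
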
78 static struct bnxt_cp_ring_info*
79 bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
80 {
81 	struct bnxt_cp_ring_info *cp_ring = NULL;
82 	uint16_t i;
83 
84 	switch (type) {
85 	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
86 	case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
87 		/* FALLTHROUGH */
88 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
89 			struct bnxt_rx_queue *rxq = bp->rx_queues[i];
90 
91 			if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
92 			    rte_cpu_to_le_16(rid)) {
93 				return rxq->cp_ring;
94 			}
95 		}
96 		break;
97 	case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
98 		for (i = 0; i < bp->tx_cp_nr_rings; i++) {
99 			struct bnxt_tx_queue *txq = bp->tx_queues[i];
100 
101 			if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
102 			    rte_cpu_to_le_16(rid)) {
103 				return txq->cp_ring;
104 			}
105 		}
106 		break;
107 	default:
108 		return cp_ring;
109 	}
110 	return cp_ring;
111 }
112 
113 /* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
114  * If CMPL_BASE_TYPE_HWRM_DONE is not encountered on the last pass before
115  * the timeout, we force the done bit so that the cleanup can proceed.
116  * If cpr is NULL, do nothing: the HWRM command is not for a Tx/Rx/AGG
117  * ring cleanup.
118  */
119 static int
120 bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
121 			bool tx, bool rx, bool timeout)
122 {
123 	int done = 0;
124 
125 	if (cpr != NULL) {
126 		if (tx)
127 			done = bnxt_flush_tx_cmp(cpr);
128 
129 		if (rx)
130 			done = bnxt_flush_rx_cmp(cpr);
131 
132 		if (done)
133 			PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
134 				    rx ? "Rx" : "Tx");
135 
136 		/* We are about to timeout and still haven't seen the
137 		 * HWRM done for the Ring free. Force the cleanup.
138 		 */
139 		if (!done && timeout) {
140 			done = 1;
141 			PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
142 				    rx ? "Rx" : "Tx");
143 		}
144 	} else {
145 		/* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
146 		 * Otherwise the cpr would have been valid. So do nothing.
147 		 */
148 		done = 1;
149 	}
150 
151 	return done;
152 }
153 
154 /*
155  * HWRM Functions (sent to HWRM)
156  * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
157  * if the HWRM command times out, or a negative error code if the command
158  * is rejected by the FW.
159  */
160 
161 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
162 				  uint32_t msg_len, bool use_kong_mb)
163 {
164 	unsigned int i;
165 	struct input *req = msg;
166 	struct output *resp = bp->hwrm_cmd_resp_addr;
167 	uint32_t *data = msg;
168 	uint8_t *bar;
169 	uint8_t *valid;
170 	uint16_t max_req_len = bp->max_req_len;
171 	struct hwrm_short_input short_input = { 0 };
172 	uint16_t bar_offset = use_kong_mb ?
173 		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
174 	uint16_t mb_trigger_offset = use_kong_mb ?
175 		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
176 	struct bnxt_cp_ring_info *cpr = NULL;
177 	bool is_rx = false;
178 	bool is_tx = false;
179 	uint32_t timeout;
180 
181 	/* Do not send HWRM commands to firmware in error state */
182 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
183 		return 0;
184 
185 	timeout = bp->hwrm_cmd_timeout;
186 
187 	/* Update the message length for backing store config for new FW. */
188 	if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
189 	    rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
190 		msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
191 
192 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
193 	    msg_len > bp->max_req_len) {
194 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
195 
196 		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
197 		memcpy(short_cmd_req, req, msg_len);
198 
199 		short_input.req_type = rte_cpu_to_le_16(req->req_type);
200 		short_input.signature = rte_cpu_to_le_16(
201 					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
202 		short_input.size = rte_cpu_to_le_16(msg_len);
203 		short_input.req_addr =
204 			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
205 
206 		data = (uint32_t *)&short_input;
207 		msg_len = sizeof(short_input);
208 
209 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
210 	}
211 
212 	/* Write request msg to hwrm channel */
213 	for (i = 0; i < msg_len; i += 4) {
214 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
215 		rte_write32(*data, bar);
216 		data++;
217 	}
218 
219 	/* Zero the rest of the request space */
220 	for (; i < max_req_len; i += 4) {
221 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
222 		rte_write32(0, bar);
223 	}
224 
225 	/* Ring channel doorbell */
226 	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
227 	rte_write32(1, bar);
228 	/*
229 	 * Make sure the channel doorbell ring command complete before
230 	 * reading the response to avoid getting stale or invalid
231 	 * responses.
232 	 */
233 	rte_io_mb();
234 
235 	/* Check ring flush is done.
236 	 * This is valid only for Tx and Rx rings (including AGG rings).
237 	 * The Tx and Rx rings should be freed once the HW confirms all
238 	 * the internal buffers and BDs associated with the rings are
239 	 * consumed and the corresponding DMA is handled.
240 	 */
241 	if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
242 		/* Check if the TxCQ matches. If that fails, check if the
243 		 * RxCQ matches. If neither matches, is_rx and is_tx stay false.
244 		 */
245 		cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
246 					       HWRM_RING_FREE_INPUT_RING_TYPE_TX);
247 		if (cpr == NULL) {
248 			/* Not a TxCQ. Check if the RxCQ matches. */
249 			cpr =
250 			bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
251 						 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
252 			if (cpr != NULL)
253 				is_rx = true;
254 		} else {
255 			is_tx = true;
256 		}
257 	}
258 
259 	/* Poll for the valid bit */
260 	for (i = 0; i < timeout; i++) {
261 		int done;
262 
263 		done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
264 					       i == timeout - 1);
265 		/* Sanity check on the resp->resp_len */
266 		rte_io_rmb();
267 		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
268 			/* Last byte of resp contains the valid key */
269 			valid = (uint8_t *)resp + resp->resp_len - 1;
270 			if (*valid == HWRM_RESP_VALID_KEY && done)
271 				break;
272 		}
273 		rte_delay_us(1);
274 	}
275 
276 	if (i >= timeout) {
277 		/* Suppress VER_GET timeout messages during reset recovery */
278 		if (bp->flags & BNXT_FLAG_FW_RESET &&
279 		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
280 			return -ETIMEDOUT;
281 
282 		PMD_DRV_LOG(ERR,
283 			    "Error(timeout) sending msg 0x%04x, seq_id %d\n",
284 			    req->req_type, req->seq_id);
285 		return -ETIMEDOUT;
286 	}
287 	return 0;
288 }
289 
290 /*
291  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
292  * spinlock and does the initial processing.
293  *
294  * HWRM_CHECK_RESULT() checks the result and, on failure, releases the
295  * spinlock and returns from the calling function. If the function does not
296  * use the regular int return codes, HWRM_CHECK_RESULT() should not be used
297  * directly; rather, it should be copied and modified to suit the function.
298  *
299  * HWRM_UNLOCK() must be called after all response processing is completed.
300  */
301 #define HWRM_PREP(req, type, kong) do {	\
302 	rte_spinlock_lock(&bp->hwrm_lock); \
303 	if (bp->hwrm_cmd_resp_addr == NULL) { \
304 		rte_spinlock_unlock(&bp->hwrm_lock); \
305 		return -EACCES; \
306 	} \
307 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
308 	(req)->req_type = rte_cpu_to_le_16(type); \
309 	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
310 	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
311 		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
312 	(req)->target_id = rte_cpu_to_le_16(0xffff); \
313 	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
314 } while (0)
315 
316 #define HWRM_CHECK_RESULT_SILENT() do {\
317 	if (rc) { \
318 		rte_spinlock_unlock(&bp->hwrm_lock); \
319 		return rc; \
320 	} \
321 	if (resp->error_code) { \
322 		rc = rte_le_to_cpu_16(resp->error_code); \
323 		rte_spinlock_unlock(&bp->hwrm_lock); \
324 		return rc; \
325 	} \
326 } while (0)
327 
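/*
 * A usage note inferred from the callers below: the _SILENT variant is
 * used when probing features that older firmware may not support, e.g.
 * bnxt_hwrm_func_resc_qcaps() and bnxt_hwrm_port_phy_qcaps(), where a
 * failure is expected and handled by the caller rather than logged.
 */
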
328 #define HWRM_CHECK_RESULT() do {\
329 	if (rc) { \
330 		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
331 		rte_spinlock_unlock(&bp->hwrm_lock); \
332 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
333 			rc = -EACCES; \
334 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
335 			rc = -ENOSPC; \
336 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
337 			rc = -EINVAL; \
338 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
339 			rc = -ENOTSUP; \
340 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
341 			rc = -EAGAIN; \
342 		else if (rc > 0) \
343 			rc = -EIO; \
344 		return rc; \
345 	} \
346 	if (resp->error_code) { \
347 		rc = rte_le_to_cpu_16(resp->error_code); \
348 		if (resp->resp_len >= 16) { \
349 			struct hwrm_err_output *tmp_hwrm_err_op = \
350 						(void *)resp; \
351 			PMD_DRV_LOG(ERR, \
352 				"error %d:%d:%08x:%04x\n", \
353 				rc, tmp_hwrm_err_op->cmd_err, \
354 				rte_le_to_cpu_32(\
355 					tmp_hwrm_err_op->opaque_0), \
356 				rte_le_to_cpu_16(\
357 					tmp_hwrm_err_op->opaque_1)); \
358 		} else { \
359 			PMD_DRV_LOG(ERR, "error %d\n", rc); \
360 		} \
361 		rte_spinlock_unlock(&bp->hwrm_lock); \
362 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
363 			rc = -EACCES; \
364 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
365 			rc = -ENOSPC; \
366 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
367 			rc = -EINVAL; \
368 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
369 			rc = -ENOTSUP; \
370 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
371 			rc = -EAGAIN; \
372 		else if (rc > 0) \
373 			rc = -EIO; \
374 		return rc; \
375 	} \
376 } while (0)
377 
378 #define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
379 
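/*
 * A minimal usage sketch of the macros above, illustrative only:
 * bnxt_hwrm_example() is hypothetical and not part of the driver. Note
 * that HWRM_CHECK_RESULT() returns from the calling function on error
 * (releasing the lock), and that the response must only be read while
 * the lock is still held:
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc;
 *		struct hwrm_func_reset_input req = {.req_type = 0 };
 *		struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *					    BNXT_USE_CHIMP_MB);
 *		HWRM_CHECK_RESULT();
 *		(read any needed fields out of the response here)
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */
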
380 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
381 				bool use_kong_mb,
382 				uint16_t msg_type,
383 				void *msg,
384 				uint32_t msg_len,
385 				void *resp_msg,
386 				uint32_t resp_len)
387 {
388 	int rc = 0;
389 	bool mailbox = BNXT_USE_CHIMP_MB;
390 	struct input *req = msg;
391 	struct output *resp = bp->hwrm_cmd_resp_addr;
392 
393 	if (use_kong_mb)
394 		mailbox = BNXT_USE_KONG(bp);
395 
396 	HWRM_PREP(req, msg_type, mailbox);
397 
398 	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
399 
400 	HWRM_CHECK_RESULT();
401 
402 	if (resp_msg)
403 		memcpy(resp_msg, resp, resp_len);
404 
405 	HWRM_UNLOCK();
406 
407 	return rc;
408 }
409 
410 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
411 				  bool use_kong_mb,
412 				  uint16_t tf_type,
413 				  uint16_t tf_subtype,
414 				  uint32_t *tf_response_code,
415 				  void *msg,
416 				  uint32_t msg_len,
417 				  void *response,
418 				  uint32_t response_len)
419 {
420 	int rc = 0;
421 	struct hwrm_cfa_tflib_input req = { .req_type = 0 };
422 	struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
423 	bool mailbox = BNXT_USE_CHIMP_MB;
424 
425 	if (msg_len > sizeof(req.tf_req))
426 		return -ENOMEM;
427 
428 	if (use_kong_mb)
429 		mailbox = BNXT_USE_KONG(bp);
430 
431 	HWRM_PREP(&req, HWRM_TF, mailbox);
432 	/* Build request using the user supplied request payload.
433 	 * TLV request size is checked at build time against HWRM
434 	 * request max size, thus no checking required.
435 	 */
436 	req.tf_type = rte_cpu_to_le_16(tf_type);
437 	req.tf_subtype = rte_cpu_to_le_16(tf_subtype);
438 	memcpy(req.tf_req, msg, msg_len);
439 
440 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
441 	HWRM_CHECK_RESULT();
442 
443 	/* Copy the response to the user-provided buffer. Post process the
444 	 * response data: copy only the 'payload', since the HWRM data
445 	 * structure really is HWRM header + msg header + payload and the
446 	 * TFLIB only provided a payload placeholder.
447 	 */
448 	if (response != NULL && response_len != 0)
449 		memcpy(response, resp->tf_resp, response_len);
455 
456 	/* Extract the internal tflib response code */
457 	*tf_response_code = rte_le_to_cpu_32(resp->tf_resp_code);
458 	HWRM_UNLOCK();
459 
460 	return rc;
461 }
462 
463 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
464 {
465 	int rc = 0;
466 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
467 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
468 
469 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
470 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
471 	req.mask = 0;
472 
473 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
474 
475 	HWRM_CHECK_RESULT();
476 	HWRM_UNLOCK();
477 
478 	return rc;
479 }
480 
481 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
482 				 struct bnxt_vnic_info *vnic,
483 				 uint16_t vlan_count,
484 				 struct bnxt_vlan_table_entry *vlan_table)
485 {
486 	int rc = 0;
487 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
488 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
489 	uint32_t mask = 0;
490 
491 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
492 		return rc;
493 
494 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
495 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
496 
497 	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
498 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
499 	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
500 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
501 
502 	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
503 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
504 
505 	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
506 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
507 	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
508 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
509 		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
510 		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
511 	}
512 	if (vlan_table) {
513 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
514 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
515 		req.vlan_tag_tbl_addr =
516 			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
517 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
518 	}
519 	req.mask = rte_cpu_to_le_32(mask);
520 
521 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
522 
523 	HWRM_CHECK_RESULT();
524 	HWRM_UNLOCK();
525 
526 	return rc;
527 }
528 
529 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
530 			uint16_t vlan_count,
531 			struct bnxt_vlan_antispoof_table_entry *vlan_table)
532 {
533 	int rc = 0;
534 	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
535 	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
536 						bp->hwrm_cmd_resp_addr;
537 
538 	/*
539 	 * Older HWRM versions did not support this command, and the set_rx_mask
540 	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
541 	 * removed from set_rx_mask call, and this command was added.
542 	 *
543 	 * This command is also present in 1.7.8.0, and from 1.7.8.11
544 	 * onward.
545 	 */
546 	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
547 		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
548 			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
549 					(11)))
550 				return 0;
551 		}
552 	}
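	/* For reference, bp->fw_ver is packed by bnxt_hwrm_ver_get() as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so e.g.
	 * (1 << 24) | (7 << 16) | (8 << 8) | (11) encodes FW 1.7.8.11.
	 */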
553 	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
554 	req.fid = rte_cpu_to_le_16(fid);
555 
556 	req.vlan_tag_mask_tbl_addr =
557 		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
558 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
559 
560 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
561 
562 	HWRM_CHECK_RESULT();
563 	HWRM_UNLOCK();
564 
565 	return rc;
566 }
567 
568 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
569 			     struct bnxt_filter_info *filter)
570 {
571 	int rc = 0;
572 	struct bnxt_filter_info *l2_filter = filter;
573 	struct bnxt_vnic_info *vnic = NULL;
574 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
575 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
576 
577 	if (filter->fw_l2_filter_id == UINT64_MAX)
578 		return 0;
579 
580 	if (filter->matching_l2_fltr_ptr)
581 		l2_filter = filter->matching_l2_fltr_ptr;
582 
583 	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
584 		    filter, l2_filter, l2_filter->l2_ref_cnt);
585 
586 	if (l2_filter->l2_ref_cnt == 0)
587 		return 0;
588 
589 	if (l2_filter->l2_ref_cnt > 0)
590 		l2_filter->l2_ref_cnt--;
591 
592 	if (l2_filter->l2_ref_cnt > 0)
593 		return 0;
594 
595 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
596 
597 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
598 
599 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
600 
601 	HWRM_CHECK_RESULT();
602 	HWRM_UNLOCK();
603 
604 	filter->fw_l2_filter_id = UINT64_MAX;
605 	if (l2_filter->l2_ref_cnt == 0) {
606 		vnic = l2_filter->vnic;
607 		if (vnic) {
608 			STAILQ_REMOVE(&vnic->filter, l2_filter,
609 				      bnxt_filter_info, next);
610 			bnxt_free_filter(bp, l2_filter);
611 		}
612 	}
613 
614 	return 0;
615 }
616 
617 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
618 			 uint16_t dst_id,
619 			 struct bnxt_filter_info *filter)
620 {
621 	int rc = 0;
622 	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
623 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
624 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
625 	const struct rte_eth_vmdq_rx_conf *conf =
626 		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
627 	uint32_t enables = 0;
628 	uint16_t j = dst_id - 1;
629 
630 	/* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ */
631 	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
632 	    conf->pool_map[j].pools & (1UL << j)) {
633 		PMD_DRV_LOG(DEBUG,
634 			"Add vlan %u to vmdq pool %u\n",
635 			conf->pool_map[j].vlan_id, j);
636 
637 		filter->l2_ivlan = conf->pool_map[j].vlan_id;
638 		filter->enables |=
639 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
640 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
641 	}
642 
643 	if (filter->fw_l2_filter_id != UINT64_MAX)
644 		bnxt_hwrm_clear_l2_filter(bp, filter);
645 
646 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
647 
648 	/* PMD does not support XDP and RoCE */
649 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
650 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
651 	req.flags = rte_cpu_to_le_32(filter->flags);
652 
653 	enables = filter->enables |
654 	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
655 	req.dst_id = rte_cpu_to_le_16(dst_id);
656 
657 	if (enables &
658 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
659 		memcpy(req.l2_addr, filter->l2_addr,
660 		       RTE_ETHER_ADDR_LEN);
661 	if (enables &
662 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
663 		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
664 		       RTE_ETHER_ADDR_LEN);
665 	if (enables &
666 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
667 		req.l2_ovlan = filter->l2_ovlan;
668 	if (enables &
669 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
670 		req.l2_ivlan = filter->l2_ivlan;
671 	if (enables &
672 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
673 		req.l2_ovlan_mask = filter->l2_ovlan_mask;
674 	if (enables &
675 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
676 		req.l2_ivlan_mask = filter->l2_ivlan_mask;
677 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
678 		req.src_id = rte_cpu_to_le_32(filter->src_id);
679 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
680 		req.src_type = filter->src_type;
681 	if (filter->pri_hint) {
682 		req.pri_hint = filter->pri_hint;
683 		req.l2_filter_id_hint =
684 			rte_cpu_to_le_64(filter->l2_filter_id_hint);
685 	}
686 
687 	req.enables = rte_cpu_to_le_32(enables);
688 
689 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
690 
691 	HWRM_CHECK_RESULT();
692 
693 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
694 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
695 	HWRM_UNLOCK();
696 
697 	filter->l2_ref_cnt++;
698 
699 	return rc;
700 }
701 
702 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
703 {
704 	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
705 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
706 	uint32_t flags = 0;
707 	int rc;
708 
709 	if (!ptp)
710 		return 0;
711 
712 	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
713 
714 	if (ptp->rx_filter)
715 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
716 	else
717 		flags |=
718 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
719 	if (ptp->tx_tstamp_en)
720 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
721 	else
722 		flags |=
723 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
724 	req.flags = rte_cpu_to_le_32(flags);
725 	req.enables = rte_cpu_to_le_32
726 		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
727 	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
728 
729 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
730 	HWRM_UNLOCK();
731 
732 	return rc;
733 }
734 
735 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
736 {
737 	int rc = 0;
738 	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
739 	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
740 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
741 
742 	if (ptp)
743 		return 0;
744 
745 	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
746 
747 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
748 
749 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
750 
751 	HWRM_CHECK_RESULT();
752 
753 	if (!BNXT_CHIP_P5(bp) &&
754 	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
755 		HWRM_UNLOCK();
756 		return 0;
757 	}
756 
757 	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
758 		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
759 
760 	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
761 	if (!ptp) {
762 		HWRM_UNLOCK();
763 		return -ENOMEM;
764 	}
763 
764 	if (!BNXT_CHIP_P5(bp)) {
765 		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
766 			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
767 		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
768 			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
769 		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
770 			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
771 		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
772 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
773 		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
774 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
775 		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
776 			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
777 		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
778 			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
779 		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
780 			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
781 		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
782 			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
783 	}
784 
785 	HWRM_UNLOCK();
786 
785 	ptp->bp = bp;
786 	bp->ptp_cfg = ptp;
787 
788 	return 0;
789 }
790 
791 void bnxt_free_vf_info(struct bnxt *bp)
792 {
793 	int i;
794 
795 	if (bp->pf->vf_info == NULL)
796 		return;
797 
798 	for (i = 0; i < bp->pf->max_vfs; i++) {
799 		rte_free(bp->pf->vf_info[i].vlan_table);
800 		bp->pf->vf_info[i].vlan_table = NULL;
801 		rte_free(bp->pf->vf_info[i].vlan_as_table);
802 		bp->pf->vf_info[i].vlan_as_table = NULL;
803 	}
804 	rte_free(bp->pf->vf_info);
805 	bp->pf->vf_info = NULL;
806 }
807 
808 static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
809 {
810 	struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
811 	int i;
812 
813 	if (vf_info)
814 		bnxt_free_vf_info(bp);
815 
816 	vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
817 	if (vf_info == NULL) {
818 		PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
819 		return -ENOMEM;
820 	}
821 
822 	bp->pf->max_vfs = max_vfs;
823 	for (i = 0; i < max_vfs; i++) {
824 		vf_info[i].fid = bp->pf->first_vf_id + i;
825 		vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
826 						    getpagesize(), getpagesize());
827 		if (vf_info[i].vlan_table == NULL) {
828 			PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
829 			goto err;
830 		}
831 		rte_mem_lock_page(vf_info[i].vlan_table);
832 
833 		vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
834 						       getpagesize(), getpagesize());
835 		if (vf_info[i].vlan_as_table == NULL) {
836 			PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
837 			goto err;
838 		}
839 		rte_mem_lock_page(vf_info[i].vlan_as_table);
840 
841 		STAILQ_INIT(&vf_info[i].filter);
842 	}
843 
844 	bp->pf->vf_info = vf_info;
845 
846 	return 0;
847 err:
848 	bnxt_free_vf_info(bp);
849 	return -ENOMEM;
850 }
851 
852 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
853 {
854 	int rc = 0;
855 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
856 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
857 	uint16_t new_max_vfs;
858 	uint32_t flags;
859 
860 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
861 
862 	req.fid = rte_cpu_to_le_16(0xffff);
863 
864 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
865 
866 	HWRM_CHECK_RESULT();
867 
868 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
869 	flags = rte_le_to_cpu_32(resp->flags);
870 	if (BNXT_PF(bp)) {
871 		bp->pf->port_id = resp->port_id;
872 		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
873 		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
874 		new_max_vfs = bp->pdev->max_vfs;
875 		if (new_max_vfs != bp->pf->max_vfs) {
876 			rc = bnxt_alloc_vf_info(bp, new_max_vfs);
877 			if (rc)
878 				goto unlock;
879 		}
880 	}
881 
882 	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
883 	if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
884 		bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
885 		memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
886 	} else {
887 		bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
888 	}
889 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
890 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
891 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
892 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
893 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
894 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
895 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
896 	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
897 		bp->max_l2_ctx += bp->max_rx_em_flows;
898 	/* TODO: For now, do not support VMDq/RFS on VFs. */
899 	if (BNXT_PF(bp)) {
900 		if (bp->pf->max_vfs)
901 			bp->max_vnics = 1;
902 		else
903 			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
904 	} else {
905 		bp->max_vnics = 1;
906 	}
907 	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
908 		    bp->max_l2_ctx, bp->max_vnics);
909 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
910 	if (BNXT_PF(bp)) {
911 		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
912 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
913 			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
914 			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
915 			HWRM_UNLOCK();
916 			bnxt_hwrm_ptp_qcfg(bp);
917 		}
918 	}
919 
920 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
921 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
922 
923 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
924 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
925 		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
926 	}
927 
928 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
929 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
930 
931 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
932 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
933 
934 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
935 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
936 
937 unlock:
938 	HWRM_UNLOCK();
939 
940 	return rc;
941 }
942 
943 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
944 {
945 	int rc;
946 
947 	rc = __bnxt_hwrm_func_qcaps(bp);
948 	if (rc == -ENOMEM)
949 		return rc;
950 
951 	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
952 		rc = bnxt_alloc_ctx_mem(bp);
953 		if (rc)
954 			return rc;
955 
956 		/* On older FW, bnxt_hwrm_func_resc_qcaps can fail and would
957 		 * otherwise cause init to fail. The error can be ignored,
958 		 * so return success.
959 		 */
960 		rc = bnxt_hwrm_func_resc_qcaps(bp);
961 		if (!rc)
962 			bp->flags |= BNXT_FLAG_NEW_RM;
963 	}
964 
965 	return 0;
966 }
967 
968 /* VNIC caps apply to all VNICs, so there is no need to pass a vnic_id */
969 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
970 {
971 	int rc = 0;
972 	uint32_t flags;
973 	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
974 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
975 
976 	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
977 
978 	req.target_id = rte_cpu_to_le_16(0xffff);
979 
980 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
981 
982 	HWRM_CHECK_RESULT();
983 
984 	flags = rte_le_to_cpu_32(resp->flags);
985 
986 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
987 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
988 		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
989 	}
990 
991 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
992 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
993 
994 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
995 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
996 
997 	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
998 
999 	HWRM_UNLOCK();
1000 
1001 	return rc;
1002 }
1003 
1004 int bnxt_hwrm_func_reset(struct bnxt *bp)
1005 {
1006 	int rc = 0;
1007 	struct hwrm_func_reset_input req = {.req_type = 0 };
1008 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
1009 
1010 	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
1011 
1012 	req.enables = rte_cpu_to_le_32(0);
1013 
1014 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1015 
1016 	HWRM_CHECK_RESULT();
1017 	HWRM_UNLOCK();
1018 
1019 	return rc;
1020 }
1021 
1022 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
1023 {
1024 	int rc;
1025 	uint32_t flags = 0;
1026 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
1027 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
1028 
1029 	if (bp->flags & BNXT_FLAG_REGISTERED)
1030 		return 0;
1031 
1032 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1033 		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1034 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1035 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
1036 
1037 	/* PFs and trusted VFs should indicate support for the master
1038 	 * capability on non-Stingray platforms.
1039 	 */
1040 	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
1041 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1042 
1043 	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
1044 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1045 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1046 	req.ver_maj = RTE_VER_YEAR;
1047 	req.ver_min = RTE_VER_MONTH;
1048 	req.ver_upd = RTE_VER_MINOR;
1049 
1050 	if (BNXT_PF(bp)) {
1051 		req.enables |= rte_cpu_to_le_32(
1052 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1053 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
1054 		       RTE_MIN(sizeof(req.vf_req_fwd),
1055 			       sizeof(bp->pf->vf_req_fwd)));
1056 	}
1057 
1058 	req.flags = rte_cpu_to_le_32(flags);
1059 
1060 	req.async_event_fwd[0] |=
1061 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
1062 				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
1063 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
1064 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
1065 				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
1066 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1067 		req.async_event_fwd[0] |=
1068 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
1069 	req.async_event_fwd[1] |=
1070 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
1071 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
1072 	if (BNXT_PF(bp))
1073 		req.async_event_fwd[1] |=
1074 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
1075 
1076 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
1077 		req.async_event_fwd[1] |=
1078 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
1079 
1080 	req.async_event_fwd[2] |=
1081 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST);
1082 
1083 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1084 
1085 	HWRM_CHECK_RESULT();
1086 
1087 	flags = rte_le_to_cpu_32(resp->flags);
1088 	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
1089 		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1090 
1091 	HWRM_UNLOCK();
1092 
1093 	bp->flags |= BNXT_FLAG_REGISTERED;
1094 
1095 	return rc;
1096 }
1097 
1098 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
1099 {
1100 	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
1101 		return 0;
1102 
1103 	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
1104 }
1105 
1106 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
1107 {
1108 	int rc;
1109 	uint32_t flags = 0;
1110 	uint32_t enables;
1111 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1112 	struct hwrm_func_vf_cfg_input req = {0};
1113 
1114 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
1115 
1116 	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
1117 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
1118 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
1119 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1120 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
1121 
1122 	if (BNXT_HAS_RING_GRPS(bp)) {
1123 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
1124 		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
1125 	}
1126 
1127 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
1128 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
1129 					    AGG_RING_MULTIPLIER);
1130 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
1131 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
1132 					      bp->tx_nr_rings +
1133 					      BNXT_NUM_ASYNC_CPR(bp));
1134 	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
1135 	if (bp->vf_resv_strategy ==
1136 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
1137 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1138 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1139 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1140 		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1141 		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1142 		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1143 	} else if (bp->vf_resv_strategy ==
1144 		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1145 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1146 		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1147 	}
1148 
1149 	if (test)
1150 		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1151 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1152 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1153 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1154 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1155 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1156 
1157 	if (test && BNXT_HAS_RING_GRPS(bp))
1158 		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1159 
1160 	req.flags = rte_cpu_to_le_32(flags);
1161 	req.enables |= rte_cpu_to_le_32(enables);
1162 
1163 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1164 
1165 	if (test)
1166 		HWRM_CHECK_RESULT_SILENT();
1167 	else
1168 		HWRM_CHECK_RESULT();
1169 
1170 	HWRM_UNLOCK();
1171 	return rc;
1172 }
1173 
1174 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1175 {
1176 	int rc;
1177 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1178 	struct hwrm_func_resource_qcaps_input req = {0};
1179 
1180 	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1181 	req.fid = rte_cpu_to_le_16(0xffff);
1182 
1183 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1184 
1185 	HWRM_CHECK_RESULT_SILENT();
1186 
1187 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1188 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1189 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1190 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1191 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1192 	/* func_resource_qcaps does not return max_rx_em_flows.
1193 	 * So use the value provided by func_qcaps.
1194 	 */
1195 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1196 	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
1197 		bp->max_l2_ctx += bp->max_rx_em_flows;
1198 	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1199 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1200 	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1201 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1202 	if (bp->vf_resv_strategy >
1203 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1204 		bp->vf_resv_strategy =
1205 		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1206 
1207 	HWRM_UNLOCK();
1208 	return rc;
1209 }
1210 
1211 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1212 {
1213 	int rc = 0;
1214 	struct hwrm_ver_get_input req = {.req_type = 0 };
1215 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1216 	uint32_t fw_version;
1217 	uint16_t max_resp_len;
1218 	char type[RTE_MEMZONE_NAMESIZE];
1219 	uint32_t dev_caps_cfg;
1220 
1221 	bp->max_req_len = HWRM_MAX_REQ_LEN;
1222 	bp->hwrm_cmd_timeout = timeout;
1223 	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1224 
1225 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1226 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
1227 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1228 
1229 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1230 
1231 	if (bp->flags & BNXT_FLAG_FW_RESET)
1232 		HWRM_CHECK_RESULT_SILENT();
1233 	else
1234 		HWRM_CHECK_RESULT();
1235 
1236 	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
1237 		rc = -EAGAIN;
1238 		goto error;
1239 	}
1240 
1241 	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
1242 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1243 		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1244 		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
1245 		resp->hwrm_fw_rsvd_8b);
1246 	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
1247 		     (resp->hwrm_fw_min_8b << 16) |
1248 		     (resp->hwrm_fw_bld_8b << 8) |
1249 		     resp->hwrm_fw_rsvd_8b;
1250 	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
1251 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1252 
1253 	fw_version = resp->hwrm_intf_maj_8b << 16;
1254 	fw_version |= resp->hwrm_intf_min_8b << 8;
1255 	fw_version |= resp->hwrm_intf_upd_8b;
1256 	bp->hwrm_spec_code = fw_version;
1257 
1258 	/* def_req_timeout value is in milliseconds */
1259 	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1260 	/* convert timeout to usec */
1261 	bp->hwrm_cmd_timeout *= 1000;
1262 	if (!bp->hwrm_cmd_timeout)
1263 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
1264 
1265 	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1266 		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
1267 		rc = -EINVAL;
1268 		goto error;
1269 	}
1270 
1271 	if (bp->max_req_len > resp->max_req_win_len) {
1272 		PMD_DRV_LOG(ERR, "Unsupported request length\n");
1273 		rc = -EINVAL;
1274 		goto error;
1275 	}
1276 
1277 	bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
1278 
1279 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1280 	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1281 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1282 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1283 
1284 	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1285 	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1286 
1287 	RTE_VERIFY(max_resp_len <= bp->max_resp_len);
1288 	bp->max_resp_len = max_resp_len;
1289 
1290 	if ((dev_caps_cfg &
1291 		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1292 	    (dev_caps_cfg &
1293 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1294 		PMD_DRV_LOG(DEBUG, "Short command supported\n");
1295 		bp->flags |= BNXT_FLAG_SHORT_CMD;
1296 	}
1297 
1298 	if (((dev_caps_cfg &
1299 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1300 	     (dev_caps_cfg &
1301 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1302 	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1303 		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1304 			bp->pdev->addr.domain, bp->pdev->addr.bus,
1305 			bp->pdev->addr.devid, bp->pdev->addr.function);
1306 
1307 		rte_free(bp->hwrm_short_cmd_req_addr);
1308 
1309 		bp->hwrm_short_cmd_req_addr =
1310 				rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1311 		if (bp->hwrm_short_cmd_req_addr == NULL) {
1312 			rc = -ENOMEM;
1313 			goto error;
1314 		}
1315 		bp->hwrm_short_cmd_req_dma_addr =
1316 			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1317 		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1318 			rte_free(bp->hwrm_short_cmd_req_addr);
1319 			PMD_DRV_LOG(ERR,
1320 				"Unable to map buffer to physical memory.\n");
1321 			rc = -ENOMEM;
1322 			goto error;
1323 		}
1324 	}
1325 	if (dev_caps_cfg &
1326 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1327 		bp->flags |= BNXT_FLAG_KONG_MB_EN;
1328 		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
1329 	}
1330 	if (dev_caps_cfg &
1331 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1332 		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
1333 	if (dev_caps_cfg &
1334 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1335 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1336 		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
1337 	}
1338 
1339 	if (dev_caps_cfg &
1340 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1341 		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
1342 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1343 	}
1344 
1345 error:
1346 	HWRM_UNLOCK();
1347 	return rc;
1348 }
1349 
1350 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1351 {
1352 	int rc;
1353 	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1354 	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1355 
1356 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
1357 		return 0;
1358 
1359 	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1360 	req.flags = flags;
1361 
1362 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1363 
1364 	HWRM_CHECK_RESULT();
1365 	HWRM_UNLOCK();
1366 
1367 	PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
1368 		    bp->eth_dev->data->port_id);
1369 
1370 	return rc;
1368 }
1369 
1370 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1371 {
1372 	int rc = 0;
1373 	struct hwrm_port_phy_cfg_input req = {0};
1374 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1375 	uint32_t enables = 0;
1376 
1377 	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1378 
1379 	if (conf->link_up) {
1380 		/* A fixed speed is being set while autoneg is on, so disable autoneg */
1381 		if (bp->link_info->auto_mode && conf->link_speed) {
1382 			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1383 			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1384 		}
1385 
1386 		req.flags = rte_cpu_to_le_32(conf->phy_flags);
1387 		/*
1388 		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1389 		 * any auto mode, even "none".
1390 		 */
1391 		if (!conf->link_speed) {
1392 			/* No speeds specified. Enable AutoNeg - all speeds */
1393 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1394 			req.auto_mode =
1395 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1396 		} else {
1397 			if (bp->link_info->link_signal_mode) {
1398 				enables |=
1399 				HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1400 				req.force_pam4_link_speed =
1401 					rte_cpu_to_le_16(conf->link_speed);
1402 			} else {
1403 				req.force_link_speed =
1404 					rte_cpu_to_le_16(conf->link_speed);
1405 			}
1406 		}
1407 		/* AutoNeg - Advertise speeds specified. */
1408 		if (conf->auto_link_speed_mask &&
1409 		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1410 			req.auto_mode =
1411 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1412 			req.auto_link_speed_mask =
1413 				conf->auto_link_speed_mask;
1414 			if (conf->auto_pam4_link_speeds) {
1415 				enables |=
1416 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1417 				req.auto_link_pam4_speed_mask =
1418 					conf->auto_pam4_link_speeds;
1419 			} else {
1420 				enables |=
1421 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1422 			}
1423 		}
1424 		if (conf->auto_link_speed &&
1425 		!(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1426 			enables |=
1427 				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1428 
1429 		req.auto_duplex = conf->duplex;
1430 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1431 		req.auto_pause = conf->auto_pause;
1432 		req.force_pause = conf->force_pause;
1433 		/* Use auto_pause only when set and force_pause is not; otherwise force */
1434 		if (req.auto_pause && !req.force_pause)
1435 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1436 		else
1437 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1438 
1439 		req.enables = rte_cpu_to_le_32(enables);
1440 	} else {
1441 		req.flags =
1442 		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1443 		PMD_DRV_LOG(INFO, "Force Link Down\n");
1444 	}
1445 
1446 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1447 
1448 	HWRM_CHECK_RESULT();
1449 	HWRM_UNLOCK();
1450 
1453 	return rc;
1454 }
1455 
1456 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1457 				   struct bnxt_link_info *link_info)
1458 {
1459 	int rc = 0;
1460 	struct hwrm_port_phy_qcfg_input req = {0};
1461 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1462 
1463 	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1464 
1465 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1466 
1467 	HWRM_CHECK_RESULT();
1468 
1469 	link_info->phy_link_status = resp->link;
1470 	link_info->link_up =
1471 		(link_info->phy_link_status ==
1472 		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1473 	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1474 	link_info->duplex = resp->duplex_cfg;
1475 	link_info->pause = resp->pause;
1476 	link_info->auto_pause = resp->auto_pause;
1477 	link_info->force_pause = resp->force_pause;
1478 	link_info->auto_mode = resp->auto_mode;
1479 	link_info->phy_type = resp->phy_type;
1480 	link_info->media_type = resp->media_type;
1481 
1482 	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1483 	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1484 	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1485 	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1486 	link_info->phy_ver[0] = resp->phy_maj;
1487 	link_info->phy_ver[1] = resp->phy_min;
1488 	link_info->phy_ver[2] = resp->phy_bld;
1489 	link_info->link_signal_mode =
1490 		rte_le_to_cpu_16(resp->active_fec_signal_mode);
1491 	link_info->force_pam4_link_speed =
1492 			rte_le_to_cpu_16(resp->force_pam4_link_speed);
1493 	link_info->support_pam4_speeds =
1494 			rte_le_to_cpu_16(resp->support_pam4_speeds);
1495 	link_info->auto_pam4_link_speeds =
1496 			rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1497 	HWRM_UNLOCK();
1498 
1499 	PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1500 		    link_info->link_speed, link_info->auto_mode,
1501 		    link_info->auto_link_speed, link_info->auto_link_speed_mask,
1502 		    link_info->support_speeds, link_info->force_link_speed);
1503 	PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1504 		    link_info->link_signal_mode,
1505 		    link_info->auto_pam4_link_speeds,
1506 		    link_info->support_pam4_speeds,
1507 		    link_info->force_pam4_link_speed);
1508 	return rc;
1509 }
1510 
1511 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1512 {
1513 	int rc = 0;
1514 	struct hwrm_port_phy_qcaps_input req = {0};
1515 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1516 	struct bnxt_link_info *link_info = bp->link_info;
1517 
1518 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1519 		return 0;
1520 
1521 	HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1522 
1523 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1524 
1525 	HWRM_CHECK_RESULT_SILENT();
1526 
1527 	bp->port_cnt = resp->port_cnt;
1528 	if (resp->supported_speeds_auto_mode)
1529 		link_info->support_auto_speeds =
1530 			rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1531 	if (resp->supported_pam4_speeds_auto_mode)
1532 		link_info->support_pam4_auto_speeds =
1533 			rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1534 
1535 	HWRM_UNLOCK();
1536 
1537 	return 0;
1538 }
1539 
1540 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1541 {
1542 	int i = 0;
1543 
1544 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1545 		if (bp->tx_cos_queue[i].profile ==
1546 		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1547 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1548 			return true;
1549 		}
1550 	}
1551 	return false;
1552 }
1553 
1554 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1555 {
1556 	int i = 0;
1557 
1558 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1559 		if (bp->tx_cos_queue[i].profile !=
1560 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1561 		    bp->tx_cos_queue[i].id !=
1562 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1563 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1564 			break;
1565 		}
1566 	}
1567 }
1568 
1569 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1570 {
1571 	int rc = 0;
1572 	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1573 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1574 	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1575 	int i;
1576 
1577 get_rx_info:
1578 	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1579 
1580 	req.flags = rte_cpu_to_le_32(dir);
1581 	/* Set drv_qmap_cap only on HWRM >= 1.9.1 and when CoS classification is not required */
1582 	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1583 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1584 		req.drv_qmap_cap =
1585 			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1586 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1587 
1588 	HWRM_CHECK_RESULT();
1589 
1590 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1591 		GET_TX_QUEUE_INFO(0);
1592 		GET_TX_QUEUE_INFO(1);
1593 		GET_TX_QUEUE_INFO(2);
1594 		GET_TX_QUEUE_INFO(3);
1595 		GET_TX_QUEUE_INFO(4);
1596 		GET_TX_QUEUE_INFO(5);
1597 		GET_TX_QUEUE_INFO(6);
1598 		GET_TX_QUEUE_INFO(7);
1599 	} else {
1600 		GET_RX_QUEUE_INFO(0);
1601 		GET_RX_QUEUE_INFO(1);
1602 		GET_RX_QUEUE_INFO(2);
1603 		GET_RX_QUEUE_INFO(3);
1604 		GET_RX_QUEUE_INFO(4);
1605 		GET_RX_QUEUE_INFO(5);
1606 		GET_RX_QUEUE_INFO(6);
1607 		GET_RX_QUEUE_INFO(7);
1608 	}
1609 
1610 	HWRM_UNLOCK();
1611 
1612 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1613 		goto done;
1614 
1615 	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1616 		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1617 	} else {
1618 		int j;
1619 
1620 		/* iterate and find the COSq profile to use for Tx */
1621 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1622 			for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1623 				if (bp->tx_cos_queue[i].id != 0xff)
1624 					bp->tx_cosq_id[j++] =
1625 						bp->tx_cos_queue[i].id;
1626 			}
1627 		} else {
1628 			/* When CoS classification is disabled, for normal NIC
1629 			 * operations, ideally we should look to use LOSSY.
1630 			 * If not found, fall back to the first valid profile.
1631 			 */
1632 			if (!bnxt_find_lossy_profile(bp))
1633 				bnxt_find_first_valid_profile(bp);
1634 
1635 		}
1636 	}
1637 
1638 	bp->max_tc = resp->max_configurable_queues;
1639 	bp->max_lltc = resp->max_configurable_lossless_queues;
1640 	if (bp->max_tc > BNXT_MAX_QUEUE)
1641 		bp->max_tc = BNXT_MAX_QUEUE;
1642 	bp->max_q = bp->max_tc;
1643 
1644 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1645 		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1646 		goto get_rx_info;
1647 	}
1648 
1649 done:
1650 	return rc;
1651 }
1652 
1653 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1654 			 struct bnxt_ring *ring,
1655 			 uint32_t ring_type, uint32_t map_index,
1656 			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1657 			 uint16_t tx_cosq_id)
1658 {
1659 	int rc = 0;
1660 	uint32_t enables = 0;
1661 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
1662 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1663 	struct rte_mempool *mb_pool;
1664 	uint16_t rx_buf_size;
1665 
1666 	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1667 
1668 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1669 	req.fbo = rte_cpu_to_le_32(0);
1670 	/* Association of ring index with doorbell index */
1671 	req.logical_id = rte_cpu_to_le_16(map_index);
1672 	req.length = rte_cpu_to_le_32(ring->ring_size);
1673 
1674 	switch (ring_type) {
1675 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1676 		req.ring_type = ring_type;
1677 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1678 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1679 		req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1680 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
1681 			enables |=
1682 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1683 		break;
1684 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1685 		req.ring_type = ring_type;
1686 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1687 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1688 		if (BNXT_CHIP_P5(bp)) {
1689 			mb_pool = bp->rx_queues[0]->mb_pool;
1690 			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1691 				      RTE_PKTMBUF_HEADROOM;
1692 			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1693 			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1694 			enables |=
1695 				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1696 		}
1697 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
1698 			enables |=
1699 				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1700 		break;
1701 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1702 		req.ring_type = ring_type;
1703 		if (BNXT_HAS_NQ(bp)) {
1704 			/* Association of cp ring with nq */
1705 			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1706 			enables |=
1707 				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1708 		}
1709 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1710 		break;
1711 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1712 		req.ring_type = ring_type;
1713 		req.page_size = BNXT_PAGE_SHFT;
1714 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1715 		break;
1716 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1717 		req.ring_type = ring_type;
1718 		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1719 
1720 		mb_pool = bp->rx_queues[0]->mb_pool;
1721 		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1722 			      RTE_PKTMBUF_HEADROOM;
1723 		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1724 		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1725 
1726 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1727 		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1728 			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1729 			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1730 		break;
1731 	default:
1732 		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1733 			ring_type);
1734 		HWRM_UNLOCK();
1735 		return -EINVAL;
1736 	}
1737 	req.enables = rte_cpu_to_le_32(enables);
1738 
1739 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1740 
1741 	if (rc || resp->error_code) {
1742 		if (rc == 0 && resp->error_code)
1743 			rc = rte_le_to_cpu_16(resp->error_code);
1744 		switch (ring_type) {
1745 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1746 			PMD_DRV_LOG(ERR,
1747 				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
1748 			HWRM_UNLOCK();
1749 			return rc;
1750 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1751 			PMD_DRV_LOG(ERR,
1752 				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1753 			HWRM_UNLOCK();
1754 			return rc;
1755 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1756 			PMD_DRV_LOG(ERR,
1757 				    "hwrm_ring_alloc rx agg failed. rc:%d\n",
1758 				    rc);
1759 			HWRM_UNLOCK();
1760 			return rc;
1761 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1762 			PMD_DRV_LOG(ERR,
1763 				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1764 			HWRM_UNLOCK();
1765 			return rc;
1766 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1767 			PMD_DRV_LOG(ERR,
1768 				    "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1769 			HWRM_UNLOCK();
1770 			return rc;
1771 		default:
1772 			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1773 			HWRM_UNLOCK();
1774 			return rc;
1775 		}
1776 	}
1777 
1778 	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1779 	HWRM_UNLOCK();
1780 	return rc;
1781 }
1782 
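/* Free a firmware ring via HWRM_RING_FREE, passing the associated
 * completion ring ID along with the request. Failures are logged per
 * ring type and the HWRM error code, if any, is returned.
 */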
1783 int bnxt_hwrm_ring_free(struct bnxt *bp,
1784 			struct bnxt_ring *ring, uint32_t ring_type,
1785 			uint16_t cp_ring_id)
1786 {
1787 	int rc;
1788 	struct hwrm_ring_free_input req = {.req_type = 0 };
1789 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1790 
1791 	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1792 
1793 	req.ring_type = ring_type;
1794 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1795 	req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
1796 
1797 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1798 
1799 	if (rc || resp->error_code) {
1800 		if (rc == 0 && resp->error_code)
1801 			rc = rte_le_to_cpu_16(resp->error_code);
1802 		HWRM_UNLOCK();
1803 
1804 		switch (ring_type) {
1805 		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1806 			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1807 				rc);
1808 			return rc;
1809 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1810 			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1811 				rc);
1812 			return rc;
1813 		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1814 			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1815 				rc);
1816 			return rc;
1817 		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1818 			PMD_DRV_LOG(ERR,
1819 				    "hwrm_ring_free nq failed. rc:%d\n", rc);
1820 			return rc;
1821 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1822 			PMD_DRV_LOG(ERR,
1823 				    "hwrm_ring_free agg failed. rc:%d\n", rc);
1824 			return rc;
1825 		default:
1826 			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1827 			return rc;
1828 		}
1829 	}
1830 	HWRM_UNLOCK();
1831 	return 0;
1832 }
1833 
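/* Allocate a hardware ring group binding the completion, Rx, and Rx
 * AGG rings and the stats context recorded in bp->grp_info[idx]. The
 * firmware group ID is saved back into bp->grp_info[idx].fw_grp_id.
 */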
1834 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1835 {
1836 	int rc = 0;
1837 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1838 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1839 
1840 	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1841 
1842 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1843 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1844 	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1845 	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1846 
1847 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1848 
1849 	HWRM_CHECK_RESULT();
1850 
1851 	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1852 
1853 	HWRM_UNLOCK();
1854 
1855 	return rc;
1856 }
1857 
1858 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1859 {
1860 	int rc;
1861 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1862 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1863 
1864 	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1865 
1866 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1867 
1868 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1869 
1870 	HWRM_CHECK_RESULT();
1871 	HWRM_UNLOCK();
1872 
1873 	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1874 	return rc;
1875 }
1876 
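/* Clear the hardware counters behind a completion ring's stats
 * context. A context that was never allocated (HWRM_NA_SIGNATURE) is
 * silently skipped.
 */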
1877 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1878 {
1879 	int rc = 0;
1880 	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1881 	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1882 
1883 	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1884 		return rc;
1885 
1886 	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1887 
1888 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1889 
1890 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1891 
1892 	HWRM_CHECK_RESULT();
1893 	HWRM_UNLOCK();
1894 
1895 	return rc;
1896 }
1897 
1898 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1899 				unsigned int idx __rte_unused)
1900 {
1901 	int rc;
1902 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1903 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1904 
1905 	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1906 
1907 	req.update_period_ms = rte_cpu_to_le_32(0);
1908 
1909 	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1910 
1911 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1912 
1913 	HWRM_CHECK_RESULT();
1914 
1915 	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1916 
1917 	HWRM_UNLOCK();
1918 
1919 	return rc;
1920 }
1921 
1922 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1923 				unsigned int idx __rte_unused)
1924 {
1925 	int rc;
1926 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1927 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1928 
1929 	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1930 
1931 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1932 
1933 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1934 
1935 	HWRM_CHECK_RESULT();
1936 	HWRM_UNLOCK();
1937 
1938 	return rc;
1939 }
1940 
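/* Allocate a VNIC in firmware. On devices with ring groups, the
 * group IDs in [start_grp_id, end_grp_id) are first copied into
 * vnic->fw_grp_ids and the RSS/CoS/LB rules are reset to
 * HWRM_NA_SIGNATURE. The firmware VNIC ID is saved in
 * vnic->fw_vnic_id.
 */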
1941 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1942 {
1943 	int rc = 0, i, j;
1944 	struct hwrm_vnic_alloc_input req = { 0 };
1945 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1946 
1947 	if (!BNXT_HAS_RING_GRPS(bp))
1948 		goto skip_ring_grps;
1949 
1950 	/* map ring groups to this vnic */
1951 	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1952 		vnic->start_grp_id, vnic->end_grp_id);
1953 	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1954 		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1955 
1956 	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1957 	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1958 	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1959 	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1960 
1961 skip_ring_grps:
1962 	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1963 	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1964 
1965 	if (vnic->func_default)
1966 		req.flags =
1967 			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1968 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1969 
1970 	HWRM_CHECK_RESULT();
1971 
1972 	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1973 	HWRM_UNLOCK();
1974 	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1975 	return rc;
1976 }
1977 
1978 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1979 					struct bnxt_vnic_info *vnic,
1980 					struct bnxt_plcmodes_cfg *pmode)
1981 {
1982 	int rc = 0;
1983 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1984 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1985 
1986 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1987 
1988 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1989 
1990 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1991 
1992 	HWRM_CHECK_RESULT();
1993 
1994 	pmode->flags = rte_le_to_cpu_32(resp->flags);
1995 	/* dflt_vnic bit doesn't exist in the _cfg command */
1996 	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1997 	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1998 	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1999 	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
2000 
2001 	HWRM_UNLOCK();
2002 
2003 	return rc;
2004 }
2005 
2006 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2007 				       struct bnxt_vnic_info *vnic,
2008 				       struct bnxt_plcmodes_cfg *pmode)
2009 {
2010 	int rc = 0;
2011 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2012 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2013 
2014 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2015 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2016 		return rc;
2017 	}
2018 
2019 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2020 
2021 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2022 	req.flags = rte_cpu_to_le_32(pmode->flags);
2023 	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2024 	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2025 	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
2026 	req.enables = rte_cpu_to_le_32(
2027 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
2028 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
2029 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
2030 	);
2031 
2032 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2033 
2034 	HWRM_CHECK_RESULT();
2035 	HWRM_UNLOCK();
2036 
2037 	return rc;
2038 }
2039 
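/* Configure a previously allocated VNIC. On P5 devices the default
 * Rx and completion ring IDs are taken from the first started Rx
 * queue; on earlier devices the default ring group and the
 * RSS/CoS/LB rules are programmed instead. The placement mode
 * settings are queried beforehand and re-applied afterwards.
 */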
2040 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2041 {
2042 	int rc = 0;
2043 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
2044 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2045 	struct bnxt_plcmodes_cfg pmodes = { 0 };
2046 	uint32_t ctx_enable_flag = 0;
2047 	uint32_t enables = 0;
2048 
2049 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2050 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2051 		return rc;
2052 	}
2053 
2054 	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
2055 	if (rc)
2056 		return rc;
2057 
2058 	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
2059 
2060 	if (BNXT_CHIP_P5(bp)) {
2061 		int dflt_rxq = vnic->start_grp_id;
2062 		struct bnxt_rx_ring_info *rxr;
2063 		struct bnxt_cp_ring_info *cpr;
2064 		struct bnxt_rx_queue *rxq;
2065 		int i;
2066 
2067 		/*
2068 		 * The first active receive ring is used as the VNIC
2069 		 * default receive ring. If there are no active receive
2070 		 * rings (all corresponding receive queues are stopped),
2071 		 * the first receive ring is used.
2072 		 */
2073 		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
2074 			rxq = bp->eth_dev->data->rx_queues[i];
2075 			if (rxq->rx_started) {
2076 				dflt_rxq = i;
2077 				break;
2078 			}
2079 		}
2080 
2081 		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
2082 		rxr = rxq->rx_ring;
2083 		cpr = rxq->cp_ring;
2084 
2085 		req.default_rx_ring_id =
2086 			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
2087 		req.default_cmpl_ring_id =
2088 			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
2089 		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
2090 			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
2091 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
2092 			enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
2093 			req.rx_csum_v2_mode =
2094 				HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
2095 		}
2096 		goto config_mru;
2097 	}
2098 
2099 	/* Only RSS is supported for now. TBD: COS & LB. */
2100 	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
2101 	if (vnic->lb_rule != 0xffff)
2102 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
2103 	if (vnic->cos_rule != 0xffff)
2104 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
2105 	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
2106 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
2107 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
2108 	}
2109 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2110 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
2111 		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
2112 	}
2113 
2114 	enables |= ctx_enable_flag;
2115 	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
2116 	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
2117 	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
2118 	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
2119 
2120 config_mru:
2121 	req.enables = rte_cpu_to_le_32(enables);
2122 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2123 	req.mru = rte_cpu_to_le_16(vnic->mru);
2124 	/* Configure default VNIC only once. */
2125 	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2126 		req.flags |=
2127 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2128 		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2129 	}
2130 	if (vnic->vlan_strip)
2131 		req.flags |=
2132 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2133 	if (vnic->bd_stall)
2134 		req.flags |=
2135 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2136 	if (vnic->rss_dflt_cr)
2137 		req.flags |= rte_cpu_to_le_32(
2138 			HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
2139 
2140 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2141 
2142 	HWRM_CHECK_RESULT();
2143 	HWRM_UNLOCK();
2144 
2145 	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2146 
2147 	return rc;
2148 }
2149 
2150 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2151 		int16_t fw_vf_id)
2152 {
2153 	int rc = 0;
2154 	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2155 	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2156 
2157 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2158 		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2159 		return rc;
2160 	}
2161 	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2162 
2163 	req.enables =
2164 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2165 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2166 	req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2167 
2168 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2169 
2170 	HWRM_CHECK_RESULT();
2171 
2172 	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2173 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2174 	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2175 	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2176 	vnic->mru = rte_le_to_cpu_16(resp->mru);
2177 	vnic->func_default = rte_le_to_cpu_32(
2178 			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2179 	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2180 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2181 	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2182 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2183 	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2184 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2185 
2186 	HWRM_UNLOCK();
2187 
2188 	return rc;
2189 }
2190 
2191 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2192 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2193 {
2194 	int rc = 0;
2195 	uint16_t ctx_id;
2196 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2197 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2198 						bp->hwrm_cmd_resp_addr;
2199 
2200 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2201 
2202 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2203 	HWRM_CHECK_RESULT();
2204 
2205 	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2206 	if (!BNXT_HAS_RING_GRPS(bp))
2207 		vnic->fw_grp_ids[ctx_idx] = ctx_id;
2208 	else if (ctx_idx == 0)
2209 		vnic->rss_rule = ctx_id;
2210 
2211 	HWRM_UNLOCK();
2212 
2213 	return rc;
2214 }
2215 
2216 static
2217 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2218 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2219 {
2220 	int rc = 0;
2221 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2222 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2223 						bp->hwrm_cmd_resp_addr;
2224 
2225 	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2226 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2227 		return rc;
2228 	}
2229 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2230 
2231 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2232 
2233 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2234 
2235 	HWRM_CHECK_RESULT();
2236 	HWRM_UNLOCK();
2237 
2238 	return rc;
2239 }
2240 
2241 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2242 {
2243 	int rc = 0;
2244 
2245 	if (BNXT_CHIP_P5(bp)) {
2246 		int j;
2247 
2248 		for (j = 0; j < vnic->num_lb_ctxts; j++) {
2249 			rc = _bnxt_hwrm_vnic_ctx_free(bp,
2250 						      vnic,
2251 						      vnic->fw_grp_ids[j]);
2252 			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2253 		}
2254 		vnic->num_lb_ctxts = 0;
2255 	} else {
2256 		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2257 		vnic->rss_rule = INVALID_HW_RING_ID;
2258 	}
2259 
2260 	return rc;
2261 }
2262 
2263 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2264 {
2265 	int rc = 0;
2266 	struct hwrm_vnic_free_input req = {.req_type = 0 };
2267 	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2268 
2269 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2270 		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2271 		return rc;
2272 	}
2273 
2274 	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2275 
2276 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2277 
2278 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2279 
2280 	HWRM_CHECK_RESULT();
2281 	HWRM_UNLOCK();
2282 
2283 	vnic->fw_vnic_id = INVALID_HW_RING_ID;
2284 	/* Configure default VNIC again if necessary. */
2285 	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2286 		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2287 
2288 	return rc;
2289 }
2290 
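/* On P5 devices, RSS is programmed with one HWRM_VNIC_RSS_CFG
 * request per ring table pair: each of the vnic->num_lb_ctxts
 * contexts gets its own slice of the indirection table (spaced
 * HW_HASH_INDEX_SIZE entries apart) and the shared hash key.
 */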
2291 static int
2292 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2293 {
2294 	int i;
2295 	int rc = 0;
2296 	int nr_ctxs = vnic->num_lb_ctxts;
2297 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2298 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2299 
2300 	for (i = 0; i < nr_ctxs; i++) {
2301 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2302 
2303 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2304 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2305 		req.hash_mode_flags = vnic->hash_mode;
2306 
2307 		req.hash_key_tbl_addr =
2308 			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2309 
2310 		req.ring_grp_tbl_addr =
2311 			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2312 					 i * HW_HASH_INDEX_SIZE);
2313 		req.ring_table_pair_index = i;
2314 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2315 
2316 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2317 					    BNXT_USE_CHIMP_MB);
2318 
2319 		HWRM_CHECK_RESULT();
2320 		HWRM_UNLOCK();
2321 	}
2322 
2323 	return rc;
2324 }
2325 
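/* Program the RSS hash type, hash key, and indirection table for a
 * VNIC. A VNIC without an RSS table is a no-op; P5 devices are
 * delegated to bnxt_hwrm_vnic_rss_cfg_p5().
 */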
2326 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2327 			   struct bnxt_vnic_info *vnic)
2328 {
2329 	int rc = 0;
2330 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2331 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2332 
2333 	if (!vnic->rss_table)
2334 		return 0;
2335 
2336 	if (BNXT_CHIP_P5(bp))
2337 		return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2338 
2339 	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2340 
2341 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2342 	req.hash_mode_flags = vnic->hash_mode;
2343 
2344 	req.ring_grp_tbl_addr =
2345 	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2346 	req.hash_key_tbl_addr =
2347 	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2348 	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2349 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2350 
2351 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2352 
2353 	HWRM_CHECK_RESULT();
2354 	HWRM_UNLOCK();
2355 
2356 	return rc;
2357 }
2358 
2359 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2360 			struct bnxt_vnic_info *vnic)
2361 {
2362 	int rc = 0;
2363 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2364 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2365 	uint16_t size;
2366 
2367 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2368 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2369 		return rc;
2370 	}
2371 
2372 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2373 
2374 	req.flags = rte_cpu_to_le_32(
2375 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2376 
2377 	req.enables = rte_cpu_to_le_32(
2378 		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2379 
2380 	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2381 	size -= RTE_PKTMBUF_HEADROOM;
2382 	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2383 
2384 	req.jumbo_thresh = rte_cpu_to_le_16(size);
2385 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2386 
2387 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2388 
2389 	HWRM_CHECK_RESULT();
2390 	HWRM_UNLOCK();
2391 
2392 	return rc;
2393 }
2394 
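/* Enable or disable TPA (LRO) on a VNIC. P5 devices without TPA v2
 * support return -ENOTSUP. When enabling, the aggregation limits come
 * from BNXT_TPA_MAX_AGGS()/BNXT_TPA_MAX_SEGS() and the minimum
 * aggregation length is fixed at 512 bytes.
 */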
2395 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2396 			struct bnxt_vnic_info *vnic, bool enable)
2397 {
2398 	int rc = 0;
2399 	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2400 	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2401 
2402 	if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2403 		if (enable)
2404 			PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2405 		return -ENOTSUP;
2406 	}
2407 
2408 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2409 		PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2410 		return 0;
2411 	}
2412 
2413 	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2414 
2415 	if (enable) {
2416 		req.enables = rte_cpu_to_le_32(
2417 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2418 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2419 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2420 		req.flags = rte_cpu_to_le_32(
2421 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2422 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2423 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2424 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2425 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2426 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2427 		req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2428 		req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2429 		req.min_agg_len = rte_cpu_to_le_32(512);
2430 	}
2431 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2432 
2433 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2434 
2435 	HWRM_CHECK_RESULT();
2436 	HWRM_UNLOCK();
2437 
2438 	return rc;
2439 }
2440 
2441 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2442 {
2443 	struct hwrm_func_cfg_input req = {0};
2444 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2445 	int rc;
2446 
2447 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2448 	req.enables = rte_cpu_to_le_32(
2449 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2450 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2451 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2452 
2453 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2454 
2455 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2456 	HWRM_CHECK_RESULT();
2457 	HWRM_UNLOCK();
2458 
2459 	bp->pf->vf_info[vf].random_mac = false;
2460 
2461 	return rc;
2462 }
2463 
2464 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2465 				  uint64_t *dropped)
2466 {
2467 	int rc = 0;
2468 	struct hwrm_func_qstats_input req = {.req_type = 0};
2469 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2470 
2471 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2472 
2473 	req.fid = rte_cpu_to_le_16(fid);
2474 
2475 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2476 
2477 	HWRM_CHECK_RESULT();
2478 
2479 	if (dropped)
2480 		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2481 
2482 	HWRM_UNLOCK();
2483 
2484 	return rc;
2485 }
2486 
2487 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2488 			  struct rte_eth_stats *stats,
2489 			  struct hwrm_func_qstats_output *func_qstats)
2490 {
2491 	int rc = 0;
2492 	struct hwrm_func_qstats_input req = {.req_type = 0};
2493 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2494 
2495 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2496 
2497 	req.fid = rte_cpu_to_le_16(fid);
2498 
2499 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2500 
2501 	HWRM_CHECK_RESULT();
2502 	if (func_qstats)
2503 		memcpy(func_qstats, resp,
2504 		       sizeof(struct hwrm_func_qstats_output));
2505 
2506 	if (!stats)
2507 		goto exit;
2508 
2509 	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2510 	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2511 	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2512 	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2513 	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2514 	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2515 
2516 	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2517 	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2518 	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2519 	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2520 	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2521 	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2522 
2523 	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2524 	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2525 	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2526 
2527 exit:
2528 	HWRM_UNLOCK();
2529 
2530 	return rc;
2531 }
2532 
2533 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2534 {
2535 	int rc = 0;
2536 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
2537 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2538 
2539 	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2540 
2541 	req.fid = rte_cpu_to_le_16(fid);
2542 
2543 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2544 
2545 	HWRM_CHECK_RESULT();
2546 	HWRM_UNLOCK();
2547 
2548 	return rc;
2549 }
2550 
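/* Clear the stats context of every completion ring: indices below
 * rx_cp_nr_rings map to Rx queues, the remainder to Tx queues.
 */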
2551 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2552 {
2553 	unsigned int i;
2554 	int rc = 0;
2555 
2556 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2557 		struct bnxt_tx_queue *txq;
2558 		struct bnxt_rx_queue *rxq;
2559 		struct bnxt_cp_ring_info *cpr;
2560 
2561 		if (i >= bp->rx_cp_nr_rings) {
2562 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2563 			cpr = txq->cp_ring;
2564 		} else {
2565 			rxq = bp->rx_queues[i];
2566 			cpr = rxq->cp_ring;
2567 		}
2568 
2569 		rc = bnxt_hwrm_stat_clear(bp, cpr);
2570 		if (rc)
2571 			return rc;
2572 	}
2573 	return 0;
2574 }
2575 
2576 static int
2577 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2578 {
2579 	int rc;
2580 	unsigned int i;
2581 	struct bnxt_cp_ring_info *cpr;
2582 
2583 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2585 		if (i >= bp->rx_cp_nr_rings) {
2586 			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2587 		} else {
2588 			cpr = bp->rx_queues[i]->cp_ring;
2589 			if (BNXT_HAS_RING_GRPS(bp))
2590 				bp->grp_info[i].fw_stats_ctx = -1;
2591 		}
2592 		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2593 			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2594 			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2595 			if (rc)
2596 				return rc;
2597 		}
2598 	}
2599 	return 0;
2600 }
2601 
2602 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2603 {
2604 	unsigned int i;
2605 	int rc = 0;
2606 
2607 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2608 		struct bnxt_tx_queue *txq;
2609 		struct bnxt_rx_queue *rxq;
2610 		struct bnxt_cp_ring_info *cpr;
2611 
2612 		if (i >= bp->rx_cp_nr_rings) {
2613 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2614 			cpr = txq->cp_ring;
2615 		} else {
2616 			rxq = bp->rx_queues[i];
2617 			cpr = rxq->cp_ring;
2618 		}
2619 
2620 		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2621 
2622 		if (rc)
2623 			return rc;
2624 	}
2625 	return rc;
2626 }
2627 
2628 static int
2629 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2630 {
2631 	uint16_t idx;
2632 	int rc = 0;
2633 
2634 	if (!BNXT_HAS_RING_GRPS(bp))
2635 		return 0;
2636 
2637 	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2639 		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2640 			continue;
2641 
2642 		rc = bnxt_hwrm_ring_grp_free(bp, idx);
2643 
2644 		if (rc)
2645 			return rc;
2646 	}
2647 	return rc;
2648 }
2649 
2650 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2651 {
2652 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2653 
2654 	bnxt_hwrm_ring_free(bp, cp_ring,
2655 			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
2656 			    INVALID_HW_RING_ID);
2657 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2658 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2659 				     sizeof(*cpr->cp_desc_ring));
2660 	cpr->cp_raw_cons = 0;
2661 	cpr->valid = 0;
2662 }
2663 
2664 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2665 {
2666 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2667 
2668 	bnxt_hwrm_ring_free(bp, cp_ring,
2669 			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
2670 			INVALID_HW_RING_ID);
2671 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2672 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2673 			sizeof(*cpr->cp_desc_ring));
2674 	cpr->cp_raw_cons = 0;
2675 	cpr->valid = 0;
2676 }
2677 
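/* Tear down the firmware rings of one Rx queue: the Rx ring, the
 * aggregation ring (typed RX_AGG on P5 chips, RX otherwise), and
 * finally the completion ring. The matching grp_info entries are
 * invalidated on devices with ring groups.
 */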
2678 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2679 {
2680 	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2681 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2682 	struct bnxt_ring *ring = rxr->rx_ring_struct;
2683 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2684 
2685 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2686 		bnxt_hwrm_ring_free(bp, ring,
2687 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2688 				    cpr->cp_ring_struct->fw_ring_id);
2689 		ring->fw_ring_id = INVALID_HW_RING_ID;
2690 		if (BNXT_HAS_RING_GRPS(bp))
2691 			bp->grp_info[queue_index].rx_fw_ring_id =
2692 							INVALID_HW_RING_ID;
2693 	}
2694 	ring = rxr->ag_ring_struct;
2695 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2696 		bnxt_hwrm_ring_free(bp, ring,
2697 				    BNXT_CHIP_P5(bp) ?
2698 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2699 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2700 				    cpr->cp_ring_struct->fw_ring_id);
2701 		if (BNXT_HAS_RING_GRPS(bp))
2702 			bp->grp_info[queue_index].ag_fw_ring_id =
2703 							INVALID_HW_RING_ID;
2704 	}
2705 	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2706 		bnxt_free_cp_ring(bp, cpr);
2707 
2708 	if (BNXT_HAS_RING_GRPS(bp))
2709 		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2710 }
2711 
2712 static int
2713 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2714 {
2715 	unsigned int i;
2716 
2717 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2718 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
2719 		struct bnxt_tx_ring_info *txr = txq->tx_ring;
2720 		struct bnxt_ring *ring = txr->tx_ring_struct;
2721 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2722 
2723 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2724 			bnxt_hwrm_ring_free(bp, ring,
2725 					HWRM_RING_FREE_INPUT_RING_TYPE_TX,
2726 					cpr->cp_ring_struct->fw_ring_id);
2727 			ring->fw_ring_id = INVALID_HW_RING_ID;
2728 			memset(txr->tx_desc_ring, 0,
2729 					txr->tx_ring_struct->ring_size *
2730 					sizeof(*txr->tx_desc_ring));
2731 			memset(txr->tx_buf_ring, 0,
2732 					txr->tx_ring_struct->ring_size *
2733 					sizeof(*txr->tx_buf_ring));
2734 			txr->tx_raw_prod = 0;
2735 			txr->tx_raw_cons = 0;
2736 		}
2737 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2738 			bnxt_free_cp_ring(bp, cpr);
2739 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2740 		}
2741 	}
2742 
2743 	for (i = 0; i < bp->rx_cp_nr_rings; i++)
2744 		bnxt_free_hwrm_rx_ring(bp, i);
2745 
2746 	return 0;
2747 }
2748 
2749 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2750 {
2751 	uint16_t i;
2752 	int rc = 0;
2753 
2754 	if (!BNXT_HAS_RING_GRPS(bp))
2755 		return 0;
2756 
2757 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2758 		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2759 		if (rc)
2760 			return rc;
2761 	}
2762 	return rc;
2763 }
2764 
2765 /*
2766  * HWRM utility functions
2767  */
2768 
2769 void bnxt_free_hwrm_resources(struct bnxt *bp)
2770 {
2771 	/* Free the DMA-able HWRM command/response buffers */
2772 	rte_free(bp->hwrm_cmd_resp_addr);
2773 	rte_free(bp->hwrm_short_cmd_req_addr);
2774 	bp->hwrm_cmd_resp_addr = NULL;
2775 	bp->hwrm_short_cmd_req_addr = NULL;
2776 	bp->hwrm_cmd_resp_dma_addr = 0;
2777 	bp->hwrm_short_cmd_req_dma_addr = 0;
2778 }
2779 
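/* Allocate the DMA-able buffer used for HWRM responses and resolve
 * its IOVA. A minimal sketch of the expected probe-time pairing with
 * bnxt_free_hwrm_resources():
 *
 *	if (bnxt_alloc_hwrm_resources(bp) != 0)
 *		return -ENOMEM;
 *	...
 *	bnxt_free_hwrm_resources(bp);
 */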
2780 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2781 {
2782 	struct rte_pci_device *pdev = bp->pdev;
2783 	char type[RTE_MEMZONE_NAMESIZE];
2784 
2785 	snprintf(type, sizeof(type), "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2786 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2787 	bp->max_resp_len = BNXT_PAGE_SIZE;
2788 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2789 	if (bp->hwrm_cmd_resp_addr == NULL)
2790 		return -ENOMEM;
2791 	bp->hwrm_cmd_resp_dma_addr =
2792 		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2793 	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2794 		PMD_DRV_LOG(ERR,
2795 			"unable to map response address to physical memory\n");
2796 		return -ENOMEM;
2797 	}
2798 	rte_spinlock_init(&bp->hwrm_lock);
2799 
2800 	return 0;
2801 }
2802 
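/* Remove a single filter from hardware, dispatching on filter_type
 * (exact-match, ntuple, or L2). The backing L2 filter is cleared
 * afterwards in all cases.
 */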
2803 int
2804 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2805 {
2806 	int rc = 0;
2807 
2808 	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2809 		rc = bnxt_hwrm_clear_em_filter(bp, filter);
2810 		if (rc)
2811 			return rc;
2812 	} else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2813 		rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2814 		if (rc)
2815 			return rc;
2816 	}
2817 
2818 	rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2819 	return rc;
2820 }
2821 
2822 static int
2823 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2824 {
2825 	struct bnxt_filter_info *filter;
2826 	int rc = 0;
2827 
2828 	STAILQ_FOREACH(filter, &vnic->filter, next) {
2829 		rc = bnxt_clear_one_vnic_filter(bp, filter);
2830 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2831 		bnxt_free_filter(bp, filter);
2832 	}
2833 	return rc;
2834 }
2835 
2836 static int
2837 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2838 {
2839 	struct bnxt_filter_info *filter;
2840 	struct rte_flow *flow;
2841 	int rc = 0;
2842 
2843 	while (!STAILQ_EMPTY(&vnic->flow_list)) {
2844 		flow = STAILQ_FIRST(&vnic->flow_list);
2845 		filter = flow->filter;
2846 		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2847 		rc = bnxt_clear_one_vnic_filter(bp, filter);
2848 
2849 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2850 		rte_free(flow);
2851 	}
2852 	return rc;
2853 }
2854 
2855 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2856 {
2857 	struct bnxt_filter_info *filter;
2858 	int rc = 0;
2859 
2860 	STAILQ_FOREACH(filter, &vnic->filter, next) {
2861 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
2862 			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2863 						     filter);
2864 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2865 			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2866 							 filter);
2867 		else
2868 			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2869 						     filter);
2870 		if (rc)
2871 			break;
2872 	}
2873 	return rc;
2874 }
2875 
2876 static void
2877 bnxt_free_tunnel_ports(struct bnxt *bp)
2878 {
2879 	if (bp->vxlan_port_cnt)
2880 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2881 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2882 
2883 	if (bp->geneve_port_cnt)
2884 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2885 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2886 }
2887 
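/* Release every HWRM-allocated resource of the port: per-VNIC flows,
 * filters, contexts, and the VNICs themselves, followed by rings,
 * ring groups, stats contexts, and tunnel ports.
 */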
2888 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2889 {
2890 	int i;
2891 
2892 	if (bp->vnic_info == NULL)
2893 		return;
2894 
2895 	/*
2896 	 * Clean up VNICs in reverse order so that the L2 filter of
2897 	 * vnic0 is the last to be removed.
2898 	 */
2899 	for (i = bp->max_vnics - 1; i >= 0; i--) {
2900 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2901 
2902 		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2903 			continue;
2904 
2905 		bnxt_clear_hwrm_vnic_flows(bp, vnic);
2906 
2907 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
2908 
2909 		bnxt_hwrm_vnic_ctx_free(bp, vnic);
2910 
2911 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2912 
2913 		bnxt_hwrm_vnic_free(bp, vnic);
2914 
2915 		rte_free(vnic->fw_grp_ids);
2916 	}
2917 	/* Ring resources */
2918 	bnxt_free_all_hwrm_rings(bp);
2919 	bnxt_free_all_hwrm_ring_grps(bp);
2920 	bnxt_free_all_hwrm_stat_ctxs(bp);
2921 	bnxt_free_tunnel_ports(bp);
2922 }
2923 
2924 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2925 {
2926 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2927 
2928 	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2929 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2930 
2931 	switch (conf_link_speed) {
2932 	case ETH_LINK_SPEED_10M_HD:
2933 	case ETH_LINK_SPEED_100M_HD:
2934 		/* FALLTHROUGH */
2935 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2936 	}
2937 	return hw_link_duplex;
2938 }
2939 
2940 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2941 {
2942 	return !conf_link;
2943 }
2944 
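/* Map an ethdev link-speed value to the HWRM auto/forced link speed
 * encoding. For 50G and 100G the PAM4 encoding is selected when
 * pam4_link is set; unrecognized speeds are logged and default to
 * autoneg (0).
 */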
2945 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2946 					  uint16_t pam4_link)
2947 {
2948 	uint16_t eth_link_speed = 0;
2949 
2950 	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2951 		return ETH_LINK_SPEED_AUTONEG;
2952 
2953 	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2954 	case ETH_LINK_SPEED_100M:
2955 	case ETH_LINK_SPEED_100M_HD:
2956 		/* FALLTHROUGH */
2957 		eth_link_speed =
2958 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2959 		break;
2960 	case ETH_LINK_SPEED_1G:
2961 		eth_link_speed =
2962 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2963 		break;
2964 	case ETH_LINK_SPEED_2_5G:
2965 		eth_link_speed =
2966 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2967 		break;
2968 	case ETH_LINK_SPEED_10G:
2969 		eth_link_speed =
2970 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2971 		break;
2972 	case ETH_LINK_SPEED_20G:
2973 		eth_link_speed =
2974 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2975 		break;
2976 	case ETH_LINK_SPEED_25G:
2977 		eth_link_speed =
2978 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2979 		break;
2980 	case ETH_LINK_SPEED_40G:
2981 		eth_link_speed =
2982 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2983 		break;
2984 	case ETH_LINK_SPEED_50G:
2985 		eth_link_speed = pam4_link ?
2986 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2987 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2988 		break;
2989 	case ETH_LINK_SPEED_100G:
2990 		eth_link_speed = pam4_link ?
2991 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2992 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2993 		break;
2994 	case ETH_LINK_SPEED_200G:
2995 		eth_link_speed =
2996 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2997 		break;
2998 	default:
2999 		PMD_DRV_LOG(ERR,
3000 			"Unsupported link speed %d; default to AUTO\n",
3001 			conf_link_speed);
3002 		break;
3003 	}
3004 	return eth_link_speed;
3005 }
3006 
3007 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
3008 		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
3009 		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
3010 		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
3011 		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
3012 
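/* Validate dev_conf link_speeds against the port's capabilities: a
 * fixed setting must name exactly one supported speed, while an
 * advertised mask must intersect the supported set.
 */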
3013 static int bnxt_validate_link_speed(struct bnxt *bp)
3014 {
3015 	uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
3016 	uint16_t port_id = bp->eth_dev->data->port_id;
3017 	uint32_t link_speed_capa;
3018 	uint32_t one_speed;
3019 
3020 	if (link_speed == ETH_LINK_SPEED_AUTONEG)
3021 		return 0;
3022 
3023 	link_speed_capa = bnxt_get_speed_capabilities(bp);
3024 
3025 	if (link_speed & ETH_LINK_SPEED_FIXED) {
3026 		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
3027 
3028 		if (one_speed & (one_speed - 1)) {
3029 			PMD_DRV_LOG(ERR,
3030 				"Invalid advertised speeds (%u) for port %u\n",
3031 				link_speed, port_id);
3032 			return -EINVAL;
3033 		}
3034 		if ((one_speed & link_speed_capa) != one_speed) {
3035 			PMD_DRV_LOG(ERR,
3036 				"Unsupported advertised speed (%u) for port %u\n",
3037 				link_speed, port_id);
3038 			return -EINVAL;
3039 		}
3040 	} else {
3041 		if (!(link_speed & link_speed_capa)) {
3042 			PMD_DRV_LOG(ERR,
3043 				"Unsupported advertised speeds (%u) for port %u\n",
3044 				link_speed, port_id);
3045 			return -EINVAL;
3046 		}
3047 	}
3048 	return 0;
3049 }
3050 
3051 static uint16_t
3052 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
3053 {
3054 	uint16_t ret = 0;
3055 
3056 	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
3057 		if (bp->link_info->support_speeds)
3058 			return bp->link_info->support_speeds;
3059 		link_speed = BNXT_SUPPORTED_SPEEDS;
3060 	}
3061 
3062 	if (link_speed & ETH_LINK_SPEED_100M)
3063 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3064 	if (link_speed & ETH_LINK_SPEED_100M_HD)
3065 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3066 	if (link_speed & ETH_LINK_SPEED_1G)
3067 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3068 	if (link_speed & ETH_LINK_SPEED_2_5G)
3069 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
3070 	if (link_speed & ETH_LINK_SPEED_10G)
3071 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3072 	if (link_speed & ETH_LINK_SPEED_20G)
3073 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
3074 	if (link_speed & ETH_LINK_SPEED_25G)
3075 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
3076 	if (link_speed & ETH_LINK_SPEED_40G)
3077 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
3078 	if (link_speed & ETH_LINK_SPEED_50G)
3079 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
3080 	if (link_speed & ETH_LINK_SPEED_100G)
3081 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
3082 	if (link_speed & ETH_LINK_SPEED_200G)
3083 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3084 	return ret;
3085 }
3086 
3087 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
3088 {
3089 	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
3090 
3091 	switch (hw_link_speed) {
3092 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3093 		eth_link_speed = ETH_SPEED_NUM_100M;
3094 		break;
3095 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3096 		eth_link_speed = ETH_SPEED_NUM_1G;
3097 		break;
3098 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3099 		eth_link_speed = ETH_SPEED_NUM_2_5G;
3100 		break;
3101 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3102 		eth_link_speed = ETH_SPEED_NUM_10G;
3103 		break;
3104 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3105 		eth_link_speed = ETH_SPEED_NUM_20G;
3106 		break;
3107 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3108 		eth_link_speed = ETH_SPEED_NUM_25G;
3109 		break;
3110 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3111 		eth_link_speed = ETH_SPEED_NUM_40G;
3112 		break;
3113 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3114 		eth_link_speed = ETH_SPEED_NUM_50G;
3115 		break;
3116 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3117 		eth_link_speed = ETH_SPEED_NUM_100G;
3118 		break;
3119 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3120 		eth_link_speed = ETH_SPEED_NUM_200G;
3121 		break;
3122 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3123 	default:
3124 		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3125 			hw_link_speed);
3126 		break;
3127 	}
3128 	return eth_link_speed;
3129 }
3130 
3131 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3132 {
3133 	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3134 
3135 	switch (hw_link_duplex) {
3136 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3137 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3138 		/* FALLTHROUGH */
3139 		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3140 		break;
3141 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3142 		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3143 		break;
3144 	default:
3145 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3146 			hw_link_duplex);
3147 		break;
3148 	}
3149 	return eth_link_duplex;
3150 }
3151 
3152 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3153 {
3154 	int rc = 0;
3155 	struct bnxt_link_info *link_info = bp->link_info;
3156 
3157 	rc = bnxt_hwrm_port_phy_qcaps(bp);
3158 	if (rc)
3159 		PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3160 
3161 	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3162 	if (rc) {
3163 		PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3164 		goto exit;
3165 	}
3166 
3167 	if (link_info->link_speed)
3168 		link->link_speed =
3169 			bnxt_parse_hw_link_speed(link_info->link_speed);
3170 	else
3171 		link->link_speed = ETH_SPEED_NUM_NONE;
3172 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3173 	link->link_status = link_info->link_up;
3174 	link->link_autoneg = link_info->auto_mode ==
3175 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3176 		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3177 exit:
3178 	return rc;
3179 }
3180 
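/* Apply the link configuration requested via dev_conf->link_speeds.
 * Only a single-function PF may change PHY settings. Autoneg is used
 * when both the configuration and the firmware allow it; otherwise a
 * forced speed is selected, preferring PAM4 speeds when applicable.
 */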
3181 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3182 {
3183 	int rc = 0;
3184 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3185 	struct bnxt_link_info link_req;
3186 	uint16_t speed, autoneg;
3187 
3188 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3189 		return 0;
3190 
3191 	rc = bnxt_validate_link_speed(bp);
3192 	if (rc)
3193 		goto error;
3194 
3195 	memset(&link_req, 0, sizeof(link_req));
3196 	link_req.link_up = link_up;
3197 	if (!link_up)
3198 		goto port_phy_cfg;
3199 
3200 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3201 	if (BNXT_CHIP_P5(bp) &&
3202 	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3203 		/* 40G is not supported as part of media auto detect.
3204 		 * The speed should be forced and autoneg disabled
3205 		 * to configure 40G speed.
3206 		 */
3207 		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3208 		autoneg = 0;
3209 	}
3210 
3211 	/* No auto speeds and no auto PAM4 link speeds: disable autoneg. */
3212 	if (bp->link_info->auto_link_speed == 0 &&
3213 	    bp->link_info->link_signal_mode &&
3214 	    bp->link_info->auto_pam4_link_speeds == 0)
3215 		autoneg = 0;
3216 
3217 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3218 					  bp->link_info->link_signal_mode);
3219 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3220 	/* Autoneg can be enabled only when the firmware allows it.
3221 	 * When the user configures a fixed speed of 40G and later changes
3222 	 * to any other speed, auto_link_speed/force_link_speed remains set
3223 	 * to 40G until the link comes up at the new speed.
3224 	 */
3225 	if (autoneg == 1 &&
3226 	    !(!BNXT_CHIP_P5(bp) &&
3227 	      (bp->link_info->auto_link_speed ||
3228 	       bp->link_info->force_link_speed))) {
3229 		link_req.phy_flags |=
3230 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3231 		link_req.auto_link_speed_mask =
3232 			bnxt_parse_eth_link_speed_mask(bp,
3233 						       dev_conf->link_speeds);
3234 	} else {
3235 		if (bp->link_info->phy_type ==
3236 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3237 		    bp->link_info->phy_type ==
3238 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3239 		    bp->link_info->media_type ==
3240 		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3241 			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3242 			return -EINVAL;
3243 		}
3244 
3245 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3246 		/* If the user requests a particular speed, try that first. */
3247 		if (speed)
3248 			link_req.link_speed = speed;
3249 		else if (bp->link_info->force_pam4_link_speed)
3250 			link_req.link_speed =
3251 				bp->link_info->force_pam4_link_speed;
3252 		else if (bp->link_info->auto_pam4_link_speeds)
3253 			link_req.link_speed =
3254 				bp->link_info->auto_pam4_link_speeds;
3255 		else if (bp->link_info->support_pam4_speeds)
3256 			link_req.link_speed =
3257 				bp->link_info->support_pam4_speeds;
3258 		else if (bp->link_info->force_link_speed)
3259 			link_req.link_speed = bp->link_info->force_link_speed;
3260 		else
3261 			link_req.link_speed = bp->link_info->auto_link_speed;
3262 		/* If the auto PAM4 link speed is zero but auto_link_speed
3263 		 * is not, use auto_link_speed.
3264 		 */
3265 		if (bp->link_info->auto_link_speed != 0 &&
3266 		    bp->link_info->auto_pam4_link_speeds == 0)
3267 			link_req.link_speed = bp->link_info->auto_link_speed;
3268 	}
3269 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3270 	link_req.auto_pause = bp->link_info->auto_pause;
3271 	link_req.force_pause = bp->link_info->force_pause;
3272 
3273 port_phy_cfg:
3274 	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3275 	if (rc) {
3276 		PMD_DRV_LOG(ERR,
3277 			"Set link config failed with rc %d\n", rc);
3278 	}
3279 
3280 error:
3281 	return rc;
3282 }
3283 
3284 /* JIRA 22088 */
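/* Query this function's own configuration (fid 0xffff): VLAN, SVIF,
 * multi-host and trusted-VF flags, MTU, NPAR partition type, and the
 * legacy doorbell size.
 */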
3285 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3286 {
3287 	struct hwrm_func_qcfg_input req = {0};
3288 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3289 	uint16_t svif_info;
3290 	uint16_t flags;
3291 	int rc = 0;
3292 	bp->func_svif = BNXT_SVIF_INVALID;
3293 
3294 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3295 	req.fid = rte_cpu_to_le_16(0xffff);
3296 
3297 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3298 
3299 	HWRM_CHECK_RESULT();
3300 
3301 	/* Hard-coded 0xfff VLAN ID mask */
3302 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3303 
3304 	svif_info = rte_le_to_cpu_16(resp->svif_info);
3305 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3306 		bp->func_svif = svif_info &
3307 				HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3308 
3309 	flags = rte_le_to_cpu_16(resp->flags);
3310 	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3311 		bp->flags |= BNXT_FLAG_MULTI_HOST;
3312 
3313 	if (BNXT_VF(bp) &&
3314 	    !BNXT_VF_IS_TRUSTED(bp) &&
3315 	    (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3316 		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3317 		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3318 	} else if (BNXT_VF(bp) &&
3319 		   BNXT_VF_IS_TRUSTED(bp) &&
3320 		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3321 		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3322 		PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3323 	}
3324 
3325 	if (mtu)
3326 		*mtu = rte_le_to_cpu_16(resp->mtu);
3327 
3328 	switch (resp->port_partition_type) {
3329 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3330 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3331 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3332 		/* FALLTHROUGH */
3333 		bp->flags |= BNXT_FLAG_NPAR_PF;
3334 		break;
3335 	default:
3336 		bp->flags &= ~BNXT_FLAG_NPAR_PF;
3337 		break;
3338 	}
3339 
3340 	bp->legacy_db_size =
3341 		rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3342 
3343 	HWRM_UNLOCK();
3344 
3345 	return rc;
3346 }
3347 
3348 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3349 {
3350 	struct hwrm_func_qcfg_input req = {0};
3351 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3352 	int rc;
3353 
3354 	if (!BNXT_VF_IS_TRUSTED(bp))
3355 		return 0;
3356 
3357 	if (!bp->parent)
3358 		return -EINVAL;
3359 
3360 	bp->parent->fid = BNXT_PF_FID_INVALID;
3361 
3362 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3363 
3364 	req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3365 
3366 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3367 
3368 	HWRM_CHECK_RESULT_SILENT();
3369 
3370 	memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3371 	bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3372 	bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3373 	bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3374 
3375 	/* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3376 	if (bp->parent->vnic == 0) {
3377 		PMD_DRV_LOG(DEBUG, "parent VNIC unavailable.\n");
3378 		/* Use hard-coded values appropriate for current Wh+ fw. */
3379 		if (bp->parent->fid == 2)
3380 			bp->parent->vnic = 0x100;
3381 		else
3382 			bp->parent->vnic = 1;
3383 	}
3384 
3385 	HWRM_UNLOCK();
3386 
3387 	return 0;
3388 }
3389 
3390 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3391 				 uint16_t *vnic_id, uint16_t *svif)
3392 {
3393 	struct hwrm_func_qcfg_input req = {0};
3394 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3395 	uint16_t svif_info;
3396 	int rc = 0;
3397 
3398 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3399 	req.fid = rte_cpu_to_le_16(fid);
3400 
3401 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3402 
3403 	HWRM_CHECK_RESULT();
3404 
3405 	if (vnic_id)
3406 		*vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3407 
3408 	svif_info = rte_le_to_cpu_16(resp->svif_info);
3409 	if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3410 		*svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3411 
3412 	HWRM_UNLOCK();
3413 
3414 	return rc;
3415 }
3416 
3417 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3418 {
3419 	struct hwrm_port_mac_qcfg_input req = {0};
3420 	struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3421 	uint16_t port_svif_info;
3422 	int rc;
3423 
3424 	bp->port_svif = BNXT_SVIF_INVALID;
3425 
3426 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3427 		return 0;
3428 
3429 	HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3430 
3431 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3432 
3433 	HWRM_CHECK_RESULT_SILENT();
3434 
3435 	port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3436 	if (port_svif_info &
3437 	    HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3438 		bp->port_svif = port_svif_info &
3439 			HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3440 
3441 	HWRM_UNLOCK();
3442 
3443 	return 0;
3444 }
3445 
3446 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3447 				 struct bnxt_pf_resource_info *pf_resc)
3448 {
3449 	struct hwrm_func_cfg_input req = {0};
3450 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3451 	uint32_t enables;
3452 	int rc;
3453 
3454 	enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3455 		  HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3456 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3457 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3458 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3459 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3460 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3461 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3462 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3463 
3464 	if (BNXT_HAS_RING_GRPS(bp)) {
3465 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3466 		req.num_hw_ring_grps =
3467 			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3468 	} else if (BNXT_HAS_NQ(bp)) {
3469 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3470 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3471 	}
3472 
3473 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3474 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3475 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3476 	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3477 	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3478 	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3479 	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3480 	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3481 	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3482 	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3483 	req.fid = rte_cpu_to_le_16(0xffff);
3484 	req.enables = rte_cpu_to_le_32(enables);
3485 
3486 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3487 
3488 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3489 
3490 	HWRM_CHECK_RESULT();
3491 	HWRM_UNLOCK();
3492 
3493 	return rc;
3494 }
3495 
3496 /* Min values are the guaranteed resource counts and max values are subject
3497  * to availability. The strategy for now is to keep both min & max
3498  * values the same so that each VF's allocation is fully guaranteed.
3499  */
3500 static void
3501 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3502 			      struct hwrm_func_vf_resource_cfg_input *req,
3503 			      int num_vfs)
3504 {
3505 	req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3506 					       (num_vfs + 1));
3507 	req->min_rsscos_ctx = req->max_rsscos_ctx;
3508 	req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3509 	req->min_stat_ctx = req->max_stat_ctx;
3510 	req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3511 					       (num_vfs + 1));
3512 	req->min_cmpl_rings = req->max_cmpl_rings;
3513 	req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3514 	req->min_tx_rings = req->max_tx_rings;
3515 	req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3516 	req->min_rx_rings = req->max_rx_rings;
3517 	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3518 	req->min_l2_ctxs = req->max_l2_ctxs;
3519 	/* TODO: For now, do not support VMDq/RFS on VFs. */
3520 	req->max_vnics = rte_cpu_to_le_16(1);
3521 	req->min_vnics = req->max_vnics;
3522 	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3523 						 (num_vfs + 1));
3524 	req->min_hw_ring_grps = req->max_hw_ring_grps;
3525 	req->flags =
3526 	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3527 }
3528 
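/*
 * Worked example for the per-function split above (illustrative numbers):
 * with bp->max_tx_rings = 128 and num_vfs = 7, each of the eight functions
 * (PF + 7 VFs) is offered 128 / (7 + 1) = 16 TX rings. Because min == max
 * and the MIN_GUARANTEED flag is set, firmware either guarantees a VF its
 * full share or fails that VF's HWRM_FUNC_VF_RESOURCE_CFG request outright.
 */
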
3529 static void
3530 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3531 			      struct hwrm_func_cfg_input *req,
3532 			      int num_vfs)
3533 {
3534 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3535 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3536 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3537 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3538 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3539 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3540 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3541 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3542 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3543 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3544 
3545 	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3546 				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3547 				    BNXT_NUM_VLANS);
3548 	req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3549 	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3550 						(num_vfs + 1));
3551 	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3552 	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3553 					       (num_vfs + 1));
3554 	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3555 	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3556 	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3557 	/* TODO: For now, do not support VMDq/RFS on VFs. */
3558 	req->num_vnics = rte_cpu_to_le_16(1);
3559 	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3560 						 (num_vfs + 1));
3561 }
3562 
3563 /* Update the port-wide resource values based on how many resources
3564  * were allocated to the VF.
3565  */
3566 static int bnxt_update_max_resources(struct bnxt *bp,
3567 				     int vf)
3568 {
3569 	struct hwrm_func_qcfg_input req = {0};
3570 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3571 	int rc;
3572 
3573 	/* Get the actual allocated values now */
3574 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3575 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3576 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3577 	HWRM_CHECK_RESULT();
3578 
3579 	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3580 	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3581 	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3582 	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3583 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3584 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3585 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3586 
3587 	HWRM_UNLOCK();
3588 
3589 	return 0;
3590 }
3591 
3592 /* Update the PF resource values based on how many resources
3593  * got allocated to it.
3594  */
3595 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3596 {
3597 	struct hwrm_func_qcfg_input req = {0};
3598 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3599 	int rc;
3600 
3601 	/* Get the actual allocated values now */
3602 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3603 	req.fid = rte_cpu_to_le_16(0xffff);
3604 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3605 	HWRM_CHECK_RESULT();
3606 
3607 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3608 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3609 	bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3610 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3611 	bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3612 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3613 	bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3614 	bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3615 
3616 	HWRM_UNLOCK();
3617 
3618 	return 0;
3619 }
3620 
3621 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3622 {
3623 	struct hwrm_func_qcfg_input req = {0};
3624 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3625 	int rc;
3626 
3627 	/* Query the VF's current default VLAN. */
3628 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3629 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3630 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3631 	HWRM_CHECK_RESULT();
3632 	rc = rte_le_to_cpu_16(resp->vlan);
3633 
3634 	HWRM_UNLOCK();
3635 
3636 	return rc;
3637 }
3638 
3639 static int bnxt_query_pf_resources(struct bnxt *bp,
3640 				   struct bnxt_pf_resource_info *pf_resc)
3641 {
3642 	struct hwrm_func_qcfg_input req = {0};
3643 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3644 	int rc;
3645 
3646 	/* And copy the allocated numbers into the pf struct */
3647 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3648 	req.fid = rte_cpu_to_le_16(0xffff);
3649 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3650 	HWRM_CHECK_RESULT();
3651 
3652 	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3653 	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3654 	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3655 	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3656 	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3657 	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3658 	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3659 	bp->pf->evb_mode = resp->evb_mode;
3660 
3661 	HWRM_UNLOCK();
3662 
3663 	return rc;
3664 }
3665 
3666 static void
3667 bnxt_calculate_pf_resources(struct bnxt *bp,
3668 			    struct bnxt_pf_resource_info *pf_resc,
3669 			    int num_vfs)
3670 {
3671 	if (!num_vfs) {
3672 		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3673 		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3674 		pf_resc->num_cp_rings = bp->max_cp_rings;
3675 		pf_resc->num_tx_rings = bp->max_tx_rings;
3676 		pf_resc->num_rx_rings = bp->max_rx_rings;
3677 		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3678 		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3679 
3680 		return;
3681 	}
3682 
3683 	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3684 				   bp->max_rsscos_ctx % (num_vfs + 1);
3685 	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3686 				 bp->max_stat_ctx % (num_vfs + 1);
3687 	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3688 				bp->max_cp_rings % (num_vfs + 1);
3689 	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3690 				bp->max_tx_rings % (num_vfs + 1);
3691 	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3692 				bp->max_rx_rings % (num_vfs + 1);
3693 	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3694 			       bp->max_l2_ctx % (num_vfs + 1);
3695 	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3696 				    bp->max_ring_grps % (num_vfs + 1);
3697 }
3698 
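/*
 * Worked example for the split above (illustrative numbers): with
 * bp->max_rx_rings = 64 and num_vfs = 6, each function's share is
 * 64 / 7 = 9 RX rings and the remainder 64 % 7 = 1 stays with the PF,
 * so the PF gets 10 rings and the six VFs get 9 each:
 * 10 + 6 * 9 = 64, leaving no ring stranded.
 */
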
3699 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3700 {
3701 	struct bnxt_pf_resource_info pf_resc = { 0 };
3702 	int rc;
3703 
3704 	if (!BNXT_PF(bp)) {
3705 		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3706 		return -EINVAL;
3707 	}
3708 
3709 	rc = bnxt_hwrm_func_qcaps(bp);
3710 	if (rc)
3711 		return rc;
3712 
3713 	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3714 
3715 	bp->pf->func_cfg_flags &=
3716 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3717 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3718 	bp->pf->func_cfg_flags |=
3719 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3720 
3721 	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3722 	if (rc)
3723 		return rc;
3724 
3725 	rc = bnxt_update_max_resources_pf_only(bp);
3726 
3727 	return rc;
3728 }
3729 
3730 static int
3731 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3732 {
3733 	size_t req_buf_sz, sz;
3734 	int i, rc;
3735 
3736 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3737 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3738 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3739 	if (bp->pf->vf_req_buf == NULL)
3740 		return -ENOMEM;
3742 
3743 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3744 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3745 
3746 	for (i = 0; i < num_vfs; i++)
3747 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3748 					     (i * HWRM_MAX_REQ_LEN);
3749 
3750 	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3751 	if (rc)
3752 		rte_free(bp->pf->vf_req_buf);
3753 
3754 	return rc;
3755 }
3756 
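/*
 * Layout of the forwarded-request buffer set up above: one contiguous
 * allocation of num_vfs * HWRM_MAX_REQ_LEN bytes, carved into fixed-size
 * per-VF slots, with vf_info[i].req_buf pointing at slot i:
 *
 *	vf_req_buf
 *	| VF0 slot | VF1 slot | ... | VF(num_vfs - 1) slot |
 *	  each slot is HWRM_MAX_REQ_LEN bytes
 */
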
3757 static int
3758 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3759 {
3760 	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3761 	struct hwrm_func_vf_resource_cfg_input req = {0};
3762 	int i, rc = 0;
3763 
3764 	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3765 	bp->pf->active_vfs = 0;
3766 	for (i = 0; i < num_vfs; i++) {
3767 		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3768 		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3769 		rc = bnxt_hwrm_send_message(bp,
3770 					    &req,
3771 					    sizeof(req),
3772 					    BNXT_USE_CHIMP_MB);
3773 		if (rc || resp->error_code) {
3774 			PMD_DRV_LOG(ERR,
3775 				"Failed to initialize VF %d\n", i);
3776 			PMD_DRV_LOG(ERR,
3777 				"Not all VFs available. (%d, %d)\n",
3778 				rc, resp->error_code);
3779 			HWRM_UNLOCK();
3780 
3781 			/* If the first VF configuration itself fails,
3782 			 * unregister the vf_fwd_request buffer.
3783 			 */
3784 			if (i == 0)
3785 				bnxt_hwrm_func_buf_unrgtr(bp);
3786 			break;
3787 		}
3788 		HWRM_UNLOCK();
3789 
3790 		/* Update the max resource values based on the resource values
3791 		 * allocated to the VF.
3792 		 */
3793 		bnxt_update_max_resources(bp, i);
3794 		bp->pf->active_vfs++;
3795 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3796 	}
3797 
3798 	return 0;
3799 }
3800 
3801 static int
3802 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3803 {
3804 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3805 	struct hwrm_func_cfg_input req = {0};
3806 	int i, rc;
3807 
3808 	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3809 
3810 	bp->pf->active_vfs = 0;
3811 	for (i = 0; i < num_vfs; i++) {
3812 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3813 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3814 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3815 		rc = bnxt_hwrm_send_message(bp,
3816 					    &req,
3817 					    sizeof(req),
3818 					    BNXT_USE_CHIMP_MB);
3819 
3820 		/* Clear enable flag for next pass */
3821 		req.enables &= ~rte_cpu_to_le_32(
3822 				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3823 
3824 		if (rc || resp->error_code) {
3825 			PMD_DRV_LOG(ERR,
3826 				"Failed to initialize VF %d\n", i);
3827 			PMD_DRV_LOG(ERR,
3828 				"Not all VFs available. (%d, %d)\n",
3829 				rc, resp->error_code);
3830 			HWRM_UNLOCK();
3831 
3832 			/* If the first VF configuration itself fails,
3833 			 * unregister the vf_fwd_request buffer.
3834 			 */
3835 			if (i == 0)
3836 				bnxt_hwrm_func_buf_unrgtr(bp);
3837 			break;
3838 		}
3839 
3840 		HWRM_UNLOCK();
3841 
3842 		/* Update the max resource values based on the resource values
3843 		 * allocated to the VF.
3844 		 */
3845 		bnxt_update_max_resources(bp, i);
3846 		bp->pf->active_vfs++;
3847 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3848 	}
3849 
3850 	return 0;
3851 }
3852 
3853 static void
3854 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3855 {
3856 	if (bp->flags & BNXT_FLAG_NEW_RM)
3857 		bnxt_process_vf_resc_config_new(bp, num_vfs);
3858 	else
3859 		bnxt_process_vf_resc_config_old(bp, num_vfs);
3860 }
3861 
3862 static void
3863 bnxt_update_pf_resources(struct bnxt *bp,
3864 			 struct bnxt_pf_resource_info *pf_resc)
3865 {
3866 	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3867 	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3868 	bp->max_cp_rings = pf_resc->num_cp_rings;
3869 	bp->max_tx_rings = pf_resc->num_tx_rings;
3870 	bp->max_rx_rings = pf_resc->num_rx_rings;
3871 	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3872 }
3873 
3874 static int32_t
3875 bnxt_configure_pf_resources(struct bnxt *bp,
3876 			    struct bnxt_pf_resource_info *pf_resc)
3877 {
3878 	/*
3879 	 * Use STD_TX_RING_MODE here to limit the number of TX rings;
3880 	 * this allows QoS to function properly. Without it, the PF's
3881 	 * rings would break per-function bandwidth settings.
3882 	 */
3883 	bp->pf->func_cfg_flags &=
3884 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3885 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3886 	bp->pf->func_cfg_flags |=
3887 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3888 	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3889 }
3890 
3891 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3892 {
3893 	struct bnxt_pf_resource_info pf_resc = { 0 };
3894 	int rc;
3895 
3896 	if (!BNXT_PF(bp)) {
3897 		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3898 		return -EINVAL;
3899 	}
3900 
3901 	rc = bnxt_hwrm_func_qcaps(bp);
3902 	if (rc)
3903 		return rc;
3904 
3905 	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3906 
3907 	rc = bnxt_configure_pf_resources(bp, &pf_resc);
3908 	if (rc)
3909 		return rc;
3910 
3911 	rc = bnxt_query_pf_resources(bp, &pf_resc);
3912 	if (rc)
3913 		return rc;
3914 
3915 	/*
3916 	 * Now, create and register a buffer to hold forwarded VF requests
3917 	 */
3918 	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3919 	if (rc)
3920 		return rc;
3921 
3922 	bnxt_configure_vf_resources(bp, num_vfs);
3923 
3924 	bnxt_update_pf_resources(bp, &pf_resc);
3925 
3926 	return 0;
3927 }
3928 
3929 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3930 {
3931 	struct hwrm_func_cfg_input req = {0};
3932 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3933 	int rc;
3934 
3935 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3936 
3937 	req.fid = rte_cpu_to_le_16(0xffff);
3938 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3939 	req.evb_mode = bp->pf->evb_mode;
3940 
3941 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3942 	HWRM_CHECK_RESULT();
3943 	HWRM_UNLOCK();
3944 
3945 	return rc;
3946 }
3947 
3948 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3949 				uint8_t tunnel_type)
3950 {
3951 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
3952 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3953 	int rc = 0;
3954 
3955 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3956 	req.tunnel_type = tunnel_type;
3957 	req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3958 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3959 	HWRM_CHECK_RESULT();
3960 
3961 	switch (tunnel_type) {
3962 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3963 		bp->vxlan_fw_dst_port_id =
3964 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3965 		bp->vxlan_port = port;
3966 		break;
3967 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3968 		bp->geneve_fw_dst_port_id =
3969 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3970 		bp->geneve_port = port;
3971 		break;
3972 	default:
3973 		break;
3974 	}
3975 
3976 	HWRM_UNLOCK();
3977 
3978 	return rc;
3979 }
3980 
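/*
 * Example use (illustrative): programming the IANA-assigned VXLAN UDP port.
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, 4789,
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *
 * On success the firmware-assigned tunnel_dst_port_id is cached in
 * bp->vxlan_fw_dst_port_id so a later bnxt_hwrm_tunnel_dst_port_free()
 * can reference the port.
 */
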
3981 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3982 				uint8_t tunnel_type)
3983 {
3984 	struct hwrm_tunnel_dst_port_free_input req = {0};
3985 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3986 	int rc = 0;
3987 
3988 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3989 
3990 	req.tunnel_type = tunnel_type;
3991 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3992 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3993 
3994 	HWRM_CHECK_RESULT();
3995 	HWRM_UNLOCK();
3996 
3997 	if (tunnel_type ==
3998 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
3999 		bp->vxlan_port = 0;
4000 		bp->vxlan_port_cnt = 0;
4001 	}
4002 
4003 	if (tunnel_type ==
4004 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
4005 		bp->geneve_port = 0;
4006 		bp->geneve_port_cnt = 0;
4007 	}
4008 
4009 	return rc;
4010 }
4011 
4012 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
4013 					uint32_t flags)
4014 {
4015 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4016 	struct hwrm_func_cfg_input req = {0};
4017 	int rc;
4018 
4019 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4020 
4021 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4022 	req.flags = rte_cpu_to_le_32(flags);
4023 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4024 
4025 	HWRM_CHECK_RESULT();
4026 	HWRM_UNLOCK();
4027 
4028 	return rc;
4029 }
4030 
4031 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
4032 {
4033 	uint32_t *flag = flagp;
4034 
4035 	vnic->flags = *flag;
4036 }
4037 
4038 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4039 {
4040 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
4041 }
4042 
4043 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
4044 {
4045 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4046 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
4047 	int rc;
4048 
4049 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
4050 
4051 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
4052 	req.req_buf_page_size =
4053 		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
4054 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
4055 	req.req_buf_page_addr0 =
4056 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
4057 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
4058 		PMD_DRV_LOG(ERR,
4059 			"unable to map buffer address to physical memory\n");
4060 		HWRM_UNLOCK();
4061 		return -ENOMEM;
4062 	}
4063 
4064 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4065 
4066 	HWRM_CHECK_RESULT();
4067 	HWRM_UNLOCK();
4068 
4069 	return rc;
4070 }
4071 
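/*
 * Worked example for the page-size encoding above (illustrative, assuming
 * the HSI value HWRM_MAX_REQ_LEN == 128): with num_vfs = 8 the request
 * buffer spans 8 * 128 = 1024 bytes, and page_getenum(1024) returns 12,
 * i.e. the buffer is described to firmware as a single 4 KB (2^12) page.
 */
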
4072 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
4073 {
4074 	int rc = 0;
4075 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
4076 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
4077 
4078 	if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
4079 		return 0;
4080 
4081 	HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
4082 
4083 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4084 
4085 	HWRM_CHECK_RESULT();
4086 	HWRM_UNLOCK();
4087 
4088 	return rc;
4089 }
4090 
4091 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
4092 {
4093 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4094 	struct hwrm_func_cfg_input req = {0};
4095 	int rc;
4096 
4097 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4098 
4099 	req.fid = rte_cpu_to_le_16(0xffff);
4100 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
4101 	req.enables = rte_cpu_to_le_32(
4102 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4103 	req.async_event_cr = rte_cpu_to_le_16(
4104 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4105 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4106 
4107 	HWRM_CHECK_RESULT();
4108 	HWRM_UNLOCK();
4109 
4110 	return rc;
4111 }
4112 
4113 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
4114 {
4115 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4116 	struct hwrm_func_vf_cfg_input req = {0};
4117 	int rc;
4118 
4119 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4120 
4121 	req.enables = rte_cpu_to_le_32(
4122 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4123 	req.async_event_cr = rte_cpu_to_le_16(
4124 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4125 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4126 
4127 	HWRM_CHECK_RESULT();
4128 	HWRM_UNLOCK();
4129 
4130 	return rc;
4131 }
4132 
4133 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4134 {
4135 	struct hwrm_func_cfg_input req = {0};
4136 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4137 	uint16_t dflt_vlan, fid;
4138 	uint32_t func_cfg_flags;
4139 	int rc = 0;
4140 
4141 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4142 
4143 	if (is_vf) {
4144 		dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4145 		fid = bp->pf->vf_info[vf].fid;
4146 		func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4147 	} else {
4148 		fid = 0xffff;	/* The PF itself; converted to LE once below. */
4149 		func_cfg_flags = bp->pf->func_cfg_flags;
4150 		dflt_vlan = bp->vlan;
4151 	}
4152 
4153 	req.flags = rte_cpu_to_le_32(func_cfg_flags);
4154 	req.fid = rte_cpu_to_le_16(fid);
4155 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4156 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4157 
4158 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4159 
4160 	HWRM_CHECK_RESULT();
4161 	HWRM_UNLOCK();
4162 
4163 	return rc;
4164 }
4165 
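/*
 * Example use (illustrative): to make VLAN 100 the default for VF 3, a
 * caller would record it in the per-VF state and then push it:
 *
 *	bp->pf->vf_info[3].dflt_vlan = 100;
 *	rc = bnxt_hwrm_set_default_vlan(bp, 3, 1);
 *
 * With is_vf == 0, the same request reprograms the PF itself from bp->vlan.
 */
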
4166 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4167 			uint16_t max_bw, uint16_t enables)
4168 {
4169 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4170 	struct hwrm_func_cfg_input req = {0};
4171 	int rc;
4172 
4173 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4174 
4175 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4176 	req.enables |= rte_cpu_to_le_32(enables);
4177 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4178 	req.max_bw = rte_cpu_to_le_32(max_bw);
4179 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4180 
4181 	HWRM_CHECK_RESULT();
4182 	HWRM_UNLOCK();
4183 
4184 	return rc;
4185 }
4186 
4187 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4188 {
4189 	struct hwrm_func_cfg_input req = {0};
4190 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4191 	int rc = 0;
4192 
4193 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4194 
4195 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4196 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4197 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4198 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4199 
4200 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4201 
4202 	HWRM_CHECK_RESULT();
4203 	HWRM_UNLOCK();
4204 
4205 	return rc;
4206 }
4207 
4208 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4209 {
4210 	int rc;
4211 
4212 	if (BNXT_PF(bp))
4213 		rc = bnxt_hwrm_func_cfg_def_cp(bp);
4214 	else
4215 		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4216 
4217 	return rc;
4218 }
4219 
4220 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4221 			      void *encaped, size_t ec_size)
4222 {
4223 	int rc = 0;
4224 	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4225 	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4226 
4227 	if (ec_size > sizeof(req.encap_request))
4228 		return -1;
4229 
4230 	HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4231 
4232 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4233 	memcpy(req.encap_request, encaped, ec_size);
4234 
4235 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4236 
4237 	HWRM_CHECK_RESULT();
4238 	HWRM_UNLOCK();
4239 
4240 	return rc;
4241 }
4242 
4243 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4244 				       struct rte_ether_addr *mac)
4245 {
4246 	struct hwrm_func_qcfg_input req = {0};
4247 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4248 	int rc;
4249 
4250 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4251 
4252 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4253 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4254 
4255 	HWRM_CHECK_RESULT();
4256 
4257 	memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4258 
4259 	HWRM_UNLOCK();
4260 
4261 	return rc;
4262 }
4263 
4264 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4265 			    void *encaped, size_t ec_size)
4266 {
4267 	int rc = 0;
4268 	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4269 	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4270 
4271 	if (ec_size > sizeof(req.encap_request))
4272 		return -1;
4273 
4274 	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4275 
4276 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4277 	memcpy(req.encap_request, encaped, ec_size);
4278 
4279 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4280 
4281 	HWRM_CHECK_RESULT();
4282 	HWRM_UNLOCK();
4283 
4284 	return rc;
4285 }
4286 
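/*
 * Note on the two forwarding paths above: when a VF's HWRM command lands in
 * the buffer registered via bnxt_hwrm_func_buf_rgtr(), the PF inspects it
 * and either lets firmware execute it with bnxt_hwrm_exec_fwd_resp() or
 * bounces it with bnxt_hwrm_reject_fwd_resp(); both simply wrap the
 * original request bytes in the encap_request field.
 */
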
4287 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4288 			 struct rte_eth_stats *stats, uint8_t rx)
4289 {
4290 	int rc = 0;
4291 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4292 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4293 
4294 	HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4295 
4296 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
4297 
4298 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4299 
4300 	HWRM_CHECK_RESULT();
4301 
4302 	if (rx) {
4303 		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4304 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4305 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4306 		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4307 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4308 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4309 		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4310 		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4311 	} else {
4312 		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4313 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4314 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4315 		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4316 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4317 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4318 	}
4319 
4320 	HWRM_UNLOCK();
4321 
4322 	return rc;
4323 }
4324 
4325 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4326 {
4327 	struct hwrm_port_qstats_input req = {0};
4328 	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4329 	struct bnxt_pf_info *pf = bp->pf;
4330 	int rc;
4331 
4332 	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4333 
4334 	req.port_id = rte_cpu_to_le_16(pf->port_id);
4335 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4336 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4337 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4338 
4339 	HWRM_CHECK_RESULT();
4340 	HWRM_UNLOCK();
4341 
4342 	return rc;
4343 }
4344 
4345 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4346 {
4347 	struct hwrm_port_clr_stats_input req = {0};
4348 	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4349 	struct bnxt_pf_info *pf = bp->pf;
4350 	int rc;
4351 
4352 	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
4353 	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4354 	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4355 		return 0;
4356 
4357 	HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4358 
4359 	req.port_id = rte_cpu_to_le_16(pf->port_id);
4360 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4361 
4362 	HWRM_CHECK_RESULT();
4363 	HWRM_UNLOCK();
4364 
4365 	return rc;
4366 }
4367 
4368 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4369 {
4370 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4371 	struct hwrm_port_led_qcaps_input req = {0};
4372 	int rc;
4373 
4374 	if (BNXT_VF(bp))
4375 		return 0;
4376 
4377 	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4378 	req.port_id = bp->pf->port_id;
4379 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4380 
4381 	HWRM_CHECK_RESULT_SILENT();
4382 
4383 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4384 		unsigned int i;
4385 
4386 		bp->leds->num_leds = resp->num_leds;
4387 		memcpy(bp->leds, &resp->led0_id,
4388 			sizeof(bp->leds[0]) * bp->leds->num_leds);
4389 		for (i = 0; i < bp->leds->num_leds; i++) {
4390 			struct bnxt_led_info *led = &bp->leds[i];
4391 
4392 			uint16_t caps = led->led_state_caps;
4393 
4394 			if (!led->led_group_id ||
4395 				!BNXT_LED_ALT_BLINK_CAP(caps)) {
4396 				bp->leds->num_leds = 0;
4397 				break;
4398 			}
4399 		}
4400 	}
4401 
4402 	HWRM_UNLOCK();
4403 
4404 	return rc;
4405 }
4406 
4407 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4408 {
4409 	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4410 	struct hwrm_port_led_cfg_input req = {0};
4411 	struct bnxt_led_cfg *led_cfg;
4412 	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4413 	uint16_t duration = 0;
4414 	int rc, i;
4415 
4416 	if (!bp->leds->num_leds || BNXT_VF(bp))
4417 		return -EOPNOTSUPP;
4418 
4419 	HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4420 
4421 	if (led_on) {
4422 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4423 		duration = rte_cpu_to_le_16(500);
4424 	}
4425 	req.port_id = bp->pf->port_id;
4426 	req.num_leds = bp->leds->num_leds;
4427 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4428 	for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4429 		req.enables |= BNXT_LED_DFLT_ENABLES(i);
4430 		led_cfg->led_id = bp->leds[i].led_id;
4431 		led_cfg->led_state = led_state;
4432 		led_cfg->led_blink_on = duration;
4433 		led_cfg->led_blink_off = duration;
4434 		led_cfg->led_group_id = bp->leds[i].led_group_id;
4435 	}
4436 
4437 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4438 
4439 	HWRM_CHECK_RESULT();
4440 	HWRM_UNLOCK();
4441 
4442 	return rc;
4443 }
4444 
4445 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4446 			       uint32_t *length)
4447 {
4448 	int rc;
4449 	struct hwrm_nvm_get_dir_info_input req = {0};
4450 	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4451 
4452 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4453 
4454 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4455 
4456 	HWRM_CHECK_RESULT();
4457 
4458 	*entries = rte_le_to_cpu_32(resp->entries);
4459 	*length = rte_le_to_cpu_32(resp->entry_length);
4460 
4461 	HWRM_UNLOCK();
4462 	return rc;
4463 }
4464 
4465 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4466 {
4467 	int rc;
4468 	uint32_t dir_entries;
4469 	uint32_t entry_length;
4470 	uint8_t *buf;
4471 	size_t buflen;
4472 	rte_iova_t dma_handle;
4473 	struct hwrm_nvm_get_dir_entries_input req = {0};
4474 	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4475 
4476 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4477 	if (rc != 0)
4478 		return rc;
4479 
4480 	*data++ = dir_entries;
4481 	*data++ = entry_length;
4482 	len -= 2;
4483 	memset(data, 0xff, len);
4484 
4485 	buflen = dir_entries * entry_length;
4486 	buf = rte_malloc("nvm_dir", buflen, 0);
4487 	if (buf == NULL)
4488 		return -ENOMEM;
4489 	dma_handle = rte_malloc_virt2iova(buf);
4490 	if (dma_handle == RTE_BAD_IOVA) {
4491 		rte_free(buf);
4492 		PMD_DRV_LOG(ERR,
4493 			"unable to map response address to physical memory\n");
4494 		return -ENOMEM;
4495 	}
4496 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4497 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4498 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4499 
4500 	if (rc == 0)
4501 		memcpy(data, buf, len > buflen ? buflen : len);
4502 
4503 	rte_free(buf);
4504 	HWRM_CHECK_RESULT();
4505 	HWRM_UNLOCK();
4506 
4507 	return rc;
4508 }
4509 
4510 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4511 			     uint32_t offset, uint32_t length,
4512 			     uint8_t *data)
4513 {
4514 	int rc;
4515 	uint8_t *buf;
4516 	rte_iova_t dma_handle;
4517 	struct hwrm_nvm_read_input req = {0};
4518 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4519 
4520 	buf = rte_malloc("nvm_item", length, 0);
4521 	if (!buf)
4522 		return -ENOMEM;
4523 
4524 	dma_handle = rte_malloc_virt2iova(buf);
4525 	if (dma_handle == RTE_BAD_IOVA) {
4526 		rte_free(buf);
4527 		PMD_DRV_LOG(ERR,
4528 			"unable to map response address to physical memory\n");
4529 		return -ENOMEM;
4530 	}
4531 	HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4532 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4533 	req.dir_idx = rte_cpu_to_le_16(index);
4534 	req.offset = rte_cpu_to_le_32(offset);
4535 	req.len = rte_cpu_to_le_32(length);
4536 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4537 	if (rc == 0)
4538 		memcpy(data, buf, length);
4539 
4540 	rte_free(buf);
4541 	HWRM_CHECK_RESULT();
4542 	HWRM_UNLOCK();
4543 
4544 	return rc;
4545 }
4546 
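/*
 * Pattern used by the NVM helpers above and below: firmware DMAs directly
 * to/from host memory, so the data must sit in a buffer with a valid IOVA.
 * The sketch, as used in bnxt_hwrm_get_nvram_item():
 *
 *	buf = rte_malloc("nvm_item", length, 0);	// DMA-able bounce buffer
 *	dma_handle = rte_malloc_virt2iova(buf);		// IOVA handed to firmware
 *	// point req.host_dest_addr at dma_handle, send the command, then:
 *	memcpy(data, buf, length);			// copy out to the caller
 *	rte_free(buf);
 */
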
4547 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4548 {
4549 	int rc;
4550 	struct hwrm_nvm_erase_dir_entry_input req = {0};
4551 	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4552 
4553 	HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4554 	req.dir_idx = rte_cpu_to_le_16(index);
4555 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4556 	HWRM_CHECK_RESULT();
4557 	HWRM_UNLOCK();
4558 
4559 	return rc;
4560 }
4561 
4562 
4563 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4564 			  uint16_t dir_ordinal, uint16_t dir_ext,
4565 			  uint16_t dir_attr, const uint8_t *data,
4566 			  size_t data_len)
4567 {
4568 	int rc;
4569 	struct hwrm_nvm_write_input req = {0};
4570 	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4571 	rte_iova_t dma_handle;
4572 	uint8_t *buf;
4573 
4574 	buf = rte_malloc("nvm_write", data_len, 0);
4575 	if (!buf)
4576 		return -ENOMEM;
4577 
4578 	dma_handle = rte_malloc_virt2iova(buf);
4579 	if (dma_handle == RTE_BAD_IOVA) {
4580 		rte_free(buf);
4581 		PMD_DRV_LOG(ERR,
4582 			"unable to map response address to physical memory\n");
4583 		return -ENOMEM;
4584 	}
4585 	memcpy(buf, data, data_len);
4586 
4587 	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4588 
4589 	req.dir_type = rte_cpu_to_le_16(dir_type);
4590 	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4591 	req.dir_ext = rte_cpu_to_le_16(dir_ext);
4592 	req.dir_attr = rte_cpu_to_le_16(dir_attr);
4593 	req.dir_data_length = rte_cpu_to_le_32(data_len);
4594 	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4595 
4596 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4597 
4598 	rte_free(buf);
4599 	HWRM_CHECK_RESULT();
4600 	HWRM_UNLOCK();
4601 
4602 	return rc;
4603 }
4604 
4605 static void
4606 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4607 {
4608 	uint32_t *count = cbdata;
4609 
4610 	*count = *count + 1;
4611 }
4612 
4613 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4614 				     struct bnxt_vnic_info *vnic __rte_unused)
4615 {
4616 	return 0;
4617 }
4618 
4619 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4620 {
4621 	uint32_t count = 0;
4622 
4623 	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4624 	    &count, bnxt_vnic_count_hwrm_stub);
4625 
4626 	return count;
4627 }
4628 
4629 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4630 					uint16_t *vnic_ids)
4631 {
4632 	struct hwrm_func_vf_vnic_ids_query_input req = {0};
4633 	struct hwrm_func_vf_vnic_ids_query_output *resp =
4634 						bp->hwrm_cmd_resp_addr;
4635 	int rc;
4636 
4637 	/* First query all VNIC ids */
4638 	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4639 
4640 	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4641 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4642 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4643 
4644 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4645 		HWRM_UNLOCK();
4646 		PMD_DRV_LOG(ERR,
4647 		"unable to map VNIC ID table address to physical memory\n");
4648 		return -ENOMEM;
4649 	}
4650 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4651 	HWRM_CHECK_RESULT();
4652 	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4653 
4654 	HWRM_UNLOCK();
4655 
4656 	return rc;
4657 }
4658 
4659 /*
4660  * This function queries the VNIC IDs for a specified VF. It then calls
4661  * the vnic_cb to update the necessary field in vnic_info with cbdata.
4662  * Then it calls the hwrm_cb function to program this new vnic configuration.
4663  */
4664 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4665 	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4666 	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4667 {
4668 	struct bnxt_vnic_info vnic;
4669 	int rc = 0;
4670 	int i, num_vnic_ids;
4671 	uint16_t *vnic_ids;
4672 	size_t vnic_id_sz;
4673 	size_t sz;
4674 
4675 	/* First query all VNIC ids */
4676 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4677 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4678 			RTE_CACHE_LINE_SIZE);
4679 	if (vnic_ids == NULL)
4680 		return -ENOMEM;
4681 
4682 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4683 		rte_mem_lock_page(((char *)vnic_ids) + sz);
4684 
4685 	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4686 
4687 	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);	/* Don't leak the VNIC ID table on failure. */
4688 		return num_vnic_ids;
	}
4689 
4690 	/* Retrieve each VNIC, apply vnic_cb to update it, then reprogram via hwrm_cb. */
4691 
4692 	for (i = 0; i < num_vnic_ids; i++) {
4693 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4694 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4695 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4696 		if (rc)
4697 			break;
4698 		if (vnic.mru <= 4)	/* Indicates unallocated */
4699 			continue;
4700 
4701 		vnic_cb(&vnic, cbdata);
4702 
4703 		rc = hwrm_cb(bp, &vnic);
4704 		if (rc)
4705 			break;
4706 	}
4707 
4708 	rte_free(vnic_ids);
4709 
4710 	return rc;
4711 }
4712 
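/*
 * Example use (illustrative): applying a new rx mask to every VNIC owned by
 * a VF, pairing the callbacks defined earlier in this file:
 *
 *	uint32_t flags = 0;	// hypothetical new vnic->flags value
 *	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flags, bnxt_set_rx_mask_no_vlan);
 *
 * vf_vnic_set_rxmask_cb() stores the flags into each bnxt_vnic_info and
 * bnxt_set_rx_mask_no_vlan() programs the updated VNIC via HWRM.
 */
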
4713 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4714 					      bool on)
4715 {
4716 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4717 	struct hwrm_func_cfg_input req = {0};
4718 	int rc;
4719 
4720 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4721 
4722 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4723 	req.enables |= rte_cpu_to_le_32(
4724 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4725 	req.vlan_antispoof_mode = on ?
4726 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4727 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4728 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4729 
4730 	HWRM_CHECK_RESULT();
4731 	HWRM_UNLOCK();
4732 
4733 	return rc;
4734 }
4735 
4736 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4737 {
4738 	struct bnxt_vnic_info vnic;
4739 	uint16_t *vnic_ids;
4740 	size_t vnic_id_sz;
4741 	int num_vnic_ids, i;
4742 	size_t sz;
4743 	int rc;
4744 
4745 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4746 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4747 			RTE_CACHE_LINE_SIZE);
4748 	if (vnic_ids == NULL)
4749 		return -ENOMEM;
4750 
4751 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4752 		rte_mem_lock_page(((char *)vnic_ids) + sz);
4753 
4754 	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4755 	if (rc <= 0)
4756 		goto exit;
4757 	num_vnic_ids = rc;
4758 
4759 	/*
4760 	 * Loop through to find the default VNIC ID.
4761 	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4762 	 * by sending the hwrm_func_qcfg command to the firmware.
4763 	 */
4764 	for (i = 0; i < num_vnic_ids; i++) {
4765 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4766 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4767 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4768 					bp->pf->first_vf_id + vf);
4769 		if (rc)
4770 			goto exit;
4771 		if (vnic.func_default) {
4772 			rte_free(vnic_ids);
4773 			return vnic.fw_vnic_id;
4774 		}
4775 	}
4776 	/* Could not find a default VNIC. */
4777 	PMD_DRV_LOG(ERR, "No default VNIC\n");
4778 exit:
4779 	rte_free(vnic_ids);
4780 	return rc;
4781 }
4782 
4783 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4784 			 uint16_t dst_id,
4785 			 struct bnxt_filter_info *filter)
4786 {
4787 	int rc = 0;
4788 	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4789 	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4790 	uint32_t enables = 0;
4791 
4792 	if (filter->fw_em_filter_id != UINT64_MAX)
4793 		bnxt_hwrm_clear_em_filter(bp, filter);
4794 
4795 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4796 
4797 	req.flags = rte_cpu_to_le_32(filter->flags);
4798 
4799 	enables = filter->enables |
4800 	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4801 	req.dst_id = rte_cpu_to_le_16(dst_id);
4802 
4803 	if (filter->ip_addr_type) {
4804 		req.ip_addr_type = filter->ip_addr_type;
4805 		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4806 	}
4807 	if (enables &
4808 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4809 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4810 	if (enables &
4811 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4812 		memcpy(req.src_macaddr, filter->src_macaddr,
4813 		       RTE_ETHER_ADDR_LEN);
4814 	if (enables &
4815 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4816 		memcpy(req.dst_macaddr, filter->dst_macaddr,
4817 		       RTE_ETHER_ADDR_LEN);
4818 	if (enables &
4819 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4820 		req.ovlan_vid = filter->l2_ovlan;
4821 	if (enables &
4822 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4823 		req.ivlan_vid = filter->l2_ivlan;
4824 	if (enables &
4825 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4826 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4827 	if (enables &
4828 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4829 		req.ip_protocol = filter->ip_protocol;
4830 	if (enables &
4831 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4832 		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4833 	if (enables &
4834 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4835 		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4836 	if (enables &
4837 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4838 		req.src_port = rte_cpu_to_be_16(filter->src_port);
4839 	if (enables &
4840 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4841 		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4842 	if (enables &
4843 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4844 		req.mirror_vnic_id = filter->mirror_vnic_id;
4845 
4846 	req.enables = rte_cpu_to_le_32(enables);
4847 
4848 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4849 
4850 	HWRM_CHECK_RESULT();
4851 
4852 	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4853 	HWRM_UNLOCK();
4854 
4855 	return rc;
4856 }
4857 
4858 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4859 {
4860 	int rc = 0;
4861 	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4862 	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4863 
4864 	if (filter->fw_em_filter_id == UINT64_MAX)
4865 		return 0;
4866 
4867 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4868 
4869 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4870 
4871 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4872 
4873 	HWRM_CHECK_RESULT();
4874 	HWRM_UNLOCK();
4875 
4876 	filter->fw_em_filter_id = UINT64_MAX;
4877 	filter->fw_l2_filter_id = UINT64_MAX;
4878 
4879 	return 0;
4880 }
4881 
4882 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4883 			 uint16_t dst_id,
4884 			 struct bnxt_filter_info *filter)
4885 {
4886 	int rc = 0;
4887 	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4888 	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4889 						bp->hwrm_cmd_resp_addr;
4890 	uint32_t enables = 0;
4891 
4892 	if (filter->fw_ntuple_filter_id != UINT64_MAX)
4893 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
4894 
4895 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4896 
4897 	req.flags = rte_cpu_to_le_32(filter->flags);
4898 
4899 	enables = filter->enables |
4900 	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4901 	req.dst_id = rte_cpu_to_le_16(dst_id);
4902 
4903 	if (filter->ip_addr_type) {
4904 		req.ip_addr_type = filter->ip_addr_type;
4905 		enables |=
4906 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4907 	}
4908 	if (enables &
4909 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4910 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4911 	if (enables &
4912 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4913 		memcpy(req.src_macaddr, filter->src_macaddr,
4914 		       RTE_ETHER_ADDR_LEN);
4915 	if (enables &
4916 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4917 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4918 	if (enables &
4919 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4920 		req.ip_protocol = filter->ip_protocol;
4921 	if (enables &
4922 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4923 		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4924 	if (enables &
4925 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4926 		req.src_ipaddr_mask[0] =
4927 			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4928 	if (enables &
4929 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4930 		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4931 	if (enables &
4932 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4933 		req.dst_ipaddr_mask[0] =
4934 			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
4935 	if (enables &
4936 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4937 		req.src_port = rte_cpu_to_le_16(filter->src_port);
4938 	if (enables &
4939 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4940 		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4941 	if (enables &
4942 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4943 		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4944 	if (enables &
4945 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4946 		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4947 	if (enables &
4948 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4949 		req.mirror_vnic_id = filter->mirror_vnic_id;
4950 
4951 	req.enables = rte_cpu_to_le_32(enables);
4952 
4953 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4954 
4955 	HWRM_CHECK_RESULT();
4956 
4957 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4958 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4959 	HWRM_UNLOCK();
4960 
4961 	return rc;
4962 }
4963 
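/*
 * Example (illustrative): the enables bitmap gates which request fields
 * firmware honors. Matching TCP traffic to destination port 80 would set,
 * at minimum, something like:
 *
 *	filter->ip_protocol = 6;	// TCP
 *	filter->dst_port = 80;
 *	filter->enables =
 *		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
 *		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
 *
 * Fields whose enable bit is clear are ignored even if populated.
 */
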
4964 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4965 				struct bnxt_filter_info *filter)
4966 {
4967 	int rc = 0;
4968 	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4969 	struct hwrm_cfa_ntuple_filter_free_output *resp =
4970 						bp->hwrm_cmd_resp_addr;
4971 
4972 	if (filter->fw_ntuple_filter_id == UINT64_MAX)
4973 		return 0;
4974 
4975 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4976 
4977 	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4978 
4979 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4980 
4981 	HWRM_CHECK_RESULT();
4982 	HWRM_UNLOCK();
4983 
4984 	filter->fw_ntuple_filter_id = UINT64_MAX;
4985 
4986 	return 0;
4987 }
4988 
4989 static int
4990 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4991 {
4992 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4993 	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4994 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4995 	struct bnxt_rx_queue **rxqs = bp->rx_queues;
4996 	uint16_t *ring_tbl = vnic->rss_table;
4997 	int nr_ctxs = vnic->num_lb_ctxts;
4998 	int max_rings = bp->rx_nr_rings;
4999 	int i, j, k, cnt;
5000 	int rc = 0;
5001 
5002 	for (i = 0, k = 0; i < nr_ctxs; i++) {
5003 		struct bnxt_rx_ring_info *rxr;
5004 		struct bnxt_cp_ring_info *cpr;
5005 
5006 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
5007 
5008 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
5009 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
5010 		req.hash_mode_flags = vnic->hash_mode;
5011 
5012 		req.ring_grp_tbl_addr =
5013 		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
5014 				     i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
5015 				     2 * sizeof(*ring_tbl));
5016 		req.hash_key_tbl_addr =
5017 		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
5018 
5019 		req.ring_table_pair_index = i;
5020 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
5021 
5022 		for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_P5; j++) {
5023 			uint16_t ring_id;
5024 
5025 			/* Find next active ring. */
5026 			for (cnt = 0; cnt < max_rings; cnt++) {
5027 				if (rx_queue_state[k] !=
5028 						RTE_ETH_QUEUE_STATE_STOPPED)
5029 					break;
5030 				if (++k == max_rings)
5031 					k = 0;
5032 			}
5033 
5034 			/* Return if no rings are active. */
5035 			if (cnt == max_rings) {
5036 				HWRM_UNLOCK();
5037 				return 0;
5038 			}
5039 
5040 			/* Add rx/cp ring pair to RSS table. */
5041 			rxr = rxqs[k]->rx_ring;
5042 			cpr = rxqs[k]->cp_ring;
5043 
5044 			ring_id = rxr->rx_ring_struct->fw_ring_id;
5045 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
5046 			ring_id = cpr->cp_ring_struct->fw_ring_id;
5047 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
5048 
5049 			if (++k == max_rings)
5050 				k = 0;
5051 		}
5052 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5053 					    BNXT_USE_CHIMP_MB);
5054 
5055 		HWRM_CHECK_RESULT();
5056 		HWRM_UNLOCK();
5057 	}
5058 
5059 	return rc;
5060 }
5061 
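/*
 * Layout written by the loop above: on P5 chips each RSS context covers
 * BNXT_RSS_ENTRIES_PER_CTX_P5 (64) table entries, and every entry is a
 * (rx ring id, completion ring id) pair, hence the per-context stride of
 * 64 * 2 * sizeof(uint16_t) bytes into vnic->rss_table:
 *
 *	entry 0: rx_ring_id, cp_ring_id
 *	entry 1: rx_ring_id, cp_ring_id
 *	...
 */
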
5062 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5063 {
5064 	unsigned int rss_idx, fw_idx, i;
5065 
5066 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5067 		return 0;
5068 
5069 	if (!(vnic->rss_table && vnic->hash_type))
5070 		return 0;
5071 
5072 	if (BNXT_CHIP_P5(bp))
5073 		return bnxt_vnic_rss_configure_p5(bp, vnic);
5074 
5075 	/*
5076 	 * Fill the RSS hash & redirection table with
5077 	 * ring group ids for all VNICs
5078 	 */
5079 	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
5080 	     rss_idx++, fw_idx++) {
5081 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
5082 			fw_idx %= bp->rx_cp_nr_rings;
5083 			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
5084 				break;
5085 			fw_idx++;
5086 		}
5087 
5088 		if (i == bp->rx_cp_nr_rings)
5089 			return 0;
5090 
5091 		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
5092 	}
5093 
5094 	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
5095 }
5096 
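/*
 * Example of the table fill above (illustrative): with three valid ring
 * groups, the HW_HASH_INDEX_SIZE-entry redirection table is populated
 * round-robin, e.g. grp0, grp1, grp2, grp0, grp1, ..., so hashed RX flows
 * spread roughly evenly across the active rings.
 */
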
5097 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
5098 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5099 {
5100 	uint16_t flags;
5101 
5102 	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
5103 
5104 	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ. */
5105 	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
5106 
5107 	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ. */
5108 	req->num_cmpl_dma_aggr_during_int =
5109 		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
5110 
5111 	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
5112 
5113 	/* min timer set to 1/2 of interrupt timer */
5114 	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
5115 
5116 	/* buf timer set to 1/4 of interrupt timer */
5117 	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
5118 
5119 	req->cmpl_aggr_dma_tmr_during_int =
5120 		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
5121 
5122 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5123 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5124 	req->flags = rte_cpu_to_le_16(flags);
5125 }
5126 
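/*
 * Illustrative numbers for the ratios noted in the comments above: if the
 * caller filling struct bnxt_coal chose int_lat_tmr_max = 150, the
 * expectation is int_lat_tmr_min = 150 / 2 = 75 and a completion DMA
 * aggregation timer of about 150 / 4 = 37 (integer division); this
 * function only copies the precomputed values into the request.
 */
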
5127 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5128 		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5129 {
5130 	struct hwrm_ring_aggint_qcaps_input req = {0};
5131 	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5132 	uint32_t enables;
5133 	uint16_t flags;
5134 	int rc;
5135 
5136 	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5137 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5138 	HWRM_CHECK_RESULT();
5139 
5140 	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5141 	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5142 
5143 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5144 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5145 	agg_req->flags = rte_cpu_to_le_16(flags);
5146 	enables =
5147 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5148 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5149 	agg_req->enables = rte_cpu_to_le_32(enables);
5150 
5151 	HWRM_UNLOCK();
5152 	return rc;
5153 }
5154 
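/* Apply interrupt coalescing settings to the completion ring ring_id.
 * A hypothetical caller, assuming a populated struct bnxt_coal "coal"
 * and an Rx queue "rxq" (illustration only, not taken from this file):
 *
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal,
 *			rxq->cp_ring->cp_ring_struct->fw_ring_id);
 *	if (rc)
 *		PMD_DRV_LOG(ERR, "Failed to set ring coalescing\n");
 */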
5155 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5156 			struct bnxt_coal *coal, uint16_t ring_id)
5157 {
5158 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5159 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5160 						bp->hwrm_cmd_resp_addr;
5161 	int rc;
5162 
5163 	/* Set ring coalesce parameters only for P5 chips and Stratus (100G) NICs */
5164 	if (BNXT_CHIP_P5(bp)) {
5165 		if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5166 			return -1;
5167 	} else if (bnxt_stratus_device(bp)) {
5168 		bnxt_hwrm_set_coal_params(coal, &req);
5169 	} else {
5170 		return 0;
5171 	}
5172 
5173 	HWRM_PREP(&req,
5174 		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5175 		  BNXT_USE_CHIMP_MB);
5176 	req.ring_id = rte_cpu_to_le_16(ring_id);
5177 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5178 	HWRM_CHECK_RESULT();
5179 	HWRM_UNLOCK();
5180 	return 0;
5181 }
5182 
5183 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
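/* Query the FW's context ("backing store") memory requirements and
 * cache them in a newly allocated bnxt_ctx_mem_info. Needed only on
 * P5-class PFs running HWRM 1.9.2 or newer; a no-op otherwise.
 */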
5184 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5185 {
5186 	struct hwrm_func_backing_store_qcaps_input req = {0};
5187 	struct hwrm_func_backing_store_qcaps_output *resp =
5188 		bp->hwrm_cmd_resp_addr;
5189 	struct bnxt_ctx_pg_info *ctx_pg;
5190 	struct bnxt_ctx_mem_info *ctx;
5191 	int total_alloc_len;
5192 	int rc, i, tqm_rings;
5193 
5194 	if (!BNXT_CHIP_P5(bp) ||
5195 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5196 	    BNXT_VF(bp) ||
5197 	    bp->ctx)
5198 		return 0;
5199 
5200 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5201 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5202 	HWRM_CHECK_RESULT_SILENT();
5203 
5204 	total_alloc_len = sizeof(*ctx);
5205 	ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5206 			  RTE_CACHE_LINE_SIZE);
5207 	if (!ctx) {
5208 		rc = -ENOMEM;
5209 		goto ctx_err;
5210 	}
5211 
5212 	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5213 	ctx->qp_min_qp1_entries =
5214 		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5215 	ctx->qp_max_l2_entries =
5216 		rte_le_to_cpu_16(resp->qp_max_l2_entries);
5217 	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5218 	ctx->srq_max_l2_entries =
5219 		rte_le_to_cpu_16(resp->srq_max_l2_entries);
5220 	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5221 	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5222 	ctx->cq_max_l2_entries =
5223 		rte_le_to_cpu_16(resp->cq_max_l2_entries);
5224 	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5225 	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5226 	ctx->vnic_max_vnic_entries =
5227 		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5228 	ctx->vnic_max_ring_table_entries =
5229 		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5230 	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5231 	ctx->stat_max_entries =
5232 		rte_le_to_cpu_32(resp->stat_max_entries);
5233 	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5234 	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5235 	ctx->tqm_min_entries_per_ring =
5236 		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5237 	ctx->tqm_max_entries_per_ring =
5238 		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5239 	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5240 	if (!ctx->tqm_entries_multiple)
5241 		ctx->tqm_entries_multiple = 1;
5242 	ctx->mrav_max_entries =
5243 		rte_le_to_cpu_32(resp->mrav_max_entries);
5244 	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5245 	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5246 	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5247 	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5248 
5249 	ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
5250 				  RTE_MIN(ctx->tqm_fp_rings_count,
5251 					  BNXT_MAX_TQM_FP_LEGACY_RINGS) :
5252 				  bp->max_q;
5253 
5254 	/* Check whether the extended ring count needs to be added in.
5255 	 * The ext ring count is reported only by newer FW, so we must not
5256 	 * read the field on older FW.
5257 	 */
5258 	if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
5259 	    bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
5260 		ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
5261 		ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
5262 						  ctx->tqm_fp_rings_count);
5263 	}
5264 
5265 	tqm_rings = ctx->tqm_fp_rings_count + 1;
5266 
5267 	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5268 			    sizeof(*ctx_pg) * tqm_rings,
5269 			    RTE_CACHE_LINE_SIZE);
5270 	if (!ctx_pg) {
5271 		rc = -ENOMEM;
5272 		goto ctx_err;
5273 	}
5274 	for (i = 0; i < tqm_rings; i++, ctx_pg++)
5275 		ctx->tqm_mem[i] = ctx_pg;
5276 
5277 	bp->ctx = ctx;
5278 ctx_err:
5279 	HWRM_UNLOCK();
5280 	return rc;
5281 }
5282 
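/* Push the backing-store configuration selected by "enables" to the FW:
 * for each enabled context type (QP, SRQ, CQ, VNIC, STAT, TQM rings),
 * report entry counts, entry sizes and page-table geometry.
 */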
5283 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5284 {
5285 	struct hwrm_func_backing_store_cfg_input req = {0};
5286 	struct hwrm_func_backing_store_cfg_output *resp =
5287 		bp->hwrm_cmd_resp_addr;
5288 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
5289 	struct bnxt_ctx_pg_info *ctx_pg;
5290 	uint32_t *num_entries;
5291 	uint64_t *pg_dir;
5292 	uint8_t *pg_attr;
5293 	uint32_t ena;
5294 	int i, rc;
5295 
5296 	if (!ctx)
5297 		return 0;
5298 
5299 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5300 	req.enables = rte_cpu_to_le_32(enables);
5301 
5302 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5303 		ctx_pg = &ctx->qp_mem;
5304 		req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5305 		req.qp_num_qp1_entries =
5306 			rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5307 		req.qp_num_l2_entries =
5308 			rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5309 		req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5310 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5311 				      &req.qpc_pg_size_qpc_lvl,
5312 				      &req.qpc_page_dir);
5313 	}
5314 
5315 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5316 		ctx_pg = &ctx->srq_mem;
5317 		req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5318 		req.srq_num_l2_entries =
5319 				 rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5320 		req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5321 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5322 				      &req.srq_pg_size_srq_lvl,
5323 				      &req.srq_page_dir);
5324 	}
5325 
5326 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5327 		ctx_pg = &ctx->cq_mem;
5328 		req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5329 		req.cq_num_l2_entries =
5330 				rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5331 		req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5332 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5333 				      &req.cq_pg_size_cq_lvl,
5334 				      &req.cq_page_dir);
5335 	}
5336 
5337 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5338 		ctx_pg = &ctx->vnic_mem;
5339 		req.vnic_num_vnic_entries =
5340 			rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5341 		req.vnic_num_ring_table_entries =
5342 			rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5343 		req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5344 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5345 				      &req.vnic_pg_size_vnic_lvl,
5346 				      &req.vnic_page_dir);
5347 	}
5348 
5349 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5350 		ctx_pg = &ctx->stat_mem;
5351 		req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
5352 		req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5353 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5354 				      &req.stat_pg_size_stat_lvl,
5355 				      &req.stat_page_dir);
5356 	}
5357 
5358 	req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5359 	num_entries = &req.tqm_sp_num_entries;
5360 	pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5361 	pg_dir = &req.tqm_sp_page_dir;
5362 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5363 	for (i = 0; i < BNXT_MAX_TQM_LEGACY_RINGS; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5364 		if (!(enables & ena))
5365 			continue;
5366 
5367 		req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5368 
5369 		ctx_pg = ctx->tqm_mem[i];
5370 		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5371 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5372 	}
5373 
5374 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
5375 		/* DPDK does not need to configure the MRAV and TIM types,
5376 		 * so skip over them and go straight to configuring
5377 		 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8.
5378 		 */
5379 		ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
5380 		req.tqm_ring8_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5381 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5382 				      &req.tqm_ring8_pg_size_tqm_ring_lvl,
5383 				      &req.tqm_ring8_page_dir);
5384 	}
5385 
5386 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5387 	HWRM_CHECK_RESULT();
5388 	HWRM_UNLOCK();
5389 
5390 	return rc;
5391 }
5392 
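/* Ask the FW to DMA extended Rx/Tx port statistics into the host
 * buffers mapped at init time and record how many bytes of each the
 * FW actually provides.
 */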
5393 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5394 {
5395 	struct hwrm_port_qstats_ext_input req = {0};
5396 	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5397 	struct bnxt_pf_info *pf = bp->pf;
5398 	int rc;
5399 
5400 	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5401 	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5402 		return 0;
5403 
5404 	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5405 
5406 	req.port_id = rte_cpu_to_le_16(pf->port_id);
5407 	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5408 		req.tx_stat_host_addr =
5409 			rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5410 		req.tx_stat_size =
5411 			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5412 	}
5413 	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5414 		req.rx_stat_host_addr =
5415 			rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5416 		req.rx_stat_size =
5417 			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5418 	}
5419 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5420 
5421 	if (rc) {
5422 		bp->fw_rx_port_stats_ext_size = 0;
5423 		bp->fw_tx_port_stats_ext_size = 0;
5424 	} else {
5425 		bp->fw_rx_port_stats_ext_size =
5426 			rte_le_to_cpu_16(resp->rx_stat_size);
5427 		bp->fw_tx_port_stats_ext_size =
5428 			rte_le_to_cpu_16(resp->tx_stat_size);
5429 	}
5430 
5431 	HWRM_CHECK_RESULT();
5432 	HWRM_UNLOCK();
5433 
5434 	return rc;
5435 }
5436 
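/* Redirect tunnel packets of the given type to this function. */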
5437 int
5438 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5439 {
5440 	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5441 	struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5442 		bp->hwrm_cmd_resp_addr;
5443 	int rc = 0;
5444 
5445 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5446 	req.tunnel_type = type;
5447 	req.dest_fid = bp->fw_fid;
5448 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5449 	HWRM_CHECK_RESULT();
5450 
5451 	HWRM_UNLOCK();
5452 
5453 	return rc;
5454 }
5455 
5456 int
5457 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5458 {
5459 	struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5460 	struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5461 		bp->hwrm_cmd_resp_addr;
5462 	int rc = 0;
5463 
5464 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5465 	req.tunnel_type = type;
5466 	req.dest_fid = bp->fw_fid;
5467 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5468 	HWRM_CHECK_RESULT();
5469 
5470 	HWRM_UNLOCK();
5471 
5472 	return rc;
5473 }
5474 
5475 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5476 {
5477 	struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5478 	struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5479 		bp->hwrm_cmd_resp_addr;
5480 	int rc = 0;
5481 
5482 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5483 	req.src_fid = bp->fw_fid;
5484 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5485 	HWRM_CHECK_RESULT();
5486 
5487 	if (type)
5488 		*type = rte_le_to_cpu_32(resp->tunnel_mask);
5489 
5490 	HWRM_UNLOCK();
5491 
5492 	return rc;
5493 }
5494 
5495 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5496 				   uint16_t *dst_fid)
5497 {
5498 	struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5499 	struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5500 		bp->hwrm_cmd_resp_addr;
5501 	int rc = 0;
5502 
5503 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5504 	req.src_fid = bp->fw_fid;
5505 	req.tunnel_type = tun_type;
5506 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5507 	HWRM_CHECK_RESULT();
5508 
5509 	if (dst_fid)
5510 		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5511 
5512 	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
5513 
5514 	HWRM_UNLOCK();
5515 
5516 	return rc;
5517 }
5518 
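/* VF only: report the default MAC address to the FW via FUNC_VF_CFG. */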
5519 int bnxt_hwrm_set_mac(struct bnxt *bp)
5520 {
5521 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5522 	struct hwrm_func_vf_cfg_input req = {0};
5523 	int rc = 0;
5524 
5525 	if (!BNXT_VF(bp))
5526 		return 0;
5527 
5528 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5529 
5530 	req.enables =
5531 		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5532 	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5533 
5534 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5535 
5536 	HWRM_CHECK_RESULT();
5537 
5538 	HWRM_UNLOCK();
5539 
5540 	return rc;
5541 }
5542 
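/* Notify the FW that the driver's use of the port is changing. On the
 * "up" transition, the response flags reveal whether a hot FW reset
 * occurred while the port was down so the driver can re-initialize.
 */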
5543 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5544 {
5545 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5546 	struct hwrm_func_drv_if_change_input req = {0};
5547 	uint32_t flags;
5548 	int rc;
5549 
5550 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5551 		return 0;
5552 
5553 	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5554 	 * If we issue FUNC_DRV_IF_CHANGE with the "up" flag cleared before
5555 	 * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR is processed.
5556 	 */
5557 	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5558 		return 0;
5559 
5560 	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5561 
5562 	if (up)
5563 		req.flags =
5564 		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5565 
5566 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5567 
5568 	HWRM_CHECK_RESULT();
5569 	flags = rte_le_to_cpu_32(resp->flags);
5570 	HWRM_UNLOCK();
5571 
5572 	if (!up)
5573 		return 0;
5574 
5575 	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5576 		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5577 		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5578 	}
5579 
5580 	return 0;
5581 }
5582 
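/* Fetch the FW's error-recovery contract: whether recovery is driven by
 * the host or the Co-CPU, the various wait/poll periods (converted here
 * from 100 msec units), and the register map used to detect and trigger
 * resets.
 */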
5583 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5584 {
5585 	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5586 	struct bnxt_error_recovery_info *info = bp->recovery_info;
5587 	struct hwrm_error_recovery_qcfg_input req = {0};
5588 	uint32_t flags = 0;
5589 	unsigned int i;
5590 	int rc;
5591 
5592 	/* Older FW does not have error recovery support */
5593 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5594 		return 0;
5595 
5596 	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5597 
5598 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5599 
5600 	HWRM_CHECK_RESULT();
5601 
5602 	flags = rte_le_to_cpu_32(resp->flags);
5603 	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5604 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5605 	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5606 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5607 
5608 	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5609 	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5610 		rc = -EINVAL;
5611 		goto err;
5612 	}
5613 
5614 	/* FW-returned values are in units of 100 msec */
5615 	info->driver_polling_freq =
5616 		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5617 	info->master_func_wait_period =
5618 		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5619 	info->normal_func_wait_period =
5620 		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5621 	info->master_func_wait_period_after_reset =
5622 		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5623 	info->max_bailout_time_after_reset =
5624 		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5625 	info->status_regs[BNXT_FW_STATUS_REG] =
5626 		rte_le_to_cpu_32(resp->fw_health_status_reg);
5627 	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5628 		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5629 	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5630 		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5631 	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5632 		rte_le_to_cpu_32(resp->reset_inprogress_reg);
5633 	info->reg_array_cnt =
5634 		rte_le_to_cpu_32(resp->reg_array_cnt);
5635 
5636 	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5637 		rc = -EINVAL;
5638 		goto err;
5639 	}
5640 
5641 	for (i = 0; i < info->reg_array_cnt; i++) {
5642 		info->reset_reg[i] =
5643 			rte_le_to_cpu_32(resp->reset_reg[i]);
5644 		info->reset_reg_val[i] =
5645 			rte_le_to_cpu_32(resp->reset_reg_val[i]);
5646 		info->delay_after_reset[i] =
5647 			resp->delay_after_reset[i];
5648 	}
5649 err:
5650 	HWRM_UNLOCK();
5651 
5652 	/* Map the FW status registers */
5653 	if (!rc)
5654 		rc = bnxt_map_fw_health_status_regs(bp);
5655 
5656 	if (rc) {
5657 		rte_free(bp->recovery_info);
5658 		bp->recovery_info = NULL;
5659 	}
5660 	return rc;
5661 }
5662 
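/* PF only: request a graceful chip-level FW self-reset, using the Kong
 * channel when it is enabled.
 */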
5663 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5664 {
5665 	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5666 	struct hwrm_fw_reset_input req = {0};
5667 	int rc;
5668 
5669 	if (!BNXT_PF(bp))
5670 		return -EOPNOTSUPP;
5671 
5672 	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5673 
5674 	req.embedded_proc_type =
5675 		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5676 	req.selfrst_status =
5677 		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5678 	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5679 
5680 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5681 				    BNXT_USE_KONG(bp));
5682 
5683 	HWRM_CHECK_RESULT();
5684 	HWRM_UNLOCK();
5685 
5686 	return rc;
5687 }
5688 
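/* Read a PTP timestamp (Tx, Rx, or the current PHC time); the 64-bit
 * value arrives as two 32-bit little-endian words. A hypothetical
 * caller reading the current time (illustration only):
 *
 *	uint64_t ns;
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 */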
5689 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5690 {
5691 	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5692 	struct hwrm_port_ts_query_input req = {0};
5693 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5694 	uint32_t flags = 0;
5695 	int rc;
5696 
5697 	if (!ptp)
5698 		return 0;
5699 
5700 	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5701 
5702 	switch (path) {
5703 	case BNXT_PTP_FLAGS_PATH_TX:
5704 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5705 		break;
5706 	case BNXT_PTP_FLAGS_PATH_RX:
5707 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5708 		break;
5709 	case BNXT_PTP_FLAGS_CURRENT_TIME:
5710 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5711 		break;
5712 	}
5713 
5714 	req.flags = rte_cpu_to_le_32(flags);
5715 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5716 
5717 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5718 
5719 	HWRM_CHECK_RESULT();
5720 
5721 	if (timestamp) {
5722 		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5723 		*timestamp |=
5724 			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5725 	}
5726 	HWRM_UNLOCK();
5727 
5728 	return rc;
5729 }
5730 
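/* Query how many Rx flow counters the CFA block supports. Like the
 * rest of the CFA counter commands, this is limited to PFs and trusted
 * VFs.
 */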
5731 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5732 {
5733 	int rc = 0;
5734 
5735 	struct hwrm_cfa_counter_qcaps_input req = {0};
5736 	struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5737 
5738 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5739 		PMD_DRV_LOG(DEBUG,
5740 			    "Not a PF or trusted VF. Command not supported\n");
5741 		return 0;
5742 	}
5743 
5744 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5745 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5746 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5747 
5748 	HWRM_CHECK_RESULT();
5749 	if (max_fc)
5750 		*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5751 	HWRM_UNLOCK();
5752 
5753 	return 0;
5754 }
5755 
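/* Register a level-0, 2MB page of counter memory with the CFA block and
 * return the FW-assigned context id. A hypothetical caller, assuming an
 * IOVA-contiguous memzone "mz" (illustration only):
 *
 *	uint16_t ctx_id;
 *	rc = bnxt_hwrm_ctx_rgtr(bp, mz->iova, &ctx_id);
 */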
5756 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5757 {
5758 	int rc = 0;
5759 	struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5760 	struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5761 
5762 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5763 		PMD_DRV_LOG(DEBUG,
5764 			    "Not a PF or trusted VF. Command not supported\n");
5765 		return 0;
5766 	}
5767 
5768 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5769 
5770 	req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5771 	req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5772 	req.page_dir = rte_cpu_to_le_64(dma_addr);
5773 
5774 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5775 
5776 	HWRM_CHECK_RESULT();
5777 	if (ctx_id) {
5778 		*ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5779 		PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5780 	}
5781 	HWRM_UNLOCK();
5782 
5783 	return 0;
5784 }
5785 
5786 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5787 {
5788 	int rc = 0;
5789 	struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5790 	struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5791 
5792 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5793 		PMD_DRV_LOG(DEBUG,
5794 			    "Not a PF or trusted VF. Command not supported\n");
5795 		return 0;
5796 	}
5797 
5798 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5799 
5800 	req.ctx_id = rte_cpu_to_le_16(ctx_id);
5801 
5802 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5803 
5804 	HWRM_CHECK_RESULT();
5805 	HWRM_UNLOCK();
5806 
5807 	return rc;
5808 }
5809 
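/* Attach a registered context (ctx_id) to the Rx or Tx flow-counter
 * block and enable or disable counting in pull mode.
 */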
5810 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5811 			      uint16_t cntr, uint16_t ctx_id,
5812 			      uint32_t num_entries, bool enable)
5813 {
5814 	struct hwrm_cfa_counter_cfg_input req = {0};
5815 	struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5816 	uint16_t flags = 0;
5817 	int rc;
5818 
5819 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5820 		PMD_DRV_LOG(DEBUG,
5821 			    "Not a PF or trusted VF. Command not supported\n");
5822 		return 0;
5823 	}
5824 
5825 	HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5826 
5827 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5828 	req.counter_type = rte_cpu_to_le_16(cntr);
5829 	flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5830 		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5831 	flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5832 	if (dir == BNXT_DIR_RX)
5833 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5834 	else if (dir == BNXT_DIR_TX)
5835 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5836 	req.flags = rte_cpu_to_le_16(flags);
5837 	req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5838 	req.num_entries = rte_cpu_to_le_32(num_entries);
5839 
5840 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5841 	HWRM_CHECK_RESULT();
5842 	HWRM_UNLOCK();
5843 
5844 	return 0;
5845 }
5846 
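/* Pull up to num_entries flow-counter records for the given direction;
 * the FW identifies the counter set by the direction's input flow
 * table context id.
 */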
5847 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5848 				 enum bnxt_flow_dir dir,
5849 				 uint16_t cntr,
5850 				 uint16_t num_entries)
5851 {
5852 	struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5853 	struct hwrm_cfa_counter_qstats_input req = {0};
5854 	uint16_t flow_ctx_id = 0;
5855 	uint16_t flags = 0;
5856 	int rc = 0;
5857 
5858 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5859 		PMD_DRV_LOG(DEBUG,
5860 			    "Not a PF or trusted VF. Command not supported\n");
5861 		return 0;
5862 	}
5863 
5864 	if (dir == BNXT_DIR_RX) {
5865 		flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5866 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5867 	} else if (dir == BNXT_DIR_TX) {
5868 		flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5869 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5870 	}
5871 
5872 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5873 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5874 	req.counter_type = rte_cpu_to_le_16(cntr);
5875 	req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5876 	req.num_entries = rte_cpu_to_le_16(num_entries);
5877 	req.flags = rte_cpu_to_le_16(flags);
5878 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5879 
5880 	HWRM_CHECK_RESULT();
5881 	HWRM_UNLOCK();
5882 
5883 	return 0;
5884 }
5885 
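/* Use FUNC_QCAPS to look up the first VF id belonging to function
 * "fid".
 */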
5886 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5887 				uint16_t *first_vf_id)
5888 {
5889 	int rc = 0;
5890 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
5891 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5892 
5893 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5894 
5895 	req.fid = rte_cpu_to_le_16(fid);
5896 
5897 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5898 
5899 	HWRM_CHECK_RESULT();
5900 
5901 	if (first_vf_id)
5902 		*first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5903 
5904 	HWRM_UNLOCK();
5905 
5906 	return rc;
5907 }
5908 
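/* Create the CFA "rep2fn" pair that backs a port representor, wiring up
 * the queue and flow-counter mappings carried in rep_bp.
 */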
5909 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5910 {
5911 	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5912 	struct hwrm_cfa_pair_alloc_input req = {0};
5913 	int rc;
5914 
5915 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5916 		PMD_DRV_LOG(DEBUG,
5917 			    "Not a PF or trusted VF. Command not supported\n");
5918 		return 0;
5919 	}
5920 
5921 	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5922 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5923 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5924 		 bp->eth_dev->data->name, rep_bp->vf_id);
5925 
5926 	req.pf_b_id = rep_bp->parent_pf_idx;
5927 	req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5928 						rte_cpu_to_le_16(rep_bp->vf_id);
5929 	req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5930 	req.host_b_id = 1; /* TBD - Confirm if this is OK */
5931 
5932 	req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5933 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5934 	req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5935 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5936 	req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5937 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5938 	req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5939 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5940 
5941 	req.q_ab = rep_bp->rep_q_r2f;
5942 	req.q_ba = rep_bp->rep_q_f2r;
5943 	req.fc_ab = rep_bp->rep_fc_r2f;
5944 	req.fc_ba = rep_bp->rep_fc_f2r;
5945 
5946 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5947 	HWRM_CHECK_RESULT();
5948 
5949 	HWRM_UNLOCK();
5950 	PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5951 		    BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5952 	return rc;
5953 }
5954 
5955 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5956 {
5957 	struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5958 	struct hwrm_cfa_pair_free_input req = {0};
5959 	int rc;
5960 
5961 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5962 		PMD_DRV_LOG(DEBUG,
5963 			    "Not a PF or trusted VF. Command not supported\n");
5964 		return 0;
5965 	}
5966 
5967 	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5968 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5969 		 bp->eth_dev->data->name, rep_bp->vf_id);
5970 	req.pf_b_id = rep_bp->parent_pf_idx;
5971 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5972 	req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5973 						rte_cpu_to_le_16(rep_bp->vf_id);
5974 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5975 	HWRM_CHECK_RESULT();
5976 	HWRM_UNLOCK();
5977 	PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
5978 		    rep_bp->vf_id);
5979 	return rc;
5980 }
5981 
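/* Probe advanced flow-management capabilities and pick between the RFS
 * ring-table-index V2 scheme and the legacy per-VNIC scheme.
 */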
5982 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
5983 {
5984 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
5985 					bp->hwrm_cmd_resp_addr;
5986 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
5987 	uint32_t flags = 0;
5988 	int rc = 0;
5989 
5990 	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
5991 		return 0;
5992 
5993 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5994 		PMD_DRV_LOG(DEBUG,
5995 			    "Not a PF or trusted VF. Command not supported\n");
5996 		return 0;
5997 	}
5998 
5999 	HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
6000 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6001 
6002 	HWRM_CHECK_RESULT();
6003 	flags = rte_le_to_cpu_32(resp->flags);
6004 	HWRM_UNLOCK();
6005 
6006 	if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
6007 		bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
6008 	else
6009 		bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
6010 
6011 	return rc;
6012 }
6013 
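/* Answer a FW echo request by bouncing back the two data words carried
 * in the async event.
 */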
6014 int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
6015 			    uint32_t echo_req_data2)
6016 {
6017 	struct hwrm_func_echo_response_input req = {0};
6018 	struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
6019 	int rc;
6020 
6021 	HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
6022 	req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
6023 	req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
6024 
6025 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6026 
6027 	HWRM_CHECK_RESULT();
6028 	HWRM_UNLOCK();
6029 
6030 	return rc;
6031 }
6032 
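/* Lightweight VER_GET used to poll for FW readiness (e.g. after reset):
 * uses conservative request/response limits and a short timeout, and
 * returns -EAGAIN while the FW reports the device not ready.
 */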
6033 int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
6034 {
6035 	struct hwrm_ver_get_input req = {.req_type = 0 };
6036 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6037 	int rc = 0;
6038 
6039 	bp->max_req_len = HWRM_MAX_REQ_LEN;
6040 	bp->max_resp_len = BNXT_PAGE_SIZE;
6041 	bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
6042 
6043 	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
6044 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6045 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
6046 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6047 
6048 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6049 
6050 	HWRM_CHECK_RESULT_SILENT();
6051 
6052 	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
6053 		rc = -EAGAIN;
6054 
6055 	HWRM_UNLOCK();
6056 
6057 	return rc;
6058 }
6059