xref: /dpdk/drivers/net/bnxt/bnxt_vnic.c (revision 35e03bafdce10b98fc68383ed323cece507f8bb7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 
8 #include <rte_memzone.h>
9 #include <rte_malloc.h>
10 
11 #include "bnxt.h"
12 #include "bnxt_rxq.h"
13 #include "bnxt_rxr.h"
14 #include "bnxt_ring.h"
15 #include "bnxt_vnic.h"
16 #include "hsi_struct_def_dpdk.h"
17 #include "bnxt_hwrm.h"
18 
/* Macros to manipulate vnic bitmaps*/
/*
 * The queue bitmaps are arrays of 64-bit words.  Bit (i) lives in word
 * (i / 64) and, within that word, bits are numbered MSB-first: queue 0 is
 * the top bit of word 0.  All three accessors below share that convention,
 * so the bitmaps must only ever be manipulated through these macros.
 */
#define BNXT_VNIC_BITMAP_SIZE	64
#define BNXT_VNIC_BITMAP_SET(b, i)	((b[(i) / BNXT_VNIC_BITMAP_SIZE]) |= \
			(1UL << ((BNXT_VNIC_BITMAP_SIZE - 1) - \
			((i) % BNXT_VNIC_BITMAP_SIZE))))

#define BNXT_VNIC_BITMAP_RESET(b, i)	((b[(i) / BNXT_VNIC_BITMAP_SIZE]) &= \
			(~(1UL << ((BNXT_VNIC_BITMAP_SIZE - 1) - \
			((i) % BNXT_VNIC_BITMAP_SIZE)))))

#define BNXT_VNIC_BITMAP_GET(b, i)	(((b[(i) / BNXT_VNIC_BITMAP_SIZE]) >> \
			((BNXT_VNIC_BITMAP_SIZE - 1) - \
			((i) % BNXT_VNIC_BITMAP_SIZE))) & 1)

/* Scratch array used by bnxt_vnic_rss_query_info_fill() to report the queue
 * list of a VNIC.  NOTE(review): file-scope and unsynchronized — assumes RSS
 * query is single-threaded; confirm against the flow-query call path.
 */
static uint16_t rss_query_queues[BNXT_VNIC_MAX_QUEUE_SIZE];
34 /*
35  * VNIC Functions
36  */
37 
/* Fill @dest_ptr with @len pseudo-random bytes generated via rte_rand(). */
void bnxt_prandom_bytes(void *dest_ptr, size_t len)
{
	char *out = (char *)dest_ptr;

	/* Copy whole 64-bit words while at least 8 bytes remain. */
	while (len >= 8) {
		uint64_t word = rte_rand();

		memcpy(out, &word, 8);
		out += 8;
		len -= 8;
	}

	/* Top up the tail (1..7 bytes) from one more random word. */
	if (len) {
		uint64_t word = rte_rand();

		memcpy(out, &word, len);
	}
}
56 
57 static void bnxt_init_vnics(struct bnxt *bp)
58 {
59 	struct bnxt_vnic_info *vnic;
60 	uint16_t max_vnics;
61 	int i;
62 
63 	max_vnics = bp->max_vnics;
64 	STAILQ_INIT(&bp->free_vnic_list);
65 	for (i = 0; i < max_vnics; i++) {
66 		vnic = &bp->vnic_info[i];
67 		vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
68 		vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
69 		vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
70 		vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
71 		vnic->hash_mode =
72 			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
73 		vnic->prev_hash_mode =
74 			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
75 		vnic->rx_queue_cnt = 0;
76 
77 		STAILQ_INIT(&vnic->filter);
78 		STAILQ_INIT(&vnic->flow_list);
79 		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
80 	}
81 }
82 
83 struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
84 {
85 	struct bnxt_vnic_info *vnic;
86 
87 	/* Find the 1st unused vnic from the free_vnic_list pool*/
88 	vnic = STAILQ_FIRST(&bp->free_vnic_list);
89 	if (!vnic) {
90 		PMD_DRV_LOG_LINE(ERR, "No more free VNIC resources");
91 		return NULL;
92 	}
93 	STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
94 	return vnic;
95 }
96 
97 void bnxt_free_all_vnics(struct bnxt *bp)
98 {
99 	struct bnxt_vnic_info *vnic;
100 	unsigned int i;
101 
102 	if (bp->vnic_info == NULL)
103 		return;
104 
105 	for (i = 0; i < bp->max_vnics; i++) {
106 		vnic = &bp->vnic_info[i];
107 		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
108 		if (vnic->ref_cnt) {
109 			/* clean up the default vnic details */
110 			bnxt_vnic_rss_action_free(bp, i);
111 		}
112 
113 		vnic->rx_queue_cnt = 0;
114 	}
115 }
116 
117 void bnxt_free_vnic_attributes(struct bnxt *bp)
118 {
119 	struct bnxt_vnic_info *vnic;
120 	unsigned int i;
121 
122 	if (bp->vnic_info == NULL)
123 		return;
124 
125 	for (i = 0; i < bp->max_vnics; i++) {
126 		vnic = &bp->vnic_info[i];
127 		vnic->rss_hash_key = NULL;
128 		vnic->rss_table = NULL;
129 	}
130 	rte_memzone_free(bp->vnic_rss_mz);
131 	bp->vnic_rss_mz = NULL;
132 }
133 
/*
 * Carve per-VNIC RSS state (indirection table + hash key) out of one shared
 * memzone.  Each VNIC gets a cache-line-rounded slice of entry_length bytes:
 * the RSS table first, the hash key immediately after it.
 *
 * @reconfig: false on first allocation — generate a fresh random hash key and
 *            publish it in bp->rss_conf; true on reconfiguration — reuse the
 *            key previously saved in bp->rss_conf.
 *
 * Returns 0 on success, -ENOMEM if the memzone cannot be reserved.
 */
int bnxt_alloc_vnic_attributes(struct bnxt *bp, bool reconfig)
{
	struct bnxt_vnic_info *vnic;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t entry_length;
	size_t rss_table_size;
	int i;
	rte_iova_t mz_phys_addr;

	entry_length = HW_HASH_KEY_SIZE;

	/* P5/P7 tables store two ring ids (Rx + completion) per entry slot —
	 * see bnxt_vnic_populate_rss_table_p5() — hence the factor of 2.
	 */
	if (BNXT_CHIP_P5_P7(bp))
		rss_table_size = BNXT_RSS_TBL_SIZE_P5 *
				 2 * sizeof(*vnic->rss_table);
	else
		rss_table_size = HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);

	/* Per-VNIC slice = key + table, rounded up to a cache line. */
	entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length + rss_table_size);

	/* Memzone name is derived from the PCI address, so a restarted
	 * process can find and reuse an existing reservation via lookup.
	 */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "_vnicattr", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name,
						 entry_length * bp->max_vnics,
						 bp->eth_dev->device->numa_node,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 BNXT_PAGE_SIZE);
		if (mz == NULL) {
			PMD_DRV_LOG_LINE(ERR,
				    "Cannot allocate vnic_attributes memory");
			return -ENOMEM;
		}
	}
	bp->vnic_rss_mz = mz;
	for (i = 0; i < bp->max_vnics; i++) {
		uint32_t offset = entry_length * i;

		vnic = &bp->vnic_info[i];

		mz_phys_addr = mz->iova + offset;

		/* Allocate rss table and hash key */
		vnic->rss_table = (void *)((char *)mz->addr + offset);
		vnic->rss_table_dma_addr = mz_phys_addr;
		/* Fill the whole slice with 0xff, i.e. invalid ring ids;
		 * populated later by bnxt_vnic_populate_rss_table().
		 */
		memset(vnic->rss_table, -1, entry_length);

		vnic->rss_hash_key = (void *)((char *)vnic->rss_table + rss_table_size);
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + rss_table_size;
		if (!reconfig) {
			bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
			memcpy(bp->rss_conf.rss_key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
		} else {
			memcpy(vnic->rss_hash_key, bp->rss_conf.rss_key, HW_HASH_KEY_SIZE);
		}
	}

	return 0;
}
198 
199 void bnxt_free_vnic_mem(struct bnxt *bp)
200 {
201 	struct bnxt_vnic_info *vnic;
202 	uint16_t max_vnics, i;
203 
204 	if (bp->vnic_info == NULL)
205 		return;
206 
207 	max_vnics = bp->max_vnics;
208 	for (i = 0; i < max_vnics; i++) {
209 		vnic = &bp->vnic_info[i];
210 		if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
211 			PMD_DRV_LOG_LINE(ERR, "VNIC is not freed yet!");
212 			/* TODO Call HWRM to free VNIC */
213 		}
214 	}
215 
216 	rte_free(bp->vnic_info);
217 	bp->vnic_info = NULL;
218 }
219 
220 int bnxt_alloc_vnic_mem(struct bnxt *bp)
221 {
222 	struct bnxt_vnic_info *vnic_mem;
223 	uint16_t max_vnics;
224 
225 	max_vnics = bp->max_vnics;
226 	/* Allocate memory for VNIC pool and filter pool */
227 	vnic_mem = rte_zmalloc("bnxt_vnic_info",
228 			       max_vnics * sizeof(struct bnxt_vnic_info), 0);
229 	if (vnic_mem == NULL) {
230 		PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for %d VNICs",
231 			max_vnics);
232 		return -ENOMEM;
233 	}
234 	bp->vnic_info = vnic_mem;
235 	bnxt_init_vnics(bp);
236 	return 0;
237 }
238 
239 int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
240 {
241 	uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
242 	uint32_t i;
243 
244 	vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
245 	if (!vnic->fw_grp_ids) {
246 		PMD_DRV_LOG_LINE(ERR,
247 			    "Failed to alloc %d bytes for group ids",
248 			    size);
249 		return -ENOMEM;
250 	}
251 
252 	/* Initialize to invalid ring id */
253 	for (i = 0; i < bp->max_ring_grps; i++)
254 		vnic->fw_grp_ids[i] = INVALID_HW_RING_ID;
255 
256 	return 0;
257 }
258 
259 uint32_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
260 {
261 	uint32_t hwrm_type = 0;
262 
263 	if (rte_type & RTE_ETH_RSS_IPV4_CHKSUM)
264 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
265 	if (rte_type & RTE_ETH_RSS_L4_CHKSUM)
266 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
267 			     HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
268 	if ((rte_type & RTE_ETH_RSS_IPV4) ||
269 	    (rte_type & RTE_ETH_RSS_ECPRI))
270 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
271 	if (rte_type & RTE_ETH_RSS_ECPRI)
272 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
273 	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
274 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
275 	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
276 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
277 	if (rte_type & RTE_ETH_RSS_IPV6)
278 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
279 	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
280 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
281 	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
282 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
283 	if (rte_type & RTE_ETH_RSS_IPV6_FLOW_LABEL)
284 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL;
285 	if (rte_type & RTE_ETH_RSS_ESP)
286 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4 |
287 			     HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6;
288 	if (rte_type & RTE_ETH_RSS_AH)
289 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4 |
290 			     HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6;
291 
292 	return hwrm_type;
293 }
294 
/*
 * Map an RTE RSS level request (inner/outer/default) plus the requested hash
 * functions to the HWRM hash-mode flags.
 *
 * @hash_f: RTE_ETH_RSS_* flag set being configured.
 * @lvl:    BNXT_RSS_LEVEL_INNERMOST / BNXT_RSS_LEVEL_OUTERMOST / other.
 * Returns an HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_* value; falls back to
 * the default mode when the NIC cannot do outer/inner selection.
 */
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
{
	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_ECPRI));
	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
	bool l3_only = l3 && !l4;
	bool l3_and_l4 = l3 && l4;
	bool cksum = !!(hash_f &
			(RTE_ETH_RSS_IPV4_CHKSUM | RTE_ETH_RSS_L4_CHKSUM));
	bool fl = !!(hash_f & RTE_ETH_RSS_IPV6_FLOW_LABEL);

	/* If FW has not advertised capability to configure outer/inner
	 * RSS hashing , just log a message. HW will work in default RSS mode.
	 */
	if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
	    (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))) {
		if (lvl)
			PMD_DRV_LOG_LINE(INFO,
				    "Given RSS level is unsupported, using default RSS level");
		return mode;
	}

	switch (lvl) {
	case BNXT_RSS_LEVEL_INNERMOST:
		/* Irrespective of what RTE says, FW always does 4 tuple */
		/* Condition is effectively "any supported hash input set". */
		if (l3_and_l4 || l4 || l3_only || cksum || fl)
			mode = BNXT_HASH_MODE_INNERMOST;
		break;
	case BNXT_RSS_LEVEL_OUTERMOST:
		/* Irrespective of what RTE says, FW always does 4 tuple */
		if (l3_and_l4 || l4 || l3_only || cksum || fl)
			mode = BNXT_HASH_MODE_OUTERMOST;
		break;
	default:
		mode = BNXT_HASH_MODE_DEFAULT;
		break;
	}

	return mode;
}
338 
339 uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
340 {
341 	uint64_t rss_level = 0;
342 
343 	/* If FW has not advertised capability to configure inner/outer RSS
344 	 * return default hash mode.
345 	 */
346 	if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
347 	    (!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)))
348 		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
349 
350 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
351 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
352 		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
353 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
354 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
355 		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
356 	else
357 		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
358 
359 	return rss_level;
360 }
361 
/*
 * Fill the P5/P7 RSS indirection table for @vnic.  Each table slot stores a
 * pair of little-endian ring ids: the Rx ring followed by its completion
 * ring.  Active queues (set in vnic->queue_bitmap and not STOPPED) are
 * assigned round-robin across all contexts' entries.
 *
 * Returns 0 always (an all-stopped queue set simply leaves the table as-is).
 */
static
int32_t bnxt_vnic_populate_rss_table_p5(struct bnxt *bp,
					struct bnxt_vnic_info *vnic)
{
	uint32_t ctx_idx = 0, rss_idx = 0, cnt = 0;
	/* q_id starts at UINT32_MAX so the first ++q_id wraps to 0. */
	uint32_t q_id = -1;
	struct bnxt_rx_queue *rxq;
	uint16_t *ring_tbl = vnic->rss_table;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	uint16_t ring_id;

	/* For P5 platform */
	for (ctx_idx = 0; ctx_idx < vnic->num_lb_ctxts; ctx_idx++) {
		for (rss_idx = 0; rss_idx < BNXT_RSS_ENTRIES_PER_CTX_P5;
		      rss_idx++) {
			/* Find next active ring. */
			/* Bounded scan: at most BNXT_VNIC_MAX_QUEUE_SIZE
			 * probes, wrapping q_id over the Rx ring range.
			 */
			for (cnt = 0; cnt < BNXT_VNIC_MAX_QUEUE_SIZE; cnt++) {
				if (++q_id == bp->rx_nr_rings)
					q_id = 0; /* reset the q_id */
				if (BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
							 q_id) &&
				    rx_queue_state[q_id] !=
						RTE_ETH_QUEUE_STATE_STOPPED)
					break;
			}

			/* no active queues exit */
			if (cnt == BNXT_VNIC_MAX_QUEUE_SIZE)
				return 0;

			/* Write the Rx ring id, then the completion ring id. */
			rxq = bp->rx_queues[q_id];
			ring_id = rxq->rx_ring->rx_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
			ring_id = rxq->cp_ring->cp_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
		}
	}
	return 0;
}
401 
/*
 * Fill the legacy (Wh+/P4) RSS indirection table for @vnic: one ring-group
 * id per slot, taken from vnic->fw_grp_ids, distributing active queues
 * round-robin.  Returns 0 always.
 */
static
int32_t bnxt_vnic_populate_rss_table_p4(struct bnxt *bp,
					struct bnxt_vnic_info *vnic)
{
	uint32_t rss_idx = 0, cnt = 0;
	/* q_id starts at UINT32_MAX so the first ++q_id wraps to 0. */
	uint32_t q_id = -1;
	uint16_t *ring_tbl = vnic->rss_table;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	uint16_t ring_id;

	/* For Wh+ platform */
	for (rss_idx = 0; rss_idx < bnxt_rss_hash_tbl_size(bp); rss_idx++) {
		/* Find next active ring. */
		/* Bounded scan over the Rx rings; gives up after
		 * BNXT_VNIC_MAX_QUEUE_SIZE probes with no active queue.
		 */
		for (cnt = 0; cnt < BNXT_VNIC_MAX_QUEUE_SIZE; cnt++) {
			if (++q_id == bp->rx_nr_rings)
				q_id = 0; /* reset the q_id */
			if (BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
						 q_id) &&
			    rx_queue_state[q_id] !=
					RTE_ETH_QUEUE_STATE_STOPPED)
				break;
		}

		/* no active queues exit */
		if (cnt == BNXT_VNIC_MAX_QUEUE_SIZE)
			return 0;

		ring_id = vnic->fw_grp_ids[q_id];
		*ring_tbl++ = rte_cpu_to_le_16(ring_id);
	}
	return 0;
}
434 
/*
 * Dispatch to the chip-specific RSS indirection-table builder.
 * P5/P7 chips use per-context (Rx, completion) ring-id pairs; older chips
 * use one ring-group id per slot.
 */
static
int32_t bnxt_vnic_populate_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	if (!BNXT_CHIP_P5_P7(bp))
		return bnxt_vnic_populate_rss_table_p4(bp, vnic);

	return bnxt_vnic_populate_rss_table_p5(bp, vnic);
}
445 
446 static void
447 bnxt_vnic_queue_delete(struct bnxt *bp, uint16_t vnic_idx)
448 {
449 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_idx];
450 
451 	if (bnxt_hwrm_vnic_free(bp, vnic))
452 		PMD_DRV_LOG_LINE(ERR, "Failed to delete queue");
453 
454 	if (vnic->fw_grp_ids) {
455 		rte_free(vnic->fw_grp_ids);
456 		vnic->fw_grp_ids = NULL;
457 	}
458 
459 	vnic->rx_queue_cnt = 0;
460 	if (bp->nr_vnics)
461 		bp->nr_vnics--;
462 
463 	/* reset the queue_bitmap */
464 	memset(vnic->queue_bitmap, 0, sizeof(vnic->queue_bitmap));
465 }
466 
/*
 * Create a VNIC that steers traffic to a single Rx queue @q_index, using
 * the pre-assigned slot @vnic_id.
 *
 * Sequence: mark queue membership, allocate ring groups (P4), allocate the
 * firmware VNIC, configure it (with MRU forced to 0 while the queue is
 * stopped), then apply TPA and placement-mode settings.
 *
 * Returns the configured VNIC, or NULL after rolling everything back via
 * bnxt_vnic_queue_delete().
 */
static struct bnxt_vnic_info*
bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_vnic_info *vnic;
	struct bnxt_rx_queue *rxq = NULL;
	int32_t rc = -EINVAL;
	uint16_t saved_mru = 0;

	vnic = &bp->vnic_info[vnic_id];
	/* A non-zero queue count means the slot is already in use. */
	if (vnic->rx_queue_cnt) {
		PMD_DRV_LOG_LINE(ERR, "invalid queue configuration %d", vnic_id);
		return NULL;
	}

	/* set the queue_bitmap */
	BNXT_VNIC_BITMAP_SET(vnic->queue_bitmap, q_index);

	rxq = bp->rx_queues[q_index];
	if (rx_queue_state[q_index] == RTE_ETH_QUEUE_STATE_STOPPED)
		rxq->rx_started = 0;
	else
		rxq->rx_started = 1;

	vnic->rx_queue_cnt++;
	vnic->start_grp_id = q_index;
	vnic->end_grp_id = q_index + 1;
	vnic->func_default = 0;	/* This is not a default VNIC. */
	bp->nr_vnics++;

	/* Allocate vnic group for p4 platform */
	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG_LINE(DEBUG, "Failed to allocate vnic groups");
		goto cleanup;
	}

	/* populate the fw group table */
	bnxt_vnic_ring_grp_populate(bp, vnic);
	bnxt_vnic_rules_init(vnic);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG_LINE(DEBUG, "Failed to allocate vnic %d", q_index);
		goto cleanup;
	}

	/* store the mru so we can set it to zero in hw */
	/* MRU 0 tells firmware not to deliver frames while stopped; the real
	 * value is restored right after vnic_cfg below.
	 */
	if (rxq->rx_started == 0) {
		saved_mru = vnic->mru;
		vnic->mru = 0;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rxq->rx_started == 0)
		vnic->mru = saved_mru;

	if (rc) {
		PMD_DRV_LOG_LINE(DEBUG, "Failed to configure vnic %d", q_index);
		goto cleanup;
	}

	/* TPA (LRO) config failure is non-fatal — only logged. */
	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
				   (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
				    true : false);
	if (rc)
		PMD_DRV_LOG_LINE(DEBUG, "Failed to configure TPA on this vnic %d", q_index);

	rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
	if (rc) {
		PMD_DRV_LOG_LINE(DEBUG, "Failed to configure vnic plcmode %d",
			    q_index);
		goto cleanup;
	}

	vnic->ref_cnt++;
	return vnic;

cleanup:
	bnxt_vnic_queue_delete(bp, vnic_id);
	return NULL;
}
551 
552 static inline int32_t
553 bnxt_vnic_queue_db_lookup(struct bnxt *bp, uint64_t *q_list)
554 {
555 	/* lookup in the database to check if it is in use */
556 	return rte_hash_lookup(bp->vnic_queue_db.rss_q_db,
557 			       (const void *)q_list);
558 }
559 
560 static inline int32_t
561 bnxt_vnic_queue_db_del(struct bnxt *bp, uint64_t *q_list)
562 {
563 	return rte_hash_del_key(bp->vnic_queue_db.rss_q_db,
564 				(const void *)q_list);
565 }
566 
567 static int32_t
568 bnxt_vnic_queue_db_add(struct bnxt *bp, uint64_t *q_list)
569 {
570 	struct bnxt_vnic_info *vnic_info;
571 	int32_t vnic_id, rc = -1;
572 
573 	vnic_id = rte_hash_add_key(bp->vnic_queue_db.rss_q_db,
574 				   (const void *)q_list);
575 
576 	if (vnic_id < 0 || vnic_id >= bp->max_vnics) {
577 		PMD_DRV_LOG_LINE(DEBUG, "unable to assign vnic index %d",
578 			    vnic_id);
579 		return rc;
580 	}
581 
582 	vnic_info = &bp->vnic_info[vnic_id];
583 	if (vnic_info->fw_vnic_id != INVALID_HW_RING_ID) {
584 		PMD_DRV_LOG_LINE(DEBUG, "Invalid ring id for %d.", vnic_id);
585 		return rc;
586 	}
587 	return vnic_id;
588 }
589 
590 /* Function to validate the incoming rss configuration */
591 static
592 int32_t bnxt_vnic_queue_db_rss_validate(struct bnxt *bp,
593 					struct bnxt_vnic_rss_info *rss_info,
594 					int32_t *vnic_idx)
595 {
596 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
597 	int32_t rc = -EINVAL;
598 	uint32_t idx = 0;
599 	int32_t out_idx;
600 
601 	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS)) {
602 		PMD_DRV_LOG_LINE(ERR, "Error Rss is not supported on this port");
603 		return rc;
604 	}
605 
606 	/* rss queue is zero then use the default vnic */
607 	if (rss_info->queue_num == 0) {
608 		*vnic_idx = 0;
609 		return 0;
610 	}
611 
612 	/* Check to see if the queues id are in supported range */
613 	if (rss_info->queue_num > bp->rx_nr_rings) {
614 		PMD_DRV_LOG_LINE(ERR, "Error unsupported queue num.");
615 		return rc;
616 	}
617 
618 	/* validate the queue ids are in correct range */
619 	for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++) {
620 		if (BNXT_VNIC_BITMAP_GET(rss_info->queue_list, idx)) {
621 			if (idx >= bp->rx_nr_rings) {
622 				PMD_DRV_LOG_LINE(ERR,
623 					    "Error %d beyond support size %u",
624 					    idx, bp->rx_nr_rings);
625 				return rc;
626 			}
627 		}
628 	}
629 
630 	/* check if the vnic already exist */
631 	out_idx = bnxt_vnic_queue_db_lookup(bp, rss_info->queue_list);
632 	if (out_idx < 0 || out_idx >= bp->max_vnics)
633 		return -ENOENT; /* entry not found */
634 
635 	/* found an entry */
636 	*vnic_idx = out_idx;
637 	return 0;
638 }
639 
640 static void
641 bnxt_vnic_rss_delete(struct bnxt *bp, uint16_t q_index)
642 {
643 	struct bnxt_vnic_info *vnic;
644 
645 	vnic = &bp->vnic_info[q_index];
646 	if (vnic->rx_queue_cnt >= 1)
647 		bnxt_hwrm_vnic_ctx_free(bp, vnic);
648 
649 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID)
650 		bnxt_hwrm_vnic_free(bp, vnic);
651 
652 	if (vnic->fw_grp_ids) {
653 		rte_free(vnic->fw_grp_ids);
654 		vnic->fw_grp_ids = NULL;
655 	}
656 
657 	/* Update the vnic details for all the rx queues */
658 	vnic->rx_queue_cnt = 0;
659 	memset(vnic->queue_bitmap, 0, sizeof(vnic->queue_bitmap));
660 
661 	if (bp->nr_vnics)
662 		bp->nr_vnics--;
663 }
664 
665 /* The validation of the rss_info should be done before calling this function*/
666 
667 static struct bnxt_vnic_info *
668 bnxt_vnic_rss_create(struct bnxt *bp,
669 		     struct bnxt_vnic_rss_info *rss_info,
670 		     uint16_t vnic_id)
671 {
672 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
673 	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
674 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
675 	struct bnxt_vnic_info *vnic;
676 	struct bnxt_rx_queue *rxq = NULL;
677 	uint32_t idx, nr_ctxs, config_rss = 0;
678 	uint16_t saved_mru = 0;
679 	uint16_t active_q_cnt = 0;
680 	int16_t first_q = -1;
681 	int16_t end_q = -1;
682 	int32_t rc = 0;
683 
684 	/* Assign the vnic to be used for this rss configuration */
685 	vnic = &bp->vnic_info[vnic_id];
686 
687 	/* Update the vnic details for all the rx queues */
688 	for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++) {
689 		if (BNXT_VNIC_BITMAP_GET(rss_info->queue_list, idx)) {
690 			rxq = bp->rx_queues[idx];
691 			if (rx_queue_state[idx] ==
692 			    RTE_ETH_QUEUE_STATE_STOPPED) {
693 				rxq->rx_started = 0;
694 			} else {
695 				rxq->rx_started = 1;
696 				active_q_cnt++;
697 			}
698 			vnic->rx_queue_cnt++;
699 
700 			/* Update the queue list */
701 			BNXT_VNIC_BITMAP_SET(vnic->queue_bitmap, idx);
702 			if (first_q == -1)
703 				first_q = idx;
704 			end_q = idx;
705 		}
706 	}
707 	vnic->start_grp_id = first_q;
708 	vnic->end_grp_id = end_q + 1;
709 	vnic->func_default = 0;	/* This is not a default VNIC. */
710 	bp->nr_vnics++;
711 
712 	/* Allocate vnic group for p4 platform */
713 	rc = bnxt_vnic_grp_alloc(bp, vnic);
714 	if (rc) {
715 		PMD_DRV_LOG_LINE(ERR, "Failed to allocate vnic groups");
716 		goto fail_cleanup;
717 	}
718 
719 	/* populate the fw group table */
720 	bnxt_vnic_ring_grp_populate(bp, vnic);
721 	bnxt_vnic_rules_init(vnic);
722 
723 	/* Allocate the vnic in the firmware */
724 	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
725 	if (rc) {
726 		PMD_DRV_LOG_LINE(ERR, "Failed to allocate vnic %d", idx);
727 		goto fail_cleanup;
728 	}
729 
730 	/* Allocate the vnic rss context */
731 	/* RSS table size in P5 is 512. Cap max Rx rings to same value */
732 	nr_ctxs = bnxt_rss_ctxts(bp);
733 	for (idx = 0; idx < nr_ctxs; idx++) {
734 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, idx);
735 		if (rc)
736 			break;
737 	}
738 	if (rc) {
739 		PMD_DRV_LOG_LINE(ERR,
740 			    "HWRM ctx %d alloc failure rc: %x", idx, rc);
741 		goto fail_cleanup;
742 	}
743 	vnic->num_lb_ctxts = nr_ctxs;
744 
745 	saved_mru = vnic->mru;
746 	if (!active_q_cnt)
747 		vnic->mru = 0;
748 
749 	/* configure the vnic details in firmware */
750 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
751 	vnic->mru = saved_mru;
752 	if (rc) {
753 		PMD_DRV_LOG_LINE(ERR, "Failed to configure vnic %d", idx);
754 		goto fail_cleanup;
755 	}
756 
757 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
758 				   (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
759 				    true : false);
760 	if (rc)
761 		PMD_DRV_LOG_LINE(DEBUG, "Failed to configure TPA on this vnic %d", idx);
762 
763 	rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
764 	if (rc) {
765 		PMD_DRV_LOG_LINE(ERR, "Failed to configure vnic plcmode %d",
766 			    idx);
767 		goto fail_cleanup;
768 	}
769 
770 	/* Remove unsupported types */
771 	rss_info->rss_types &= bnxt_eth_rss_support(bp);
772 
773 	/* If only unsupported type(s) are specified then quit */
774 	if (rss_info->rss_types == 0) {
775 		PMD_DRV_LOG_LINE(ERR,
776 			    "Unsupported RSS hash type(s)");
777 		goto fail_cleanup;
778 	}
779 
780 	/* hwrm_type conversion */
781 	vnic->hash_f = rss_info->rss_func;
782 	vnic->rss_types = rss_info->rss_types;
783 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_info->rss_types);
784 	vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss_info->rss_types,
785 						      rss_info->rss_level);
786 
787 	/* configure the key */
788 	if (!rss_info->key_len) {
789 		/* If hash key has not been specified, use random hash key.*/
790 		bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
791 		vnic->key_len = HW_HASH_KEY_SIZE;
792 	} else {
793 		memcpy(vnic->rss_hash_key, rss_info->key, rss_info->key_len);
794 		vnic->key_len = rss_info->key_len;
795 	}
796 
797 	/* Prepare the indirection table */
798 	bnxt_vnic_populate_rss_table(bp, vnic);
799 
800 	/* check to see if there is at least one queue that is active */
801 	for (idx = vnic->start_grp_id; idx < vnic->end_grp_id; idx++) {
802 		if (bnxt_vnic_queue_id_is_valid(vnic, idx) &&
803 		    bp->rx_queues[idx]->rx_started) {
804 			config_rss = 1;
805 			break;
806 		}
807 	}
808 
809 	/* configure the rss table */
810 	if (config_rss) {
811 		rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
812 		if (rc) {
813 			memset(vnic->rss_hash_key, 0, HW_HASH_KEY_SIZE);
814 			PMD_DRV_LOG_LINE(ERR,
815 				    "Failed to configure vnic rss details %d",
816 				    idx);
817 			goto fail_cleanup;
818 		}
819 	}
820 
821 	vnic->ref_cnt++;
822 	return vnic;
823 
824 fail_cleanup:
825 	bnxt_vnic_rss_delete(bp, idx);
826 	return NULL;
827 }
828 
829 void
830 bnxt_vnic_rss_query_info_fill(struct bnxt *bp,
831 			      struct rte_flow_action_rss *rss_conf,
832 			      uint16_t vnic_id)
833 {
834 	struct bnxt_vnic_info *vnic_info;
835 	int idx;
836 
837 	vnic_info = bnxt_vnic_queue_db_get_vnic(bp, vnic_id);
838 	if (vnic_info == NULL) {
839 		PMD_DRV_LOG_LINE(ERR, "lookup failed for id %d", vnic_id);
840 		return;
841 	}
842 
843 	rss_conf->key_len = vnic_info->key_len;
844 	rss_conf->key = vnic_info->rss_hash_key;
845 	rss_conf->func = vnic_info->hash_f;
846 	rss_conf->level = vnic_info->hash_mode;
847 	rss_conf->types = vnic_info->rss_types;
848 
849 	memset(rss_query_queues, 0, sizeof(rss_query_queues));
850 	for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++)
851 		if (BNXT_VNIC_BITMAP_GET(vnic_info->queue_bitmap, idx)) {
852 			rss_query_queues[rss_conf->queue_num] = idx;
853 			rss_conf->queue_num += 1;
854 		}
855 	rss_conf->queue = (const uint16_t *)&rss_query_queues;
856 }
857 
858 int32_t
859 bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic)
860 {
861 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
862 		return 0;
863 
864 	if (!(vnic->rss_table && vnic->hash_type))
865 		return 0;
866 
867 	/* Prepare the indirection table */
868 	bnxt_vnic_populate_rss_table(bp, vnic);
869 
870 	/* configure the rss table */
871 	if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
872 		PMD_DRV_LOG_LINE(DEBUG, "Failed to update vnic rss details");
873 		return -EINVAL;
874 	}
875 	return 0;
876 }
877 
878 static int32_t
879 bnxt_vnic_rss_hash_algo_update(struct bnxt *bp,
880 			       struct bnxt_vnic_info *vnic,
881 			       struct bnxt_vnic_rss_info *rss_info)
882 {
883 	uint8_t old_rss_hash_key[HW_HASH_KEY_SIZE] = { 0 };
884 	uint32_t hash_type;
885 	uint8_t hash_mode;
886 	uint8_t ring_mode;
887 	uint32_t apply = 0;
888 	int rc;
889 
890 	/* validate key length */
891 	if (rss_info->key_len != 0 && rss_info->key_len != HW_HASH_KEY_SIZE) {
892 		PMD_DRV_LOG_LINE(ERR,
893 			    "Invalid hashkey length, should be %d bytes",
894 			    HW_HASH_KEY_SIZE);
895 		return -EINVAL;
896 	}
897 
898 	/* Remove unsupported types */
899 	rss_info->rss_types &= bnxt_eth_rss_support(bp);
900 
901 	/* If only unsupported type(s) are specified then quit */
902 	if (!rss_info->rss_types) {
903 		PMD_DRV_LOG_LINE(ERR,
904 			    "Unsupported RSS hash type");
905 		return -EINVAL;
906 	}
907 
908 	/* hwrm_type conversion */
909 	hash_type = bnxt_rte_to_hwrm_hash_types(rss_info->rss_types);
910 	hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss_info->rss_types,
911 						rss_info->rss_level);
912 	ring_mode = vnic->ring_select_mode;
913 
914 	/* For P7 chips update the hash_type if hash_type not explicitly passed.
915 	 * TODO: For P5 chips.
916 	 */
917 	if (BNXT_CHIP_P7(bp) &&
918 	    hash_mode == BNXT_HASH_MODE_DEFAULT && !hash_type)
919 		vnic->hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
920 			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
921 
922 	rc = bnxt_rte_flow_to_hwrm_ring_select_mode(rss_info->rss_func,
923 						    rss_info->rss_types,
924 						    bp,
925 						    vnic);
926 	if (rc)
927 		return -EINVAL;
928 
929 	if (vnic->hash_mode != hash_mode ||
930 	    vnic->hash_type != hash_type ||
931 	    vnic->ring_select_mode != ring_mode) {
932 		apply = 1;
933 		vnic->hash_mode = hash_mode;
934 		vnic->hash_type = hash_type;
935 	}
936 	/* Store the old hash key before programming the new one. It will
937 	 * be used to restore the old hash key when HWRM_VNIC_RSS_CFG
938 	 * fails.
939 	 */
940 	memcpy(old_rss_hash_key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
941 	if (rss_info->key_len != 0 && memcmp(rss_info->key, vnic->rss_hash_key,
942 					     HW_HASH_KEY_SIZE)) {
943 		apply = 1;
944 		memcpy(vnic->rss_hash_key, rss_info->key, HW_HASH_KEY_SIZE);
945 	}
946 
947 	if (apply) {
948 		if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
949 			memcpy(vnic->rss_hash_key, old_rss_hash_key, HW_HASH_KEY_SIZE);
950 			PMD_DRV_LOG_LINE(ERR, "Error configuring vnic RSS config");
951 			return -EINVAL;
952 		}
953 		PMD_DRV_LOG_LINE(INFO, "Rss config successfully applied");
954 	}
955 	return 0;
956 }
957 
958 int32_t bnxt_vnic_queue_db_deinit(struct bnxt *bp)
959 {
960 	rte_hash_free(bp->vnic_queue_db.rss_q_db);
961 	return 0;
962 }
963 
964 int32_t bnxt_vnic_queue_db_init(struct bnxt *bp)
965 {
966 	struct rte_hash_parameters hash_tbl_params = {0};
967 	char hash_tbl_name[64] = {0};
968 
969 	/* choose the least supported value */
970 	if (bp->rx_nr_rings > BNXT_VNIC_MAX_QUEUE_SIZE)
971 		bp->vnic_queue_db.num_queues = BNXT_VNIC_MAX_QUEUE_SIZE;
972 	else
973 		bp->vnic_queue_db.num_queues = bp->rx_nr_rings;
974 
975 	/* create the hash table for the rss hash entries */
976 	snprintf(hash_tbl_name, sizeof(hash_tbl_name),
977 		 "bnxt_rss_hash_%d", bp->eth_dev->data->port_id);
978 	hash_tbl_params.name = hash_tbl_name;
979 	hash_tbl_params.entries = (bp->max_vnics > BNXT_VNIC_MAX_SUPPORTED_ID) ?
980 		BNXT_VNIC_MAX_SUPPORTED_ID : bp->max_vnics;
981 	hash_tbl_params.key_len = BNXT_VNIC_MAX_QUEUE_SZ_IN_8BITS;
982 	hash_tbl_params.socket_id = rte_socket_id();
983 	bp->vnic_queue_db.rss_q_db = rte_hash_create(&hash_tbl_params);
984 	if (bp->vnic_queue_db.rss_q_db == NULL) {
985 		PMD_DRV_LOG_LINE(ERR, "Failed to create rss hash tbl");
986 		return -ENOMEM;
987 	}
988 	return 0;
989 }
990 
/*
 * Register the default VNIC in the queue database: build a bitmap covering
 * every Rx queue, insert it, and record the resulting slot as
 * bp->vnic_queue_db.dflt_vnic_id.  Failures are logged and leave the
 * default VNIC unregistered.
 */
void bnxt_vnic_queue_db_update_dlft_vnic(struct bnxt *bp)
{
	struct bnxt_vnic_info *dflt_vnic;
	uint64_t bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
	uint32_t idx;
	int32_t vnic_id;

	/* populate all the queue ids in the default vnic */
	memset(bitmap, 0, sizeof(bitmap));
	for (idx = 0; idx < bp->vnic_queue_db.num_queues; idx++)
		BNXT_VNIC_BITMAP_SET(bitmap, idx);

	vnic_id  = bnxt_vnic_queue_db_add(bp, bitmap);
	if (vnic_id < 0) {
		PMD_DRV_LOG_LINE(ERR, "Unable to alloc vnic for default rss");
		return;
	}

	dflt_vnic  = bnxt_vnic_queue_db_get_vnic(bp, vnic_id);
	if (dflt_vnic == NULL) {
		PMD_DRV_LOG_LINE(ERR, "Invalid vnic for default rss %d", vnic_id);
		return;
	}
	/* Update the default vnic structure */
	bp->vnic_queue_db.dflt_vnic_id = vnic_id;
	memcpy(dflt_vnic->queue_bitmap, bitmap, sizeof(bitmap));
	dflt_vnic->rx_queue_cnt = bp->vnic_queue_db.num_queues;
	dflt_vnic->ref_cnt++;
}
1020 
/*
 * Resolve (or create) the VNIC backing a single-queue flow action.
 *
 * If a VNIC for queue @q_index already exists in the database its reference
 * count is bumped; otherwise a database slot is claimed and a new VNIC is
 * created for the queue.  On success *vnic_idx receives the slot index and
 * *vnicid the firmware VNIC id.
 *
 * Returns 0 on success, -EINVAL on bad queue id or any allocation failure.
 */
int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp,
				     uint16_t q_index,
				     uint16_t *vnic_idx,
				     uint16_t *vnicid)
{
	uint64_t queue_list[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS] = {0};
	struct bnxt_vnic_info *vnic_info;
	int32_t idx;
	int32_t rc = -EINVAL;

	/* validate the given queue id */
	if (q_index >= bp->rx_nr_rings || q_index >= BNXT_VNIC_MAX_QUEUE_SIZE) {
		PMD_DRV_LOG_LINE(ERR, "invalid queue id should be less than %d",
			    bp->rx_nr_rings);
		return rc;
	}

	/* Populate the queue list */
	/* A single-bit bitmap is the database key for this queue. */
	BNXT_VNIC_BITMAP_SET(queue_list, q_index);

	/* check to see if the q_index is already in use */
	idx = bnxt_vnic_queue_db_lookup(bp, queue_list);
	if (idx < 0) {
		/* Assign the vnic slot */
		idx = bnxt_vnic_queue_db_add(bp, queue_list);
		if (idx < 0) {
			PMD_DRV_LOG_LINE(DEBUG, "Unable to alloc vnic for queue");
			return rc;
		}

		/* Allocate a new one */
		vnic_info = bnxt_vnic_queue_create(bp, idx, q_index);
		if (!vnic_info) {
			PMD_DRV_LOG_LINE(ERR, "failed to create vnic - %d",
				    q_index);
			/* Roll back the database entry claimed above. */
			bnxt_vnic_queue_db_del(bp, queue_list);
			return rc; /* failed */
		}
	} else {
		vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
		if (vnic_info == NULL) {
			PMD_DRV_LOG_LINE(ERR, "Unable to lookup vnic for queue %d",
				    q_index);
			return rc;
		}
		/* increment the reference count and return the vnic id */
		vnic_info->ref_cnt++;
	}
	*vnic_idx = (uint16_t)idx;
	*vnicid = vnic_info->fw_vnic_id;
	return 0;
}
1073 
/* Release a reference taken by bnxt_vnic_queue_action_alloc().  When the
 * last reference is dropped the vnic is removed from the queue database
 * and destroyed.  Returns 0 on success, -EINVAL for a bad vnic index or
 * a vnic with no queues in use.
 */
int32_t
bnxt_vnic_queue_action_free(struct bnxt *bp, uint16_t vnic_id)
{
	struct bnxt_vnic_info *vnic_info;
	int32_t rc = -EINVAL;
	int32_t vnic_idx = vnic_id, idx;

	/* validate the given vnic idx */
	if (vnic_idx >= bp->max_vnics) {
		PMD_DRV_LOG_LINE(ERR, "invalid vnic idx %d", vnic_idx);
		return rc;
	}

	/* validate the vnic info */
	vnic_info = &bp->vnic_info[vnic_idx];
	if (!vnic_info->rx_queue_cnt) {
		PMD_DRV_LOG_LINE(ERR, "Invalid vnic idx, no queues being used");
		return rc;
	}
	if (vnic_info->ref_cnt) {
		vnic_info->ref_cnt--;
		if (!vnic_info->ref_cnt) {
			/* last user gone: remove from DB, then delete vnic */
			idx  = bnxt_vnic_queue_db_del(bp,
						      vnic_info->queue_bitmap);
			/* Check to ensure there is no corruption */
			if (idx != vnic_idx)
				PMD_DRV_LOG_LINE(ERR, "bad vnic idx %d", vnic_idx);

			bnxt_vnic_queue_delete(bp, vnic_idx);
		}
	}
	return 0;
}
1107 
/* Allocate (or reuse) a vnic for a rte_flow RSS action.  If a vnic with
 * the same queue list already exists its reference count is bumped and
 * its hash configuration is refreshed; otherwise a database slot is
 * claimed and a new RSS vnic is created.  On success *vnic_idx receives
 * the database index and *vnicid the firmware vnic id.  Returns 0 on
 * success, negative errno on failure.
 */
int32_t
bnxt_vnic_rss_action_alloc(struct bnxt *bp,
				   struct bnxt_vnic_rss_info *rss_info,
				   uint16_t *vnic_idx,
				   uint16_t *vnicid)
{
	struct bnxt_vnic_info *vnic_info = NULL;
	int32_t rc = -EINVAL;
	int32_t idx;

	/* validate the given parameters */
	rc = bnxt_vnic_queue_db_rss_validate(bp, rss_info, &idx);
	if (rc == -EINVAL) {
		PMD_DRV_LOG_LINE(ERR, "Failed to apply the rss action.");
		return rc;
	} else if (rc == -ENOENT) {
		/* -ENOENT means no existing vnic matches: allocate a new entry */
		idx = bnxt_vnic_queue_db_add(bp, rss_info->queue_list);
		if (idx < 0) {
			PMD_DRV_LOG_LINE(DEBUG, "Unable to alloc vnic for rss");
			return rc;
		}
		/* create the rss vnic */
		vnic_info = bnxt_vnic_rss_create(bp, rss_info, idx);
		if (!vnic_info) {
			PMD_DRV_LOG_LINE(ERR, "Failed to create rss action.");
			/* roll back the database slot claimed above */
			bnxt_vnic_queue_db_del(bp, rss_info->queue_list);
			return rc;
		}
	} else {
		vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
		if (vnic_info == NULL) {
			PMD_DRV_LOG_LINE(ERR, "Unable to lookup vnic for idx %d",
				    idx);
			return rc;
		}
		/* increment the reference count and return the vnic id */
		vnic_info->ref_cnt++;

		/* check configuration has changed then update hash details */
		rc = bnxt_vnic_rss_hash_algo_update(bp, vnic_info, rss_info);
		if (rc) {
			/* NOTE(review): ref_cnt was incremented above but is
			 * not rolled back on this error path — confirm whether
			 * callers compensate, else this may leak a reference.
			 */
			PMD_DRV_LOG_LINE(ERR, "Failed to update the rss action.");
			return rc;
		}
	}
	*vnic_idx = idx;
	*vnicid = vnic_info->fw_vnic_id;
	return 0;
}
1158 
/* Delete the vnic associated with the given rss action index.  Drops one
 * reference; when the last reference goes away the vnic is removed from
 * the queue database and destroyed.  For the default vnic the lookup key
 * is rebuilt from the full queue set, because a RETA update may have
 * changed its stored queue bitmap since it was added to the database.
 * Returns 0 on success, -EINVAL for a bad id or an unused vnic.
 */
int32_t
bnxt_vnic_rss_action_free(struct bnxt *bp, uint16_t vnic_id)
{
	uint64_t bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
	struct bnxt_vnic_info *vnic_info;
	int32_t rc = -EINVAL;
	uint64_t *q_list;
	int32_t idx = 0;

	/* validate the given vnic id */
	if (vnic_id >= bp->max_vnics) {
		PMD_DRV_LOG_LINE(ERR, "invalid vnic id %d", vnic_id);
		return rc;
	}

	/* validate vnic info */
	vnic_info = &bp->vnic_info[vnic_id];
	if (!vnic_info->rx_queue_cnt) {
		PMD_DRV_LOG_LINE(ERR, "Invalid vnic id, not using any queues");
		return rc;
	}

	if (vnic_info->ref_cnt) {
		vnic_info->ref_cnt--;
		if (!vnic_info->ref_cnt) {
			if (bp->vnic_queue_db.dflt_vnic_id == vnic_id) {
				/* in case of default queue, list can be
				 * changed by reta config so need a list
				 * with all queues populated.
				 */
				memset(bitmap, 0, sizeof(bitmap));
				for (idx = 0;
				      idx < bp->vnic_queue_db.num_queues;
				      idx++)
					BNXT_VNIC_BITMAP_SET(bitmap, idx);
				q_list = bitmap;
			} else {
				q_list = vnic_info->queue_bitmap;
			}
			idx  = bnxt_vnic_queue_db_del(bp, q_list);

			/* check to ensure there is no corruption */
			if (idx != vnic_id)
				PMD_DRV_LOG_LINE(ERR, "bad vnic idx %d", vnic_id);
			bnxt_vnic_rss_delete(bp, vnic_id);
		}
	}
	return 0;
}
1209 
1210 int32_t
1211 bnxt_vnic_reta_config_update(struct bnxt *bp,
1212 				     struct bnxt_vnic_info *vnic_info,
1213 				     struct rte_eth_rss_reta_entry64 *reta_conf,
1214 				     uint16_t reta_size)
1215 {
1216 	uint64_t l_bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS] = {0};
1217 	uint16_t i, sft, idx;
1218 	uint16_t q_id;
1219 
1220 	for (i = 0; i < reta_size; i++) {
1221 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1222 		sft = i % RTE_ETH_RETA_GROUP_SIZE;
1223 
1224 		if (!(reta_conf[idx].mask & (1ULL << sft)))
1225 			continue;
1226 
1227 		q_id = reta_conf[idx].reta[sft];
1228 		if (q_id >= bp->vnic_queue_db.num_queues ||
1229 		    !bp->eth_dev->data->rx_queues[q_id]) {
1230 			PMD_DRV_LOG_LINE(ERR, "Queue id %d is invalid", q_id);
1231 			return -EINVAL;
1232 		}
1233 		BNXT_VNIC_BITMAP_SET(l_bitmap, q_id);
1234 	}
1235 	/* update the queue bitmap after the validation */
1236 	memcpy(vnic_info->queue_bitmap, l_bitmap, sizeof(l_bitmap));
1237 	return 0;
1238 }
1239 
1240 int32_t
1241 bnxt_vnic_queue_id_is_valid(struct bnxt_vnic_info *vnic_info,
1242 				    uint16_t queue_id)
1243 {
1244 	if (BNXT_VNIC_BITMAP_GET(vnic_info->queue_bitmap, queue_id))
1245 		return 1;
1246 	return 0;
1247 }
1248 
1249 void
1250 bnxt_vnic_ring_grp_populate(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1251 {
1252 	uint32_t i;
1253 
1254 	/* check if ring group is supported */
1255 	if (!BNXT_HAS_RING_GRPS(bp))
1256 		return;
1257 
1258 	/* map ring groups to this vnic */
1259 	for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
1260 		if (bnxt_vnic_queue_id_is_valid(vnic, i) &&
1261 			bp->rx_queues[i]->rx_started)
1262 			vnic->fw_grp_ids[i] = bp->grp_info[i].fw_grp_id;
1263 
1264 	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1265 }
1266 
1267 void
1268 bnxt_vnic_rules_init(struct bnxt_vnic_info *vnic)
1269 {
1270 	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1271 	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1272 	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1273 }
1274 
1275 int32_t
1276 bnxt_vnic_mru_config(struct bnxt *bp, uint16_t new_mtu)
1277 {
1278 	struct bnxt_vnic_info *vnic;
1279 	uint16_t size = 0;
1280 	int32_t rc = 0;
1281 	uint32_t i;
1282 
1283 	for (i = 0; i < bp->max_vnics; i++) {
1284 		vnic = &bp->vnic_info[i];
1285 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1286 			continue;
1287 
1288 		vnic->mru = BNXT_VNIC_MRU(new_mtu);
1289 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1290 		if (rc)
1291 			break;
1292 
1293 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1294 		size -= RTE_PKTMBUF_HEADROOM;
1295 
1296 		if (size < new_mtu) {
1297 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1298 			if (rc)
1299 				break;
1300 		}
1301 	}
1302 	return rc;
1303 }
1304 
1305 struct bnxt_vnic_info *
1306 bnxt_vnic_queue_db_get_vnic(struct bnxt *bp, uint16_t vnic_idx)
1307 {
1308 	struct bnxt_vnic_info *vnic_info;
1309 
1310 	if (vnic_idx >= bp->max_vnics) {
1311 		PMD_DRV_LOG_LINE(ERR, "invalid vnic index %u", vnic_idx);
1312 		return NULL;
1313 	}
1314 	vnic_info = &bp->vnic_info[vnic_idx];
1315 	return vnic_info;
1316 }
1317 
1318 struct bnxt_vnic_info *
1319 bnxt_vnic_queue_id_get_next(struct bnxt *bp, uint16_t queue_id,
1320 			    uint16_t *vnic_idx)
1321 {
1322 	struct bnxt_vnic_info *vnic = NULL;
1323 	uint16_t i = *vnic_idx;
1324 
1325 	while (i < bp->max_vnics) {
1326 		vnic = &bp->vnic_info[i];
1327 		if (vnic->ref_cnt && BNXT_VNIC_BITMAP_GET(vnic->queue_bitmap,
1328 							  queue_id)) {
1329 			/* found a vnic that has the queue id */
1330 			*vnic_idx = i;
1331 			return vnic;
1332 		}
1333 		i++;
1334 	}
1335 	return NULL;
1336 }
1337 
/* Apply the TPA (LRO) enable/disable flag to every vnic that carries
 * the given Rx queue.
 */
void
bnxt_vnic_tpa_cfg(struct bnxt *bp, uint16_t queue_id, bool flag)
{
	uint16_t idx = 0;

	for (;;) {
		struct bnxt_vnic_info *vnic;

		vnic = bnxt_vnic_queue_id_get_next(bp, queue_id, &idx);
		if (vnic == NULL)
			break;
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, flag);
		/* resume the scan after the vnic we just configured */
		idx++;
	}
}
1350 
1351 inline struct bnxt_vnic_info *
1352 bnxt_get_default_vnic(struct bnxt *bp)
1353 {
1354 	return &bp->vnic_info[bp->vnic_queue_db.dflt_vnic_id];
1355 }
1356 
1357 uint8_t _bnxt_rte_to_hwrm_ring_select_mode(enum rte_eth_hash_function hash_f)
1358 {
1359 	/* If RTE_ETH_HASH_FUNCTION_DEFAULT || RTE_ETH_HASH_FUNCTION_TOEPLITZ */
1360 	uint8_t mode = HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ;
1361 
1362 	if (hash_f == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
1363 		mode = HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_XOR;
1364 
1365 	return mode;
1366 }
1367 
1368 int bnxt_rte_flow_to_hwrm_ring_select_mode(enum rte_eth_hash_function hash_f,
1369 					   uint64_t types, struct bnxt *bp,
1370 					   struct bnxt_vnic_info *vnic)
1371 {
1372 	if (hash_f != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
1373 	    hash_f != RTE_ETH_HASH_FUNCTION_DEFAULT) {
1374 		if (hash_f == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ ||
1375 		    (!BNXT_CHIP_P7(bp) && hash_f == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)) {
1376 			PMD_DRV_LOG_LINE(ERR, "Unsupported hash function");
1377 			return -ENOTSUP;
1378 		}
1379 	}
1380 
1381 	if (types & RTE_ETH_RSS_IPV4_CHKSUM || types & RTE_ETH_RSS_L4_CHKSUM) {
1382 		if ((bp->vnic_cap_flags & BNXT_VNIC_CAP_CHKSM_MODE) &&
1383 			(hash_f == RTE_ETH_HASH_FUNCTION_DEFAULT ||
1384 			 hash_f == RTE_ETH_HASH_FUNCTION_TOEPLITZ)) {
1385 			/* Checksum mode cannot with hash func makes no sense */
1386 			vnic->ring_select_mode =
1387 				HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM;
1388 			vnic->hash_f_local = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
1389 			/* shadow copy types as !hash_f is always true with default func */
1390 			vnic->rss_types_local = types;
1391 			return 0;
1392 		}
1393 		PMD_DRV_LOG_LINE(ERR, "Hash function not supported with checksun type");
1394 		return -ENOTSUP;
1395 	}
1396 
1397 	vnic->ring_select_mode = _bnxt_rte_to_hwrm_ring_select_mode(hash_f);
1398 	vnic->hash_f_local = hash_f;
1399 	/* shadow copy types as !hash_f is always true with default func */
1400 	vnic->rss_types_local = types;
1401 	return 0;
1402 }
1403 
1404 int bnxt_rte_eth_to_hwrm_ring_select_mode(struct bnxt *bp, uint64_t types,
1405 					  struct bnxt_vnic_info *vnic)
1406 {
1407 	/* If the config update comes via ethdev, there is no way to
1408 	 * specify anything for hash function.
1409 	 * So its either TOEPLITZ or the Checksum mode.
1410 	 * Note that checksum mode is not supported on older devices.
1411 	 */
1412 	if (types == RTE_ETH_RSS_IPV4_CHKSUM) {
1413 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_CHKSM_MODE)
1414 			vnic->ring_select_mode =
1415 			HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM;
1416 		else
1417 			return -ENOTSUP;
1418 	}
1419 
1420 	/* Older devices can support TOEPLITZ only.
1421 	 * Thor2 supports other hash functions, but can't change using this path.
1422 	 */
1423 	vnic->ring_select_mode =
1424 		HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ;
1425 	vnic->hash_f_local =
1426 		HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ;
1427 	return 0;
1428 }
1429 
1430 void bnxt_hwrm_rss_to_rte_hash_conf(struct bnxt_vnic_info *vnic,
1431 				    uint64_t *rss_conf)
1432 {
1433 	uint32_t hash_types;
1434 
1435 	/* check for local shadow rte types */
1436 	if (vnic->rss_types_local != 0) {
1437 		*rss_conf = vnic->rss_types_local;
1438 		return;
1439 	}
1440 
1441 	hash_types = vnic->hash_type;
1442 	*rss_conf = 0;
1443 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4)
1444 		*rss_conf |= RTE_ETH_RSS_IPV4;
1445 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4)
1446 		*rss_conf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1447 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4)
1448 		*rss_conf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1449 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6)
1450 		*rss_conf |= RTE_ETH_RSS_IPV6;
1451 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6)
1452 		*rss_conf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1453 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6)
1454 		*rss_conf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1455 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL)
1456 		*rss_conf |= RTE_ETH_RSS_IPV6_FLOW_LABEL;
1457 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6 ||
1458 	    hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4)
1459 		*rss_conf |= RTE_ETH_RSS_AH;
1460 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6 ||
1461 	    hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4)
1462 		*rss_conf |= RTE_ETH_RSS_ESP;
1463 }
1464