/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}

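/*
 * Distribute the port's Rx queues across VNICs according to the configured
 * multi-queue mode, attach a default L2 filter to VNIC 0, and program the
 * RSS hash type, hash level and (optionally) the user-supplied hash key.
 */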
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = &bp->vnic_info[0];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}

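	/*
	 * Spread the Rx rings evenly across the pools: each pool (VNIC) is
	 * assigned a contiguous block of nb_q_per_grp ring groups, delimited
	 * by [start_grp_id, end_grp_id).
	 */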
	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

out:
	bp->rx_num_qs_per_vnic = nb_q_per_grp;

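	/* When RSS is enabled, program the hash configuration on every VNIC. */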
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;

		for (i = 0; i < bp->nr_vnics; i++) {
			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);

			vnic = &bp->vnic_info[i];
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
			vnic->hash_mode =
				bnxt_rte_to_hwrm_hash_level(bp,
							    rss->rss_hf,
							    lvl);

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}

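/*
 * Return all mbufs currently held by the queue: buffers posted to the Rx
 * ring, buffers posted to the aggregation ring, and any partially
 * reassembled TPA (LRO) packets.
 */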
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
		/*
		 * The vector receive burst function does not set used
		 * mbuf pointers to NULL, do that here to simplify
		 * cleanup logic.
		 */
		for (i = 0; i < rxq->rxrearm_nb; i++)
			sw_ring[rxq->rxrearm_start + i] = NULL;
		rxq->rxrearm_nb = 0;
#endif
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}

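/* Release the mbufs held by every Rx queue of the port. */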
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

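/*
 * .rx_queue_release() ethdev callback: free the queue's mbufs, the Rx,
 * aggregation and completion ring descriptors, and the queue structure
 * itself.
 */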
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		if (rxq->rx_ring) {
			bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
			rte_free(rxq->rx_ring->rx_ring_struct);
			/* Free RX Agg ring hardware descriptors */
			bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
			rte_free(rxq->rx_ring->ag_ring_struct);

			rte_free(rxq->rx_ring);
		}
		/* Free RX completion ring hardware descriptors */
		if (rxq->cp_ring) {
			bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring);
		}

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}

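/*
 * .rx_queue_setup() ethdev callback: validate the descriptor count, allocate
 * the bnxt_rx_queue structure and its ring memory, and record the initial
 * (started or deferred) queue state.
 */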
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;
	uint8_t queue_state;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			"Cannot create Rx ring %d. Only %d rings available\n",
			queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		return -ENOMEM;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
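	/*
	 * Rx free threshold: a quarter of the power-of-two-aligned ring size,
	 * capped at the maximum Rx burst size.
	 */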
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
		PMD_DRV_LOG(NOTICE,
			    "Per-queue config of drop-en is not supported.\n");
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "init_rx_ring_struct failed!\n");
		goto err;
	}

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
			     "rxr");
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "ring_dma_zone_reserve for rx_ring failed!\n");
		goto err;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	if (rxq->rx_deferred_start) {
		queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	} else {
		queue_state = RTE_ETH_QUEUE_STATE_STARTED;
		rxq->rx_started = true;
	}
	eth_dev->data->rx_queue_state[queue_idx] = queue_state;

	/* Configure mtu if it is different from what was configured before */
	if (!queue_idx)
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

	return 0;
err:
	bnxt_rx_queue_release_op(rxq);
	return rc;
}

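/*
 * .rx_queue_intr_enable() ethdev callback: re-arm the completion ring
 * doorbell so that the queue generates interrupts again.
 */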
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}

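/*
 * .rx_queue_intr_disable() ethdev callback: disarm the completion ring
 * doorbell to suppress further interrupts from the queue.
 */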
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}

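/*
 * .rx_queue_start() ethdev callback: recreate the HWRM Rx ring, restore the
 * queue's ring group in its VNIC, and reprogram the RSS table so the queue
 * receives traffic again.
 */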
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers.
	 * If the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	if (BNXT_CHIP_P5(bp)) {
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
	}
	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG,
				    "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}

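/*
 * .rx_queue_stop() ethdev callback: mark the queue stopped, drop it from the
 * VNIC's RSS table and ring-group mapping, fix up the VNIC default ring (or
 * MRU on P5 devices) if needed, and free the mbufs posted to the queue.
 */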
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as
	 * Default CQ for async notifications.
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	vnic = rxq->vnic;
	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
			    rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	/* Compute current number of active receive queues. */
	for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
		if (bp->rx_queues[i]->rx_started)
			active_queue_cnt++;

	if (BNXT_CHIP_P5(bp)) {
		/*
		 * For Thor, we need to ensure that the VNIC default receive
		 * ring corresponds to an active receive queue. When no queue
		 * is active, we need to temporarily set the MRU to zero so
		 * that packets are dropped early in the receive pipeline in
		 * order to prevent the VNIC default receive ring from being
		 * accessed.
		 */
		if (active_queue_cnt == 0) {
			uint16_t saved_mru = vnic->mru;

			vnic->mru = 0;
			/* Reconfigure default receive ring and MRU. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
			vnic->mru = saved_mru;
		} else {
			/* Reconfigure default receive ring. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
	} else if (active_queue_cnt) {
		/*
		 * If the queue being stopped is the current default queue and
		 * there are other active queues, pick one of them as the
		 * default and reconfigure the vnic.
		 */
		if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
			for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}
620