/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

/* Build the port-level Rx offload capability mask advertised to ethdev,
 * gating individual bits on chip generation and firmware capability flags.
 */
uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
{
	uint64_t rx_offload_capa;

	/* Offloads always supported regardless of chip/firmware. */
	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
			  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
			  RTE_ETH_RX_OFFLOAD_SCATTER |
			  RTE_ETH_RX_OFFLOAD_RSS_HASH |
			  RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

	/* In P7 platform if truflow is enabled then vlan offload is disabled */
	if (!(BNXT_TRUFLOW_EN(bp) && BNXT_CHIP_P7(bp)))
		rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				    RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);

	/* LRO is not offered when the compressed Rx CQE mode is in use. */
	if (!bnxt_compressed_rx_cqe_mode_enabled(bp))
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP) {
		/* Same truflow/P7 restriction as the other VLAN offloads. */
		if (!(BNXT_TRUFLOW_EN(bp) && BNXT_CHIP_P7(bp)))
			rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				   RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;

	return rx_offload_capa;
}

/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
	/* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
	 * if LRO is enabled, or if the max packet len is greater than the
	 * mbuf data size. So AGG ring will be needed whenever scattered_rx
	 * is set.
	 */
	return eth_dev->data->scattered_rx ? 1 : 0;
}

/* Drop the queue's reference to its hardware stats area.
 * NOTE(review): only the pointer is cleared here; the backing memory is
 * presumably owned/released with the HWRM stats context — confirm.
 */
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}

/* Distribute Rx queues among VNICs according to the configured multi-queue
 * mode (VMDq pools vs. plain RSS), attach an L2 filter to the default VNIC,
 * and program each VNIC's RSS hash type/level/key from bp->rss_conf.
 * Returns 0 on success or a negative errno value.
 */
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct rte_eth_rss_conf *rss = &bp->rss_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case RTE_ETH_MQ_RX_VMDQ_RSS:
		case RTE_ETH_MQ_RX_VMDQ_ONLY:
		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    RTE_ETH_64_POOLS)));
			PMD_DRV_LOG_LINE(DEBUG,
					 "pools = %u max_pools = %u",
					 pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case RTE_ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG_LINE(ERR, "Unsupported mq_mod %d",
					 dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}

	/* Never use more pools than there are completion rings. */
	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG_LINE(DEBUG, "pools = %u nb_q_per_grp = %u",
			 pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	/* Assign each pool its own VNIC and a contiguous ring-group range. */
	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG_LINE(ERR, "VNIC alloc failed");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		/* Point each Rx queue in this pool at the pool's VNIC. */
		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG_LINE(DEBUG,
					 "rxq[%d] = %p vnic[%d] = %p",
					 ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			/* Only the first (default) VNIC gets an L2 filter;
			 * the rest may fall back to a default RSS context.
			 */
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG_LINE(ERR, "L2 filter alloc failed");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

	bp->rx_num_qs_per_vnic = nb_q_per_grp;

	/* Program RSS hash parameters on every VNIC created above. */
	for (i = 0; i < bp->nr_vnics; i++) {
		uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

		vnic = &bp->vnic_info[i];
		vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
		vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss->rss_hf, lvl);

		/*
		 * Use the supplied key if the key length is
		 * acceptable and the rss_key is not NULL
		 */
		if (rss->rss_key && rss->rss_key_len <= HW_HASH_KEY_SIZE)
			memcpy(vnic->rss_hash_key, rss->rss_key, rss->rss_key_len);
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}

/* Free every mbuf currently posted to the queue's Rx ring, aggregation
 * ring, and (when applicable) TPA reassembly slots. Ring memory itself
 * is not released here.
 */
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
		/*
		 * The vector receive burst function does not set used
		 * mbuf pointers to NULL, do that here to simplify
		 * cleanup logic.
		 */
		for (i = 0; i < rxq->rxrearm_nb; i++)
			sw_ring[rxq->rxrearm_start + i] = NULL;
		rxq->rxrearm_nb = 0;
#endif
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				/* The shared fake mbuf must never be freed. */
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	if (rxq->bp == NULL ||
	    rxq->bp->eth_dev == NULL ||
	    !bnxt_need_agg_ring(rxq->bp->eth_dev))
		return;

	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* TPA is not used in compressed Rx CQE mode; nothing more to free. */
	if (bnxt_compressed_rx_cqe_mode_enabled(rxq->bp))
		return;

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}

}

/* Release the mbufs of every Rx queue on the port. */
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

/* Tear down all software/hardware resources of one Rx queue: posted mbufs,
 * Rx/aggregation/completion ring descriptors, stats reference, and the
 * backing memzone. The bnxt_rx_queue structure itself is left allocated.
 */
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
	bnxt_rx_queue_release_mbufs(rxq);

	/* Free RX, AGG ring hardware descriptors */
	if (rxq->rx_ring) {
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		rxq->rx_ring->rx_ring_struct = NULL;
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);
		rxq->rx_ring = NULL;
	}
	/* Free RX completion ring hardware descriptors */
	if (rxq->cp_ring) {
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rxq->cp_ring->cp_ring_struct = NULL;
		rte_free(rxq->cp_ring);
		rxq->cp_ring = NULL;
	}

	bnxt_free_rxq_stats(rxq);
	rte_memzone_free(rxq->mz);
	rxq->mz = NULL;
}

/* ethdev .rx_queue_release callback: free the firmware Rx ring, the queue's
 * memory, and finally the queue structure itself.
 */
void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

	if (rxq != NULL) {
		/* Skip HW teardown when the device is in an error state. */
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
		bnxt_free_rxq_mem(rxq);
		rte_free(rxq);
	}
}

/* ethdev .rx_queue_setup callback: validate parameters (buffer-split
 * segments, ring index, descriptor count), allocate and initialize the
 * bnxt_rx_queue, its ring structures, and its hardware descriptor memory.
 * Returns 0 on success or a negative errno value.
 */
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   const struct rte_eth_rxconf *rx_conf,
			   struct rte_mempool *mp)
{
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	uint8_t rs = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_rxseg_split *rx_seg =
		(struct rte_eth_rxseg_split *)rx_conf->rx_seg;
	uint16_t n_seg = rx_conf->rx_nseg;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Multiple segments require the BUFFER_SPLIT offload to be enabled. */
	if (n_seg > 1 && !rs) {
		PMD_DRV_LOG_LINE(ERR, "n_seg %d does not match buffer split %d setting",
				 n_seg, rs);
		return -EINVAL;
	}

	if (n_seg > BNXT_MAX_BUFFER_SPLIT_SEGS) {
		PMD_DRV_LOG_LINE(ERR, "n_seg %d not supported", n_seg);
		return -EINVAL;
	}

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG_LINE(ERR,
				 "Cannot create Rx ring %d. Only %d rings available",
				 queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
		return -EINVAL;
	}

	/* Release any queue previously configured at this index. */
	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(eth_dev, queue_idx);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG_LINE(ERR, "bnxt_rx_queue allocation failed!");
		return -ENOMEM;
	}
	rxq->bp = bp;
	/* With buffer split, segment 0 feeds the Rx ring and segment 1
	 * feeds the aggregation ring; otherwise both use the same pool.
	 */
	if (n_seg > 1) {
		rxq->mb_pool = rx_seg[BNXT_MEM_POOL_IDX_0].mp;
		rxq->agg_mb_pool = rx_seg[BNXT_MEM_POOL_IDX_1].mp;
	} else {
		rxq->mb_pool = mp;
		rxq->agg_mb_pool = mp;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	/* The application's rx_drop_en request is logged but the driver
	 * default is applied unconditionally.
	 */
	PMD_DRV_LOG_LINE(DEBUG,
			 "App supplied RXQ drop_en status : %d", rx_conf->rx_drop_en);
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

	PMD_DRV_LOG_LINE(DEBUG, "RX Buf MTU %d", eth_dev->data->mtu);

	eth_dev->data->rx_queues[queue_idx] = rxq;

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		PMD_DRV_LOG_LINE(ERR,
				 "init_rx_ring_struct failed!");
		goto err;
	}

	PMD_DRV_LOG_LINE(DEBUG, "RX Buf size is %d", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* Allocate RX ring hardware descriptors */
	rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
			      NULL, "rxr");
	if (rc) {
		PMD_DRV_LOG_LINE(ERR,
				 "ring_dma_zone_reserve for rx_ring failed!");
		goto err;
	}
	rxq->rx_mbuf_alloc_fail = 0;

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	rxq->rx_started = rxq->rx_deferred_start ? false : true;
	rxq->vnic = bnxt_get_default_vnic(bp);
	/* HDS threshold only applies when buffer split is requested. */
	rxq->vnic->hds_threshold = n_seg ? rxq->vnic->hds_threshold : 0;

	return 0;
err:
	bnxt_rx_queue_release_op(eth_dev, queue_idx);
	return rc;
}

/* ethdev .rx_queue_intr_enable callback: re-arm the completion ring
 * doorbell so the queue raises an interrupt for new completions.
 */
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}
/* ethdev .rx_queue_intr_disable callback: disarm the completion ring
 * doorbell so the queue stops raising interrupts.
 */
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}

/* ethdev .rx_queue_start callback: (re)allocate the firmware Rx ring for
 * this queue, mark it started, and update every VNIC that maps the queue
 * (default ring group, ring-group table, RSS table).
 * Returns 0 on success or a negative errno value.
 */
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	uint16_t vnic_idx = 0;
	uint16_t fw_grp_id = 0;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
		return -EINVAL;
	}

	/* A queue may be shared by several VNICs; start with the first. */
	vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id, &vnic_idx);
	if (vnic == NULL) {
		PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d",
				 rx_queue_id);
		return -EINVAL;
	}

	/* reset the previous stats for the rx_queue since the counters
	 * will be cleared when the queue is started.
	 */
	if (BNXT_TPA_V2_P7(bp))
		memset(&bp->prev_rx_ring_stats_ext[rx_queue_id], 0,
		       sizeof(struct bnxt_ring_stats_ext));
	else
		memset(&bp->prev_rx_ring_stats[rx_queue_id], 0,
		       sizeof(struct bnxt_ring_stats));

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers;
	 * if the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Recreate the firmware-side Rx ring from scratch. */
	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	if (BNXT_HAS_RING_GRPS(bp))
		fw_grp_id = bp->grp_info[rx_queue_id].fw_grp_id;

	/* Walk every VNIC this queue belongs to. */
	do {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->dflt_ring_grp = fw_grp_id;
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, vnic);

		PMD_DRV_LOG_LINE(INFO, "Rx queue started %d", rx_queue_id);

		if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
			if (BNXT_HAS_RING_GRPS(bp)) {
				if (vnic->fw_grp_ids[rx_queue_id] !=
				    INVALID_HW_RING_ID) {
					PMD_DRV_LOG_LINE(ERR, "invalid ring id %d",
							 rx_queue_id);
					return 0;
				}

				vnic->fw_grp_ids[rx_queue_id] = fw_grp_id;
				PMD_DRV_LOG_LINE(DEBUG, "vnic = %p fw_grp_id = %d",
						 vnic, fw_grp_id);
			}

			PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
					 vnic->rx_queue_cnt);
			/* Accumulate failures across VNICs into rc. */
			rc += bnxt_vnic_rss_queue_status_update(bp, vnic);
		}
		vnic_idx++;
	} while ((vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id,
						     &vnic_idx)) != NULL);

	/* Roll back the started state if any RSS update failed. */
	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG_LINE(INFO,
			 "queue %d, rx_deferred_start %d, state %d!",
			 rx_queue_id, rxq->rx_deferred_start,
			 bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}
/* ethdev .rx_queue_stop callback: mark the queue stopped, detach it from
 * every VNIC that maps it (ring-group table, RSS table), repoint or quiesce
 * each VNIC's default receive ring as needed, and finally release the
 * queue's posted mbufs.
 * Returns 0 on success or a negative errno value.
 */
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	uint16_t vnic_idx = 0, q_id = rx_queue_id;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as
	 * Default CQ for async notifications
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG_LINE(ERR, "Cannot stop Rx queue id %d", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
		return -EINVAL;
	}

	/* A queue may be shared by several VNICs; start with the first. */
	vnic = bnxt_vnic_queue_id_get_next(bp, q_id, &vnic_idx);
	if (!vnic) {
		PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d", q_id);
		return -EINVAL;
	}

	__rte_assume(q_id < RTE_MAX_QUEUES_PER_PORT);
	dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG_LINE(DEBUG, "Rx queue stopped");

	/* Walk every VNIC this queue belongs to. */
	do {
		active_queue_cnt = 0;
		if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
			if (BNXT_HAS_RING_GRPS(bp))
				vnic->fw_grp_ids[q_id] = INVALID_HW_RING_ID;

			PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
					 vnic->rx_queue_cnt);
			rc = bnxt_vnic_rss_queue_status_update(bp, vnic);
		}

		/* Compute current number of active receive queues. */
		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
			if (bp->rx_queues[i]->rx_started)
				active_queue_cnt++;

		if (BNXT_CHIP_P5_P7(bp)) {
			/*
			 * For P5, we need to ensure that the VNIC default
			 * receive ring corresponds to an active receive queue.
			 * When no queue is active, we need to temporarily set
			 * the MRU to zero so that packets are dropped early in
			 * the receive pipeline in order to prevent the VNIC
			 * default receive ring from being accessed.
			 */
			if (active_queue_cnt == 0) {
				uint16_t saved_mru = vnic->mru;

				/* clear RSS setting on vnic. */
				bnxt_vnic_rss_clear_p5(bp, vnic);

				vnic->mru = 0;
				/* Reconfigure default receive ring and MRU. */
				bnxt_hwrm_vnic_cfg(bp, vnic);
				vnic->mru = saved_mru;
			} else {
				/* Reconfigure default receive ring. */
				bnxt_hwrm_vnic_cfg(bp, vnic);
			}
		} else if (active_queue_cnt && vnic->dflt_ring_grp ==
			   bp->grp_info[q_id].fw_grp_id) {
			/*
			 * If the queue being stopped is the current default
			 * queue and there are other active queues, pick one of
			 * them as the default and reconfigure the vnic.
			 */
			for (i = vnic->start_grp_id; i < vnic->end_grp_id;
			     i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
		vnic_idx++;
	} while ((vnic = bnxt_vnic_queue_id_get_next(bp, q_id,
						     &vnic_idx)) != NULL);

	/* Only reclaim posted mbufs once the queue is fully detached. */
	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}