/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
{
	uint64_t rx_offload_capa;

	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
			  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
			  RTE_ETH_RX_OFFLOAD_SCATTER |
			  RTE_ETH_RX_OFFLOAD_RSS_HASH |
			  RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

	/* On P7 platforms, VLAN offloads are disabled when TruFlow is enabled. */
	if (!(BNXT_TRUFLOW_EN(bp) && BNXT_CHIP_P7(bp)))
		rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				    RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);

	if (!bnxt_compressed_rx_cqe_mode_enabled(bp))
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP) {
		if (!(BNXT_TRUFLOW_EN(bp) && BNXT_CHIP_P7(bp)))
			rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				   RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;

	return rx_offload_capa;
}

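/*
 * The capability mask built above feeds the dev_info.rx_offload_capa
 * value that applications read via rte_eth_dev_info_get().  A minimal,
 * illustrative sketch of how a caller might validate a requested offload
 * against it (port_id and the requested flags are placeholders, not part
 * of this driver):
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint64_t wanted = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 *			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if ((dev_info.rx_offload_capa & wanted) != wanted)
 *		... fall back to software checksum/hash handling ...
 */
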
/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
	/* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
	 * if LRO is enabled, or if the max packet len is greater than the
	 * mbuf data size. So AGG ring will be needed whenever scattered_rx
	 * is set.
	 */
	return eth_dev->data->scattered_rx ? 1 : 0;
}

void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}

int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct rte_eth_rss_conf *rss = &bp->rss_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case RTE_ETH_MQ_RX_VMDQ_RSS:
		case RTE_ETH_MQ_RX_VMDQ_ONLY:
		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    RTE_ETH_64_POOLS)));
			PMD_DRV_LOG_LINE(DEBUG,
					 "pools = %u max_pools = %u",
					 pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case RTE_ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG_LINE(ERR, "Unsupported mq_mode %d",
					 dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}

	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG_LINE(DEBUG, "pools = %u nb_q_per_grp = %u",
			 pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG_LINE(ERR, "VNIC alloc failed");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG_LINE(DEBUG,
					 "rxq[%d] = %p vnic[%d] = %p",
					 ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG_LINE(ERR, "L2 filter alloc failed");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

	bp->rx_num_qs_per_vnic = nb_q_per_grp;

	for (i = 0; i < bp->nr_vnics; i++) {
		uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

		vnic = &bp->vnic_info[i];
		vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
		vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss->rss_hf, lvl);

		/*
		 * Use the supplied key if the key length is
		 * acceptable and the rss_key is not NULL
		 */
		if (rss->rss_key && rss->rss_key_len <= HW_HASH_KEY_SIZE)
			memcpy(vnic->rss_hash_key, rss->rss_key, rss->rss_key_len);
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}

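/*
 * Worked example of the pool/ring-group arithmetic above (illustrative
 * numbers only): with 8 Rx completion rings and a VMDq configuration
 * requesting 2 pools, pools = 2 and nb_q_per_grp = 8 / 2 = 4, so VNIC 0
 * covers ring groups [0, 4) and VNIC 1 covers ring groups [4, 8).  Each
 * Rx queue is then pointed at the VNIC that owns its ring group, and
 * only VNIC 0 gets the default L2 filter.
 */
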
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
		/*
		 * The vector receive burst function does not set used
		 * mbuf pointers to NULL, so do that here to simplify the
		 * cleanup logic.
		 */
		for (i = 0; i < rxq->rxrearm_nb; i++)
			sw_ring[rxq->rxrearm_start + i] = NULL;
		rxq->rxrearm_nb = 0;
#endif
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	if (rxq->bp == NULL ||
	    rxq->bp->eth_dev == NULL ||
	    !bnxt_need_agg_ring(rxq->bp->eth_dev))
		return;

	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	if (bnxt_compressed_rx_cqe_mode_enabled(rxq->bp))
		return;

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}

void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
	bnxt_rx_queue_release_mbufs(rxq);

	/* Free RX, AGG ring hardware descriptors */
	if (rxq->rx_ring) {
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		rxq->rx_ring->rx_ring_struct = NULL;
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);
		rxq->rx_ring = NULL;
	}
	/* Free RX completion ring hardware descriptors */
	if (rxq->cp_ring) {
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rxq->cp_ring->cp_ring_struct = NULL;
		rte_free(rxq->cp_ring);
		rxq->cp_ring = NULL;
	}

	bnxt_free_rxq_stats(rxq);
	rte_memzone_free(rxq->mz);
	rxq->mz = NULL;
}

void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

	if (rxq != NULL) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
		bnxt_free_rxq_mem(rxq);
		rte_free(rxq);
	}
}

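/*
 * Illustrative sketch of how an application might request the buffer
 * split handling implemented in bnxt_rx_queue_setup_op() below (the
 * mempools, lengths and port/queue ids are assumptions, not part of this
 * driver).  With RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT enabled in the port Rx
 * offloads, two segment descriptors select the header and payload pools
 * and the mempool argument is left NULL:
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128 },
 *		{ .mp = pay_pool, .length = 0 },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_nseg = 2,
 *		.rx_seg = (union rte_eth_rxseg *)segs,
 *	};
 *
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, NULL);
 */
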
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   const struct rte_eth_rxconf *rx_conf,
			   struct rte_mempool *mp)
{
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	uint8_t rs = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_rxseg_split *rx_seg =
			(struct rte_eth_rxseg_split *)rx_conf->rx_seg;
	uint16_t n_seg = rx_conf->rx_nseg;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (n_seg > 1 && !rs) {
		PMD_DRV_LOG_LINE(ERR, "n_seg %d does not match buffer split %d setting",
				 n_seg, rs);
		return -EINVAL;
	}

	if (n_seg > BNXT_MAX_BUFFER_SPLIT_SEGS) {
		PMD_DRV_LOG_LINE(ERR, "n_seg %d not supported", n_seg);
		return -EINVAL;
	}

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG_LINE(ERR,
				 "Cannot create Rx ring %d. Only %d rings available",
				 queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(eth_dev, queue_idx);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG_LINE(ERR, "bnxt_rx_queue allocation failed!");
		return -ENOMEM;
	}
	rxq->bp = bp;
	if (n_seg > 1) {
		rxq->mb_pool = rx_seg[BNXT_MEM_POOL_IDX_0].mp;
		rxq->agg_mb_pool = rx_seg[BNXT_MEM_POOL_IDX_1].mp;
	} else {
		rxq->mb_pool = mp;
		rxq->agg_mb_pool = mp;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	PMD_DRV_LOG_LINE(DEBUG,
			 "App supplied RXQ drop_en status : %d", rx_conf->rx_drop_en);
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

	PMD_DRV_LOG_LINE(DEBUG, "RX Buf MTU %d", eth_dev->data->mtu);

	eth_dev->data->rx_queues[queue_idx] = rxq;

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		PMD_DRV_LOG_LINE(ERR,
				 "init_rx_ring_struct failed!");
		goto err;
	}

	PMD_DRV_LOG_LINE(DEBUG, "RX Buf size is %d", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* Allocate RX ring hardware descriptors */
	rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
			      NULL, "rxr");
	if (rc) {
		PMD_DRV_LOG_LINE(ERR,
				 "ring_dma_zone_reserve for rx_ring failed!");
		goto err;
	}
	rxq->rx_mbuf_alloc_fail = 0;

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	rxq->rx_started = rxq->rx_deferred_start ? false : true;
	rxq->vnic = bnxt_get_default_vnic(bp);
	rxq->vnic->hds_threshold = n_seg ? rxq->vnic->hds_threshold : 0;

	return 0;
err:
	bnxt_rx_queue_release_op(eth_dev, queue_idx);
	return rc;
}

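/*
 * The two callbacks below back the rte_eth_dev_rx_intr_enable() and
 * rte_eth_dev_rx_intr_disable() ethdev calls by re-arming or disarming
 * the completion ring doorbell.  A minimal, illustrative caller-side
 * sketch (port_id/queue_id are placeholders): enable Rx queue interrupts
 * in the device configuration, then toggle them around each wait/poll
 * cycle:
 *
 *	dev_conf.intr_conf.rxq = 1;
 *	...
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... wait for the interrupt event, then ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... drain the queue with rte_eth_rx_burst() ...
 */
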
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}

int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}

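/*
 * bnxt_rx_queue_start()/bnxt_rx_queue_stop() below implement the
 * rte_eth_dev_rx_queue_start()/rte_eth_dev_rx_queue_stop() ethdev
 * callbacks.  A minimal, illustrative deferred-start sequence from an
 * application's point of view (port_id/queue_id/mb_pool are
 * placeholders):
 *
 *	struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };
 *
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, mb_pool);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_rx_queue_start(port_id, queue_id);
 */
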
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	uint16_t vnic_idx = 0;
	uint16_t fw_grp_id = 0;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
		return -EINVAL;
	}

	vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id, &vnic_idx);
	if (vnic == NULL) {
		PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d",
				 rx_queue_id);
		return -EINVAL;
	}

	/* Reset the previous stats for the rx_queue since the counters
	 * will be cleared when the queue is started.
	 */
	if (BNXT_TPA_V2_P7(bp))
		memset(&bp->prev_rx_ring_stats_ext[rx_queue_id], 0,
		       sizeof(struct bnxt_ring_stats_ext));
	else
		memset(&bp->prev_rx_ring_stats[rx_queue_id], 0,
		       sizeof(struct bnxt_ring_stats));

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers.
	 * If the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	if (BNXT_HAS_RING_GRPS(bp))
		fw_grp_id = bp->grp_info[rx_queue_id].fw_grp_id;

	do {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->dflt_ring_grp = fw_grp_id;
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, vnic);

		PMD_DRV_LOG_LINE(INFO, "Rx queue started %d", rx_queue_id);

		if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
			if (BNXT_HAS_RING_GRPS(bp)) {
				if (vnic->fw_grp_ids[rx_queue_id] !=
				    INVALID_HW_RING_ID) {
					PMD_DRV_LOG_LINE(ERR, "invalid ring id %d",
							 rx_queue_id);
					return 0;
				}

				vnic->fw_grp_ids[rx_queue_id] = fw_grp_id;
				PMD_DRV_LOG_LINE(DEBUG, "vnic = %p fw_grp_id = %d",
						 vnic, fw_grp_id);
			}

			PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
					 vnic->rx_queue_cnt);
			rc += bnxt_vnic_rss_queue_status_update(bp, vnic);
		}
		vnic_idx++;
	} while ((vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id,
						     &vnic_idx)) != NULL);

	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG_LINE(INFO,
			 "queue %d, rx_deferred_start %d, state %d!",
			 rx_queue_id, rxq->rx_deferred_start,
			 bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}

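/*
 * Outline of the stop path below: mark the queue stopped so the receive
 * path no longer posts buffers, drop the queue from the RSS table of
 * every VNIC that spans it, then either repoint the VNIC default ring
 * group at a still-active queue or, on P5/P7 chips, temporarily force
 * the MRU to zero when no queue in the VNIC remains active.  Finally the
 * software rings are drained of mbufs.
 */
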
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	uint16_t vnic_idx = 0, q_id = rx_queue_id;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the Stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as the
	 * default CQ for async notifications.
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG_LINE(ERR, "Cannot stop Rx queue id %d", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
		return -EINVAL;
	}

	vnic = bnxt_vnic_queue_id_get_next(bp, q_id, &vnic_idx);
	if (!vnic) {
		PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d", q_id);
		return -EINVAL;
	}

	__rte_assume(q_id < RTE_MAX_QUEUES_PER_PORT);
	dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG_LINE(DEBUG, "Rx queue stopped");

	do {
		active_queue_cnt = 0;
		if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
			if (BNXT_HAS_RING_GRPS(bp))
				vnic->fw_grp_ids[q_id] = INVALID_HW_RING_ID;

			PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
					 vnic->rx_queue_cnt);
			rc = bnxt_vnic_rss_queue_status_update(bp, vnic);
		}

		/* Compute current number of active receive queues. */
		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
			if (bp->rx_queues[i]->rx_started)
				active_queue_cnt++;

		if (BNXT_CHIP_P5_P7(bp)) {
			/*
			 * For P5, we need to ensure that the VNIC default
			 * receive ring corresponds to an active receive queue.
			 * When no queue is active, we need to temporarily set
			 * the MRU to zero so that packets are dropped early in
			 * the receive pipeline in order to prevent the VNIC
			 * default receive ring from being accessed.
			 */
			if (active_queue_cnt == 0) {
				uint16_t saved_mru = vnic->mru;

				/* Clear the RSS setting on the VNIC. */
				bnxt_vnic_rss_clear_p5(bp, vnic);

				vnic->mru = 0;
				/* Reconfigure default receive ring and MRU. */
				bnxt_hwrm_vnic_cfg(bp, vnic);
				vnic->mru = saved_mru;
			} else {
				/* Reconfigure default receive ring. */
				bnxt_hwrm_vnic_cfg(bp, vnic);
			}
		} else if (active_queue_cnt && vnic->dflt_ring_grp ==
			   bp->grp_info[q_id].fw_grp_id) {
			/*
			 * If the queue being stopped is the current default
			 * queue and there are other active queues, pick one of
			 * them as the default and reconfigure the vnic.
			 */
			for (i = vnic->start_grp_id; i < vnic->end_grp_id;
			     i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
		vnic_idx++;
	} while ((vnic = bnxt_vnic_queue_id_get_next(bp, q_id,
						     &vnic_idx)) != NULL);

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}