/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

static void bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));
}

int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG_LINE(ERR, "No TX rings available!");
		return -EBUSY;
	}

	/* P5 does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	/* TODO Revisit for Thor 2 */
	if (BNXT_CHIP_P5_P7(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG_LINE(ERR, "Insufficient resource: Ring Group");
		return -ENOSPC;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG_LINE(ERR,
					 "Failed to alloc grp info tbl.");
			return -ENOMEM;
		}
		bnxt_init_ring_grps(bp);
	}

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	int ag_ring_start, ag_bitmap_start, tpa_info_start;
	int ag_vmem_start, cp_ring_start, nq_ring_start;
	int total_alloc_len, rx_ring_start, rx_ring_len;
	struct rte_pci_device *pdev = bp->pdev;
	struct bnxt_ring *tx_ring, *rx_ring;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;
	int ag_bitmap_len = 0;
	int tpa_info_len = 0;
	int ag_vmem_len = 0;
	int ag_ring_len = 0;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(BNXT_HWRM_CTX_GET_SIZE(bp)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);
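
	/*
	 * The offsets computed below follow the order described above,
	 * followed by the aggregation BD ring, the aggregation bitmap and
	 * the TPA info area.  BD ring offsets are aligned to 4096 bytes,
	 * the other regions to 128 bytes.
	 */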
	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = nq_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
					tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
					rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	if (bnxt_need_agg_ring(bp->eth_dev))
		ag_vmem_len = rx_ring_info && rx_ring_info->ag_ring_struct ?
		    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->ag_ring_struct->vmem_size) : 0;

	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = nq_ring_info ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	rx_ring_start = tx_ring_start + tx_ring_len;
	rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
	rx_ring_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				       sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	ag_ring_start = rx_ring_start + rx_ring_len;
	ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);

	if (bnxt_need_agg_ring(bp->eth_dev)) {
		ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
		ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

		ag_bitmap_len = rx_ring_info ?
			RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
				rx_ring_info->rx_ring_struct->ring_size *
				AGG_RING_SIZE_FACTOR)) : 0;

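		/* TPA (LRO) bookkeeping: one bnxt_tpa_info entry per
		 * aggregation context, reserved only when the LRO Rx
		 * offload is enabled.
		 */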
		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
			int tpa_max = BNXT_TPA_MAX_AGGS(bp);

			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
			tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
		}
	}

	ag_bitmap_start = ag_ring_start + ag_ring_len;
	tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	total_alloc_len = tpa_info_start + tpa_info_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
						 socket_id,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG,
						 getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;

	if (tx_ring_info) {
		txq->mz = mz;
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;
		tx_ring_info->nr_bds = rte_zmalloc("bnxt_nr_bds",
						   sizeof(unsigned short) *
						   tx_ring->ring_size, 0);

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct rte_mbuf **)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rxq->mz = mz;
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct rte_mbuf **)rx_ring->vmem;
		}

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			rx_ring = rx_ring_info->ag_ring_struct;

			rx_ring->bd = ((char *)mz->addr + ag_ring_start);
			rx_ring_info->ag_desc_ring =
			    (struct rx_prod_pkt_bd *)rx_ring->bd;
			rx_ring->bd_dma = mz->iova + ag_ring_start;
			rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
			rx_ring->mem_zone = (const void *)mz;

			if (!rx_ring->bd)
				return -ENOMEM;
			if (rx_ring->vmem_size) {
				rx_ring->vmem =
				    (void **)((char *)mz->addr + ag_vmem_start);
				rx_ring_info->ag_buf_ring =
				    (struct rte_mbuf **)rx_ring->vmem;
			}

			rx_ring_info->ag_bitmap =
			    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
					    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
					    ag_bitmap_start, ag_bitmap_len);

			/* TPA info */
			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
				rx_ring_info->tpa_info =
					((struct bnxt_tpa_info *)
					 ((char *)mz->addr + tpa_info_start));
		}
	}

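	/*
	 * Completion ring (and NQ, when requested) descriptors come out of
	 * the same memzone; the HW stats block, when present, occupies
	 * offset 0 and its IOVA is recorded in hw_stats_map.
	 */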
	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len)
		cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;

	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

void bnxt_set_db(struct bnxt *bp,
		 struct bnxt_db_info *db,
		 uint32_t ring_type,
		 uint32_t map_idx,
		 uint32_t fid,
		 uint32_t ring_mask)
{
	if (BNXT_CHIP_P5_P7(bp)) {
		int db_offset = DB_PF_OFFSET;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		if (BNXT_CHIP_P7(bp)) {
			db->db_key64 |= DBR_VALID;
			db_offset = bp->legacy_db_size;
			db->db_epoch_mask = ring_mask + 1;
		} else if (BNXT_VF(bp)) {
			db_offset = DB_VF_OFFSET;
		}

		db->doorbell = (char *)bp->doorbell_base + db_offset;
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
	db->db_ring_mask = ring_mask;
}

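/*
 * Allocate a HW completion ring for the given queue via HWRM and write its
 * initial doorbell.  On chips with notification queues (P5 and later) the
 * completion ring is bound to the shared RX/TX NQ, which must already have
 * been allocated.
 */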
int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
			 struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_RX_VEC_START;
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG_LINE(ERR, "NQ ring is NULL");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);
	bnxt_db_cq(cpr);

	return 0;
}

int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_ring *ring;
	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
		return 0;

	nqr = rte_zmalloc_socket("nqr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (nqr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(nqr);
		return -ENOMEM;
	}

	ring->bd = (void *)nqr->cp_desc_ring;
	ring->bd_dma = nqr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	nqr->cp_ring_struct = ring;
	rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
			      NULL, nqr, NULL, "l2_nqr");
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return -ENOMEM;
	}

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return rc;
	}

	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
		    ring->fw_ring_id, ring->ring_mask);
	bnxt_db_nq(nqr);

	bp->rxtx_nq_ring = nqr;

	return 0;
}

/* Free RX/TX NQ ring. */
void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;

	if (!nqr)
		return;

	bnxt_free_nq_ring(bp, nqr);

	bnxt_free_ring(nqr->cp_ring_struct);
	rte_free(nqr->cp_ring_struct);
	nqr->cp_ring_struct = NULL;
	rte_free(nqr);
	bp->rxtx_nq_ring = NULL;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->rx_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	return 0;
}

static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	if (!bnxt_need_agg_ring(bp->eth_dev))
		return 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_P5_P7(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->ag_raw_prod = 0;
	rxr->ag_cons = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

	return 0;
}

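/*
 * Set up all HW resources backing one RX queue: the completion ring, stats
 * context, RX ring, aggregation ring (when needed) and, on chips that use
 * ring groups, the ring group tying them together.
 */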
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_coal coal;
	int rc;

	/*
	 * Storage for the cp ring is allocated based on worst-case usage;
	 * the actual size to be used by the HW is computed here.
	 */
	if (bnxt_compressed_rx_cqe_mode_enabled(bp)) {
		if (bnxt_need_agg_ring(bp->eth_dev))
			/* Worst case scenario, needed to accommodate Rx flush
			 * completion during RING_FREE.
			 */
			cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
		else
			cp_ring->ring_size = rxr->rx_ring_struct->ring_size;
	} else {
		cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
	}

	if (bnxt_need_agg_ring(bp->eth_dev))
		cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;

	cp_ring->ring_mask = cp_ring->ring_size - 1;

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, queue_index);
		if (rc)
			goto err_out;
	}

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG_LINE(ERR,
					 "ring%d bnxt_init_one_rx_ring failed!",
					 queue_index);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		if (bnxt_need_agg_ring(bp->eth_dev))
			bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
	}
	rxq->index = queue_index;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	bnxt_rxq_vec_setup(rxq);
#endif

	return 0;

err_out:
	PMD_DRV_LOG_LINE(ERR,
			 "Failed to allocate receive queue %d, rc %d.",
			 queue_index, rc);
	return rc;
}

/* Initialise all rings to -1; it's used to free rings later if allocation
 * of a few rings fails.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
	unsigned int i = 0;
	struct bnxt_rx_queue *rxq;
	struct bnxt_ring *cp_ring;
	struct bnxt_ring *ring;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_tx_queue *txq;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		/* Rx-compl */
		cp_ring = rxq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-Reg */
		rxr = rxq->rx_ring;
		ring = rxr->rx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-AGG */
		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring != NULL)
				ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		txq = bp->tx_queues[i];
		/* Tx cmpl */
		cp_ring = txq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Tx ring */
		ring = txq->tx_ring->tx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);
	bnxt_init_all_rings(bp);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int soc_id = bp->eth_dev->device->numa_node;
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring;

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring == NULL) {
				bnxt_free_rxq_mem(rxq);

				rc = bnxt_init_rx_ring_struct(rxq, soc_id);
				if (rc)
					goto err_out;

				rc = bnxt_alloc_rings(bp, soc_id,
						      i, NULL, rxq,
						      rxq->cp_ring, NULL,
						      "rxr");
				if (rc)
					goto err_out;
			}
		}

		rc = bnxt_alloc_hwrm_rx_ring(bp, i);
		if (rc)
			goto err_out;
		bnxt_hwrm_set_ring_coal(bp, &coal,
					rxq->cp_ring->cp_ring_struct->fw_ring_id);
	}

	/* If something is wrong with Rx ring alloc, skip Tx ring alloc */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		rc = bnxt_alloc_hwrm_tx_ring(bp, i);
		if (rc)
			goto err_out;
	}

err_out:
	return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
				NULL, bp->async_cp_ring, NULL, "def_cp");
}

int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	unsigned int idx = queue_index + bp->rx_cp_nr_rings;
	uint16_t tx_cosq_id = 0;
	struct bnxt_coal coal;
	int rc = 0;

	rc = bnxt_alloc_cmpl_ring(bp, idx, cpr);
	if (rc)
		goto err_out;

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
		tx_cosq_id = bp->tx_cosq_id[queue_index < bp->max_lltc ? queue_index : 0];
	else
		tx_cosq_id = bp->tx_cosq_id[0];

	rc = bnxt_hwrm_ring_alloc(bp, ring,
				  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id,
				  tx_cosq_id);
	if (rc)
		goto err_out;

	bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	txq->index = idx;

	return rc;
err_out:
	bnxt_free_hwrm_tx_ring(bp, queue_index);
	return rc;
}