/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

static void bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));
}

int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		return -EBUSY;
	}

	/* THOR (P5) does not support ring groups,
	 * but we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for the default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		return -ENOSPC;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc grp info tbl.\n");
			return -ENOMEM;
		}
		bnxt_init_ring_grps(bp);
	}

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * their allocation.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);
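
	/*
	 * Each region below starts at the aligned end of the previous one,
	 * following the order documented above the function.  Illustrative
	 * arithmetic (hypothetical 256-entry RX ring): the RX BD area would
	 * occupy
	 *   RTE_ALIGN(RTE_CACHE_LINE_ROUNDUP(256 * sizeof(struct rx_prod_pkt_bd)), 4096)
	 * bytes, and the running offset after the last region becomes the
	 * total memzone length requested further down.
	 */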
	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = nq_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start = 0;
	int nq_ring_start = 0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = nq_ring_info ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	int rx_ring_start = tx_ring_start + tx_ring_len;
	rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	int ag_ring_start = rx_ring_start + rx_ring_len;
	ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
	ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
		rx_ring_info->rx_ring_struct->ring_size *
		AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = 0;

	if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
		int tpa_max = BNXT_TPA_MAX_AGGS(bp);

		tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
		tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
	}

	int total_alloc_len = tpa_info_start;
	total_alloc_len += tpa_info_len;
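
	/*
	 * The memzone is keyed by PCI address, queue index and suffix, so a
	 * lookup hit below means the zone survived a previous device stop
	 * and can be reused instead of being reserved again.
	 */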
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG,
						 getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;

	if (tx_ring_info) {
		txq->mz = mz;
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct rte_mbuf **)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rxq->mz = mz;
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct rte_mbuf **)rx_ring->vmem;
		}

		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->iova + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct rte_mbuf **)rx_ring->vmem;
		}

		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}
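
	/*
	 * The completion ring BDs are carved from cp_ring_start; the HW
	 * stats block, when present, sits at the very start of the zone
	 * (offset 0), which is why hw_stats_map below is simply
	 * mz_phys_addr.
	 */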
	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz_phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid,
			uint32_t ring_mask)
{
	if (BNXT_CHIP_P5(bp)) {
		int db_offset = DB_PF_OFFSET;

		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		if (BNXT_CHIP_SR2(bp)) {
			db->db_key64 |= DBR_VALID;
			db_offset = bp->legacy_db_size;
		} else if (BNXT_VF(bp)) {
			db_offset = DB_VF_OFFSET;
		}

		db->doorbell = (char *)bp->doorbell_base + db_offset;
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
	db->db_ring_mask = ring_mask;

	if (BNXT_CHIP_SR2(bp)) {
		db->db_epoch_mask = db->db_ring_mask + 1;
		db->db_epoch_shift = DBR_EPOCH_SFT -
					rte_log2_u32(db->db_epoch_mask);
	}
}
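
/*
 * Illustrative composition of a P5 64-bit doorbell key (the fid value is
 * hypothetical): for a PF TX ring whose firmware ring id is 0x12,
 *
 *   db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | ((uint64_t)0x12 << DBR_XID_SFT);
 *
 * At ring-kick time the producer index (masked by db_ring_mask) is OR-ed
 * into this key before it is written to db->doorbell.
 */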

static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_RX_VEC_START;
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);
	bnxt_db_cq(cpr);

	return 0;
}

int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_ring *ring;
	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
	unsigned int socket_id;
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
		return 0;

	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());

	nqr = rte_zmalloc_socket("nqr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (nqr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL) {
		rte_free(nqr);
		return -ENOMEM;
	}

	ring->bd = (void *)nqr->cp_desc_ring;
	ring->bd_dma = nqr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	nqr->cp_ring_struct = ring;
	rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return -ENOMEM;
	}

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return rc;
	}

	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
		    ring->fw_ring_id, ring->ring_mask);
	bnxt_db_nq(nqr);

	bp->rxtx_nq_ring = nqr;

	return 0;
}
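
/*
 * On NQ-capable (P5) devices a single notification queue is shared by all
 * RX/TX completion rings: each L2 completion ring allocated in
 * bnxt_alloc_cmpl_ring() is bound to it via nq_ring_id.  Async events use
 * a separate NQ (see bnxt_alloc_async_cp_ring() below).
 */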

/* Free RX/TX NQ ring. */
void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;

	if (!nqr)
		return;

	bnxt_free_nq_ring(bp, nqr);

	bnxt_free_ring(nqr->cp_ring_struct);
	rte_free(nqr->cp_ring_struct);
	nqr->cp_ring_struct = NULL;
	rte_free(nqr);
	bp->rxtx_nq_ring = NULL;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->rx_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	return 0;
}

static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_P5(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->ag_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

	return 0;
}

int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_coal coal;
	int rc;

	/*
	 * Storage for the cp ring is allocated based on worst-case
	 * usage; the actual size to be used by the HW is computed here.
	 */
	cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;

	if (bp->eth_dev->data->scattered_rx)
		cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;
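
	/*
	 * Illustrative sizing (hypothetical numbers): with a 256-entry RX
	 * ring, scattered RX enabled, and if AGG_RING_SIZE_FACTOR were 4,
	 * the completion ring would get 256 * 2 * 4 = 2048 entries, making
	 * the mask computed below 0x7ff.
	 */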
	cp_ring->ring_mask = cp_ring->ring_size - 1;

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
	}
	rxq->index = queue_index;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	bnxt_rxq_vec_setup(rxq);
#endif

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}
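
/*
 * Note on ordering above: the completion ring must exist before the RX and
 * AGG rings are allocated, because bnxt_hwrm_ring_alloc() binds each of
 * them to cp_ring->fw_ring_id.
 */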

/* Initialize all ring IDs to INVALID_HW_RING_ID; this is used later to
 * free only the rings that were actually allocated when allocation of
 * some rings fails partway through.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
	unsigned int i = 0;
	struct bnxt_rx_queue *rxq;
	struct bnxt_ring *cp_ring;
	struct bnxt_ring *ring;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_tx_queue *txq;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		/* Rx-compl */
		cp_ring = rxq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-Reg */
		rxr = rxq->rx_ring;
		ring = rxr->rx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-AGG */
		ring = rxr->ag_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		txq = bp->tx_queues[i];
		/* Tx cmpl */
		cp_ring = txq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Tx ring */
		ring = txq->tx_ring->tx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 .. rx_cp_nr_rings] = rx_cp, rx rings
 * [rx_cp_nr_rings + 1 .. rx_cp_nr_rings + tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	uint8_t ring_type;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);
	bnxt_init_all_rings(bp);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_alloc_hwrm_rx_ring(bp, i);
		if (rc)
			goto err_out;
	}

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + bp->rx_cp_nr_rings;
		uint16_t tx_cosq_id = 0;

		if (bnxt_alloc_cmpl_ring(bp, idx, cpr))
			goto err_out;

		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
			tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0];
		else
			tx_cosq_id = bp->tx_cosq_id[0];
		/* Tx ring */
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					  ring_type,
					  i, cpr->hw_stats_ctx_id,
					  cp_ring->fw_ring_id,
					  tx_cosq_id);
		if (rc)
			goto err_out;

		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id,
			    ring->ring_mask);
		txq->index = idx;
		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
	}

err_out:
	return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	cpr->valid = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}
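
/*
 * Teardown mirrors allocation: the firmware-side ring is released first
 * (bnxt_free_nq_ring()/bnxt_free_cp_ring()), then the host-side ring state
 * and the bnxt_cp_ring_info structure itself.
 */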

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;
	unsigned int socket_id;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, 0, NULL, NULL,
				bp->async_cp_ring, NULL,
				"def_cp");
}