/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

int bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));

	return 0;
}

/*
 * Allocate a single memzone holding a completion ring (with vmem and stats),
 * optionally also holding TX and/or RX (plus aggregation) rings.  Pass a
 * NULL txq and/or rxq to skip allocating the corresponding rings.
 *
 * Order of the sub-allocations within the memzone is:
 * stats      - non-zero length only if txq or rxq is supplied
 * cp vmem    - always zero-length, supported for the bnxt_ring abstraction
 * nq vmem    - only reserved on BNXT_CHIP_THOR adapters (same size as cp vmem)
 * tx vmem    - only non-zero length if txq is not NULL
 * rx vmem    - only non-zero length if rxq is not NULL
 * ag vmem    - only non-zero length if rxq is not NULL
 * cp bd ring - always non-zero length
 * nq bd ring - only non-zero length on BNXT_CHIP_THOR adapters
 * tx bd ring - only non-zero length if txq is not NULL
 * rx bd ring - only non-zero length if rxq is not NULL
 * ag bd ring - only non-zero length if rxq is not NULL
 * ag bitmap  - only non-zero length if rxq is not NULL
 * tpa info   - only counted in the total when rxq is not NULL and
 *              DEV_RX_OFFLOAD_TCP_LRO is enabled
 */
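/*
 * Worked example (hypothetical sizes, for illustration only; assumes a
 * 16-byte struct cmpl_base): for the dedicated async completion ring
 * allocated at the bottom of this file (no txq, no rxq, non-Thor adapter),
 * stats_len and every vmem length below evaluate to 0, so with a 256-entry
 * completion ring cp_ring_len = RTE_ALIGN(RTE_CACHE_LINE_ROUNDUP(256 * 16),
 * 128) = 4096 and total_alloc_len ends up being a single 4KB page.
 */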
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr_base;
	rte_iova_t mz_phys_addr;
	int sz;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len = tx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
				       tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				       rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start = 0;
	int nq_ring_start = 0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	int ag_ring_start = rx_ring_start + rx_ring_len;
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
	ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
			rx_ring_info->rx_ring_struct->ring_size *
			AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
				       sizeof(struct bnxt_tpa_info)) : 0;

	int total_alloc_len = tpa_info_start;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		total_alloc_len += tpa_info_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
				SOCKET_ID_ANY,
				RTE_MEMZONE_2MB |
				RTE_MEMZONE_SIZE_HINT_ONLY |
				RTE_MEMZONE_IOVA_CONTIG,
				getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr_base = mz->iova;
	mz_phys_addr = mz->iova;
	if ((unsigned long)mz->addr == mz_phys_addr_base) {
		PMD_DRV_LOG(WARNING,
			    "Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(WARNING,
			    "Using rte_mem_virt2iova()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
				    "unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}

	if (tx_ring_info) {
		txq->mz = mz;
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rxq->mz = mz;
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		/* Use the (possibly remapped) IOVA base, as for the other rings. */
		rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz_phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

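/*
 * Note on callers: within this file, bnxt_alloc_async_ring_struct() (at the
 * bottom) calls bnxt_alloc_rings() with NULL txq/rxq to carve out only the
 * dedicated async completion ring.  Per-queue callers outside this file are
 * expected to pass the queue being set up so that its TX/RX/aggregation
 * rings share that queue's memzone.
 */
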
static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

/*
 * Record the doorbell address and doorbell key for a ring.  Thor-based
 * adapters use 64-bit doorbells keyed by the firmware ring id (fid);
 * older adapters use 32-bit doorbells located by map_idx.
 */
static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid)
{
	if (BNXT_CHIP_THOR(bp)) {
		if (BNXT_PF(bp))
			db->doorbell = (char *)bp->doorbell_base + 0x10000;
		else
			db->doorbell = (char *)bp->doorbell_base + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
}

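/*
 * Illustrative example (exact values depend on the DBR_* definitions in the
 * HSI headers, which are not shown here): on a Thor adapter, a TX ring whose
 * firmware ring id is fid gets db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ |
 * ((uint64_t)fid << DBR_XID_SFT); the stored key is presumably combined with
 * the producer index when the doorbell is actually written (bnxt_db_write(),
 * defined elsewhere in this driver).
 */
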
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr,
				struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id);
	if (rc)
		return rc;

	cpr->cp_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id);
	bnxt_db_cq(cpr);

	return 0;
}

static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
			      struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
	int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp))
		return -EINVAL;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
	if (rc)
		return rc;

	bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
		    nq_ring->fw_ring_id);
	bnxt_db_nq(nqr);

	return 0;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id);
	if (rc)
		return rc;

	rxr->rx_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
	bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

	return 0;
}

static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_THOR(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id);

	if (rc)
		return rc;

	rxr->ag_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
	bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

	return 0;
}

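/*
 * The firmware-facing allocation for one RX queue proceeds in the order
 * implemented below: the NQ (Thor only), then the completion ring bound to
 * that NQ, then the RX ring and the aggregation ring, each bound to the
 * completion ring.  The helpers above are reused by the bulk path in
 * bnxt_alloc_hwrm_rings() further down.
 */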
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int rc;

	if (BNXT_HAS_NQ(bp)) {
		rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
			       RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);

	if (bp->eth_dev->data->rx_queue_state[queue_index] ==
	    RTE_ETH_QUEUE_STATE_STARTED) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR,
				    "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
	}
	rxq->index = queue_index;

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
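/*
 * Concrete example (hypothetical counts, for illustration): with
 * rx_cp_nr_rings = 4 and no dedicated async CPR, RX queue 2 uses
 * grp_info[2] and completion-ring index 2, its aggregation ring is given
 * map index 2 + rx_cp_nr_rings = 6 (see bnxt_alloc_rx_agg_ring() above),
 * and TX queue 2 gets completion-ring index 2 + rx_cp_nr_rings = 6 in
 * bnxt_alloc_hwrm_rings() below.
 */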
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	uint8_t ring_type;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

		if (BNXT_HAS_NQ(bp)) {
			rc = bnxt_alloc_nq_ring(bp, i, nqr);
			if (rc)
				goto err_out;
		}

		rc = bnxt_alloc_cmpl_ring(bp, i, cpr, nqr);
		if (rc)
			goto err_out;

		if (BNXT_HAS_RING_GRPS(bp)) {
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
		}

		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
		if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
			/*
			 * If a dedicated async event completion ring is not
			 * enabled, use the first completion ring as the
			 * default completion ring for async event handling.
			 */
			bp->async_cp_ring = cpr;
			rc = bnxt_hwrm_set_async_event_cr(bp);
			if (rc)
				goto err_out;
		}

		rc = bnxt_alloc_rx_ring(bp, i);
		if (rc)
			goto err_out;

		rc = bnxt_alloc_rx_agg_ring(bp, i);
		if (rc)
			goto err_out;

		rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
				       RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
		rxq->index = i;
#ifdef RTE_ARCH_X86
		bnxt_rxq_vec_setup(rxq);
#endif
	}

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_cp_ring_info *nqr = txq->nq_ring;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + bp->rx_cp_nr_rings;

		if (BNXT_HAS_NQ(bp)) {
			rc = bnxt_alloc_nq_ring(bp, idx, nqr);
			if (rc)
				goto err_out;
		}

		rc = bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr);
		if (rc)
			goto err_out;

		/* Tx ring */
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					  ring_type,
					  i, cpr->hw_stats_ctx_id,
					  cp_ring->fw_ring_id);
		if (rc)
			goto err_out;

		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
		txq->index = idx;
		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
	}

err_out:
	return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);

	if (rc)
		return rc;

	cpr->cp_cons = 0;
	cpr->valid = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;
	unsigned int socket_id;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, 0, NULL, NULL,
				bp->async_cp_ring, NULL,
				"def_cp");
}
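/*
 * Typical call sequence (illustrative only; the actual ordering is driven by
 * the device start and queue-setup paths outside this file):
 * bnxt_alloc_async_ring_struct() and bnxt_alloc_async_cp_ring() set up the
 * dedicated async completion ring, bnxt_alloc_rings() carves a per-queue
 * memzone when each RX/TX queue is created, and bnxt_alloc_hwrm_rings() (or
 * bnxt_alloc_hwrm_rx_ring() for a single queue) registers the rings with
 * firmware via HWRM.
 */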