/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_alarm.h>

#include "bnxt.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_hwrm.h"
#include "bnxt_tf_common.h"
#include "ulp_mark_mgr.h"

/*
 * RX Ring handling
 */

static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);

	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t raw_prod)
{
	uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;

	rxbd = &rxr->rx_desc_ring[prod];
	rx_buf = &rxr->rx_buf_ring[prod];
	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
					      rte_memory_order_relaxed);
		/* If buff has failed already, setting this again won't hurt */
		rxq->need_realloc = 1;
		return -ENOMEM;
	}

	*rx_buf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}

static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t raw_prod)
{
	uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;

	rxbd = &rxr->ag_desc_ring[prod];
	rx_buf = &rxr->ag_buf_ring[prod];
	if (rxbd == NULL) {
		PMD_DRV_LOG_LINE(ERR, "Jumbo Frame. rxbd is NULL");
		return -EINVAL;
	}

	if (rx_buf == NULL) {
		PMD_DRV_LOG_LINE(ERR, "Jumbo Frame. rx_buf is NULL");
		return -EINVAL;
	}

	mbuf = __bnxt_alloc_rx_data(rxq->agg_mb_pool);
	if (!mbuf) {
		rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
					      rte_memory_order_relaxed);
		/* If buff has failed already, setting this again won't hurt */
		rxq->need_realloc = 1;
		return -ENOMEM;
	}

	*rx_buf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}

static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
	struct rte_mbuf **prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(*prod_rx_buf == NULL);
	RTE_ASSERT(mbuf != NULL);

	*prod_rx_buf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];

	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	rxr->rx_raw_prod = raw_prod;
}

static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct rte_mbuf **cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
	RTE_ASSERT(*cons_rx_buf != NULL);
	mbuf = *cons_rx_buf;
	*cons_rx_buf = NULL;

	return mbuf;
}

static void bnxt_rx_ring_reset(void *arg)
{
	struct bnxt *bp = arg;
	int i, rc = 0;
	struct bnxt_rx_queue *rxq;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr;

		rxq = bp->rx_queues[i];
		if (!rxq || !rxq->in_reset)
			continue;

		rxr = rxq->rx_ring;
		/* Disable and flush TPA before resetting the RX ring */
		if (rxr->tpa_info)
			bnxt_vnic_tpa_cfg(bp, rxq->queue_id, false);

		rc = bnxt_hwrm_rx_ring_reset(bp, i);
		if (rc) {
			PMD_DRV_LOG_LINE(ERR, "Rx ring%d reset failed", i);
			continue;
		}

		bnxt_rx_queue_release_mbufs(rxq);
		rxr->rx_raw_prod = 0;
		rxr->ag_raw_prod = 0;
		rxr->ag_cons = 0;
		rxr->rx_next_cons = 0;
		bnxt_init_one_rx_ring(rxq);
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
		if (rxr->tpa_info)
			bnxt_vnic_tpa_cfg(bp, rxq->queue_id, true);

		rxq->in_reset = 0;
	}
}

static void bnxt_sched_ring_reset(struct bnxt_rx_queue *rxq)
{
	rxq->in_reset = 1;
	rte_eal_alarm_set(1, bnxt_rx_ring_reset, (void *)rxq->bp);
}

static void bnxt_tpa_get_metadata(struct bnxt *bp,
				  struct bnxt_tpa_info *tpa_info,
				  struct rx_tpa_start_cmpl *tpa_start,
				  struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	tpa_info->cfa_code_valid = 0;
	tpa_info->vlan_valid = 0;
	tpa_info->hash_valid = 0;
	tpa_info->l4_csum_valid = 0;

	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		tpa_info->hash_valid = 1;
		tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
	}

	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
		struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
		struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
			(void *)tpa_start1;

		if (v2_tpa_start->agg_id &
		    RX_TPA_START_V2_CMPL_METADATA1_VALID) {
			tpa_info->vlan_valid = 1;
			tpa_info->vlan =
				rte_le_to_cpu_16(v2_tpa_start1->metadata0);
		}

		if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
			tpa_info->l4_csum_valid = 1;

		return;
	}

	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		tpa_info->vlan_valid = 1;
		tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
	}

	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		tpa_info->l4_csum_valid = 1;
}

static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];
	if (unlikely(data_cons != rxr->rx_next_cons)) {
		PMD_DRV_LOG_LINE(ERR, "TPA cons %x, expected cons %x",
				 data_cons, rxr->rx_next_cons);
		bnxt_sched_ring_reset(rxq);
		return;
	}

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->agg_count = 0;
	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = RTE_MBUF_F_RX_LRO;

	bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);

	if (likely(tpa_info->hash_valid)) {
		mbuf->hash.rss = tpa_info->rss_hash;
		mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	} else if (tpa_info->cfa_code_valid) {
		mbuf->hash.fdir.id = tpa_info->cfa_code;
		mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
	}

	if (tpa_info->vlan_valid && BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
		mbuf->vlan_tci = tpa_info->vlan;
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
	}

	if (likely(tpa_info->l4_csum_valid))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	/* recycle next mbuf */
	data_cons = RING_NEXT(data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));

	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct,
				     RING_NEXT(data_cons));
}

static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
				  cpr->cp_ring_struct->ring_size);
}

/* TPA consumes agg buffers out of order; allocate connected data only */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
	uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);

	/* TODO batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next)))
			break;
		rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
		rxr->ag_raw_prod = raw_next;
		raw_next = RING_NEXT(raw_next);
		bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
	}

	return 0;
}

static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;
	bool is_p5_tpa = tpa_info && BNXT_CHIP_P5_P7(rxq->bp);

	for (i = 0; i < agg_buf; i++) {
		struct rte_mbuf **ag_buf;
		struct rte_mbuf *ag_mbuf;

		if (is_p5_tpa) {
			rxcmp = (void *)&tpa_info->agg_arr[i];
		} else {
			*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
			rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];
		}

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = *ag_buf;
		if (ag_mbuf == NULL)
			return -EBUSY;

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		*ag_buf = NULL;

		/*
		 * As aggregation buffers are consumed out of order by the TPA
		 * module, use a bitmap to track freed slots that still need to
		 * be allocated and notified to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	last->next = NULL;
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   uint32_t *raw_cons, void *cmp)
{
	struct rx_pkt_cmpl *rxcmp = cmp;
	uint32_t tmp_raw_cons = *raw_cons;
	uint8_t cmp_type, agg_bufs = 0;

	cmp_type = CMP_TYPE(rxcmp);

	if (cmp_type == CMPL_BASE_TYPE_RX_L2) {
		agg_bufs = BNXT_RX_L2_AGG_BUFS(rxcmp);
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		struct rx_tpa_end_cmpl *tpa_end = cmp;

		if (BNXT_CHIP_P5_P7(bp))
			return 0;

		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	uint8_t payload_offset;
	struct bnxt_tpa_info *tpa_info;

	if (unlikely(rxq->in_reset)) {
		PMD_DRV_LOG_LINE(ERR, "rxq->in_reset: raw_cp_cons:%d",
				 *raw_cp_cons);
		bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
		return NULL;
	}

	if (BNXT_CHIP_P5_P7(rxq->bp)) {
		struct rx_tpa_v2_end_cmpl *th_tpa_end;
		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;

		th_tpa_end = (void *)tpa_end;
		th_tpa_end1 = (void *)tpa_end1;
		agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
		payload_offset = th_tpa_end1->payload_offset;
	} else {
		agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		payload_offset = tpa_end->payload_offset;
	}

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	if (agg_bufs) {
		(void)bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
	}
	mbuf->l4_len = payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
					      rte_memory_order_relaxed);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}

alignas(RTE_CACHE_LINE_SIZE) uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];

static void __rte_cold
bnxt_init_ptype_table(void)
{
	uint32_t *pt = bnxt_ptype_table;
	static bool initialized;
	int ip6, tun, type;
	uint32_t l3;
	int i;

	if (initialized)
		return;

	for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
		if (i & BNXT_PTYPE_TBL_VLAN_MSK)
			pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
		else
			pt[i] = RTE_PTYPE_L2_ETHER;

		ip6 = !!(i & BNXT_PTYPE_TBL_IP_VER_MSK);
		tun = !!(i & BNXT_PTYPE_TBL_TUN_MSK);
		type = (i & BNXT_PTYPE_TBL_TYPE_MSK) >> BNXT_PTYPE_TBL_TYPE_SFT;

		if (!tun && !ip6)
			l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		else if (!tun && ip6)
			l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
		else if (tun && !ip6)
			l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
		else
			l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

		switch (type) {
		case BNXT_PTYPE_TBL_TYPE_ICMP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
			break;
		case BNXT_PTYPE_TBL_TYPE_TCP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_TCP;
			break;
		case BNXT_PTYPE_TBL_TYPE_UDP:
			if (tun)
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
			else
				pt[i] |= l3 | RTE_PTYPE_L4_UDP;
			break;
		case BNXT_PTYPE_TBL_TYPE_IP:
			pt[i] |= l3;
			break;
		}
	}
	initialized = true;
}

static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags_type, flags2;
	uint8_t index;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);

	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/*
	 * Index format:
	 *     bit 0: Set if IP tunnel encapsulated packet.
	 *     bit 1: Set if IPv6 packet, clear if IPv4.
	 *     bit 2: Set if VLAN tag present.
	 *     bits 3-6: Four-bit hardware packet type field.
	 */
	index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
		BNXT_CMPL_VLAN_TUN_TO_IDX(flags2) |
		BNXT_CMPL_IP_VER_TO_IDX(flags2);

	return bnxt_ptype_table[index];
}

static void
bnxt_parse_pkt_type_v3(struct rte_mbuf *mbuf,
		       struct rx_pkt_cmpl *rxcmp_v1,
		       struct rx_pkt_cmpl_hi *rxcmp1_v1)
{
	uint32_t flags_type, flags2, meta;
	struct rx_pkt_v3_cmpl_hi *rxcmp1;
	struct rx_pkt_v3_cmpl *rxcmp;
	uint8_t index;

	rxcmp = (void *)rxcmp_v1;
	rxcmp1 = (void *)rxcmp1_v1;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp->metadata1_payload_offset);

	/* TODO */
	/* Validate ptype table indexing at build time. */
	/* bnxt_check_ptype_constants_v3(); */

	/*
	 * Index format:
	 *     bit 0: Set if IP tunnel encapsulated packet.
	 *     bit 1: Set if IPv6 packet, clear if IPv4.
	 *     bit 2: Set if VLAN tag present.
	 *     bits 3-6: Four-bit hardware packet type field.
	 */
	index = BNXT_CMPL_V3_ITYPE_TO_IDX(flags_type) |
		BNXT_CMPL_V3_VLAN_TO_IDX(meta) |
		BNXT_CMPL_V3_IP_VER_TO_IDX(flags2);

	mbuf->packet_type = bnxt_ptype_table[index];
}

static void __rte_cold
bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rte_eth_conf *dev_conf;
	bool outer_cksum_enabled;
	uint64_t offloads;
	uint32_t *pt;
	int i;

	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
	offloads = dev_conf->rxmode.offloads;

	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));

	/* Initialize ol_flags table. */
	pt = rxr->ol_flags_table;
	for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
		pt[i] = 0;

		if (BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
			if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
				pt[i] |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		}

		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
			/* Tunnel case. */
			if (outer_cksum_enabled) {
				if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
			} else {
				if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			}
		} else {
			/* Non-tunnel case. */
			if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
				pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

			if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
				pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		}
	}

	/* Initialize checksum error table. */
	pt = rxr->ol_flags_err_table;
	for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
		pt[i] = 0;

		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
			/* Tunnel case. */
			if (outer_cksum_enabled) {
				if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
			} else {
				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		} else {
			/* Non-tunnel case. */
			if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
				pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

			if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
				pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		}
	}
}

static void
bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
		  struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
{
	uint16_t flags_type, errors, flags;
	uint64_t ol_flags;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);

	flags = rte_le_to_cpu_32(rxcmp1->flags2) &
				(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
				 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);

	flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3;
	errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
				(RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
				 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
	errors = (errors >> 4) & flags;

	ol_flags = rxr->ol_flags_table[flags & ~errors];

	if (unlikely(errors)) {
		errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2;
		ol_flags |= rxr->ol_flags_err_table[errors];
	}

	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}

	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
		ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;

	mbuf->ol_flags = ol_flags;
}

static void
bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint64_t last_hwrm_time = 0;
	uint64_t pkt_time = 0;

	if (!BNXT_CHIP_P5(bp) || !ptp)
		return;

	/* On P5, Rx timestamps are provided directly in the
	 * Rx completion records to the driver. Only 32 bits of
	 * the timestamp are present in the completion. The driver needs
	 * to read the current 48-bit free-running timer using the
	 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
	 * from the HWRM response with the lower 32 bits in the
	 * Rx completion to produce the 48-bit timestamp for the Rx packet.
	 */
	rte_spinlock_lock(&ptp->ptp_lock);
	last_hwrm_time = ptp->old_time;
	rte_spinlock_unlock(&ptp->ptp_lock);
	pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
	if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
		/* timer has rolled over */
		pkt_time += (1ULL << 32);
	}
	ptp->rx_timestamp = pkt_time;
}

static uint32_t
bnxt_ulp_set_mark_in_mbuf_v3(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
			     struct rte_mbuf *mbuf, uint32_t *vfr_flag)
{
	struct rx_pkt_v3_cmpl_hi *rxcmp1_v3 = (void *)rxcmp1;
	uint32_t flags2, meta, mark_id = 0;
	/* Revisit the usage of gfid/lfid if mark action is supported.
	 * For now, only VFR is using mark and the metadata is the SVIF
	 * (a small number).
	 */
	bool gfid = false;
	int rc = 0;

	flags2 = rte_le_to_cpu_32(rxcmp1_v3->flags2);

	switch (flags2 & RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_MASK) {
	case RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_CHDR_DATA:
		/* Only supporting Metadata for ulp now */
		meta = rxcmp1_v3->metadata2;
		break;
	default:
		goto skip_mark;
	}

	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, meta, vfr_flag, &mark_id);
	if (!rc) {
		/* Only supporting VFR for now, no Mark actions */
		if (vfr_flag && *vfr_flag)
			return mark_id;
	}

skip_mark:
	mbuf->hash.fdir.hi = 0;

	return 0;
}

static uint32_t
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
			  struct rte_mbuf *mbuf, uint32_t *vfr_flag)
{
	uint32_t cfa_code;
	uint32_t meta_fmt;
	uint32_t meta;
	bool gfid = false;
	uint32_t mark_id;
	uint32_t flags2;
	uint32_t gfid_support = 0;
	int rc;

	if (BNXT_GFID_ENABLED(bp))
		gfid_support = 1;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);

	/*
	 * The flags field holds extra bits of info from [6:4]
	 * which indicate whether the flow is in TCAM, EM or EEM
	 */
	meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
		BNXT_CFA_META_FMT_SHFT;

	switch (meta_fmt) {
	case 0:
		if (gfid_support) {
			/* Not an LFID or GFID, a flush cmd. */
			goto skip_mark;
		} else {
			/* LFID mode, no vlan scenario */
			gfid = false;
		}
		break;
	case 4:
	case 5:
		/*
		 * EM/TCAM case.
		 * Assume that EM doesn't support Mark due to GFID
		 * collisions with EEM. Simply return without setting the mark
		 * in the mbuf.
		 */
		if (BNXT_CFA_META_EM_TEST(meta)) {
			/* This is an EM hit {EM(1), GFID[27:16], 19'd0 or vtag } */
			gfid = true;
			meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
			cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
		} else {
			/*
			 * It is a TCAM entry, so it is an LFID.
			 * The TCAM IDX and Mode can also be determined
			 * by decoding the meta_data. We are not
			 * using these for now.
			 */
		}
		break;
	case 6:
	case 7:
		/* EEM case, only using gfid in EEM for now. */
		gfid = true;

		/*
		 * For EEM flows, the first part of cfa_code is 16 bits.
		 * The second part is embedded in the
		 * metadata field from bit 19 onwards. The driver needs to
		 * ignore the first 19 bits of metadata and use the next 12
		 * bits as the higher 12 bits of cfa_code.
		 */
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
		cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
		break;
	default:
		/* For other values, the cfa_code is assumed to be an LFID. */
		break;
	}

	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
				  cfa_code, vfr_flag, &mark_id);
	if (!rc) {
		/* VF to VFR Rx path. So, skip mark_id injection in mbuf */
		if (vfr_flag && *vfr_flag)
			return mark_id;
		/* Got the mark, write it to the mbuf and return */
		mbuf->hash.fdir.hi = mark_id;
		*bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
		mbuf->hash.fdir.id = rxcmp1->cfa_code;
		mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		return mark_id;
	}

skip_mark:
	mbuf->hash.fdir.hi = 0;

	return 0;
}

void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf)
{
	uint32_t cfa_code = 0;

	if (unlikely(bp->mark_table == NULL))
		return;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	if (!cfa_code)
		return;

	if (cfa_code && !bp->mark_table[cfa_code].valid)
		return;

	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
	mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}

static void
bnxt_set_ol_flags_crx(struct bnxt_rx_ring_info *rxr,
		      struct rx_pkt_compress_cmpl *rxcmp,
		      struct rte_mbuf *mbuf)
{
	uint16_t flags_type, errors, flags;
	uint16_t cserr, tmp;
	uint64_t ol_flags;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);

	cserr = rte_le_to_cpu_16(rxcmp->metadata1_cs_error_calc_v1) &
		(RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_MASK |
		 BNXT_RXC_METADATA1_VLAN_VALID);

	flags = cserr & BNXT_CRX_CQE_CSUM_CALC_MASK;
	tmp = flags;

	/* Set tunnel frame indicator.
	 * This is to correctly index into the flags_err table.
	 */
	flags |= (flags & BNXT_CRX_TUN_CS_CALC) ? BNXT_PKT_CMPL_T_IP_CS_CALC << 3 : 0;

	flags = flags >> BNXT_CRX_CQE_CSUM_CALC_SFT;

	errors = cserr & BNXT_CRX_CQE_CSUM_ERROR_MASK;
	errors = (errors >> RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_SFT) & flags;

	ol_flags = rxr->ol_flags_table[flags & ~errors];

	if (unlikely(errors)) {
		/* Set tunnel frame indicator.
		 * This is to correctly index into the flags_err table.
		 */
		errors |= (tmp & BNXT_CRX_TUN_CS_CALC) ? BNXT_PKT_CMPL_T_IP_CS_CALC << 2 : 0;
		ol_flags |= rxr->ol_flags_err_table[errors];
	}

	if (flags_type & RX_PKT_COMPRESS_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}

	mbuf->ol_flags = ol_flags;
}

static uint32_t
bnxt_parse_pkt_type_crx(struct rx_pkt_compress_cmpl *rxcmp)
{
	uint16_t flags_type, meta_cs;
	uint8_t index;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	meta_cs = rte_le_to_cpu_16(rxcmp->metadata1_cs_error_calc_v1);

	/* Validate ptype table indexing at build time. */
	/* TODO */
	/* bnxt_check_ptype_constants(); */

	/*
	 * Index format:
	 *     bit 0: Set if IP tunnel encapsulated packet.
	 *     bit 1: Set if IPv6 packet, clear if IPv4.
	 *     bit 2: Set if VLAN tag present.
	 *     bits 3-6: Four-bit hardware packet type field.
	 */
	index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
		BNXT_CMPL_VLAN_TUN_TO_IDX_CRX(meta_cs) |
		BNXT_CMPL_IP_VER_TO_IDX(flags_type);

	return bnxt_ptype_table[index];
}

static int bnxt_rx_pages_crx(struct bnxt_rx_queue *rxq, struct rte_mbuf *mbuf,
			     uint32_t *tmp_raw_cons, uint8_t agg_buf)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_compress_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;

	for (i = 0; i < agg_buf; i++) {
		struct rte_mbuf **ag_buf;
		struct rte_mbuf *ag_mbuf;

		*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
		cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
		rxcmp = (struct rx_pkt_compress_cmpl *)&cpr->cp_desc_ring[cp_cons];

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		/*
		 * The consumer index aka the opaque field for the agg buffers
		 * is not available in errors_agg_bufs_opaque. So maintain it
		 * in the driver itself.
		 */
		ag_cons = rxr->ag_cons;
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = *ag_buf;

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		*ag_buf = NULL;
		/*
		 * As aggregation buffers are consumed out of order by the TPA
		 * module, use a bitmap to track freed slots that still need to
		 * be allocated and notified to the NIC.
		 * TODO: Is this needed? Most likely not.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
		rxr->ag_cons = RING_IDX(rxr->ag_ring_struct, RING_NEXT(ag_cons));
	}
	last->next = NULL;
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}

static int bnxt_crx_pkt(struct rte_mbuf **rx_pkt,
			struct bnxt_rx_queue *rxq,
			struct rx_pkt_compress_cmpl *rxcmp,
			uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, raw_prod;
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;

	agg_buf = BNXT_CRX_CQE_AGG_BUFS(rxcmp);
	/*
	 * Since the size of rx_pkt_cmpl is the same as rx_pkt_compress_cmpl,
	 * we should be able to use bnxt_agg_bufs_valid to check if AGG
	 * bufs are valid when using compressed CQEs.
	 * All we want to check here is if the CQE is valid, and the
	 * location of the valid bit is the same irrespective of the CQE type.
	 */
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	raw_prod = rxr->rx_raw_prod;

	cons = rxcmp->errors_agg_bufs_opaque & BNXT_CRX_CQE_OPAQUE_MASK;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;

	bnxt_set_ol_flags_crx(rxr, rxcmp, mbuf);
	mbuf->packet_type = bnxt_parse_pkt_type_crx(rxcmp);
	bnxt_set_vlan_crx(rxcmp, mbuf);

	if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
		rc = -ENOMEM;
		goto rx;
	}
	raw_prod = RING_NEXT(raw_prod);
	rxr->rx_raw_prod = raw_prod;

	if (agg_buf)
		bnxt_rx_pages_crx(rxq, mbuf, &tmp_raw_cons, agg_buf);

rx:
	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct, RING_NEXT(cons));
	*rx_pkt = mbuf;

	*raw_cons = tmp_raw_cons;

	return rc;
}

static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, raw_prod, cp_cons =
		RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;
	uint16_t cmp_type;
	uint32_t vfr_flag = 0, mark_id = 0;
	struct bnxt *bp = rxq->bp;

	rxcmp = (struct rx_pkt_cmpl *)
		&cpr->cp_desc_ring[cp_cons];

	cmp_type = CMP_TYPE(rxcmp);

	if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
		struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
		uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
		struct bnxt_tpa_info *tpa_info;

		tpa_info = &rxr->tpa_info[agg_id];
		RTE_ASSERT(tpa_info->agg_count < 16);
		tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
				cpr->cp_ring_struct->ring_size))
		return -EBUSY;

	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2 ||
	    cmp_type == RX_TPA_START_V3_CMPL_TYPE_RX_TPA_START_V3) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2) &&
		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V3)) {
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	raw_prod = rxr->rx_raw_prod;

	cons = rxcmp->opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
		PMD_DRV_LOG_LINE(ERR, "RX cons %x != expected cons %x",
				 cons, rxr->rx_next_cons);
		bnxt_sched_ring_reset(rxq);
		rc = -EBUSY;
		goto next_rx;
	}
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;

	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
		      RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP) ||
	    bp->ptp_all_rx_tstamp)
		bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);

	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V3) {
		bnxt_parse_csum_v3(mbuf, rxcmp1);
		bnxt_parse_pkt_type_v3(mbuf, rxcmp, rxcmp1);
		bnxt_rx_vlan_v3(mbuf, rxcmp, rxcmp1);
		if (BNXT_TRUFLOW_EN(bp))
			mark_id = bnxt_ulp_set_mark_in_mbuf_v3(rxq->bp, rxcmp1,
							       mbuf, &vfr_flag);
		goto reuse_rx_mbuf;
	}

	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
		bnxt_parse_csum_v2(mbuf, rxcmp1);
		bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
		bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
		/* TODO Add support for cfa_code parsing */
		goto reuse_rx_mbuf;
	}

	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);

	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

	bnxt_set_vlan(rxcmp1, mbuf);

	if (BNXT_TRUFLOW_EN(bp))
		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
						    &vfr_flag);
	else
		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);

reuse_rx_mbuf:
	if (agg_buf) {
		rc = bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
		if (rc != 0)
			return -EBUSY;
	}

#ifdef BNXT_DEBUG
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);

		rc = -EIO;
		goto next_rx;
	}
#endif
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being called
	 * in it.
	 */
	raw_prod = RING_NEXT(raw_prod);
	if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
		rc = -ENOMEM;
		goto rx;
	}
	rxr->rx_raw_prod = raw_prod;
rx:
	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct, RING_NEXT(cons));

	if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
	    vfr_flag) {
		bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
		/* Now return an error so that nb_rx_pkts is not
		 * incremented.
		 * This packet was meant for the representor, so there is
		 * no need to account for it or hand it to the parent Rx
		 * burst function.
		 */
		rc = -ENODEV;
		goto next_rx;
	}
	/*
	 * All MBUFs are allocated with the same size under DPDK,
	 * no optimization for rx_copy_thresh
	 */
	*rx_pkt = mbuf;

next_rx:

	*raw_cons = tmp_raw_cons;

	return rc;
}

static void bnxt_reattempt_buffer_alloc(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring;
	uint16_t raw_prod;
	uint32_t cnt;

	/* Assume alloc passes. On failure,
	 * need_realloc will be set inside bnxt_alloc_XY_data.
	 */
	rxq->need_realloc = 0;
	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
		goto alloc_rx;

	raw_prod = rxr->ag_raw_prod;
	bnxt_prod_ag_mbuf(rxq);
	if (raw_prod != rxr->ag_raw_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

alloc_rx:
	raw_prod = rxr->rx_raw_prod;
	ring = rxr->rx_ring_struct;
	for (cnt = 0; cnt < ring->ring_size; cnt++) {
		struct rte_mbuf **rx_buf;
		uint16_t ndx;

		ndx = RING_IDX(ring, raw_prod + cnt);
		rx_buf = &rxr->rx_buf_ring[ndx];

		/* Buffer already allocated for this index. */
		if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
			continue;

		/* This slot is empty. Alloc buffer for Rx */
		if (bnxt_alloc_rx_data(rxq, rxr, raw_prod + cnt))
			break;

		rxr->rx_raw_prod = raw_prod + cnt;
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
	}
}

uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t rx_raw_prod = rxr->rx_raw_prod;
	uint16_t ag_raw_prod = rxr->ag_raw_prod;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	int nb_rep_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	int rc = 0;
	bool evt = false;

	if (unlikely(is_bnxt_in_error(rxq->bp)))
		return 0;

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/*
	 * Replenish buffers if needed when a transition has been made from
	 * vector to non-vector receive processing.
	 */
	while (unlikely(rxq->rxrearm_nb)) {
		if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
			rxr->rx_raw_prod = rxq->rxrearm_start;
			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
			rxq->rxrearm_start++;
			rxq->rxrearm_nb--;
		} else {
			/* Retry allocation on next call. */
			break;
		}
	}
#endif

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
					cpr->cp_ring_struct->ring_size))
			break;
		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
			PMD_DRV_LOG_LINE(ERR, "Rx flush done");
		} else if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_RX_L2_COMPRESS) {
			rc = bnxt_crx_pkt(&rx_pkts[nb_rx_pkts], rxq,
					  (struct rx_pkt_compress_cmpl *)rxcmp,
					  &raw_cons);
		} else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
			   (CMP_TYPE(rxcmp) <= CMPL_BASE_TYPE_RX_TPA_START_V3)) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (!rc)
				nb_rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			else if (rc == -ENODEV)	/* completion for representor */
				nb_rep_rx_pkts++;
			else if (rc == -ENOMEM)
				nb_rx_pkts++;
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
			/* If the async event is Fatal error, return */
			if (unlikely(is_bnxt_in_error(rxq->bp)))
				goto done;
		}

		raw_cons = NEXT_RAW_CMP(raw_cons);
		/*
		 * The HW reposting may fall behind if mbuf allocation has
		 * failed. Break and reattempt allocation to prevent that.
		 */
		if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt ||
		    rxq->need_realloc != 0)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
		/*
		 * For PMD, there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions
		 */
		goto done;
	}

	/* Ring the completion queue doorbell. */
	bnxt_db_cq(cpr);

	/* Ring the receive descriptor doorbell. */
	if (rx_raw_prod != rxr->rx_raw_prod)
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	/* Ring the AGG ring DB */
	if (ag_raw_prod != rxr->ag_raw_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
done:
	if (unlikely(rxq->need_realloc))
		bnxt_reattempt_buffer_alloc(rxq);
	return nb_rx_pkts;
}

void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
	struct bnxt_rx_queue *rxq;

	if (!bp->rx_queues)
		return;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);

		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}

int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);

	if (rxq->rx_ring != NULL) {
		rxr = rxq->rx_ring;
	} else {
		rxr = rte_zmalloc_socket("bnxt_rx_ring",
					 sizeof(struct bnxt_rx_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
		if (rxr == NULL)
			return -ENOMEM;
		rxq->rx_ring = rxr;
	}

	if (rxr->rx_ring_struct == NULL) {
		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		if (ring == NULL)
			return -ENOMEM;
		rxr->rx_ring_struct = ring;
		ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)rxr->rx_desc_ring;
		ring->bd_dma = rxr->rx_desc_mapping;

		/* Allocate extra rx ring entries for vector rx. */
		ring->vmem_size = sizeof(struct rte_mbuf *) *
				  (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);

		ring->vmem = (void **)&rxr->rx_buf_ring;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	if (rxq->cp_ring != NULL) {
		cpr = rxq->cp_ring;
	} else {
		cpr = rte_zmalloc_socket("bnxt_rx_ring",
					 sizeof(struct bnxt_cp_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
		if (cpr == NULL)
			return -ENOMEM;
		rxq->cp_ring = cpr;
	}

	if (cpr->cp_ring_struct == NULL) {
		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		if (ring == NULL)
			return -ENOMEM;
		cpr->cp_ring_struct = ring;

		/* Allocate two completion slots per entry in desc ring. */
		ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
		if (bnxt_need_agg_ring(rxq->bp->eth_dev))
			ring->ring_size *= AGG_RING_SIZE_FACTOR;

		ring->ring_size = rte_align32pow2(ring->ring_size);
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)cpr->cp_desc_ring;
		ring->bd_dma = cpr->cp_desc_mapping;
		ring->vmem_size = 0;
		ring->vmem = NULL;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
		return 0;

	rxr = rxq->rx_ring;
	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
	ring->vmem = (void **)&rxr->ag_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	if (!rx_bd_ring)
		return;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}

int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t raw_prod, type;
	unsigned int i;
	uint16_t size;

	/* Initialize packet type table. */
	bnxt_init_ptype_table();

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	/* Initialize offload flags parsing table. */
	bnxt_init_ol_flags_tables(rxq);

	raw_prod = rxr->rx_raw_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->rx_buf_ring[i])) {
			if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG_LINE(WARNING,
						 "RxQ %d allocated %d of %d mbufs",
						 rxq->queue_id, i, ring->ring_size);
				return -ENOMEM;
			}
		}
		rxr->rx_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	}

	/* Initialize dummy mbuf pointers for vector mode rx. */
	for (i = ring->ring_size;
	     i < ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES; i++) {
		rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
	}

	/* Explicitly reset this driver internal tracker on a ring init */
	rxr->rx_next_cons = 0;

	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
		return 0;

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	raw_prod = rxr->ag_raw_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->ag_buf_ring[i])) {
			if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG_LINE(WARNING,
						 "RxQ %d allocated %d of %d mbufs",
						 rxq->queue_id, i, ring->ring_size);
				return -ENOMEM;
			}
		}
		rxr->ag_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	}
	PMD_DRV_LOG_LINE(DEBUG, "AGG Done!");

	if (bnxt_compressed_rx_cqe_mode_enabled(rxq->bp))
		return 0;

	if (rxr->tpa_info) {
		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (unlikely(!rxr->tpa_info[i].mbuf)) {
				rxr->tpa_info[i].mbuf =
					__bnxt_alloc_rx_data(rxq->agg_mb_pool);
				if (!rxr->tpa_info[i].mbuf) {
					rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
								      rte_memory_order_relaxed);
					return -ENOMEM;
				}
			}
		}
	}
	PMD_DRV_LOG_LINE(DEBUG, "TPA alloc Done!");

	return 0;
}

/* Sweep the Rx completion queue until HWRM_DONE for the ring flush is received.
 * The mbufs will not be freed in this call.
 * They will be freed during ring free as a part of mem cleanup.
 */
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t raw_cons = cpr->cp_raw_cons;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t nb_rx = 0;
	uint32_t cons;

	do {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, ring_mask + 1))
			break;

		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
			return 1;

		raw_cons = NEXT_RAW_CMP(raw_cons);
		nb_rx++;
	} while (nb_rx < ring_mask);

	if (nb_rx) {
		cpr->cp_raw_cons = raw_cons;
		/* Ring the completion queue doorbell. */
		bnxt_db_cq(cpr);
	}

	return 0;
}