/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"


static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe);

static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);

static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);

static __rte_always_inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe);

static inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type);

static inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len);


/**
 * Internal function to compute the number of used descriptors in an RX queue.
 *
 * @param rxq
 *   The Rx queue.
 *
 * @return
 *   The number of used Rx descriptors.
 */
static uint32_t
rx_queue_count(struct mlx5_rxq_data *rxq)
{
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int sges_n = (1 << rxq->sges_n);
	const unsigned int elts_n = (1 << rxq->elts_n);
	const unsigned int strd_n = (1 << rxq->strd_num_n);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci, used;

	/* If we are processing a compressed CQE. */
	if (zip->ai) {
		used = zip->cqe_cnt - zip->ai;
		cq_ci = zip->cq_ci;
	} else {
		used = 0;
		cq_ci = rxq->cq_ci;
	}
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
		int8_t op_own;
		unsigned int n;

		op_own = cqe->op_own;
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		else
			n = 1;
		cq_ci += n;
		used += n;
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	}
	used = RTE_MIN(used * sges_n, elts_n * strd_n);
	return used;
}

/**
 * DPDK callback to check the status of a Rx descriptor.
 *
 * @param rx_queue
 *   The Rx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Rx descriptor.
 */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);

	if (dev->rx_pkt_burst == NULL ||
	    dev->rx_pkt_burst == removed_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (offset >= (1 << rxq->cqe_n)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}

/**
 * DPDK callback to get the RX queue information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param rx_queue_id
 *   Rx queue identifier.
 *
 * @param qinfo
 *   Pointer to the RX queue information structure.
 *
 * @return
 *   None.
 */
void
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (!rxq)
		return;
	qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
					rxq->mprq_mp : rxq->mp;
	qinfo->conf.rx_thresh.pthresh = 0;
	qinfo->conf.rx_thresh.hthresh = 0;
	qinfo->conf.rx_thresh.wthresh = 0;
	qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
		(1 << rxq->elts_n);
}

/**
 * DPDK callback to get the RX packet burst mode information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param rx_queue_id
 *   Rx queue identifier.
 *
 * @param mode
 *   Pointer to the burst mode information.
 *
 * @return
 *   0 on success, -EINVAL on failure.
 */
int
mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t rx_queue_id __rte_unused,
		       struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq;

	rxq = (*priv->rxqs)[rx_queue_id];
	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (pkt_burst == mlx5_rx_burst) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
	} else if (pkt_burst == mlx5_rx_burst_mprq) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
	} else if (pkt_burst == mlx5_rx_burst_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else {
		return -EINVAL;
	}
	return 0;
}

/**
 * DPDK callback to get the number of used descriptors in a RX queue.
 *
 * @param rx_queue
 *   The Rx queue pointer.
 *
 * @return
 *   The number of used Rx descriptors.
 *   -EINVAL if the queue is invalid.
 */
uint32_t
mlx5_rx_queue_count(void *rx_queue)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct rte_eth_dev *dev;

	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	dev = &rte_eth_devices[rxq->port_id];

	if (dev->rx_pkt_burst == NULL ||
	    dev->rx_pkt_burst == removed_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	return rx_queue_count(rxq);
}
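
/*
 * Opaque data layout shared by mlx5_get_monitor_addr() and
 * mlx5_monitor_callback() for rte_power_monitor(): CLB_VAL_IDX holds the
 * ownership bit value the CQE at the current consumer index will carry once
 * hardware has written it, CLB_MSK_IDX the mask extracting that bit from
 * the op_own byte. The callback returns -1 when the monitored byte matches
 * the expected value (a new completion is ready, so the power-optimized
 * wait should be aborted) and 0 otherwise.
 */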
#define CLB_VAL_IDX 0
#define CLB_MSK_IDX 1
static int
mlx5_monitor_callback(const uint64_t value,
		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
{
	const uint64_t m = opaque[CLB_MSK_IDX];
	const uint64_t v = opaque[CLB_VAL_IDX];

	return (value & m) == v ? -1 : 0;
}

int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	const unsigned int cqe_num = 1 << rxq->cqe_n;
	const unsigned int cqe_mask = cqe_num - 1;
	const uint16_t idx = rxq->cq_ci & cqe_num;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];

	if (unlikely(rxq->cqes == NULL)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	pmc->addr = &cqe->op_own;
	pmc->opaque[CLB_VAL_IDX] = !!idx;
	pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
	pmc->fn = mlx5_monitor_callback;
	pmc->size = sizeof(uint8_t);
	return 0;
}

/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
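 * @param[in] mcqe
 *   Pointer to the mini-CQE, NULL when the CQE is not compressed.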
 *
 * @note Keep mlx5_dev_supported_ptypes_get() in sync with any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe)
{
	uint8_t idx;
	uint8_t ptype;
	uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;

	/* Get L3/L4 header from mini-CQE in case of L3/L4 format. */
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
	else
		ptype = mcqe->hdr_type >> 2;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	idx = pinfo | ptype;
	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}

/**
 * Initialize Rx WQ and indexes.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
{
	const unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;

	for (i = 0; (i != wqe_n); ++i) {
		volatile struct mlx5_wqe_data_seg *scat;
		uintptr_t addr;
		uint32_t byte_count;

		if (mlx5_rxq_mprq_enabled(rxq)) {
			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];

			scat = &((volatile struct mlx5_wqe_mprq *)
				rxq->wqes)[i].dseg;
			addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
							1 << rxq->strd_num_n);
			byte_count = (1 << rxq->strd_sz_n) *
					(1 << rxq->strd_num_n);
		} else {
			struct rte_mbuf *buf = (*rxq->elts)[i];

			scat = &((volatile struct mlx5_wqe_data_seg *)
					rxq->wqes)[i];
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			byte_count = DATA_LEN(buf);
		}
		/* scat->addr must be able to store a pointer. */
		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(addr),
			.byte_count = rte_cpu_to_be_32(byte_count),
			.lkey = mlx5_rx_addr2mr(rxq, addr),
		};
	}
	rxq->consumed_strd = 0;
	rxq->decompressed = 0;
	rxq->rq_pi = 0;
	rxq->zip = (struct rxq_zip){
		.ai = 0,
	};
	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
		(wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
	/* Update doorbell counter. */
	rxq->rq_ci = wqe_n >> rxq->sges_n;
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

/**
 * Handle a Rx error.
 * The function moves the RQ state to reset when the first error CQE is
 * seen, then the caller's loop drains the CQ. When the CQ is empty, the
 * function moves the RQ state back to ready and reinitializes the RQ.
 * Identifying the next CQE and counting errors remain the caller's
 * responsibility.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] vec
 *   1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
 *   0 when called from non-vectorized Rx burst.
 *
 * @return
 *   -1 in case of recovery error, otherwise the CQE status.
 */
int
mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	union {
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
	} u = {
		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
	};
	struct mlx5_mp_arg_queue_state_modify sm;
	int ret;

	switch (rxq->err_state) {
	case MLX5_RXQ_ERR_STATE_NO_ERROR:
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_RESET:
		sm.is_wq = 1;
		sm.queue_id = rxq->idx;
		sm.state = IBV_WQS_RESET;
		if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
			return -1;
		if (rxq_ctrl->dump_file_n <
		    rxq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
			      rxq->cqn, rxq_ctrl->wqn,
			      rte_be_to_cpu_16(u.err_cqe->wqe_counter),
			      rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
			      rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
								rxq->cqes),
						    sizeof(*u.cqe) * cqe_n);
			mlx5_dump_debug_information(name, "MLX5 Error RQ:",
						    (const void *)((uintptr_t)
								rxq->wqes),
						    16 * wqe_n);
			rxq_ctrl->dump_file_n++;
		}
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_READY:
		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN) {
			rte_io_wmb();
			*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
			rte_io_wmb();
			/*
			 * The RQ consumer index must be zeroed while moving
			 * from RESET state to RDY state.
			 */
			*rxq->rq_db = rte_cpu_to_be_32(0);
			rte_io_wmb();
			sm.is_wq = 1;
			sm.queue_id = rxq->idx;
			sm.state = IBV_WQS_RDY;
			if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
						    &sm))
				return -1;
			if (vec) {
				const uint32_t elts_n =
					mlx5_rxq_mprq_enabled(rxq) ?
					wqe_n * strd_n : wqe_n;
				const uint32_t e_mask = elts_n - 1;
				uint32_t elts_ci =
					mlx5_rxq_mprq_enabled(rxq) ?
					rxq->elts_ci : rxq->rq_ci;
				uint32_t elt_idx;
				struct rte_mbuf **elt;
				int i;
				unsigned int n = elts_n - (elts_ci -
							  rxq->rq_pi);

				for (i = 0; i < (int)n; ++i) {
					elt_idx = (elts_ci + i) & e_mask;
					elt = &(*rxq->elts)[elt_idx];
					*elt = rte_mbuf_raw_alloc(rxq->mp);
					if (!*elt) {
						for (i--; i >= 0; --i) {
							elt_idx = (elts_ci +
								   i) & e_mask;
							elt = &(*rxq->elts)
								[elt_idx];
							rte_pktmbuf_free_seg
								(*elt);
						}
						return -1;
					}
				}
				for (i = 0; i < (int)elts_n; ++i) {
					elt = &(*rxq->elts)[i];
					DATA_LEN(*elt) =
						(uint16_t)((*elt)->buf_len -
						rte_pktmbuf_headroom(*elt));
				}
				/* Padding with a fake mbuf for vec Rx. */
				for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
					(*rxq->elts)[elts_n + i] =
								&rxq->fake_mbuf;
			}
			mlx5_rxq_initialize(rxq);
			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
		}
		return ret;
	default:
		return -1;
	}
}

/**
 * Get size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all packets of the current one have
 * been processed.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param cqe
 *   CQE to process.
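 * @param cqe_cnt
 *   Number of CQEs in the CQ ring minus one (used as an index mask).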
 * @param[out] mcqe
 *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
 *   written.
 *
 * @return
 *   0 in case of empty CQE, otherwise the packet size in bytes.
 */
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;
	int len;
	uint16_t idx, end;

	do {
		len = 0;
		/* Process compressed data in the CQE and mini arrays. */
		if (zip->ai) {
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[zip->ca &
							  cqe_cnt].pkt_info);
			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
					       rxq->byte_mask);
			*mcqe = &(*mc)[zip->ai & 7];
			if ((++zip->ai & 7) == 0) {
				/* Invalidate consumed CQEs */
				idx = zip->ca;
				end = zip->na;
				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				/*
				 * Increment consumer index to skip the number
				 * of CQEs consumed. Hardware leaves holes in
				 * the CQ ring for software use.
				 */
				zip->ca = zip->na;
				zip->na += 8;
			}
			if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
				/* Invalidate the rest */
				idx = zip->ca;
				end = zip->cq_ci;

				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				rxq->cq_ci = zip->cq_ci;
				zip->ai = 0;
			}
		/*
		 * No compressed data, get next CQE and verify if it is
		 * compressed.
		 */
		} else {
			int ret;
			int8_t op_own;
			uint32_t cq_ci;

			ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
					     rxq->err_state)) {
					ret = mlx5_rx_err_handle(rxq, 0);
					if (ret == MLX5_CQE_STATUS_HW_OWN ||
					    ret == -1)
						return 0;
				} else {
					return 0;
				}
			}
			/*
			 * Introduce the local variable to have queue cq_ci
			 * index in queue structure always consistent with
			 * actual CQE boundary (not pointing to the middle
			 * of compressed CQE session).
			 */
			cq_ci = rxq->cq_ci + 1;
			op_own = cqe->op_own;
			if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
				volatile struct mlx5_mini_cqe8 (*mc)[8] =
					(volatile struct mlx5_mini_cqe8 (*)[8])
					(uintptr_t)(&(*rxq->cqes)
						[cq_ci & cqe_cnt].pkt_info);

				/* Fix endianness. */
				zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
				/*
				 * Current mini array position is the one
				 * returned by check_cqe().
				 *
				 * If completion comprises several mini arrays,
				 * as a special case the second one is located
				 * 7 CQEs after the initial CQE instead of 8
				 * for subsequent ones.
				 */
				zip->ca = cq_ci;
				zip->na = zip->ca + 7;
				/* Compute the next non compressed CQE. */
				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
				/* Get packet size to return. */
				len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
						       rxq->byte_mask);
				*mcqe = &(*mc)[0];
				zip->ai = 1;
				/* Prefetch all to be invalidated */
				idx = zip->ca;
				end = zip->cq_ci;
				while (idx != end) {
					rte_prefetch0(&(*rxq->cqes)[(idx) &
								    cqe_cnt]);
					++idx;
				}
			} else {
				rxq->cq_ci = cq_ci;
				len = rte_be_to_cpu_32(cqe->byte_cnt);
			}
		}
		if (unlikely(rxq->err_state)) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			++rxq->stats.idropped;
		} else {
			return len;
		}
	} while (1);
}

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
{
	uint32_t ol_flags = 0;
	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

	ol_flags =
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L3_HDR_VALID,
			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L4_HDR_VALID,
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
	return ol_flags;
}

/**
 * Fill in mbuf fields from RX completion flags.
 * Note that pkt->ol_flags should be initialized outside of this function.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param pkt
 *   mbuf to fill.
 * @param cqe
 *   CQE to process.
 * @param mcqe
 *   Pointer to the mini-CQE, NULL when the CQE is not compressed.
 */
static inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe)
{
	/* Update packet information. */
	pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);

	if (rxq->rss_hash) {
		uint32_t rss_hash_res = 0;

		/* If compressed, take hash result from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
			rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
		else
			rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
		if (rss_hash_res) {
			pkt->hash.rss = rss_hash_res;
			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		}
	}
	if (rxq->mark) {
		uint32_t mark = 0;

		/* If compressed, take flow tag from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			mark = cqe->sop_drop_qpn;
		else
			mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
				(mcqe->flow_tag_high << 16);
		if (MLX5_FLOW_MARK_IS_VALID(mark)) {
			pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
			if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
				pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
				pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
			}
		}
	}
	if (rxq->dynf_meta) {
		uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
			rxq->flow_meta_port_mask;

		if (meta) {
			pkt->ol_flags |= rxq->flow_meta_mask;
			*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
						uint32_t *) = meta;
		}
	}
	if (rxq->csum)
		pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
	if (rxq->vlan_strip) {
		bool vlan_strip;

		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
			vlan_strip = cqe->hdr_type_etc &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		else
			vlan_strip = mcqe->hdr_type &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		if (vlan_strip) {
			pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
		}
	}
	if (rxq->hw_timestamp) {
		uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);

		if (rxq->rt_timestamp)
			ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
		mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
		pkt->ol_flags |= rxq->timestamp_rx_flag;
	}
}

/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
	const unsigned int sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	volatile struct mlx5_cqe *cqe =
		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
	unsigned int i = 0;
	unsigned int rq_ci = rxq->rq_ci << sges_n;
	int len = 0; /* keep its value across iterations. */

	while (pkts_n) {
		unsigned int idx = rq_ci & wqe_cnt;
		volatile struct mlx5_wqe_data_seg *wqe =
			&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;

		if (pkt)
			NEXT(seg) = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(cqe);
		rte_prefetch0(wqe);
		/* Allocate the buf from the same pool. */
		rep = rte_mbuf_raw_alloc(seg->pool);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * no buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
				rep = NEXT(pkt);
				NEXT(pkt) = NULL;
				NB_SEGS(pkt) = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			rq_ci >>= sges_n;
			++rq_ci;
			rq_ci <<= sges_n;
			break;
		}
		if (!pkt) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			pkt = seg;
			MLX5_ASSERT(len >= (rxq->crc_present << 2));
			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
			rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
			if (rxq->crc_present)
				len -= RTE_ETHER_CRC_LEN;
			PKT_LEN(pkt) = len;
			if (cqe->lro_num_seg > 1) {
				mlx5_lro_update_hdr
					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
					 mcqe, rxq, len);
				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
				pkt->tso_segsz = len / cqe->lro_num_seg;
			}
		}
		DATA_LEN(rep) = DATA_LEN(seg);
		PKT_LEN(rep) = PKT_LEN(seg);
		SET_DATA_OFF(rep, DATA_OFF(seg));
		PORT(rep) = PORT(seg);
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
		if (len > DATA_LEN(seg)) {
			len -= DATA_LEN(seg);
			++NB_SEGS(pkt);
			++rq_ci;
			continue;
		}
		DATA_LEN(seg) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}

/**
 * Update LRO packet TCP header.
 * The HW LRO feature doesn't update the TCP header after coalescing the
 * TCP segments but supplies the needed information in the CQE for software
 * to fill it in.
 *
 * @param tcp
 *   Pointer to the TCP header.
 * @param cqe
 *   Pointer to the completion entry.
 * @param phcsum
 *   The L3 pseudo-header checksum.
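 * @param l4_type
 *   The L4 header type extracted from the CQE (MLX5_L4_HDR_TYPE_*).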
 */
static inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type)
{
	/*
	 * The HW calculates only the TCP payload checksum, need to complete
	 * the TCP header checksum and the L3 pseudo-header checksum.
	 */
	uint32_t csum = phcsum + cqe->csum;

	if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
	    l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
		tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
		tcp->recv_ack = cqe->lro_ack_seq_num;
		tcp->rx_win = cqe->lro_tcp_win;
	}
	if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
		tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
	tcp->cksum = 0;
	csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
	csum = (~csum) & 0xffff;
	if (csum == 0)
		csum = 0xffff;
	tcp->cksum = csum;
}

/**
 * Update LRO packet headers.
 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
 * TCP segments but supplies the needed information in the CQE for software
 * to fill them in.
 *
 * @param padd
 *   The packet address.
 * @param cqe
 *   Pointer to the completion entry.
 * @param mcqe
 *   Pointer to the mini-CQE, NULL when the CQE is not compressed.
 * @param rxq
 *   Pointer to RX queue.
 * @param len
 *   The packet length.
 */
static inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len)
{
	union {
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
		uint8_t *hdr;
	} h = {
		.hdr = padd,
	};
	uint16_t proto = h.eth->ether_type;
	uint32_t phcsum;
	uint8_t l4_type;

	h.eth++;
	while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
	       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
		proto = h.vlan->eth_proto;
		h.vlan++;
	}
	if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
		h.ipv4->time_to_live = cqe->lro_min_ttl;
		h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
		h.ipv4->hdr_checksum = 0;
		h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
		phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
		h.ipv4++;
	} else {
		h.ipv6->hop_limits = cqe->lro_min_ttl;
		h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
						       sizeof(*h.ipv6));
		phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
		h.ipv6++;
	}
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	else
		l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
}

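/**
 * Free an MPRQ buffer through the buffer release callback
 * (mlx5_mprq_buf_free_cb()).
 *
 * @param buf
 *   Pointer to the MPRQ buffer to free.
 */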
void
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
{
	mlx5_mprq_buf_free_cb(NULL, buf);
}

/**
 * DPDK callback for RX with Multi-Packet RQ support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint32_t strd_sz = 1 << rxq->strd_sz_n;
	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci;
	uint16_t consumed_strd = rxq->consumed_strd;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];

	while (i < pkts_n) {
		struct rte_mbuf *pkt;
		int ret;
		uint32_t len;
		uint16_t strd_cnt;
		uint16_t strd_idx;
		uint32_t byte_cnt;
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
		enum mlx5_rqx_code rxq_code;

		if (consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wq_mask);
			/* Advance to the next WQE. */
			consumed_strd = 0;
			++rq_ci;
			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
		}
		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
		if (!ret)
			break;
		byte_cnt = ret;
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		if (mcqe &&
		    rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			strd_cnt = (len / strd_sz) + !!(len % strd_sz);
		else
			strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
				   MLX5_MPRQ_STRIDE_NUM_SHIFT;
		MLX5_ASSERT(strd_cnt);
		consumed_strd += strd_cnt;
		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
			continue;
		strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
					cqe->wqe_counter :
					mcqe->stride_idx);
		MLX5_ASSERT(strd_idx < strd_n);
		MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
			    wq_mask));
		pkt = rte_pktmbuf_alloc(rxq->mp);
		if (unlikely(pkt == NULL)) {
			++rxq->stats.rx_nombuf;
			break;
		}
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
					   strd_idx, strd_cnt);
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(pkt);
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
		}
		rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
		if (cqe->lro_num_seg > 1) {
			mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
					    cqe, mcqe, rxq, len);
			pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
			pkt->tso_segsz = len / cqe->lro_num_seg;
		}
		PKT_LEN(pkt) = len;
		PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		++i;
	}
	/* Update the consumer indexes. */
	rxq->consumed_strd = consumed_strd;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
{
	rte_mb();
	return 0;
}

/*
 * Vectorized Rx routines are not compiled in when required vector instructions
 * are not supported on a target architecture.
 * The following null stubs are needed for linkage when the vectorized routines
 * are not built from their separate sources (e.g. mlx5_rxtx_vec_sse.c for x86).
 */

__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
		  struct rte_mbuf **pkts __rte_unused,
		  uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
		       struct rte_mbuf **pkts __rte_unused,
		       uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
	return -ENOTSUP;
}

__rte_weak int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}