1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2021 6WIND S.A. 3 * Copyright 2021 Mellanox Technologies, Ltd 4 */ 5 6 #include <stdint.h> 7 #include <string.h> 8 #include <stdlib.h> 9 10 #include <rte_mbuf.h> 11 #include <rte_mempool.h> 12 #include <rte_prefetch.h> 13 #include <rte_common.h> 14 #include <rte_branch_prediction.h> 15 #include <rte_ether.h> 16 #include <rte_cycles.h> 17 #include <rte_flow.h> 18 19 #include <mlx5_prm.h> 20 #include <mlx5_common.h> 21 #include <mlx5_common_mr.h> 22 23 #include "mlx5_autoconf.h" 24 #include "mlx5_defs.h" 25 #include "mlx5.h" 26 #include "mlx5_utils.h" 27 #include "mlx5_rxtx.h" 28 #include "mlx5_rx.h" 29 30 31 static __rte_always_inline uint32_t 32 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 33 volatile struct mlx5_mini_cqe8 *mcqe); 34 35 static __rte_always_inline int 36 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 37 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); 38 39 static __rte_always_inline uint32_t 40 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); 41 42 static __rte_always_inline void 43 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, 44 volatile struct mlx5_cqe *cqe, 45 volatile struct mlx5_mini_cqe8 *mcqe); 46 47 static inline void 48 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, 49 volatile struct mlx5_cqe *__rte_restrict cqe, 50 uint32_t phcsum, uint8_t l4_type); 51 52 static inline void 53 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd, 54 volatile struct mlx5_cqe *__rte_restrict cqe, 55 volatile struct mlx5_mini_cqe8 *mcqe, 56 struct mlx5_rxq_data *rxq, uint32_t len); 57 58 59 /** 60 * Internal function to compute the number of used descriptors in an RX queue. 61 * 62 * @param rxq 63 * The Rx queue. 64 * 65 * @return 66 * The number of used Rx descriptor. 67 */ 68 static uint32_t 69 rx_queue_count(struct mlx5_rxq_data *rxq) 70 { 71 struct rxq_zip *zip = &rxq->zip; 72 volatile struct mlx5_cqe *cqe; 73 const unsigned int cqe_n = (1 << rxq->cqe_n); 74 const unsigned int sges_n = (1 << rxq->sges_n); 75 const unsigned int elts_n = (1 << rxq->elts_n); 76 const unsigned int strd_n = (1 << rxq->strd_num_n); 77 const unsigned int cqe_cnt = cqe_n - 1; 78 unsigned int cq_ci, used; 79 80 /* if we are processing a compressed cqe */ 81 if (zip->ai) { 82 used = zip->cqe_cnt - zip->ai; 83 cq_ci = zip->cq_ci; 84 } else { 85 used = 0; 86 cq_ci = rxq->cq_ci; 87 } 88 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 89 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) { 90 int8_t op_own; 91 unsigned int n; 92 93 op_own = cqe->op_own; 94 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) 95 n = rte_be_to_cpu_32(cqe->byte_cnt); 96 else 97 n = 1; 98 cq_ci += n; 99 used += n; 100 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; 101 } 102 used = RTE_MIN(used * sges_n, elts_n * strd_n); 103 return used; 104 } 105 106 /** 107 * DPDK callback to check the status of a Rx descriptor. 108 * 109 * @param rx_queue 110 * The Rx queue. 111 * @param[in] offset 112 * The index of the descriptor in the ring. 113 * 114 * @return 115 * The status of the Rx descriptor. 116 */ 117 int 118 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) 119 { 120 struct mlx5_rxq_data *rxq = rx_queue; 121 122 if (offset >= (1 << rxq->cqe_n)) { 123 rte_errno = EINVAL; 124 return -rte_errno; 125 } 126 if (offset < rx_queue_count(rxq)) 127 return RTE_ETH_RX_DESC_DONE; 128 return RTE_ETH_RX_DESC_AVAIL; 129 } 130 131 /** 132 * DPDK callback to get the RX queue information. 133 * 134 * @param dev 135 * Pointer to the device structure. 136 * 137 * @param rx_queue_id 138 * Rx queue identificator. 139 * 140 * @param qinfo 141 * Pointer to the RX queue information structure. 142 * 143 * @return 144 * None. 145 */ 146 147 void 148 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 149 struct rte_eth_rxq_info *qinfo) 150 { 151 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id); 152 struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id); 153 154 if (!rxq) 155 return; 156 qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ? 157 rxq->mprq_mp : rxq->mp; 158 qinfo->conf.rx_thresh.pthresh = 0; 159 qinfo->conf.rx_thresh.hthresh = 0; 160 qinfo->conf.rx_thresh.wthresh = 0; 161 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh; 162 qinfo->conf.rx_drop_en = 1; 163 if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL) 164 qinfo->conf.rx_deferred_start = 0; 165 else 166 qinfo->conf.rx_deferred_start = 1; 167 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 168 qinfo->scattered_rx = dev->data->scattered_rx; 169 qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ? 170 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) : 171 (1 << rxq->elts_n); 172 } 173 174 /** 175 * DPDK callback to get the RX packet burst mode information. 176 * 177 * @param dev 178 * Pointer to the device structure. 179 * 180 * @param rx_queue_id 181 * Rx queue identificatior. 182 * 183 * @param mode 184 * Pointer to the burts mode information. 185 * 186 * @return 187 * 0 as success, -EINVAL as failure. 188 */ 189 int 190 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, 191 uint16_t rx_queue_id __rte_unused, 192 struct rte_eth_burst_mode *mode) 193 { 194 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 195 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id); 196 197 if (!rxq) { 198 rte_errno = EINVAL; 199 return -rte_errno; 200 } 201 if (pkt_burst == mlx5_rx_burst) { 202 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar"); 203 } else if (pkt_burst == mlx5_rx_burst_mprq) { 204 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ"); 205 } else if (pkt_burst == mlx5_rx_burst_vec) { 206 #if defined RTE_ARCH_X86_64 207 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE"); 208 #elif defined RTE_ARCH_ARM64 209 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon"); 210 #elif defined RTE_ARCH_PPC_64 211 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec"); 212 #else 213 return -EINVAL; 214 #endif 215 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) { 216 #if defined RTE_ARCH_X86_64 217 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE"); 218 #elif defined RTE_ARCH_ARM64 219 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon"); 220 #elif defined RTE_ARCH_PPC_64 221 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec"); 222 #else 223 return -EINVAL; 224 #endif 225 } else { 226 return -EINVAL; 227 } 228 return 0; 229 } 230 231 /** 232 * DPDK callback to get the number of used descriptors in a RX queue. 233 * 234 * @param rx_queue 235 * The Rx queue pointer. 236 * 237 * @return 238 * The number of used rx descriptor. 239 * -EINVAL if the queue is invalid 240 */ 241 uint32_t 242 mlx5_rx_queue_count(void *rx_queue) 243 { 244 struct mlx5_rxq_data *rxq = rx_queue; 245 struct rte_eth_dev *dev; 246 247 if (!rxq) { 248 rte_errno = EINVAL; 249 return -rte_errno; 250 } 251 252 dev = &rte_eth_devices[rxq->port_id]; 253 254 if (dev->rx_pkt_burst == NULL || 255 dev->rx_pkt_burst == removed_rx_burst) { 256 rte_errno = ENOTSUP; 257 return -rte_errno; 258 } 259 260 return rx_queue_count(rxq); 261 } 262 263 #define CLB_VAL_IDX 0 264 #define CLB_MSK_IDX 1 265 static int 266 mlx5_monitor_callback(const uint64_t value, 267 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ]) 268 { 269 const uint64_t m = opaque[CLB_MSK_IDX]; 270 const uint64_t v = opaque[CLB_VAL_IDX]; 271 272 return (value & m) == v ? -1 : 0; 273 } 274 275 int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) 276 { 277 struct mlx5_rxq_data *rxq = rx_queue; 278 const unsigned int cqe_num = 1 << rxq->cqe_n; 279 const unsigned int cqe_mask = cqe_num - 1; 280 const uint16_t idx = rxq->cq_ci & cqe_num; 281 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; 282 283 if (unlikely(rxq->cqes == NULL)) { 284 rte_errno = EINVAL; 285 return -rte_errno; 286 } 287 pmc->addr = &cqe->op_own; 288 pmc->opaque[CLB_VAL_IDX] = !!idx; 289 pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK; 290 pmc->fn = mlx5_monitor_callback; 291 pmc->size = sizeof(uint8_t); 292 return 0; 293 } 294 295 /** 296 * Translate RX completion flags to packet type. 297 * 298 * @param[in] rxq 299 * Pointer to RX queue structure. 300 * @param[in] cqe 301 * Pointer to CQE. 302 * 303 * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 304 * 305 * @return 306 * Packet type for struct rte_mbuf. 307 */ 308 static inline uint32_t 309 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 310 volatile struct mlx5_mini_cqe8 *mcqe) 311 { 312 uint8_t idx; 313 uint8_t ptype; 314 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6; 315 316 /* Get l3/l4 header from mini-CQE in case L3/L4 format*/ 317 if (mcqe == NULL || 318 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX) 319 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10; 320 else 321 ptype = mcqe->hdr_type >> 2; 322 /* 323 * The index to the array should have: 324 * bit[1:0] = l3_hdr_type 325 * bit[4:2] = l4_hdr_type 326 * bit[5] = ip_frag 327 * bit[6] = tunneled 328 * bit[7] = outer_l3_type 329 */ 330 idx = pinfo | ptype; 331 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6)); 332 } 333 334 /** 335 * Initialize Rx WQ and indexes. 336 * 337 * @param[in] rxq 338 * Pointer to RX queue structure. 339 */ 340 void 341 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) 342 { 343 const unsigned int wqe_n = 1 << rxq->elts_n; 344 unsigned int i; 345 346 for (i = 0; (i != wqe_n); ++i) { 347 volatile struct mlx5_wqe_data_seg *scat; 348 uintptr_t addr; 349 uint32_t byte_count; 350 uint32_t lkey; 351 352 if (mlx5_rxq_mprq_enabled(rxq)) { 353 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i]; 354 355 scat = &((volatile struct mlx5_wqe_mprq *) 356 rxq->wqes)[i].dseg; 357 addr = (uintptr_t)mlx5_mprq_buf_addr(buf, 358 1 << rxq->strd_num_n); 359 byte_count = (1 << rxq->strd_sz_n) * 360 (1 << rxq->strd_num_n); 361 lkey = mlx5_rx_addr2mr(rxq, addr); 362 } else { 363 struct rte_mbuf *buf = (*rxq->elts)[i]; 364 365 scat = &((volatile struct mlx5_wqe_data_seg *) 366 rxq->wqes)[i]; 367 addr = rte_pktmbuf_mtod(buf, uintptr_t); 368 byte_count = DATA_LEN(buf); 369 lkey = mlx5_rx_mb2mr(rxq, buf); 370 } 371 /* scat->addr must be able to store a pointer. */ 372 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t)); 373 *scat = (struct mlx5_wqe_data_seg){ 374 .addr = rte_cpu_to_be_64(addr), 375 .byte_count = rte_cpu_to_be_32(byte_count), 376 .lkey = lkey, 377 }; 378 } 379 rxq->consumed_strd = 0; 380 rxq->decompressed = 0; 381 rxq->rq_pi = 0; 382 rxq->zip = (struct rxq_zip){ 383 .ai = 0, 384 }; 385 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ? 386 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0; 387 /* Update doorbell counter. */ 388 rxq->rq_ci = wqe_n >> rxq->sges_n; 389 rte_io_wmb(); 390 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 391 } 392 393 /** 394 * Handle a Rx error. 395 * The function inserts the RQ state to reset when the first error CQE is 396 * shown, then drains the CQ by the caller function loop. When the CQ is empty, 397 * it moves the RQ state to ready and initializes the RQ. 398 * Next CQE identification and error counting are in the caller responsibility. 399 * 400 * @param[in] rxq 401 * Pointer to RX queue structure. 402 * @param[in] vec 403 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ. 404 * 0 when called from non-vectorized Rx burst. 405 * 406 * @return 407 * -1 in case of recovery error, otherwise the CQE status. 408 */ 409 int 410 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) 411 { 412 const uint16_t cqe_n = 1 << rxq->cqe_n; 413 const uint16_t cqe_mask = cqe_n - 1; 414 const uint16_t wqe_n = 1 << rxq->elts_n; 415 const uint16_t strd_n = 1 << rxq->strd_num_n; 416 struct mlx5_rxq_ctrl *rxq_ctrl = 417 container_of(rxq, struct mlx5_rxq_ctrl, rxq); 418 union { 419 volatile struct mlx5_cqe *cqe; 420 volatile struct mlx5_err_cqe *err_cqe; 421 } u = { 422 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], 423 }; 424 struct mlx5_mp_arg_queue_state_modify sm; 425 int ret; 426 427 switch (rxq->err_state) { 428 case MLX5_RXQ_ERR_STATE_NO_ERROR: 429 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET; 430 /* Fall-through */ 431 case MLX5_RXQ_ERR_STATE_NEED_RESET: 432 sm.is_wq = 1; 433 sm.queue_id = rxq->idx; 434 sm.state = IBV_WQS_RESET; 435 if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) 436 return -1; 437 if (rxq_ctrl->dump_file_n < 438 RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) { 439 MKSTR(err_str, "Unexpected CQE error syndrome " 440 "0x%02x CQN = %u RQN = %u wqe_counter = %u" 441 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome, 442 rxq->cqn, rxq_ctrl->wqn, 443 rte_be_to_cpu_16(u.err_cqe->wqe_counter), 444 rxq->rq_ci << rxq->sges_n, rxq->cq_ci); 445 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u", 446 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc()); 447 mlx5_dump_debug_information(name, NULL, err_str, 0); 448 mlx5_dump_debug_information(name, "MLX5 Error CQ:", 449 (const void *)((uintptr_t) 450 rxq->cqes), 451 sizeof(*u.cqe) * cqe_n); 452 mlx5_dump_debug_information(name, "MLX5 Error RQ:", 453 (const void *)((uintptr_t) 454 rxq->wqes), 455 16 * wqe_n); 456 rxq_ctrl->dump_file_n++; 457 } 458 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY; 459 /* Fall-through */ 460 case MLX5_RXQ_ERR_STATE_NEED_READY: 461 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci); 462 if (ret == MLX5_CQE_STATUS_HW_OWN) { 463 rte_io_wmb(); 464 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 465 rte_io_wmb(); 466 /* 467 * The RQ consumer index must be zeroed while moving 468 * from RESET state to RDY state. 469 */ 470 *rxq->rq_db = rte_cpu_to_be_32(0); 471 rte_io_wmb(); 472 sm.is_wq = 1; 473 sm.queue_id = rxq->idx; 474 sm.state = IBV_WQS_RDY; 475 if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) 476 return -1; 477 if (vec) { 478 const uint32_t elts_n = 479 mlx5_rxq_mprq_enabled(rxq) ? 480 wqe_n * strd_n : wqe_n; 481 const uint32_t e_mask = elts_n - 1; 482 uint32_t elts_ci = 483 mlx5_rxq_mprq_enabled(rxq) ? 484 rxq->elts_ci : rxq->rq_ci; 485 uint32_t elt_idx; 486 struct rte_mbuf **elt; 487 int i; 488 unsigned int n = elts_n - (elts_ci - 489 rxq->rq_pi); 490 491 for (i = 0; i < (int)n; ++i) { 492 elt_idx = (elts_ci + i) & e_mask; 493 elt = &(*rxq->elts)[elt_idx]; 494 *elt = rte_mbuf_raw_alloc(rxq->mp); 495 if (!*elt) { 496 for (i--; i >= 0; --i) { 497 elt_idx = (elts_ci + 498 i) & elts_n; 499 elt = &(*rxq->elts) 500 [elt_idx]; 501 rte_pktmbuf_free_seg 502 (*elt); 503 } 504 return -1; 505 } 506 } 507 for (i = 0; i < (int)elts_n; ++i) { 508 elt = &(*rxq->elts)[i]; 509 DATA_LEN(*elt) = 510 (uint16_t)((*elt)->buf_len - 511 rte_pktmbuf_headroom(*elt)); 512 } 513 /* Padding with a fake mbuf for vec Rx. */ 514 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) 515 (*rxq->elts)[elts_n + i] = 516 &rxq->fake_mbuf; 517 } 518 mlx5_rxq_initialize(rxq); 519 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; 520 } 521 return ret; 522 default: 523 return -1; 524 } 525 } 526 527 /** 528 * Get size of the next packet for a given CQE. For compressed CQEs, the 529 * consumer index is updated only once all packets of the current one have 530 * been processed. 531 * 532 * @param rxq 533 * Pointer to RX queue. 534 * @param cqe 535 * CQE to process. 536 * @param[out] mcqe 537 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not 538 * written. 539 * 540 * @return 541 * 0 in case of empty CQE, otherwise the packet size in bytes. 542 */ 543 static inline int 544 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, 545 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) 546 { 547 struct rxq_zip *zip = &rxq->zip; 548 uint16_t cqe_n = cqe_cnt + 1; 549 int len; 550 uint16_t idx, end; 551 552 do { 553 len = 0; 554 /* Process compressed data in the CQE and mini arrays. */ 555 if (zip->ai) { 556 volatile struct mlx5_mini_cqe8 (*mc)[8] = 557 (volatile struct mlx5_mini_cqe8 (*)[8]) 558 (uintptr_t)(&(*rxq->cqes)[zip->ca & 559 cqe_cnt].pkt_info); 560 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt & 561 rxq->byte_mask); 562 *mcqe = &(*mc)[zip->ai & 7]; 563 if ((++zip->ai & 7) == 0) { 564 /* Invalidate consumed CQEs */ 565 idx = zip->ca; 566 end = zip->na; 567 while (idx != end) { 568 (*rxq->cqes)[idx & cqe_cnt].op_own = 569 MLX5_CQE_INVALIDATE; 570 ++idx; 571 } 572 /* 573 * Increment consumer index to skip the number 574 * of CQEs consumed. Hardware leaves holes in 575 * the CQ ring for software use. 576 */ 577 zip->ca = zip->na; 578 zip->na += 8; 579 } 580 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { 581 /* Invalidate the rest */ 582 idx = zip->ca; 583 end = zip->cq_ci; 584 585 while (idx != end) { 586 (*rxq->cqes)[idx & cqe_cnt].op_own = 587 MLX5_CQE_INVALIDATE; 588 ++idx; 589 } 590 rxq->cq_ci = zip->cq_ci; 591 zip->ai = 0; 592 } 593 /* 594 * No compressed data, get next CQE and verify if it is 595 * compressed. 596 */ 597 } else { 598 int ret; 599 int8_t op_own; 600 uint32_t cq_ci; 601 602 ret = check_cqe(cqe, cqe_n, rxq->cq_ci); 603 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { 604 if (unlikely(ret == MLX5_CQE_STATUS_ERR || 605 rxq->err_state)) { 606 ret = mlx5_rx_err_handle(rxq, 0); 607 if (ret == MLX5_CQE_STATUS_HW_OWN || 608 ret == -1) 609 return 0; 610 } else { 611 return 0; 612 } 613 } 614 /* 615 * Introduce the local variable to have queue cq_ci 616 * index in queue structure always consistent with 617 * actual CQE boundary (not pointing to the middle 618 * of compressed CQE session). 619 */ 620 cq_ci = rxq->cq_ci + 1; 621 op_own = cqe->op_own; 622 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { 623 volatile struct mlx5_mini_cqe8 (*mc)[8] = 624 (volatile struct mlx5_mini_cqe8 (*)[8]) 625 (uintptr_t)(&(*rxq->cqes) 626 [cq_ci & cqe_cnt].pkt_info); 627 628 /* Fix endianness. */ 629 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); 630 /* 631 * Current mini array position is the one 632 * returned by check_cqe64(). 633 * 634 * If completion comprises several mini arrays, 635 * as a special case the second one is located 636 * 7 CQEs after the initial CQE instead of 8 637 * for subsequent ones. 638 */ 639 zip->ca = cq_ci; 640 zip->na = zip->ca + 7; 641 /* Compute the next non compressed CQE. */ 642 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; 643 /* Get packet size to return. */ 644 len = rte_be_to_cpu_32((*mc)[0].byte_cnt & 645 rxq->byte_mask); 646 *mcqe = &(*mc)[0]; 647 zip->ai = 1; 648 /* Prefetch all to be invalidated */ 649 idx = zip->ca; 650 end = zip->cq_ci; 651 while (idx != end) { 652 rte_prefetch0(&(*rxq->cqes)[(idx) & 653 cqe_cnt]); 654 ++idx; 655 } 656 } else { 657 rxq->cq_ci = cq_ci; 658 len = rte_be_to_cpu_32(cqe->byte_cnt); 659 } 660 } 661 if (unlikely(rxq->err_state)) { 662 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 663 ++rxq->stats.idropped; 664 } else { 665 return len; 666 } 667 } while (1); 668 } 669 670 /** 671 * Translate RX completion flags to offload flags. 672 * 673 * @param[in] cqe 674 * Pointer to CQE. 675 * 676 * @return 677 * Offload flags (ol_flags) for struct rte_mbuf. 678 */ 679 static inline uint32_t 680 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) 681 { 682 uint32_t ol_flags = 0; 683 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); 684 685 ol_flags = 686 TRANSPOSE(flags, 687 MLX5_CQE_RX_L3_HDR_VALID, 688 RTE_MBUF_F_RX_IP_CKSUM_GOOD) | 689 TRANSPOSE(flags, 690 MLX5_CQE_RX_L4_HDR_VALID, 691 RTE_MBUF_F_RX_L4_CKSUM_GOOD); 692 return ol_flags; 693 } 694 695 /** 696 * Fill in mbuf fields from RX completion flags. 697 * Note that pkt->ol_flags should be initialized outside of this function. 698 * 699 * @param rxq 700 * Pointer to RX queue. 701 * @param pkt 702 * mbuf to fill. 703 * @param cqe 704 * CQE to process. 705 * @param rss_hash_res 706 * Packet RSS Hash result. 707 */ 708 static inline void 709 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, 710 volatile struct mlx5_cqe *cqe, 711 volatile struct mlx5_mini_cqe8 *mcqe) 712 { 713 /* Update packet information. */ 714 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe); 715 pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id; 716 717 if (rxq->rss_hash) { 718 uint32_t rss_hash_res = 0; 719 720 /* If compressed, take hash result from mini-CQE. */ 721 if (mcqe == NULL || 722 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH) 723 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); 724 else 725 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result); 726 if (rss_hash_res) { 727 pkt->hash.rss = rss_hash_res; 728 pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 729 } 730 } 731 if (rxq->mark) { 732 uint32_t mark = 0; 733 734 /* If compressed, take flow tag from mini-CQE. */ 735 if (mcqe == NULL || 736 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX) 737 mark = cqe->sop_drop_qpn; 738 else 739 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) | 740 (mcqe->flow_tag_high << 16); 741 if (MLX5_FLOW_MARK_IS_VALID(mark)) { 742 pkt->ol_flags |= RTE_MBUF_F_RX_FDIR; 743 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) { 744 pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID; 745 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); 746 } 747 } 748 } 749 if (rxq->dynf_meta) { 750 uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) & 751 rxq->flow_meta_port_mask; 752 753 if (meta) { 754 pkt->ol_flags |= rxq->flow_meta_mask; 755 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, 756 uint32_t *) = meta; 757 } 758 } 759 if (rxq->csum) 760 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); 761 if (rxq->vlan_strip) { 762 bool vlan_strip; 763 764 if (mcqe == NULL || 765 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX) 766 vlan_strip = cqe->hdr_type_etc & 767 RTE_BE16(MLX5_CQE_VLAN_STRIPPED); 768 else 769 vlan_strip = mcqe->hdr_type & 770 RTE_BE16(MLX5_CQE_VLAN_STRIPPED); 771 if (vlan_strip) { 772 pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 773 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); 774 } 775 } 776 if (rxq->hw_timestamp) { 777 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp); 778 779 if (rxq->rt_timestamp) 780 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts); 781 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts); 782 pkt->ol_flags |= rxq->timestamp_rx_flag; 783 } 784 } 785 786 /** 787 * DPDK callback for RX. 788 * 789 * @param dpdk_rxq 790 * Generic pointer to RX queue structure. 791 * @param[out] pkts 792 * Array to store received packets. 793 * @param pkts_n 794 * Maximum number of packets in array. 795 * 796 * @return 797 * Number of packets successfully received (<= pkts_n). 798 */ 799 uint16_t 800 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 801 { 802 struct mlx5_rxq_data *rxq = dpdk_rxq; 803 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1; 804 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; 805 const unsigned int sges_n = rxq->sges_n; 806 struct rte_mbuf *pkt = NULL; 807 struct rte_mbuf *seg = NULL; 808 volatile struct mlx5_cqe *cqe = 809 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 810 unsigned int i = 0; 811 unsigned int rq_ci = rxq->rq_ci << sges_n; 812 int len = 0; /* keep its value across iterations. */ 813 814 while (pkts_n) { 815 unsigned int idx = rq_ci & wqe_cnt; 816 volatile struct mlx5_wqe_data_seg *wqe = 817 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; 818 struct rte_mbuf *rep = (*rxq->elts)[idx]; 819 volatile struct mlx5_mini_cqe8 *mcqe = NULL; 820 821 if (pkt) 822 NEXT(seg) = rep; 823 seg = rep; 824 rte_prefetch0(seg); 825 rte_prefetch0(cqe); 826 rte_prefetch0(wqe); 827 /* Allocate the buf from the same pool. */ 828 rep = rte_mbuf_raw_alloc(seg->pool); 829 if (unlikely(rep == NULL)) { 830 ++rxq->stats.rx_nombuf; 831 if (!pkt) { 832 /* 833 * no buffers before we even started, 834 * bail out silently. 835 */ 836 break; 837 } 838 while (pkt != seg) { 839 MLX5_ASSERT(pkt != (*rxq->elts)[idx]); 840 rep = NEXT(pkt); 841 NEXT(pkt) = NULL; 842 NB_SEGS(pkt) = 1; 843 rte_mbuf_raw_free(pkt); 844 pkt = rep; 845 } 846 rq_ci >>= sges_n; 847 ++rq_ci; 848 rq_ci <<= sges_n; 849 break; 850 } 851 if (!pkt) { 852 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; 853 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); 854 if (!len) { 855 rte_mbuf_raw_free(rep); 856 break; 857 } 858 pkt = seg; 859 MLX5_ASSERT(len >= (rxq->crc_present << 2)); 860 pkt->ol_flags &= RTE_MBUF_F_EXTERNAL; 861 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe); 862 if (rxq->crc_present) 863 len -= RTE_ETHER_CRC_LEN; 864 PKT_LEN(pkt) = len; 865 if (cqe->lro_num_seg > 1) { 866 mlx5_lro_update_hdr 867 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe, 868 mcqe, rxq, len); 869 pkt->ol_flags |= RTE_MBUF_F_RX_LRO; 870 pkt->tso_segsz = len / cqe->lro_num_seg; 871 } 872 } 873 DATA_LEN(rep) = DATA_LEN(seg); 874 PKT_LEN(rep) = PKT_LEN(seg); 875 SET_DATA_OFF(rep, DATA_OFF(seg)); 876 PORT(rep) = PORT(seg); 877 (*rxq->elts)[idx] = rep; 878 /* 879 * Fill NIC descriptor with the new buffer. The lkey and size 880 * of the buffers are already known, only the buffer address 881 * changes. 882 */ 883 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); 884 /* If there's only one MR, no need to replace LKey in WQE. */ 885 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) 886 wqe->lkey = mlx5_rx_mb2mr(rxq, rep); 887 if (len > DATA_LEN(seg)) { 888 len -= DATA_LEN(seg); 889 ++NB_SEGS(pkt); 890 ++rq_ci; 891 continue; 892 } 893 DATA_LEN(seg) = len; 894 #ifdef MLX5_PMD_SOFT_COUNTERS 895 /* Increment bytes counter. */ 896 rxq->stats.ibytes += PKT_LEN(pkt); 897 #endif 898 /* Return packet. */ 899 *(pkts++) = pkt; 900 pkt = NULL; 901 --pkts_n; 902 ++i; 903 /* Align consumer index to the next stride. */ 904 rq_ci >>= sges_n; 905 ++rq_ci; 906 rq_ci <<= sges_n; 907 } 908 if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci))) 909 return 0; 910 /* Update the consumer index. */ 911 rxq->rq_ci = rq_ci >> sges_n; 912 rte_io_wmb(); 913 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 914 rte_io_wmb(); 915 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 916 #ifdef MLX5_PMD_SOFT_COUNTERS 917 /* Increment packets counter. */ 918 rxq->stats.ipackets += i; 919 #endif 920 return i; 921 } 922 923 /** 924 * Update LRO packet TCP header. 925 * The HW LRO feature doesn't update the TCP header after coalescing the 926 * TCP segments but supplies information in CQE to fill it by SW. 927 * 928 * @param tcp 929 * Pointer to the TCP header. 930 * @param cqe 931 * Pointer to the completion entry. 932 * @param phcsum 933 * The L3 pseudo-header checksum. 934 */ 935 static inline void 936 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, 937 volatile struct mlx5_cqe *__rte_restrict cqe, 938 uint32_t phcsum, uint8_t l4_type) 939 { 940 /* 941 * The HW calculates only the TCP payload checksum, need to complete 942 * the TCP header checksum and the L3 pseudo-header checksum. 943 */ 944 uint32_t csum = phcsum + cqe->csum; 945 946 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK || 947 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) { 948 tcp->tcp_flags |= RTE_TCP_ACK_FLAG; 949 tcp->recv_ack = cqe->lro_ack_seq_num; 950 tcp->rx_win = cqe->lro_tcp_win; 951 } 952 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) 953 tcp->tcp_flags |= RTE_TCP_PSH_FLAG; 954 tcp->cksum = 0; 955 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); 956 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); 957 csum = (~csum) & 0xffff; 958 if (csum == 0) 959 csum = 0xffff; 960 tcp->cksum = csum; 961 } 962 963 /** 964 * Update LRO packet headers. 965 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the 966 * TCP segments but supply information in CQE to fill it by SW. 967 * 968 * @param padd 969 * The packet address. 970 * @param cqe 971 * Pointer to the completion entry. 972 * @param len 973 * The packet length. 974 */ 975 static inline void 976 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd, 977 volatile struct mlx5_cqe *__rte_restrict cqe, 978 volatile struct mlx5_mini_cqe8 *mcqe, 979 struct mlx5_rxq_data *rxq, uint32_t len) 980 { 981 union { 982 struct rte_ether_hdr *eth; 983 struct rte_vlan_hdr *vlan; 984 struct rte_ipv4_hdr *ipv4; 985 struct rte_ipv6_hdr *ipv6; 986 struct rte_tcp_hdr *tcp; 987 uint8_t *hdr; 988 } h = { 989 .hdr = padd, 990 }; 991 uint16_t proto = h.eth->ether_type; 992 uint32_t phcsum; 993 uint8_t l4_type; 994 995 h.eth++; 996 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) || 997 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { 998 proto = h.vlan->eth_proto; 999 h.vlan++; 1000 } 1001 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) { 1002 h.ipv4->time_to_live = cqe->lro_min_ttl; 1003 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd)); 1004 h.ipv4->hdr_checksum = 0; 1005 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4); 1006 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0); 1007 h.ipv4++; 1008 } else { 1009 h.ipv6->hop_limits = cqe->lro_min_ttl; 1010 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) - 1011 sizeof(*h.ipv6)); 1012 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0); 1013 h.ipv6++; 1014 } 1015 if (mcqe == NULL || 1016 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX) 1017 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) & 1018 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT; 1019 else 1020 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) & 1021 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT; 1022 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type); 1023 } 1024 1025 void 1026 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) 1027 { 1028 mlx5_mprq_buf_free_cb(NULL, buf); 1029 } 1030 1031 /** 1032 * DPDK callback for RX with Multi-Packet RQ support. 1033 * 1034 * @param dpdk_rxq 1035 * Generic pointer to RX queue structure. 1036 * @param[out] pkts 1037 * Array to store received packets. 1038 * @param pkts_n 1039 * Maximum number of packets in array. 1040 * 1041 * @return 1042 * Number of packets successfully received (<= pkts_n). 1043 */ 1044 uint16_t 1045 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 1046 { 1047 struct mlx5_rxq_data *rxq = dpdk_rxq; 1048 const uint32_t strd_n = 1 << rxq->strd_num_n; 1049 const uint32_t strd_sz = 1 << rxq->strd_sz_n; 1050 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1; 1051 const uint32_t wq_mask = (1 << rxq->elts_n) - 1; 1052 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 1053 unsigned int i = 0; 1054 uint32_t rq_ci = rxq->rq_ci; 1055 uint16_t consumed_strd = rxq->consumed_strd; 1056 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 1057 1058 while (i < pkts_n) { 1059 struct rte_mbuf *pkt; 1060 int ret; 1061 uint32_t len; 1062 uint16_t strd_cnt; 1063 uint16_t strd_idx; 1064 uint32_t byte_cnt; 1065 volatile struct mlx5_mini_cqe8 *mcqe = NULL; 1066 enum mlx5_rqx_code rxq_code; 1067 1068 if (consumed_strd == strd_n) { 1069 /* Replace WQE if the buffer is still in use. */ 1070 mprq_buf_replace(rxq, rq_ci & wq_mask); 1071 /* Advance to the next WQE. */ 1072 consumed_strd = 0; 1073 ++rq_ci; 1074 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; 1075 } 1076 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; 1077 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); 1078 if (!ret) 1079 break; 1080 byte_cnt = ret; 1081 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; 1082 MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); 1083 if (rxq->crc_present) 1084 len -= RTE_ETHER_CRC_LEN; 1085 if (mcqe && 1086 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX) 1087 strd_cnt = (len / strd_sz) + !!(len % strd_sz); 1088 else 1089 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> 1090 MLX5_MPRQ_STRIDE_NUM_SHIFT; 1091 MLX5_ASSERT(strd_cnt); 1092 consumed_strd += strd_cnt; 1093 if (byte_cnt & MLX5_MPRQ_FILLER_MASK) 1094 continue; 1095 strd_idx = rte_be_to_cpu_16(mcqe == NULL ? 1096 cqe->wqe_counter : 1097 mcqe->stride_idx); 1098 MLX5_ASSERT(strd_idx < strd_n); 1099 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & 1100 wq_mask)); 1101 pkt = rte_pktmbuf_alloc(rxq->mp); 1102 if (unlikely(pkt == NULL)) { 1103 ++rxq->stats.rx_nombuf; 1104 break; 1105 } 1106 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; 1107 MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); 1108 if (rxq->crc_present) 1109 len -= RTE_ETHER_CRC_LEN; 1110 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf, 1111 strd_idx, strd_cnt); 1112 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) { 1113 rte_pktmbuf_free_seg(pkt); 1114 if (rxq_code == MLX5_RXQ_CODE_DROPPED) { 1115 ++rxq->stats.idropped; 1116 continue; 1117 } 1118 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) { 1119 ++rxq->stats.rx_nombuf; 1120 break; 1121 } 1122 } 1123 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe); 1124 if (cqe->lro_num_seg > 1) { 1125 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *), 1126 cqe, mcqe, rxq, len); 1127 pkt->ol_flags |= RTE_MBUF_F_RX_LRO; 1128 pkt->tso_segsz = len / cqe->lro_num_seg; 1129 } 1130 PKT_LEN(pkt) = len; 1131 PORT(pkt) = rxq->port_id; 1132 #ifdef MLX5_PMD_SOFT_COUNTERS 1133 /* Increment bytes counter. */ 1134 rxq->stats.ibytes += PKT_LEN(pkt); 1135 #endif 1136 /* Return packet. */ 1137 *(pkts++) = pkt; 1138 ++i; 1139 } 1140 /* Update the consumer indexes. */ 1141 rxq->consumed_strd = consumed_strd; 1142 rte_io_wmb(); 1143 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 1144 if (rq_ci != rxq->rq_ci) { 1145 rxq->rq_ci = rq_ci; 1146 rte_io_wmb(); 1147 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); 1148 } 1149 #ifdef MLX5_PMD_SOFT_COUNTERS 1150 /* Increment packets counter. */ 1151 rxq->stats.ipackets += i; 1152 #endif 1153 return i; 1154 } 1155 1156 /** 1157 * Dummy DPDK callback for RX. 1158 * 1159 * This function is used to temporarily replace the real callback during 1160 * unsafe control operations on the queue, or in case of error. 1161 * 1162 * @param dpdk_rxq 1163 * Generic pointer to RX queue structure. 1164 * @param[out] pkts 1165 * Array to store received packets. 1166 * @param pkts_n 1167 * Maximum number of packets in array. 1168 * 1169 * @return 1170 * Number of packets successfully received (<= pkts_n). 1171 */ 1172 uint16_t 1173 removed_rx_burst(void *dpdk_rxq __rte_unused, 1174 struct rte_mbuf **pkts __rte_unused, 1175 uint16_t pkts_n __rte_unused) 1176 { 1177 rte_mb(); 1178 return 0; 1179 } 1180 1181 /* 1182 * Vectorized Rx routines are not compiled in when required vector instructions 1183 * are not supported on a target architecture. 1184 * The following null stubs are needed for linkage when those are not included 1185 * outside of this file (e.g. mlx5_rxtx_vec_sse.c for x86). 1186 */ 1187 1188 __rte_weak uint16_t 1189 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused, 1190 struct rte_mbuf **pkts __rte_unused, 1191 uint16_t pkts_n __rte_unused) 1192 { 1193 return 0; 1194 } 1195 1196 __rte_weak uint16_t 1197 mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused, 1198 struct rte_mbuf **pkts __rte_unused, 1199 uint16_t pkts_n __rte_unused) 1200 { 1201 return 0; 1202 } 1203 1204 __rte_weak int 1205 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) 1206 { 1207 return -ENOTSUP; 1208 } 1209 1210 __rte_weak int 1211 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) 1212 { 1213 return -ENOTSUP; 1214 } 1215 1216