1 /*- 2 * BSD LICENSE 3 * 4 * Copyright 2015 6WIND S.A. 5 * Copyright 2015 Mellanox. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of 6WIND S.A. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <assert.h> 35 #include <stdint.h> 36 #include <string.h> 37 #include <stdlib.h> 38 39 /* Verbs header. */ 40 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 41 #ifdef PEDANTIC 42 #pragma GCC diagnostic ignored "-pedantic" 43 #endif 44 #include <infiniband/verbs.h> 45 #ifdef PEDANTIC 46 #pragma GCC diagnostic error "-pedantic" 47 #endif 48 49 /* DPDK headers don't like -pedantic. */ 50 #ifdef PEDANTIC 51 #pragma GCC diagnostic ignored "-pedantic" 52 #endif 53 #include <rte_mbuf.h> 54 #include <rte_mempool.h> 55 #include <rte_prefetch.h> 56 #include <rte_common.h> 57 #include <rte_branch_prediction.h> 58 #include <rte_memory.h> 59 #ifdef PEDANTIC 60 #pragma GCC diagnostic error "-pedantic" 61 #endif 62 63 #include "mlx5.h" 64 #include "mlx5_utils.h" 65 #include "mlx5_rxtx.h" 66 #include "mlx5_autoconf.h" 67 #include "mlx5_defs.h" 68 69 /** 70 * Manage TX completions. 71 * 72 * When sending a burst, mlx5_tx_burst() posts several WRs. 73 * To improve performance, a completion event is only required once every 74 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information 75 * for other WRs, but this information would not be used anyway. 76 * 77 * @param txq 78 * Pointer to TX queue structure. 79 * 80 * @return 81 * 0 on success, -1 on failure. 82 */ 83 static int 84 txq_complete(struct txq *txq) 85 { 86 unsigned int elts_comp = txq->elts_comp; 87 unsigned int elts_tail = txq->elts_tail; 88 unsigned int elts_free = txq->elts_tail; 89 const unsigned int elts_n = txq->elts_n; 90 int wcs_n; 91 92 if (unlikely(elts_comp == 0)) 93 return 0; 94 #ifdef DEBUG_SEND 95 DEBUG("%p: processing %u work requests completions", 96 (void *)txq, elts_comp); 97 #endif 98 wcs_n = txq->poll_cnt(txq->cq, elts_comp); 99 if (unlikely(wcs_n == 0)) 100 return 0; 101 if (unlikely(wcs_n < 0)) { 102 DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)", 103 (void *)txq, wcs_n); 104 return -1; 105 } 106 elts_comp -= wcs_n; 107 assert(elts_comp <= txq->elts_comp); 108 /* 109 * Assume WC status is successful as nothing can be done about it 110 * anyway. 111 */ 112 elts_tail += wcs_n * txq->elts_comp_cd_init; 113 if (elts_tail >= elts_n) 114 elts_tail -= elts_n; 115 116 while (elts_free != elts_tail) { 117 struct txq_elt *elt = &(*txq->elts)[elts_free]; 118 unsigned int elts_free_next = 119 (((elts_free + 1) == elts_n) ? 0 : elts_free + 1); 120 struct rte_mbuf *tmp = elt->buf; 121 struct txq_elt *elt_next = &(*txq->elts)[elts_free_next]; 122 123 #ifndef NDEBUG 124 /* Poisoning. */ 125 memset(elt, 0x66, sizeof(*elt)); 126 #endif 127 RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); 128 /* Faster than rte_pktmbuf_free(). */ 129 do { 130 struct rte_mbuf *next = NEXT(tmp); 131 132 rte_pktmbuf_free_seg(tmp); 133 tmp = next; 134 } while (tmp != NULL); 135 elts_free = elts_free_next; 136 } 137 138 txq->elts_tail = elts_tail; 139 txq->elts_comp = elts_comp; 140 return 0; 141 } 142 143 struct mlx5_check_mempool_data { 144 int ret; 145 char *start; 146 char *end; 147 }; 148 149 /* Called by mlx5_check_mempool() when iterating the memory chunks. */ 150 static void mlx5_check_mempool_cb(struct rte_mempool *mp, 151 void *opaque, struct rte_mempool_memhdr *memhdr, 152 unsigned mem_idx) 153 { 154 struct mlx5_check_mempool_data *data = opaque; 155 156 (void)mp; 157 (void)mem_idx; 158 159 /* It already failed, skip the next chunks. */ 160 if (data->ret != 0) 161 return; 162 /* It is the first chunk. */ 163 if (data->start == NULL && data->end == NULL) { 164 data->start = memhdr->addr; 165 data->end = data->start + memhdr->len; 166 return; 167 } 168 if (data->end == memhdr->addr) { 169 data->end += memhdr->len; 170 return; 171 } 172 if (data->start == (char *)memhdr->addr + memhdr->len) { 173 data->start -= memhdr->len; 174 return; 175 } 176 /* Error, mempool is not virtually contigous. */ 177 data->ret = -1; 178 } 179 180 /** 181 * Check if a mempool can be used: it must be virtually contiguous. 182 * 183 * @param[in] mp 184 * Pointer to memory pool. 185 * @param[out] start 186 * Pointer to the start address of the mempool virtual memory area 187 * @param[out] end 188 * Pointer to the end address of the mempool virtual memory area 189 * 190 * @return 191 * 0 on success (mempool is virtually contiguous), -1 on error. 192 */ 193 static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, 194 uintptr_t *end) 195 { 196 struct mlx5_check_mempool_data data; 197 198 memset(&data, 0, sizeof(data)); 199 rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); 200 *start = (uintptr_t)data.start; 201 *end = (uintptr_t)data.end; 202 203 return data.ret; 204 } 205 206 /* For best performance, this function should not be inlined. */ 207 struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *) 208 __attribute__((noinline)); 209 210 /** 211 * Register mempool as a memory region. 212 * 213 * @param pd 214 * Pointer to protection domain. 215 * @param mp 216 * Pointer to memory pool. 217 * 218 * @return 219 * Memory region pointer, NULL in case of error. 220 */ 221 struct ibv_mr * 222 mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp) 223 { 224 const struct rte_memseg *ms = rte_eal_get_physmem_layout(); 225 uintptr_t start; 226 uintptr_t end; 227 unsigned int i; 228 229 if (mlx5_check_mempool(mp, &start, &end) != 0) { 230 ERROR("mempool %p: not virtually contiguous", 231 (void *)mp); 232 return NULL; 233 } 234 235 DEBUG("mempool %p area start=%p end=%p size=%zu", 236 (void *)mp, (void *)start, (void *)end, 237 (size_t)(end - start)); 238 /* Round start and end to page boundary if found in memory segments. */ 239 for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { 240 uintptr_t addr = (uintptr_t)ms[i].addr; 241 size_t len = ms[i].len; 242 unsigned int align = ms[i].hugepage_sz; 243 244 if ((start > addr) && (start < addr + len)) 245 start = RTE_ALIGN_FLOOR(start, align); 246 if ((end > addr) && (end < addr + len)) 247 end = RTE_ALIGN_CEIL(end, align); 248 } 249 DEBUG("mempool %p using start=%p end=%p size=%zu for MR", 250 (void *)mp, (void *)start, (void *)end, 251 (size_t)(end - start)); 252 return ibv_reg_mr(pd, 253 (void *)start, 254 end - start, 255 IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); 256 } 257 258 /** 259 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which 260 * the cloned mbuf is allocated is returned instead. 261 * 262 * @param buf 263 * Pointer to mbuf. 264 * 265 * @return 266 * Memory pool where data is located for given mbuf. 267 */ 268 static struct rte_mempool * 269 txq_mb2mp(struct rte_mbuf *buf) 270 { 271 if (unlikely(RTE_MBUF_INDIRECT(buf))) 272 return rte_mbuf_from_indirect(buf)->pool; 273 return buf->pool; 274 } 275 276 /** 277 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. 278 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, 279 * remove an entry first. 280 * 281 * @param txq 282 * Pointer to TX queue structure. 283 * @param[in] mp 284 * Memory Pool for which a Memory Region lkey must be returned. 285 * 286 * @return 287 * mr->lkey on success, (uint32_t)-1 on failure. 288 */ 289 static uint32_t 290 txq_mp2mr(struct txq *txq, struct rte_mempool *mp) 291 { 292 unsigned int i; 293 struct ibv_mr *mr; 294 295 for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { 296 if (unlikely(txq->mp2mr[i].mp == NULL)) { 297 /* Unknown MP, add a new MR for it. */ 298 break; 299 } 300 if (txq->mp2mr[i].mp == mp) { 301 assert(txq->mp2mr[i].lkey != (uint32_t)-1); 302 assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey); 303 return txq->mp2mr[i].lkey; 304 } 305 } 306 /* Add a new entry, register MR first. */ 307 DEBUG("%p: discovered new memory pool \"%s\" (%p)", 308 (void *)txq, mp->name, (void *)mp); 309 mr = mlx5_mp2mr(txq->priv->pd, mp); 310 if (unlikely(mr == NULL)) { 311 DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", 312 (void *)txq); 313 return (uint32_t)-1; 314 } 315 if (unlikely(i == RTE_DIM(txq->mp2mr))) { 316 /* Table is full, remove oldest entry. */ 317 DEBUG("%p: MR <-> MP table full, dropping oldest entry.", 318 (void *)txq); 319 --i; 320 claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr)); 321 memmove(&txq->mp2mr[0], &txq->mp2mr[1], 322 (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); 323 } 324 /* Store the new entry. */ 325 txq->mp2mr[i].mp = mp; 326 txq->mp2mr[i].mr = mr; 327 txq->mp2mr[i].lkey = mr->lkey; 328 DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, 329 (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey); 330 return txq->mp2mr[i].lkey; 331 } 332 333 struct txq_mp2mr_mbuf_check_data { 334 int ret; 335 }; 336 337 /** 338 * Callback function for rte_mempool_obj_iter() to check whether a given 339 * mempool object looks like a mbuf. 340 * 341 * @param[in] mp 342 * The mempool pointer 343 * @param[in] arg 344 * Context data (struct txq_mp2mr_mbuf_check_data). Contains the 345 * return value. 346 * @param[in] obj 347 * Object address. 348 * @param index 349 * Object index, unused. 350 */ 351 static void 352 txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, 353 uint32_t index __rte_unused) 354 { 355 struct txq_mp2mr_mbuf_check_data *data = arg; 356 struct rte_mbuf *buf = obj; 357 358 /* Check whether mbuf structure fits element size and whether mempool 359 * pointer is valid. */ 360 if (sizeof(*buf) > mp->elt_size || buf->pool != mp) 361 data->ret = -1; 362 } 363 364 /** 365 * Iterator function for rte_mempool_walk() to register existing mempools and 366 * fill the MP to MR cache of a TX queue. 367 * 368 * @param[in] mp 369 * Memory Pool to register. 370 * @param *arg 371 * Pointer to TX queue structure. 372 */ 373 void 374 txq_mp2mr_iter(struct rte_mempool *mp, void *arg) 375 { 376 struct txq *txq = arg; 377 struct txq_mp2mr_mbuf_check_data data = { 378 .ret = 0, 379 }; 380 381 /* Register mempool only if the first element looks like a mbuf. */ 382 if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || 383 data.ret == -1) 384 return; 385 txq_mp2mr(txq, mp); 386 } 387 388 /** 389 * Insert VLAN using mbuf headroom space. 390 * 391 * @param buf 392 * Buffer for VLAN insertion. 393 * 394 * @return 395 * 0 on success, errno value on failure. 396 */ 397 static inline int 398 insert_vlan_sw(struct rte_mbuf *buf) 399 { 400 uintptr_t addr; 401 uint32_t vlan; 402 uint16_t head_room_len = rte_pktmbuf_headroom(buf); 403 404 if (head_room_len < 4) 405 return EINVAL; 406 407 addr = rte_pktmbuf_mtod(buf, uintptr_t); 408 vlan = htonl(0x81000000 | buf->vlan_tci); 409 memmove((void *)(addr - 4), (void *)addr, 12); 410 memcpy((void *)(addr + 8), &vlan, sizeof(vlan)); 411 412 SET_DATA_OFF(buf, head_room_len - 4); 413 DATA_LEN(buf) += 4; 414 415 return 0; 416 } 417 418 #if MLX5_PMD_SGE_WR_N > 1 419 420 /** 421 * Copy scattered mbuf contents to a single linear buffer. 422 * 423 * @param[out] linear 424 * Linear output buffer. 425 * @param[in] buf 426 * Scattered input buffer. 427 * 428 * @return 429 * Number of bytes copied to the output buffer or 0 if not large enough. 430 */ 431 static unsigned int 432 linearize_mbuf(linear_t *linear, struct rte_mbuf *buf) 433 { 434 unsigned int size = 0; 435 unsigned int offset; 436 437 do { 438 unsigned int len = DATA_LEN(buf); 439 440 offset = size; 441 size += len; 442 if (unlikely(size > sizeof(*linear))) 443 return 0; 444 memcpy(&(*linear)[offset], 445 rte_pktmbuf_mtod(buf, uint8_t *), 446 len); 447 buf = NEXT(buf); 448 } while (buf != NULL); 449 return size; 450 } 451 452 /** 453 * Handle scattered buffers for mlx5_tx_burst(). 454 * 455 * @param txq 456 * TX queue structure. 457 * @param segs 458 * Number of segments in buf. 459 * @param elt 460 * TX queue element to fill. 461 * @param[in] buf 462 * Buffer to process. 463 * @param elts_head 464 * Index of the linear buffer to use if necessary (normally txq->elts_head). 465 * @param[out] sges 466 * Array filled with SGEs on success. 467 * 468 * @return 469 * A structure containing the processed packet size in bytes and the 470 * number of SGEs. Both fields are set to (unsigned int)-1 in case of 471 * failure. 472 */ 473 static struct tx_burst_sg_ret { 474 unsigned int length; 475 unsigned int num; 476 } 477 tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, 478 struct rte_mbuf *buf, unsigned int elts_head, 479 struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N]) 480 { 481 unsigned int sent_size = 0; 482 unsigned int j; 483 int linearize = 0; 484 485 /* When there are too many segments, extra segments are 486 * linearized in the last SGE. */ 487 if (unlikely(segs > RTE_DIM(*sges))) { 488 segs = (RTE_DIM(*sges) - 1); 489 linearize = 1; 490 } 491 /* Update element. */ 492 elt->buf = buf; 493 /* Register segments as SGEs. */ 494 for (j = 0; (j != segs); ++j) { 495 struct ibv_sge *sge = &(*sges)[j]; 496 uint32_t lkey; 497 498 /* Retrieve Memory Region key for this memory pool. */ 499 lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 500 if (unlikely(lkey == (uint32_t)-1)) { 501 /* MR does not exist. */ 502 DEBUG("%p: unable to get MP <-> MR association", 503 (void *)txq); 504 /* Clean up TX element. */ 505 elt->buf = NULL; 506 goto stop; 507 } 508 /* Update SGE. */ 509 sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); 510 if (txq->priv->vf) 511 rte_prefetch0((volatile void *) 512 (uintptr_t)sge->addr); 513 sge->length = DATA_LEN(buf); 514 sge->lkey = lkey; 515 sent_size += sge->length; 516 buf = NEXT(buf); 517 } 518 /* If buf is not NULL here and is not going to be linearized, 519 * nb_segs is not valid. */ 520 assert(j == segs); 521 assert((buf == NULL) || (linearize)); 522 /* Linearize extra segments. */ 523 if (linearize) { 524 struct ibv_sge *sge = &(*sges)[segs]; 525 linear_t *linear = &(*txq->elts_linear)[elts_head]; 526 unsigned int size = linearize_mbuf(linear, buf); 527 528 assert(segs == (RTE_DIM(*sges) - 1)); 529 if (size == 0) { 530 /* Invalid packet. */ 531 DEBUG("%p: packet too large to be linearized.", 532 (void *)txq); 533 /* Clean up TX element. */ 534 elt->buf = NULL; 535 goto stop; 536 } 537 /* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */ 538 if (RTE_DIM(*sges) == 1) { 539 do { 540 struct rte_mbuf *next = NEXT(buf); 541 542 rte_pktmbuf_free_seg(buf); 543 buf = next; 544 } while (buf != NULL); 545 elt->buf = NULL; 546 } 547 /* Update SGE. */ 548 sge->addr = (uintptr_t)&(*linear)[0]; 549 sge->length = size; 550 sge->lkey = txq->mr_linear->lkey; 551 sent_size += size; 552 /* Include last segment. */ 553 segs++; 554 } 555 return (struct tx_burst_sg_ret){ 556 .length = sent_size, 557 .num = segs, 558 }; 559 stop: 560 return (struct tx_burst_sg_ret){ 561 .length = -1, 562 .num = -1, 563 }; 564 } 565 566 #endif /* MLX5_PMD_SGE_WR_N > 1 */ 567 568 /** 569 * DPDK callback for TX. 570 * 571 * @param dpdk_txq 572 * Generic pointer to TX queue structure. 573 * @param[in] pkts 574 * Packets to transmit. 575 * @param pkts_n 576 * Number of packets in array. 577 * 578 * @return 579 * Number of packets successfully transmitted (<= pkts_n). 580 */ 581 uint16_t 582 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 583 { 584 struct txq *txq = (struct txq *)dpdk_txq; 585 unsigned int elts_head = txq->elts_head; 586 const unsigned int elts_n = txq->elts_n; 587 unsigned int elts_comp_cd = txq->elts_comp_cd; 588 unsigned int elts_comp = 0; 589 unsigned int i; 590 unsigned int max; 591 int err; 592 struct rte_mbuf *buf = pkts[0]; 593 594 assert(elts_comp_cd != 0); 595 /* Prefetch first packet cacheline. */ 596 rte_prefetch0(buf); 597 txq_complete(txq); 598 max = (elts_n - (elts_head - txq->elts_tail)); 599 if (max > elts_n) 600 max -= elts_n; 601 assert(max >= 1); 602 assert(max <= elts_n); 603 /* Always leave one free entry in the ring. */ 604 --max; 605 if (max == 0) 606 return 0; 607 if (max > pkts_n) 608 max = pkts_n; 609 for (i = 0; (i != max); ++i) { 610 struct rte_mbuf *buf_next = pkts[i + 1]; 611 unsigned int elts_head_next = 612 (((elts_head + 1) == elts_n) ? 0 : elts_head + 1); 613 struct txq_elt *elt = &(*txq->elts)[elts_head]; 614 unsigned int segs = NB_SEGS(buf); 615 #ifdef MLX5_PMD_SOFT_COUNTERS 616 unsigned int sent_size = 0; 617 #endif 618 uint32_t send_flags = 0; 619 #ifdef HAVE_VERBS_VLAN_INSERTION 620 int insert_vlan = 0; 621 #endif /* HAVE_VERBS_VLAN_INSERTION */ 622 623 if (i + 1 < max) 624 rte_prefetch0(buf_next); 625 /* Request TX completion. */ 626 if (unlikely(--elts_comp_cd == 0)) { 627 elts_comp_cd = txq->elts_comp_cd_init; 628 ++elts_comp; 629 send_flags |= IBV_EXP_QP_BURST_SIGNALED; 630 } 631 /* Should we enable HW CKSUM offload */ 632 if (buf->ol_flags & 633 (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { 634 send_flags |= IBV_EXP_QP_BURST_IP_CSUM; 635 /* HW does not support checksum offloads at arbitrary 636 * offsets but automatically recognizes the packet 637 * type. For inner L3/L4 checksums, only VXLAN (UDP) 638 * tunnels are currently supported. */ 639 if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type)) 640 send_flags |= IBV_EXP_QP_BURST_TUNNEL; 641 } 642 if (buf->ol_flags & PKT_TX_VLAN_PKT) { 643 #ifdef HAVE_VERBS_VLAN_INSERTION 644 if (!txq->priv->mps) 645 insert_vlan = 1; 646 else 647 #endif /* HAVE_VERBS_VLAN_INSERTION */ 648 { 649 err = insert_vlan_sw(buf); 650 if (unlikely(err)) 651 goto stop; 652 } 653 } 654 if (likely(segs == 1)) { 655 uintptr_t addr; 656 uint32_t length; 657 uint32_t lkey; 658 uintptr_t buf_next_addr; 659 660 /* Retrieve buffer information. */ 661 addr = rte_pktmbuf_mtod(buf, uintptr_t); 662 length = DATA_LEN(buf); 663 /* Update element. */ 664 elt->buf = buf; 665 if (txq->priv->vf) 666 rte_prefetch0((volatile void *) 667 (uintptr_t)addr); 668 /* Prefetch next buffer data. */ 669 if (i + 1 < max) { 670 buf_next_addr = 671 rte_pktmbuf_mtod(buf_next, uintptr_t); 672 rte_prefetch0((volatile void *) 673 (uintptr_t)buf_next_addr); 674 } 675 /* Put packet into send queue. */ 676 #if MLX5_PMD_MAX_INLINE > 0 677 if (length <= txq->max_inline) { 678 #ifdef HAVE_VERBS_VLAN_INSERTION 679 if (insert_vlan) 680 err = txq->send_pending_inline_vlan 681 (txq->qp, 682 (void *)addr, 683 length, 684 send_flags, 685 &buf->vlan_tci); 686 else 687 #endif /* HAVE_VERBS_VLAN_INSERTION */ 688 err = txq->send_pending_inline 689 (txq->qp, 690 (void *)addr, 691 length, 692 send_flags); 693 } else 694 #endif 695 { 696 /* Retrieve Memory Region key for this 697 * memory pool. */ 698 lkey = txq_mp2mr(txq, txq_mb2mp(buf)); 699 if (unlikely(lkey == (uint32_t)-1)) { 700 /* MR does not exist. */ 701 DEBUG("%p: unable to get MP <-> MR" 702 " association", (void *)txq); 703 /* Clean up TX element. */ 704 elt->buf = NULL; 705 goto stop; 706 } 707 #ifdef HAVE_VERBS_VLAN_INSERTION 708 if (insert_vlan) 709 err = txq->send_pending_vlan 710 (txq->qp, 711 addr, 712 length, 713 lkey, 714 send_flags, 715 &buf->vlan_tci); 716 else 717 #endif /* HAVE_VERBS_VLAN_INSERTION */ 718 err = txq->send_pending 719 (txq->qp, 720 addr, 721 length, 722 lkey, 723 send_flags); 724 } 725 if (unlikely(err)) 726 goto stop; 727 #ifdef MLX5_PMD_SOFT_COUNTERS 728 sent_size += length; 729 #endif 730 } else { 731 #if MLX5_PMD_SGE_WR_N > 1 732 struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; 733 struct tx_burst_sg_ret ret; 734 735 ret = tx_burst_sg(txq, segs, elt, buf, elts_head, 736 &sges); 737 if (ret.length == (unsigned int)-1) 738 goto stop; 739 /* Put SG list into send queue. */ 740 #ifdef HAVE_VERBS_VLAN_INSERTION 741 if (insert_vlan) 742 err = txq->send_pending_sg_list_vlan 743 (txq->qp, 744 sges, 745 ret.num, 746 send_flags, 747 &buf->vlan_tci); 748 else 749 #endif /* HAVE_VERBS_VLAN_INSERTION */ 750 err = txq->send_pending_sg_list 751 (txq->qp, 752 sges, 753 ret.num, 754 send_flags); 755 if (unlikely(err)) 756 goto stop; 757 #ifdef MLX5_PMD_SOFT_COUNTERS 758 sent_size += ret.length; 759 #endif 760 #else /* MLX5_PMD_SGE_WR_N > 1 */ 761 DEBUG("%p: TX scattered buffers support not" 762 " compiled in", (void *)txq); 763 goto stop; 764 #endif /* MLX5_PMD_SGE_WR_N > 1 */ 765 } 766 elts_head = elts_head_next; 767 buf = buf_next; 768 #ifdef MLX5_PMD_SOFT_COUNTERS 769 /* Increment sent bytes counter. */ 770 txq->stats.obytes += sent_size; 771 #endif 772 } 773 stop: 774 /* Take a shortcut if nothing must be sent. */ 775 if (unlikely(i == 0)) 776 return 0; 777 #ifdef MLX5_PMD_SOFT_COUNTERS 778 /* Increment sent packets counter. */ 779 txq->stats.opackets += i; 780 #endif 781 /* Ring QP doorbell. */ 782 err = txq->send_flush(txq->qp); 783 if (unlikely(err)) { 784 /* A nonzero value is not supposed to be returned. 785 * Nothing can be done about it. */ 786 DEBUG("%p: send_flush() failed with error %d", 787 (void *)txq, err); 788 } 789 txq->elts_head = elts_head; 790 txq->elts_comp += elts_comp; 791 txq->elts_comp_cd = elts_comp_cd; 792 return i; 793 } 794 795 /** 796 * Translate RX completion flags to packet type. 797 * 798 * @param flags 799 * RX completion flags returned by poll_length_flags(). 800 * 801 * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 802 * 803 * @return 804 * Packet type for struct rte_mbuf. 805 */ 806 static inline uint32_t 807 rxq_cq_to_pkt_type(uint32_t flags) 808 { 809 uint32_t pkt_type; 810 811 if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) 812 pkt_type = 813 TRANSPOSE(flags, 814 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, 815 RTE_PTYPE_L3_IPV4) | 816 TRANSPOSE(flags, 817 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, 818 RTE_PTYPE_L3_IPV6) | 819 TRANSPOSE(flags, 820 IBV_EXP_CQ_RX_IPV4_PACKET, 821 RTE_PTYPE_INNER_L3_IPV4) | 822 TRANSPOSE(flags, 823 IBV_EXP_CQ_RX_IPV6_PACKET, 824 RTE_PTYPE_INNER_L3_IPV6); 825 else 826 pkt_type = 827 TRANSPOSE(flags, 828 IBV_EXP_CQ_RX_IPV4_PACKET, 829 RTE_PTYPE_L3_IPV4) | 830 TRANSPOSE(flags, 831 IBV_EXP_CQ_RX_IPV6_PACKET, 832 RTE_PTYPE_L3_IPV6); 833 return pkt_type; 834 } 835 836 /** 837 * Translate RX completion flags to offload flags. 838 * 839 * @param[in] rxq 840 * Pointer to RX queue structure. 841 * @param flags 842 * RX completion flags returned by poll_length_flags(). 843 * 844 * @return 845 * Offload flags (ol_flags) for struct rte_mbuf. 846 */ 847 static inline uint32_t 848 rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) 849 { 850 uint32_t ol_flags = 0; 851 852 if (rxq->csum) { 853 /* Set IP checksum flag only for IPv4/IPv6 packets. */ 854 if (flags & 855 (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET)) 856 ol_flags |= 857 TRANSPOSE(~flags, 858 IBV_EXP_CQ_RX_IP_CSUM_OK, 859 PKT_RX_IP_CKSUM_BAD); 860 #ifdef HAVE_EXP_CQ_RX_TCP_PACKET 861 /* Set L4 checksum flag only for TCP/UDP packets. */ 862 if (flags & 863 (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET)) 864 #endif /* HAVE_EXP_CQ_RX_TCP_PACKET */ 865 ol_flags |= 866 TRANSPOSE(~flags, 867 IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK, 868 PKT_RX_L4_CKSUM_BAD); 869 } 870 /* 871 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place 872 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional 873 * (its value is 0). 874 */ 875 if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) 876 ol_flags |= 877 TRANSPOSE(~flags, 878 IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, 879 PKT_RX_IP_CKSUM_BAD) | 880 TRANSPOSE(~flags, 881 IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, 882 PKT_RX_L4_CKSUM_BAD); 883 return ol_flags; 884 } 885 886 /** 887 * DPDK callback for RX with scattered packets support. 888 * 889 * @param dpdk_rxq 890 * Generic pointer to RX queue structure. 891 * @param[out] pkts 892 * Array to store received packets. 893 * @param pkts_n 894 * Maximum number of packets in array. 895 * 896 * @return 897 * Number of packets successfully received (<= pkts_n). 898 */ 899 uint16_t 900 mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 901 { 902 struct rxq *rxq = (struct rxq *)dpdk_rxq; 903 struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; 904 const unsigned int elts_n = rxq->elts_n; 905 unsigned int elts_head = rxq->elts_head; 906 unsigned int i; 907 unsigned int pkts_ret = 0; 908 int ret; 909 910 if (unlikely(!rxq->sp)) 911 return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n); 912 if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */ 913 return 0; 914 for (i = 0; (i != pkts_n); ++i) { 915 struct rxq_elt_sp *elt = &(*elts)[elts_head]; 916 unsigned int len; 917 unsigned int pkt_buf_len; 918 struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */ 919 struct rte_mbuf **pkt_buf_next = &pkt_buf; 920 unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM; 921 unsigned int j = 0; 922 uint32_t flags; 923 uint16_t vlan_tci; 924 925 /* Sanity checks. */ 926 assert(elts_head < rxq->elts_n); 927 assert(rxq->elts_head < rxq->elts_n); 928 ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); 929 if (unlikely(ret < 0)) { 930 struct ibv_wc wc; 931 int wcs_n; 932 933 DEBUG("rxq=%p, poll_length() failed (ret=%d)", 934 (void *)rxq, ret); 935 /* ibv_poll_cq() must be used in case of failure. */ 936 wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); 937 if (unlikely(wcs_n == 0)) 938 break; 939 if (unlikely(wcs_n < 0)) { 940 DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", 941 (void *)rxq, wcs_n); 942 break; 943 } 944 assert(wcs_n == 1); 945 if (unlikely(wc.status != IBV_WC_SUCCESS)) { 946 /* Whatever, just repost the offending WR. */ 947 DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" 948 " completion status (%d): %s", 949 (void *)rxq, wc.wr_id, wc.status, 950 ibv_wc_status_str(wc.status)); 951 #ifdef MLX5_PMD_SOFT_COUNTERS 952 /* Increment dropped packets counter. */ 953 ++rxq->stats.idropped; 954 #endif 955 goto repost; 956 } 957 ret = wc.byte_len; 958 } 959 if (ret == 0) 960 break; 961 assert(ret >= (rxq->crc_present << 2)); 962 len = ret - (rxq->crc_present << 2); 963 pkt_buf_len = len; 964 /* 965 * Replace spent segments with new ones, concatenate and 966 * return them as pkt_buf. 967 */ 968 while (1) { 969 struct ibv_sge *sge = &elt->sges[j]; 970 struct rte_mbuf *seg = elt->bufs[j]; 971 struct rte_mbuf *rep; 972 unsigned int seg_tailroom; 973 974 assert(seg != NULL); 975 /* 976 * Fetch initial bytes of packet descriptor into a 977 * cacheline while allocating rep. 978 */ 979 rte_prefetch0(seg); 980 rep = rte_mbuf_raw_alloc(rxq->mp); 981 if (unlikely(rep == NULL)) { 982 /* 983 * Unable to allocate a replacement mbuf, 984 * repost WR. 985 */ 986 DEBUG("rxq=%p: can't allocate a new mbuf", 987 (void *)rxq); 988 if (pkt_buf != NULL) { 989 *pkt_buf_next = NULL; 990 rte_pktmbuf_free(pkt_buf); 991 } 992 /* Increment out of memory counters. */ 993 ++rxq->stats.rx_nombuf; 994 ++rxq->priv->dev->data->rx_mbuf_alloc_failed; 995 goto repost; 996 } 997 #ifndef NDEBUG 998 /* Poison user-modifiable fields in rep. */ 999 NEXT(rep) = (void *)((uintptr_t)-1); 1000 SET_DATA_OFF(rep, 0xdead); 1001 DATA_LEN(rep) = 0xd00d; 1002 PKT_LEN(rep) = 0xdeadd00d; 1003 NB_SEGS(rep) = 0x2a; 1004 PORT(rep) = 0x2a; 1005 rep->ol_flags = -1; 1006 #endif 1007 assert(rep->buf_len == seg->buf_len); 1008 assert(rep->buf_len == rxq->mb_len); 1009 /* Reconfigure sge to use rep instead of seg. */ 1010 assert(sge->lkey == rxq->mr->lkey); 1011 sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom); 1012 elt->bufs[j] = rep; 1013 ++j; 1014 /* Update pkt_buf if it's the first segment, or link 1015 * seg to the previous one and update pkt_buf_next. */ 1016 *pkt_buf_next = seg; 1017 pkt_buf_next = &NEXT(seg); 1018 /* Update seg information. */ 1019 seg_tailroom = (seg->buf_len - seg_headroom); 1020 assert(sge->length == seg_tailroom); 1021 SET_DATA_OFF(seg, seg_headroom); 1022 if (likely(len <= seg_tailroom)) { 1023 /* Last segment. */ 1024 DATA_LEN(seg) = len; 1025 PKT_LEN(seg) = len; 1026 /* Sanity check. */ 1027 assert(rte_pktmbuf_headroom(seg) == 1028 seg_headroom); 1029 assert(rte_pktmbuf_tailroom(seg) == 1030 (seg_tailroom - len)); 1031 break; 1032 } 1033 DATA_LEN(seg) = seg_tailroom; 1034 PKT_LEN(seg) = seg_tailroom; 1035 /* Sanity check. */ 1036 assert(rte_pktmbuf_headroom(seg) == seg_headroom); 1037 assert(rte_pktmbuf_tailroom(seg) == 0); 1038 /* Fix len and clear headroom for next segments. */ 1039 len -= seg_tailroom; 1040 seg_headroom = 0; 1041 } 1042 /* Update head and tail segments. */ 1043 *pkt_buf_next = NULL; 1044 assert(pkt_buf != NULL); 1045 assert(j != 0); 1046 NB_SEGS(pkt_buf) = j; 1047 PORT(pkt_buf) = rxq->port_id; 1048 PKT_LEN(pkt_buf) = pkt_buf_len; 1049 if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { 1050 pkt_buf->packet_type = rxq_cq_to_pkt_type(flags); 1051 pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags); 1052 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS 1053 if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { 1054 pkt_buf->ol_flags |= PKT_RX_VLAN_PKT; 1055 pkt_buf->vlan_tci = vlan_tci; 1056 } 1057 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 1058 } 1059 1060 /* Return packet. */ 1061 *(pkts++) = pkt_buf; 1062 ++pkts_ret; 1063 #ifdef MLX5_PMD_SOFT_COUNTERS 1064 /* Increment bytes counter. */ 1065 rxq->stats.ibytes += pkt_buf_len; 1066 #endif 1067 repost: 1068 ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges)); 1069 if (unlikely(ret)) { 1070 /* Inability to repost WRs is fatal. */ 1071 DEBUG("%p: recv_sg_list(): failed (ret=%d)", 1072 (void *)rxq->priv, 1073 ret); 1074 abort(); 1075 } 1076 if (++elts_head >= elts_n) 1077 elts_head = 0; 1078 continue; 1079 } 1080 if (unlikely(i == 0)) 1081 return 0; 1082 rxq->elts_head = elts_head; 1083 #ifdef MLX5_PMD_SOFT_COUNTERS 1084 /* Increment packets counter. */ 1085 rxq->stats.ipackets += pkts_ret; 1086 #endif 1087 return pkts_ret; 1088 } 1089 1090 /** 1091 * DPDK callback for RX. 1092 * 1093 * The following function is the same as mlx5_rx_burst_sp(), except it doesn't 1094 * manage scattered packets. Improves performance when MRU is lower than the 1095 * size of the first segment. 1096 * 1097 * @param dpdk_rxq 1098 * Generic pointer to RX queue structure. 1099 * @param[out] pkts 1100 * Array to store received packets. 1101 * @param pkts_n 1102 * Maximum number of packets in array. 1103 * 1104 * @return 1105 * Number of packets successfully received (<= pkts_n). 1106 */ 1107 uint16_t 1108 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 1109 { 1110 struct rxq *rxq = (struct rxq *)dpdk_rxq; 1111 struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; 1112 const unsigned int elts_n = rxq->elts_n; 1113 unsigned int elts_head = rxq->elts_head; 1114 struct ibv_sge sges[pkts_n]; 1115 unsigned int i; 1116 unsigned int pkts_ret = 0; 1117 int ret; 1118 1119 if (unlikely(rxq->sp)) 1120 return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n); 1121 for (i = 0; (i != pkts_n); ++i) { 1122 struct rxq_elt *elt = &(*elts)[elts_head]; 1123 unsigned int len; 1124 struct rte_mbuf *seg = elt->buf; 1125 struct rte_mbuf *rep; 1126 uint32_t flags; 1127 uint16_t vlan_tci; 1128 1129 /* Sanity checks. */ 1130 assert(seg != NULL); 1131 assert(elts_head < rxq->elts_n); 1132 assert(rxq->elts_head < rxq->elts_n); 1133 /* 1134 * Fetch initial bytes of packet descriptor into a 1135 * cacheline while allocating rep. 1136 */ 1137 rte_mbuf_prefetch_part1(seg); 1138 rte_mbuf_prefetch_part2(seg); 1139 ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); 1140 if (unlikely(ret < 0)) { 1141 struct ibv_wc wc; 1142 int wcs_n; 1143 1144 DEBUG("rxq=%p, poll_length() failed (ret=%d)", 1145 (void *)rxq, ret); 1146 /* ibv_poll_cq() must be used in case of failure. */ 1147 wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); 1148 if (unlikely(wcs_n == 0)) 1149 break; 1150 if (unlikely(wcs_n < 0)) { 1151 DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", 1152 (void *)rxq, wcs_n); 1153 break; 1154 } 1155 assert(wcs_n == 1); 1156 if (unlikely(wc.status != IBV_WC_SUCCESS)) { 1157 /* Whatever, just repost the offending WR. */ 1158 DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" 1159 " completion status (%d): %s", 1160 (void *)rxq, wc.wr_id, wc.status, 1161 ibv_wc_status_str(wc.status)); 1162 #ifdef MLX5_PMD_SOFT_COUNTERS 1163 /* Increment dropped packets counter. */ 1164 ++rxq->stats.idropped; 1165 #endif 1166 /* Add SGE to array for repost. */ 1167 sges[i] = elt->sge; 1168 goto repost; 1169 } 1170 ret = wc.byte_len; 1171 } 1172 if (ret == 0) 1173 break; 1174 assert(ret >= (rxq->crc_present << 2)); 1175 len = ret - (rxq->crc_present << 2); 1176 rep = rte_mbuf_raw_alloc(rxq->mp); 1177 if (unlikely(rep == NULL)) { 1178 /* 1179 * Unable to allocate a replacement mbuf, 1180 * repost WR. 1181 */ 1182 DEBUG("rxq=%p: can't allocate a new mbuf", 1183 (void *)rxq); 1184 /* Increment out of memory counters. */ 1185 ++rxq->stats.rx_nombuf; 1186 ++rxq->priv->dev->data->rx_mbuf_alloc_failed; 1187 goto repost; 1188 } 1189 1190 /* Reconfigure sge to use rep instead of seg. */ 1191 elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM; 1192 assert(elt->sge.lkey == rxq->mr->lkey); 1193 elt->buf = rep; 1194 1195 /* Add SGE to array for repost. */ 1196 sges[i] = elt->sge; 1197 1198 /* Update seg information. */ 1199 SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM); 1200 NB_SEGS(seg) = 1; 1201 PORT(seg) = rxq->port_id; 1202 NEXT(seg) = NULL; 1203 PKT_LEN(seg) = len; 1204 DATA_LEN(seg) = len; 1205 if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { 1206 seg->packet_type = rxq_cq_to_pkt_type(flags); 1207 seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags); 1208 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS 1209 if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { 1210 seg->ol_flags |= PKT_RX_VLAN_PKT; 1211 seg->vlan_tci = vlan_tci; 1212 } 1213 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 1214 } 1215 /* Return packet. */ 1216 *(pkts++) = seg; 1217 ++pkts_ret; 1218 #ifdef MLX5_PMD_SOFT_COUNTERS 1219 /* Increment bytes counter. */ 1220 rxq->stats.ibytes += len; 1221 #endif 1222 repost: 1223 if (++elts_head >= elts_n) 1224 elts_head = 0; 1225 continue; 1226 } 1227 if (unlikely(i == 0)) 1228 return 0; 1229 /* Repost WRs. */ 1230 #ifdef DEBUG_RECV 1231 DEBUG("%p: reposting %u WRs", (void *)rxq, i); 1232 #endif 1233 ret = rxq->recv(rxq->wq, sges, i); 1234 if (unlikely(ret)) { 1235 /* Inability to repost WRs is fatal. */ 1236 DEBUG("%p: recv_burst(): failed (ret=%d)", 1237 (void *)rxq->priv, 1238 ret); 1239 abort(); 1240 } 1241 rxq->elts_head = elts_head; 1242 #ifdef MLX5_PMD_SOFT_COUNTERS 1243 /* Increment packets counter. */ 1244 rxq->stats.ipackets += pkts_ret; 1245 #endif 1246 return pkts_ret; 1247 } 1248 1249 /** 1250 * Dummy DPDK callback for TX. 1251 * 1252 * This function is used to temporarily replace the real callback during 1253 * unsafe control operations on the queue, or in case of error. 1254 * 1255 * @param dpdk_txq 1256 * Generic pointer to TX queue structure. 1257 * @param[in] pkts 1258 * Packets to transmit. 1259 * @param pkts_n 1260 * Number of packets in array. 1261 * 1262 * @return 1263 * Number of packets successfully transmitted (<= pkts_n). 1264 */ 1265 uint16_t 1266 removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) 1267 { 1268 (void)dpdk_txq; 1269 (void)pkts; 1270 (void)pkts_n; 1271 return 0; 1272 } 1273 1274 /** 1275 * Dummy DPDK callback for RX. 1276 * 1277 * This function is used to temporarily replace the real callback during 1278 * unsafe control operations on the queue, or in case of error. 1279 * 1280 * @param dpdk_rxq 1281 * Generic pointer to RX queue structure. 1282 * @param[out] pkts 1283 * Array to store received packets. 1284 * @param pkts_n 1285 * Maximum number of packets in array. 1286 * 1287 * @return 1288 * Number of packets successfully received (<= pkts_n). 1289 */ 1290 uint16_t 1291 removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 1292 { 1293 (void)dpdk_rxq; 1294 (void)pkts; 1295 (void)pkts_n; 1296 return 0; 1297 } 1298