/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct priv;

/* Memory region queue object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	uint32_t lkey; /**< rte_cpu_to_be_32(mr->lkey). */
	uintptr_t start; /**< Start address of MR. */
	uintptr_t end; /**< End address of MR. */
	struct ibv_mr *mr; /**< Memory Region. */
	struct rte_mempool *mp; /**< Memory Pool. */
};
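
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): since
 * mlx5_mr.lkey above is stored pre-converted with rte_cpu_to_be_32(), it can
 * be copied as-is into the big-endian lkey field of a WQE data segment.
 */
static inline void
example_dseg_set_lkey(volatile struct mlx5_wqe_data_seg *dseg,
		      const struct mlx5_mr *mr)
{
	dseg->lkey = mr->lkey; /* Already big-endian, no extra byte swap. */
}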

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int :15; /* Remaining bits. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint16_t rq_ci;
	uint16_t rq_pi;
	uint16_t cq_ci;
	volatile struct mlx5_wqe_data_seg(*wqes)[];
	volatile struct mlx5_cqe(*cqes)[];
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[];
	struct rte_mempool *mp;
	struct mlx5_rxq_stats stats;
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel;
	struct mlx5_mr *mr; /* Memory Region (for mp). */
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint16_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint8_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};
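
/*
 * Illustrative sketch (hypothetical helpers, not part of the driver): the
 * *_n fields of struct mlx5_rxq_data are log2 values, so ring sizes and
 * wrap-around masks are derived as shown below.
 */
static inline uint16_t
example_rxq_elts_cnt(const struct mlx5_rxq_data *rxq)
{
	return 1 << rxq->elts_n; /* Number of mbuf slots in (*elts)[]. */
}

static inline uint16_t
example_rxq_cqe_mask(const struct mlx5_rxq_data *rxq)
{
	return (1 << rxq->cqe_n) - 1; /* Index mask for (*cqes)[]. */
}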

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
	uint16_t cq_pi; /* Producer index for completion queue. */
#endif
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, TX offloads for tunneled packets are supported. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint16_t mr_cache_idx; /* Index of last hit entry. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from Verbs. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
int priv_rx_intr_vec_enable(struct priv *priv);
void priv_rx_intr_vec_disable(struct priv *priv);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t);
struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
					uint16_t, unsigned int,
					const struct rte_eth_rxconf *,
					struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
int mlx5_priv_rxq_verify(struct priv *);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
							uint16_t [],
							uint16_t);
struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
							uint16_t [],
							uint16_t);
int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
int mlx5_priv_ind_table_ibv_verify(struct priv *);
struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
				     uint64_t, uint16_t [], uint16_t);
struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
				     uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
int priv_tx_uar_remap(struct priv *priv, int fd);
struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t);
struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_verify(struct priv *);
struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
					uint16_t, unsigned int,
					const struct rte_eth_txconf *);
struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
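
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * *_get()/*_new()/*_release() triplets declared above follow a
 * reference-counted get-or-create pattern; the assumption here is that
 * *_get() returns NULL when the object does not exist yet.
 */
static inline struct mlx5_hrxq *
example_hrxq_get_or_create(struct priv *priv, uint8_t *rss_key,
			   uint8_t rss_key_len, uint64_t hash_fields,
			   uint16_t queues[], uint16_t queues_n)
{
	struct mlx5_hrxq *hrxq;

	/* Reuse an existing hash Rx queue if one matches, else create it. */
	hrxq = mlx5_priv_hrxq_get(priv, rss_key, rss_key_len, hash_fields,
				  queues, queues_n);
	if (hrxq == NULL)
		hrxq = mlx5_priv_hrxq_new(priv, rss_key, rss_key_len,
					  hash_fields, queues, queues_n);
	return hrxq;
}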

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];

void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);

/* Vectorized version of mlx5_rxtx.c */
int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);

/* mlx5_mr.c */

void mlx5_mp2mr_iter(struct rte_mempool *, void *);
struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
				   struct rte_mempool *, unsigned int);
struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
				   unsigned int);

#ifndef NDEBUG
/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time the CQE is seen, non-zero otherwise.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}
#endif /* NDEBUG */

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE error %u (0x%02x)"
			      " syndrome 0x%02x",
			      op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE opcode %u (0x%02x)",
			      op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}
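
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): with a
 * power-of-two CQ of cqes_n entries, "ci & cqes_n" extracts the wrap parity
 * of the consumer index, which check_cqe() compares against the CQE owner
 * bit to detect whether hardware has written the next CQE yet.
 */
static inline int
example_cq_has_new_cqe(volatile struct mlx5_cqe (*cqes)[],
		       unsigned int cqes_n, uint16_t ci)
{
	volatile struct mlx5_cqe *cqe = &(*cqes)[ci & (cqes_n - 1)];

	return !check_cqe(cqe, cqes_n, ci); /* 1 when a valid CQE is ready. */
}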

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected error CQE, TX stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) *
				     MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free buffers. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx5_tx_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
547 */ 548 static __rte_always_inline uint32_t 549 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb) 550 { 551 uint16_t i = txq->mr_cache_idx; 552 uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t); 553 struct mlx5_mr *mr; 554 555 assert(i < RTE_DIM(txq->mp2mr)); 556 if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr)) 557 return txq->mp2mr[i]->lkey; 558 for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { 559 if (unlikely(txq->mp2mr[i] == NULL || 560 txq->mp2mr[i]->mr == NULL)) { 561 /* Unknown MP, add a new MR for it. */ 562 break; 563 } 564 if (txq->mp2mr[i]->start <= addr && 565 txq->mp2mr[i]->end > addr) { 566 assert(txq->mp2mr[i]->lkey != (uint32_t)-1); 567 txq->mr_cache_idx = i; 568 return txq->mp2mr[i]->lkey; 569 } 570 } 571 mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i); 572 /* 573 * Request the reference to use in this queue, the original one is 574 * kept by the control plane. 575 */ 576 if (mr) { 577 rte_atomic32_inc(&mr->refcnt); 578 txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i; 579 return mr->lkey; 580 } else { 581 struct rte_mempool *mp = mlx5_tx_mb2mp(mb); 582 583 WARN("Failed to register mempool 0x%p(%s)", 584 (void *)mp, mp->name); 585 } 586 return (uint32_t)-1; 587 } 588 589 /** 590 * Ring TX queue doorbell and flush the update if requested. 591 * 592 * @param txq 593 * Pointer to TX queue structure. 594 * @param wqe 595 * Pointer to the last WQE posted in the NIC. 596 * @param cond 597 * Request for write memory barrier after BlueFlame update. 598 */ 599 static __rte_always_inline void 600 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe, 601 int cond) 602 { 603 uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); 604 volatile uint64_t *src = ((volatile uint64_t *)wqe); 605 606 rte_cio_wmb(); 607 *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); 608 /* Ensure ordering between DB record and BF copy. */ 609 rte_wmb(); 610 *dst = *src; 611 if (cond) 612 rte_wmb(); 613 } 614 615 /** 616 * Ring TX queue doorbell and flush the update by write memory barrier. 617 * 618 * @param txq 619 * Pointer to TX queue structure. 620 * @param wqe 621 * Pointer to the last WQE posted in the NIC. 622 */ 623 static __rte_always_inline void 624 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe) 625 { 626 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1); 627 } 628 629 /** 630 * Convert the Checksum offloads to Verbs. 631 * 632 * @param txq_data 633 * Pointer to the Tx queue. 634 * @param buf 635 * Pointer to the mbuf. 636 * 637 * @return 638 * the converted cs_flags. 639 */ 640 static __rte_always_inline uint8_t 641 txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf) 642 { 643 uint8_t cs_flags = 0; 644 645 /* Should we enable HW CKSUM offload */ 646 if (buf->ol_flags & 647 (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | 648 PKT_TX_OUTER_IP_CKSUM)) { 649 if (txq_data->tunnel_en && 650 (buf->ol_flags & 651 (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) { 652 cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | 653 MLX5_ETH_WQE_L4_INNER_CSUM; 654 if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) 655 cs_flags |= MLX5_ETH_WQE_L3_CSUM; 656 } else { 657 cs_flags = MLX5_ETH_WQE_L3_CSUM | 658 MLX5_ETH_WQE_L4_CSUM; 659 } 660 } 661 return cs_flags; 662 } 663 664 /** 665 * Count the number of contiguous single segment packets. 666 * 667 * @param pkts 668 * Pointer to array of packets. 669 * @param pkts_n 670 * Number of packets. 671 * 672 * @return 673 * Number of contiguous single segment packets. 
674 */ 675 static __rte_always_inline unsigned int 676 txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n) 677 { 678 unsigned int pos; 679 680 if (!pkts_n) 681 return 0; 682 /* Count the number of contiguous single segment packets. */ 683 for (pos = 0; pos < pkts_n; ++pos) 684 if (NB_SEGS(pkts[pos]) > 1) 685 break; 686 return pos; 687 } 688 689 /** 690 * Count the number of contiguous multi-segment packets. 691 * 692 * @param pkts 693 * Pointer to array of packets. 694 * @param pkts_n 695 * Number of packets. 696 * 697 * @return 698 * Number of contiguous multi-segment packets. 699 */ 700 static __rte_always_inline unsigned int 701 txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n) 702 { 703 unsigned int pos; 704 705 if (!pkts_n) 706 return 0; 707 /* Count the number of contiguous multi-segment packets. */ 708 for (pos = 0; pos < pkts_n; ++pos) 709 if (NB_SEGS(pkts[pos]) == 1) 710 break; 711 return pos; 712 } 713 714 #endif /* RTE_PMD_MLX5_RXTX_H_ */ 715