/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "rte_pmd_mlx5.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint64_t ipackets; /**< Total of successfully received packets. */
        uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
        uint64_t idropped; /**< Total of packets dropped when RX ring full. */
        uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
        uint16_t cqe_cnt; /* Number of CQEs. */
        uint16_t ai; /* Array index. */
        uint32_t ca; /* Current array index. */
        uint32_t na; /* Next array index. */
        uint32_t cq_ci; /* The next CQE. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
                                sizeof(struct mlx5_mprq_buf) + \
                                (strd_n) * \
                                sizeof(struct rte_mbuf_ext_shared_info) + \
                                RTE_PKTMBUF_HEADROOM))
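/*
 * Illustrative layout assumed by mlx5_mprq_buf_addr() (a sketch, not
 * normative): each MPRQ buffer allocated from rxq->mprq_mp is laid out as
 *
 *   struct mlx5_mprq_buf header
 *   strd_n * struct rte_mbuf_ext_shared_info   (one shinfo per stride)
 *   RTE_PKTMBUF_HEADROOM
 *   stride data area
 *
 * so stride `i` of a buffer starts at
 *
 *   RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), i * (1U << log_strd_sz))
 *
 * plus an optional 2-byte shift when strd_shift_en is set, which is how
 * mprq_buf_to_pkt() below computes its `addr`.
 */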
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
        MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
        MLX5_RXQ_ERR_STATE_NEED_RESET,
        MLX5_RXQ_ERR_STATE_NEED_READY,
        MLX5_RXQ_ERR_STATE_IGNORE,
};

enum mlx5_rqx_code {
        MLX5_RXQ_CODE_EXIT = 0,
        MLX5_RXQ_CODE_NOMBUF,
        MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
        struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
        uint16_t length; /**< Segment data length, configures split point. */
        uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
        uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct __rte_cache_aligned mlx5_rxq_data {
        unsigned int csum:1; /* Enable checksum offloading. */
        unsigned int hw_timestamp:1; /* Enable HW timestamp. */
        unsigned int rt_timestamp:1; /* Realtime timestamp format. */
        unsigned int vlan_strip:1; /* Enable VLAN stripping. */
        unsigned int crc_present:1; /* CRC must be subtracted. */
        unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
        unsigned int cqe_n:4; /* Log 2 of CQ elements. */
        unsigned int elts_n:4; /* Log 2 of Mbufs. */
        unsigned int rss_hash:1; /* RSS hash result is enabled. */
        unsigned int mark:1; /* Marked flow available on the queue. */
        unsigned int log_strd_num:5; /* Log 2 of the number of strides. */
        unsigned int log_strd_sz:4; /* Log 2 of stride size. */
        unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
        unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
        unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
        unsigned int lro:1; /* Enable LRO. */
        unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
        unsigned int mcqe_format:3; /* CQE compression format. */
        unsigned int shared:1; /* Shared RXQ. */
        unsigned int delay_drop:1; /* Enable delay drop. */
        unsigned int cqe_comp_layout:1; /* CQE compression layout. */
        uint16_t port_id;
        volatile uint32_t *rq_db;
        volatile uint32_t *cq_db;
        uint32_t elts_ci;
        uint32_t rq_ci;
        uint16_t consumed_strd; /* Number of consumed strides in WQE. */
        uint32_t rq_pi;
        uint32_t cq_ci:24;
        uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
        uint32_t byte_mask;
        union {
                struct rxq_zip zip; /* Compressed context. */
                uint16_t decompressed;
                /* Number of ready mbufs decompressed from the CQ. */
        };
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
        volatile void *wqes;
        volatile struct mlx5_cqe(*cqes)[];
        struct mlx5_cqe title_cqe; /* Title CQE for CQE compression. */
        struct rte_mbuf *(*elts)[];
        struct rte_mbuf title_pkt; /* Title packet for CQE compression. */
        struct mlx5_mprq_buf *(*mprq_bufs)[];
        struct rte_mempool *mp;
        struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
        struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
        uint16_t idx; /* Queue index. */
        struct mlx5_rxq_stats stats;
        struct mlx5_rxq_stats stats_reset; /* Stats on last reset. */
        rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
        struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
        struct mlx5_uar_data uar_data; /* CQ doorbell. */
        uint32_t cqn; /* CQ number. */
        uint8_t cq_arm_sn; /* CQ arm seq number. */
        uint64_t mark_flag; /* ol_flags to set with marks. */
        uint32_t tunnel; /* Tunnel information. */
        int timestamp_offset; /* Dynamic mbuf field for timestamp. */
        uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
        uint64_t flow_meta_mask;
        int32_t flow_meta_offset;
        uint32_t flow_meta_port_mask;
        uint32_t rxseg_n; /* Number of split segment descriptions. */
        struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
        /* Buffer split segment descriptions - sizes, offsets, pools. */
};
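/*
 * Note (illustrative, not normative): several mlx5_rxq_data fields above are
 * stored as log2 values to keep the bit-fields compact, so the actual counts
 * are recovered as, e.g.:
 *
 *   const uint32_t strd_n  = RTE_BIT32(rxq->log_strd_num); // strides per WQE
 *   const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);  // stride size, bytes
 *   const uint32_t elts_n  = 1U << rxq->elts_n;            // number of mbufs
 *   const uint32_t cqe_n   = 1U << rxq->cqe_n;             // number of CQEs
 */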
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
        struct mlx5_rxq_data rxq; /* Data path structure. */
        LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
        bool is_hairpin; /* Whether RxQ type is Hairpin. */
        unsigned int socket; /* CPU socket ID for allocations. */
        LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
        RTE_ATOMIC(int32_t) ctrl_ref; /* Reference counter. */
        uint32_t share_group; /* Group ID of shared RXQ. */
        uint16_t share_qid; /* Shared RxQ ID in group. */
        unsigned int started:1; /* Whether (shared) RXQ has been started. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
        uint32_t wqn; /* WQ number. */
        uint32_t rxseg_n; /* Number of split segment descriptions. */
        struct rte_eth_rxseg_split rxseg[MLX5_MAX_RXQ_NSEG];
        /* Saved original buffer split segment configuration. */
        uint16_t dump_file_n; /* Number of dump files. */
};

/* RX queue private data. */
struct mlx5_rxq_priv {
        uint16_t idx; /* Queue index. */
        RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
        struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
        LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        struct mlx5_devx_rq devx_rq;
        struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
        uint32_t hairpin_status; /* Hairpin binding status. */
        uint32_t lwm:16;
        uint32_t lwm_event_pending:1;
        uint32_t lwm_devx_subscribed:1;
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   const struct rte_eth_rxseg_split *rx_seg,
                                   uint16_t n_seg, bool is_extmem);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_q *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
                                         uint16_t idx);
uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_q *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
                                         uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int mlx5_ext_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
                                                  const uint16_t *queues,
                                                  uint32_t queues_n);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
                                                  const uint16_t *queues,
                                                  uint32_t queues_n,
                                                  bool standalone,
                                                  bool ref_qs);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
                               struct mlx5_ind_table_obj *ind_tbl,
                               bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
                             struct mlx5_ind_table_obj *ind_tbl,
                             bool ref_qs);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
                              struct mlx5_ind_table_obj *ind_tbl,
                              uint16_t *queues, const uint32_t queues_n,
                              bool standalone,
                              bool ref_new_qs, bool deref_old_qs);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
                              struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
                              struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
                       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
                                           struct mlx5_list_entry *entry,
                                           void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
                             struct mlx5_list_entry *entry);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
bool mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
        (struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
                     const uint8_t *rss_key, uint32_t rss_key_len,
                     uint64_t hash_fields, bool symmetric_hash_function,
                     const uint16_t *queues, uint32_t queues_n);
/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
                                      uint16_t err_n, uint16_t *skip_cnt);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
                            uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                           struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
void mlx5_dev_interrupt_handler_lwm(void *args);
int mlx5_rx_queue_lwm_set(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                          uint8_t lwm);
int mlx5_rx_queue_lwm_query(struct rte_eth_dev *dev, uint16_t *rx_queue_id,
                            uint8_t *lwm);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
                           uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
                                uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey for an address on Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        struct rte_mempool *mp;
        uint32_t lkey;

        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
                                   MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
        return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param mb
 *   Buffer to search the address of.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
{
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;

        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
                                   MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Slower search in the mempool database on miss. */
        return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}
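/*
 * Illustrative sketch (not taken verbatim from the driver): the LKey resolved
 * by mlx5_rx_mb2mr()/mlx5_rx_addr2mr() is what the datapath writes into an RQ
 * WQE data segment when posting a receive buffer, e.g.:
 *
 *   volatile struct mlx5_wqe_data_seg *dseg = ...; // WQE slot being refilled
 *   struct rte_mbuf *mb = ...;                     // replenishment mbuf
 *
 *   dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
 *   dseg->lkey = mlx5_rx_mb2mr(rxq, mb); // no byte swap needed, matching
 *                                        // its use in mprq_buf_replace() below
 *
 * The linear MR cache lookup is the fast path; mlx5_mr_mempool2mr_bh() is the
 * slower fallback that consults the mempool registration database.
 */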
/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
                rte_mbuf_timestamp_t timestamp)
{
        *RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
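/*
 * Illustrative sketch (an assumption about typical usage, not copied from the
 * driver): the `offset` passed here is rxq->timestamp_offset, which is
 * resolved once through the mbuf dynamic-field API, e.g.:
 *
 *   int offset = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
 *                                         NULL);
 *
 * and an application can read the stamped value back the same way:
 *
 *   rte_mbuf_timestamp_t ts =
 *       *RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *);
 */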
/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
        const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
        struct mlx5_mprq_buf *rep = rxq->mprq_repl;
        volatile struct mlx5_wqe_data_seg *wqe =
                &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
        struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
        void *addr;

        if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
                MLX5_ASSERT(rep != NULL);
                /* Replace MPRQ buf. */
                (*rxq->mprq_bufs)[rq_idx] = rep;
                /* Replace WQE. */
                addr = mlx5_mprq_buf_addr(rep, strd_n);
                wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
                /* If there's only one MR, no need to replace LKey in WQE. */
                if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
                        wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
                /* Stash a mbuf for next replacement. */
                if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
                        rxq->mprq_repl = rep;
                else
                        rxq->mprq_repl = NULL;
                /* Release the old buffer. */
                mlx5_mprq_buf_free(buf);
        } else if (unlikely(rxq->mprq_repl == NULL)) {
                struct mlx5_mprq_buf *rep;

                /*
                 * The MPRQ mempool is currently out of buffers, so every Rx
                 * packet is being copied via memcpy regardless of its size.
                 * Retry the allocation to get back to normal operation.
                 */
                if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
                        rxq->mprq_repl = rep;
        }
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
                struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
        const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
        const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
        const uint16_t strd_shift =
                MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
        const int32_t hdrm_overlap =
                len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
        const uint32_t offset = strd_idx * strd_sz + strd_shift;
        void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

        /*
         * Memcpy packets to the target mbuf if:
         * - The size of packet is smaller than mprq_max_memcpy_len.
         * - The mempool for Multi-Packet RQ is out of buffers.
         * - The packet's stride overlaps a headroom and scatter is off.
         */
        if (len <= rxq->mprq_max_memcpy_len ||
            rxq->mprq_repl == NULL ||
            (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
                if (likely(len <=
                           (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
                        rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
                                   addr, len);
                        DATA_LEN(pkt) = len;
                } else if (rxq->strd_scatter_en) {
                        struct rte_mbuf *prev = pkt;
                        uint32_t seg_len = RTE_MIN(len, (uint32_t)
                                (pkt->buf_len - RTE_PKTMBUF_HEADROOM));
                        uint32_t rem_len = len - seg_len;

                        rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
                                   addr, seg_len);
                        DATA_LEN(pkt) = seg_len;
                        while (rem_len) {
                                struct rte_mbuf *next =
                                        rte_pktmbuf_alloc(rxq->mp);

                                if (unlikely(next == NULL))
                                        return MLX5_RXQ_CODE_NOMBUF;
                                NEXT(prev) = next;
                                SET_DATA_OFF(next, 0);
                                addr = RTE_PTR_ADD(addr, seg_len);
                                seg_len = RTE_MIN(rem_len, (uint32_t)
                                        (next->buf_len - RTE_PKTMBUF_HEADROOM));
                                rte_memcpy
                                        (rte_pktmbuf_mtod(next, void *),
                                         addr, seg_len);
                                DATA_LEN(next) = seg_len;
                                rem_len -= seg_len;
                                prev = next;
                                ++NB_SEGS(pkt);
                        }
                } else {
                        return MLX5_RXQ_CODE_DROPPED;
                }
        } else {
                rte_iova_t buf_iova;
                struct rte_mbuf_ext_shared_info *shinfo;
                uint16_t buf_len = strd_cnt * strd_sz;
                void *buf_addr;

                /* Increment the refcnt of the whole chunk. */
                rte_atomic_fetch_add_explicit(&buf->refcnt, 1,
                                              rte_memory_order_relaxed);
                MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
                            rte_memory_order_relaxed) <= strd_n + 1);
                buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
                /*
                 * The mlx5 device does not use the IOVA itself, but it is
                 * needed in case the Rx packet is later transmitted via a
                 * different PMD.
                 */
                buf_iova = rte_mempool_virt2iova(buf) +
                           RTE_PTR_DIFF(buf_addr, buf);
                shinfo = &buf->shinfos[strd_idx];
                rte_mbuf_ext_refcnt_set(shinfo, 1);
                /*
                 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
                 * attaching the stride to mbuf and more offload flags
                 * will be added below by calling rxq_cq_to_mbuf().
                 * Other fields will be overwritten.
                 */
                rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
                                          buf_len, shinfo);
                /* Set mbuf head-room. */
                SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
                MLX5_ASSERT(pkt->ol_flags & RTE_MBUF_F_EXTERNAL);
                MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
                            len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
                DATA_LEN(pkt) = len;
                /*
                 * Copy the last fragment of a packet (up to headroom
                 * size bytes) in case there is a stride overlap with
                 * a next packet's headroom. Allocate a separate mbuf
                 * to store this fragment and link it. Scatter is on.
                 */
                if (hdrm_overlap > 0) {
                        MLX5_ASSERT(rxq->strd_scatter_en);
                        struct rte_mbuf *seg =
                                rte_pktmbuf_alloc(rxq->mp);

                        if (unlikely(seg == NULL))
                                return MLX5_RXQ_CODE_NOMBUF;
                        SET_DATA_OFF(seg, 0);
                        rte_memcpy(rte_pktmbuf_mtod(seg, void *),
                                   RTE_PTR_ADD(addr, len - hdrm_overlap),
                                   hdrm_overlap);
                        DATA_LEN(seg) = hdrm_overlap;
                        DATA_LEN(pkt) = len - hdrm_overlap;
                        NEXT(pkt) = seg;
                        NB_SEGS(pkt) = 2;
                }
        }
        return MLX5_RXQ_CODE_EXIT;
}
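/*
 * Worked example of the hdrm_overlap check above (numbers are illustrative):
 * with 2048-byte strides (log_strd_sz = 11), strd_cnt = 2 and
 * RTE_PKTMBUF_HEADROOM = 128, a 4000-byte packet gives
 *
 *   hdrm_overlap = 4000 + 128 - 2 * 2048 = 32 > 0
 *
 * i.e. the attached external buffer region (strides minus headroom) cannot
 * hold the packet tail, which would spill into the next packet's headroom.
 * In that case the last 32 bytes are copied into a separate linked mbuf
 * (scatter on), or the whole packet is copied/dropped (scatter off).
 */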
/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->config.mprq.enabled &&
            priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
                return 1;
        return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
        return rxq->log_strd_num > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t i;
        uint16_t n = 0;
        uint16_t n_ibv = 0;

        if (mlx5_check_mprq_support(dev) < 0)
                return 0;
        /* All the configured queues should be enabled. */
        for (i = 0; i < priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

                if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
                        continue;
                n_ibv++;
                if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
                        ++n;
        }
        /* Multi-Packet RQ can't be partially configured. */
        MLX5_ASSERT(n == 0 || n == n_ibv);
        return n == n_ibv;
}

/**
 * Check whether given RxQ is external.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Rx queue index.
 *
 * @return
 *   True if it is an external RxQ, otherwise false.
 */
static __rte_always_inline bool
mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_external_q *rxq;

        if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
                return false;
        rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
        return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}

#define LWM_COOKIE_RXQID_OFFSET 0
#define LWM_COOKIE_RXQID_MASK 0xffff
#define LWM_COOKIE_PORTID_OFFSET 16
#define LWM_COOKIE_PORTID_MASK 0xffff
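/*
 * Illustrative sketch (an assumption, not a driver API): the LWM event cookie
 * packs the port ID and Rx queue ID using the offsets/masks above, so a
 * 32-bit cookie can be built and decoded as:
 *
 *   uint32_t cookie = ((uint32_t)(port_id & LWM_COOKIE_PORTID_MASK)
 *                          << LWM_COOKIE_PORTID_OFFSET) |
 *                     ((uint32_t)(rxq_id & LWM_COOKIE_RXQID_MASK)
 *                          << LWM_COOKIE_RXQID_OFFSET);
 *
 *   uint16_t port_id = (cookie >> LWM_COOKIE_PORTID_OFFSET) &
 *                      LWM_COOKIE_PORTID_MASK;
 *   uint16_t rxq_id  = (cookie >> LWM_COOKIE_RXQID_OFFSET) &
 *                      LWM_COOKIE_RXQID_MASK;
 */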
#endif /* RTE_PMD_MLX5_RX_H_ */