/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))
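
/*
 * Illustrative layout of an MPRQ buffer as implied by the macro above
 * (a reader's sketch, not a definition used by the code):
 *
 *   +-------------------------+ <- ptr (struct mlx5_mprq_buf header)
 *   | strd_n shared infos     |    one rte_mbuf_ext_shared_info per stride
 *   +-------------------------+
 *   | RTE_PKTMBUF_HEADROOM    |
 *   +-------------------------+ <- mlx5_mprq_buf_addr(ptr, strd_n)
 *   | stride 0, stride 1, ... |    packet data written by the HW
 *   +-------------------------+
 */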

#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	unsigned int shared:1; /* Shared RXQ. */
	unsigned int delay_drop:1; /* Enable delay drop. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	struct mlx5_uar_data uar_data; /* CQ doorbell. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;

enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int started:1; /* Whether (shared) RXQ has been started. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct rte_eth_rxseg_split rxseg[MLX5_MAX_RXQ_NSEG];
	/* Saved original buffer split segment configuration. */
	uint16_t dump_file_n; /* Number of dump files. */
};
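
/*
 * Informational sketch of how the Rx queue objects relate (derived from
 * the fields above, not a normative description): an mlx5_rxq_ctrl embeds
 * the datapath mlx5_rxq_data and may back a shared RXQ (see share_group /
 * share_qid); each owner keeps a per-port mlx5_rxq_priv, declared below,
 * linked on the 'owners' list and pointing back to both the control
 * structure and its port private data (this is what RXQ_PORT() walks).
 */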

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	struct mlx5_devx_rq devx_rq;
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
				   struct mlx5_rxq_priv *rxq,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone,
			       bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl,
			     bool ref_qs);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone,
			      bool ref_new_qs, bool deref_old_qs);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey for an address on Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}
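
/*
 * Usage sketch (illustrative only, not part of the driver API): the LKey
 * resolved here is what the datapath writes into a WQE data segment when
 * posting a receive buffer, e.g.
 *
 *   wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)buf_addr);
 *
 * mprq_buf_replace() below uses exactly this pattern; mlx5_rx_mb2mr()
 * next is the mbuf-based variant that falls back to the mbuf's own pool.
 */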

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param mb
 *   Buffer to search the address of.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slower search in the mempool database on miss. */
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}

/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   UTC in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
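
/*
 * Illustrative usage (a sketch under the assumption that the standard
 * Rx timestamp dynamic field/flag is used, not a sequence mandated by
 * this header): rxq->timestamp_offset and rxq->timestamp_rx_flag are
 * resolved once, e.g. via
 *
 *   int offset;
 *   uint64_t flag;
 *   rte_mbuf_dyn_rx_timestamp_register(&offset, &flag);
 *
 * and a receive routine then does, per packet carrying a CQE timestamp:
 *
 *   mlx5_timestamp_set(pkt, rxq->timestamp_offset,
 *			mlx5_txpp_convert_rx_ts(rxq->sh, cqe_ts));
 *   pkt->ol_flags |= rxq->timestamp_rx_flag;
 */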

/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffers and packets
		 * are copied with memcpy regardless of the Rx packet size.
		 * Retry allocation to get back to normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The packet size is not larger than mprq_max_memcpy_len.
	 * - The mempool for Multi-Packet RQ is out of buffers.
	 * - The packet's strides overlap the next headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				++NB_SEGS(pkt);
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
					    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * The MLX5 device does not use the IOVA itself, but it is
		 * needed in case the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL ||
		    rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

#endif /* RTE_PMD_MLX5_RX_H_ */