/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "rte_pmd_mlx5.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t cqe_cnt; /* Number of CQEs. */
	uint16_t ai; /* Array index. */
	uint32_t ca; /* Current array index. */
	uint32_t na; /* Next array index. */
	uint32_t cq_ci; /* The next CQE. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))
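
/*
 * Illustrative sketch (not part of the driver API): the macro above relies
 * on the MPRQ buffer layout being the struct mlx5_mprq_buf header, followed
 * by one rte_mbuf_ext_shared_info per stride, a packet headroom and finally
 * the stride data area. Ignoring the optional 2-byte stride shift, the
 * address of stride 'i' of size 'strd_sz' would then be computed as below;
 * 'buf', 'i', 'strd_n' and 'strd_sz' are hypothetical local variables.
 *
 *	void *stride = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n),
 *				   i * strd_sz);
 */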

#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
	MLX5_RXQ_ERR_STATE_IGNORE,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct __rte_cache_aligned mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int log_strd_num:5; /* Log 2 of the number of strides. */
	unsigned int log_strd_sz:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	unsigned int shared:1; /* Shared RXQ. */
	unsigned int delay_drop:1; /* Enable delay drop. */
	unsigned int cqe_comp_layout:1; /* CQE compression layout. */
	uint16_t port_id;
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci:24;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct mlx5_cqe title_cqe; /* Title CQE for CQE compression. */
	struct rte_mbuf *(*elts)[];
	struct rte_mbuf title_pkt; /* Title packet for CQE compression. */
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	struct mlx5_rxq_stats stats_reset; /* Stats on the last reset. */
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	struct mlx5_uar_data uar_data; /* CQ doorbell. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint64_t mark_flag; /* ol_flags to set with marks. */
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
};
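
/*
 * Illustrative sketch (not part of the driver API): several fields above are
 * log-2 encoded to keep the descriptor compact. The element counts would be
 * recovered with a shift, e.g. for a hypothetical 'rxq' pointer:
 *
 *	uint32_t elts_cnt = 1u << rxq->elts_n;             // Rx mbufs.
 *	uint32_t strd_cnt = RTE_BIT32(rxq->log_strd_num);  // Strides per WQE.
 *	uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);    // Bytes per stride.
 */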

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	bool is_hairpin; /* Whether RxQ type is Hairpin. */
	unsigned int socket; /* CPU socket ID for allocations. */
	LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
	RTE_ATOMIC(int32_t) ctrl_ref; /* Reference counter. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int started:1; /* Whether (shared) RXQ has been started. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct rte_eth_rxseg_split rxseg[MLX5_MAX_RXQ_NSEG];
	/* Saved original buffer split segment configuration. */
	uint16_t dump_file_n; /* Number of dump files. */
};
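
/*
 * Illustrative sketch (not part of the driver API): a shared Rx queue
 * control block keeps every per-port queue that references it on the
 * 'owners' list, which is what the RXQ_PORT()/RXQ_DEV() macros above rely
 * on; the per-port struct mlx5_rxq_priv is defined just below. Walking the
 * owners of a hypothetical 'rxq_ctrl' pointer would look like this, with
 * 'rxq' as a hypothetical iterator and handle_owner() a hypothetical helper.
 *
 *	struct mlx5_rxq_priv *rxq;
 *
 *	LIST_FOREACH(rxq, &rxq_ctrl->owners, owner_entry)
 *		handle_owner(rxq->priv, rxq->idx);
 */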

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	struct mlx5_devx_rq devx_rq;
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
	uint32_t lwm:16;
	uint32_t lwm_event_pending:1;
	uint32_t lwm_devx_subscribed:1;
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg, bool is_extmem);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_q *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
					 uint16_t idx);
uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_q *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
					 uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int mlx5_ext_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n,
						  bool standalone,
						  bool ref_qs);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl,
			     bool ref_qs);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone,
			      bool ref_new_qs, bool deref_old_qs);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
bool mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields, bool symmetric_hash_function,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
				      uint16_t err_n, uint16_t *skip_cnt);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
void mlx5_dev_interrupt_handler_lwm(void *args);
int mlx5_rx_queue_lwm_set(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			  uint8_t lwm);
int mlx5_rx_queue_lwm_query(struct rte_eth_dev *dev, uint16_t *rx_queue_id,
			    uint8_t *lwm);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey for an address on Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param mb
 *   Buffer to search the address of.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slower search in the mempool database on miss. */
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
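
/*
 * Illustrative sketch (not part of the driver API): the inline helpers above
 * serve the Rx datapath, which resolves the LKey of a freshly allocated mbuf
 * when rebuilding a WQE and stamps the Rx timestamp through the dynamic mbuf
 * field. A hypothetical replenish/completion step could look like this,
 * where 'wqe', 'mb' and 'ts' are hypothetical local variables.
 *
 *	wqe->lkey = mlx5_rx_mb2mr(rxq, mb);
 *	if (rxq->hw_timestamp)
 *		mlx5_timestamp_set(mb, rxq->timestamp_offset, ts);
 */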

/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for the next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * The MPRQ mempool is currently out of buffers, so every Rx
		 * packet is copied with memcpy regardless of its size. Retry
		 * the allocation to get back to normal operation.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy the packet into the target mbuf if:
	 * - The packet size is smaller than mprq_max_memcpy_len.
	 * - The mempool for Multi-Packet RQ is out of buffers.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				++NB_SEGS(pkt);
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
		MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
				    rte_memory_order_relaxed) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * The MLX5 device doesn't use IOVA itself, but it is needed
		 * in case the Rx packet is transmitted via a different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to the mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags & RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of the packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * the next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}
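
/*
 * Illustrative sketch (not part of the driver API): a caller of
 * mprq_buf_to_pkt() is expected to map the returned enum mlx5_rqx_code onto
 * the queue statistics and decide whether the mbuf is ready. A hypothetical
 * handler could look like this; all local variables are hypothetical.
 *
 *	switch (mprq_buf_to_pkt(rxq, pkt, len, buf, strd_idx, strd_cnt)) {
 *	case MLX5_RXQ_CODE_EXIT:
 *		break;			// Packet is ready.
 *	case MLX5_RXQ_CODE_NOMBUF:
 *		++rxq->stats.rx_nombuf;	// Allocation failed, stop the burst.
 *		break;
 *	case MLX5_RXQ_CODE_DROPPED:
 *		++rxq->stats.idropped;	// Stride could not be delivered.
 *		break;
 *	}
 */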

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->log_strd_num > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

/**
 * Check whether the given RxQ is external.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Rx queue index.
 *
 * @return
 *   True if it is an external RxQ, otherwise false.
 */
static __rte_always_inline bool
mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_external_q *rxq;

	if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
		return false;
	rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
	return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
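
/*
 * Illustrative sketch (not part of the driver API): external Rx queues live
 * in the index range starting at RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN and
 * only exist after the application maps them through the public helper
 * declared in rte_pmd_mlx5.h (assuming its current signature); 'ret',
 * 'port_id' and 'hw_queue_id' are hypothetical variables.
 *
 *	uint16_t dpdk_idx = RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN;
 *
 *	ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id, dpdk_idx,
 *						    hw_queue_id);
 *	// On success, mlx5_is_external_rxq(dev, dpdk_idx) returns true.
 */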

#define LWM_COOKIE_RXQID_OFFSET 0
#define LWM_COOKIE_RXQID_MASK 0xffff
#define LWM_COOKIE_PORTID_OFFSET 16
#define LWM_COOKIE_PORTID_MASK 0xffff

#endif /* RTE_PMD_MLX5_RX_H_ */