/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_spinlock.h>
#include <rte_io.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

/* Mbuf dynamic flag offset for inline. */
extern uint64_t rte_net_mlx5_dynf_inline_mask;

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct mlx5_priv;

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
	struct rte_mempool *mp;
	uint16_t refcnt; /* Atomically accessed refcnt. */
	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
	struct rte_mbuf_ext_shared_info shinfos[];
	/*
	 * Shared information per stride.
	 * More memory will be allocated for the first stride head-room and for
	 * the strides data.
	 */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))

#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
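
/*
 * Illustrative sketch (not part of the original header), showing the layout
 * implied by mlx5_mprq_buf_addr(): the buffer starts with the mlx5_mprq_buf
 * header, followed by one rte_mbuf_ext_shared_info per stride, then the
 * headroom for the first packet, then the stride data. `rxq` and `buf` below
 * are assumed to be a configured Rx queue and a buffer taken from its
 * Multi-Packet RQ mempool:
 *
 *   uint32_t strd_n = 1 << rxq->strd_num_n;        // strides per WQE
 *   void *first_strd = mlx5_mprq_buf_addr(buf, strd_n);
 *   // first_strd == (uint8_t *)buf + sizeof(struct mlx5_mprq_buf)
 *   //               + strd_n * sizeof(struct rte_mbuf_ext_shared_info)
 *   //               + RTE_PKTMBUF_HEADROOM
 */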

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe (*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* Verbs CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32-bit implementations. */
#endif
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;
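
/*
 * The queue sizes above are stored as log2 values; a minimal sketch (not part
 * of the original header) of recovering the actual counts from a queue `rxq`:
 *
 *   uint32_t cqe_cnt = 1 << rxq->cqe_n;       // completion queue entries
 *   uint32_t wqe_cnt = 1 << rxq->elts_n;      // mbuf ring entries
 *   uint32_t strd_n  = 1 << rxq->strd_num_n;  // strides per MPRQ WQE
 *   uint32_t strd_sz = 1 << rxq->strd_sz_n;   // bytes per stride
 */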

enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnel counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* TX queue send local data. */
__extension__
struct mlx5_txq_local {
	struct mlx5_wqe *wqe_last; /* Last sent WQE pointer. */
	struct rte_mbuf *mbuf; /* First mbuf to process. */
	uint16_t pkts_copy; /* Packets copied to elts. */
	uint16_t pkts_sent; /* Packets sent. */
	uint16_t pkts_loop; /* Packets sent on loop entry. */
	uint16_t elts_free; /* Available elts remaining. */
	uint16_t wqe_free; /* Available WQEs remaining. */
	uint16_t mbuf_off; /* Data offset in current mbuf. */
	uint16_t mbuf_nseg; /* Number of remaining mbuf segments. */
	uint16_t mbuf_free; /* Number of inline mbufs to free. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* elts index since last completion request. */
	uint16_t elts_s; /* Number of mbuf elements. */
	uint16_t elts_m; /* Mask for mbuf element indices. */
	/* Fields related to elts mbuf storage. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t wqe_s; /* Number of WQ elements. */
	uint16_t wqe_m; /* Mask for WQ element indices. */
	uint16_t wqe_comp; /* WQE index since last completion request. */
	uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
	/* WQ related fields. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t cqe_s; /* Number of CQ elements. */
	uint16_t cqe_m; /* Mask for CQ indices. */
	/* CQ related fields. */
	uint16_t elts_n:4; /* elts[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set TX offloads for tunneled packets are supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
	uint16_t inlen_send; /* Ordinary send data inline size. */
	uint16_t inlen_empw; /* eMPW max packet size to inline. */
	uint16_t inlen_mode; /* Minimal data length to inline. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	struct mlx5_wqe *wqes; /* Work queue. */
	struct mlx5_wqe *wqes_end; /* Work queue array limit. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
	uint32_t *fcqs; /* Free completion queue (debug extended). */
#else
	uint16_t *fcqs; /* Free completion queue. */
#endif
	volatile struct mlx5_cqe *cqes; /* Completion queue. */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	uint16_t port_id; /* Port ID of device. */
	uint16_t idx; /* Queue index. */
	uint64_t ts_mask; /* Timestamp flag dynamic mask. */
	int32_t ts_offset; /* Timestamp field dynamic offset. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock;
	/* UAR access lock required for 32-bit implementations. */
#endif
	struct rte_mbuf *elts[0];
	/* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;
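
/*
 * Sketch (not part of the original header) of the circular-counter convention
 * suggested by the elts_s/elts_m fields above: elts_head and elts_tail are
 * free-running 16-bit counters and elts_m masks them into elts[]. Assuming
 * that convention:
 *
 *   uint16_t in_use = (uint16_t)(txq->elts_head - txq->elts_tail);
 *   uint16_t avail = txq->elts_s - in_use;
 *   struct rte_mbuf *oldest = txq->elts[txq->elts_tail & txq->elts_m];
 */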

enum mlx5_txq_type {
	MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
	MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	unsigned int socket; /* CPU socket ID for allocations. */
	enum mlx5_txq_type type; /* The txq ctrl type. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	void *bf_reg; /* BlueFlame register from Verbs. */
	uint16_t dump_file_n; /* Number of dump files. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
	struct mlx5_txq_data txq; /* Data path structure. */
	/* Must be the last field in the structure, contains elts[]. */
};

#define MLX5_TX_BFREG(txq) \
		(MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])
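
/*
 * Note (not part of the original header): MLX5_TX_BFREG() resolves the
 * BlueFlame/doorbell register through the per-process private structure, so
 * primary and secondary processes each use their own UAR mapping for the same
 * queue. The Tx doorbell helpers later in this file use it as:
 *
 *   uint64_t *bf_reg = MLX5_TX_BFREG(txq);
 */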

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone);
struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
		       struct mlx5_cache_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
			 struct mlx5_cache_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);

enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_txq.c */

int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
int mlx5_tx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(void *dpdk_txq);
void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void mlx5_dump_debug_information(const char *path, const char *title,
				 const void *buf, unsigned int len);
int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
			const struct mlx5_mp_arg_queue_state_modify *sm);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			   struct rte_eth_burst_mode *mode);

/* Vectorized version of mlx5_rxtx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
			       struct rte_mempool *mp);
int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		 size_t len);
int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		   size_t len);

/**
 * Provide a safe 64-bit store operation to the mlx5 UAR region for both
 * 32-bit and 64-bit architectures.
 *
 * @param val
 *   Value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
			   rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(lock);
	*(uint32_t *)addr = val;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = val >> 32;
	rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide a safe 64-bit store operation to the mlx5 UAR region for both
 * 32-bit and 64-bit architectures while guaranteeing the order of execution
 * with the code being executed.
 *
 * @param val
 *   Value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
	rte_io_wmb();
	__mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif
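
/*
 * Usage sketch (not part of the original header): on 64-bit targets a single
 * 64-bit store to the UAR is atomic, so the assist macros drop the lock
 * argument entirely (callers may pass txq->uar_lock even though that field
 * only exists for 32-bit builds); on 32-bit targets the two halves are
 * written under the spinlock. `db` and `val` below stand for a UAR address
 * and the 8-byte doorbell payload:
 *
 *   mlx5_uar_write64_relaxed(val, db, txq->uar_lock); // no extra ordering
 *   mlx5_uar_write64(val, db, txq->uar_lock);         // preceded by rte_io_wmb()
 */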

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
 * cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_CLONED(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))

/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_tx_mb2mr_bh(txq, mb);
}

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = MLX5_TX_BFREG(txq);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
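
/*
 * Usage sketch (not part of the original header): after building the last WQE
 * of a burst and advancing txq->wqe_ci, the datapath writes the doorbell
 * record and copies the first 8 bytes of that WQE to the BlueFlame register:
 *
 *   mlx5_tx_dbrec(txq, wqe);             // DB record + BF copy + final rte_wmb()
 *   mlx5_tx_dbrec_cond_wmb(txq, wqe, 0); // same, but the final flush is skipped
 */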

/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 * @return
 *   UTC in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}

/**
 * Convert timestamp from mbuf format to linear counter
 * of Clock Queue completions (24 bits).
 *
 * @param sh
 *   Pointer to the device shared context to fetch Tx
 *   packet pacing timestamp and parameters.
 * @param mts
 *   Timestamp from mbuf to convert.
 * @return
 *   positive or zero value - completion ID to wait for
 *   negative value - conversion error
 */
static __rte_always_inline int32_t
mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
{
	uint64_t ts, ci;
	uint32_t tick;

	do {
		/*
		 * Read atomically two uint64_t fields and compare lsb bits.
		 * If there is no match - the timestamp was updated in
		 * the service thread, data should be re-read.
		 */
		rte_compiler_barrier();
		ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
		ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
			break;
	} while (true);
	/* Perform the skew correction, positive value to send earlier. */
	mts -= sh->txpp.skew;
	mts -= ts;
	if (unlikely(mts >= UINT64_MAX / 2)) {
		/* The delta is negative, mts is in the past. */
		__atomic_fetch_add(&sh->txpp.err_ts_past,
				   1, __ATOMIC_RELAXED);
		return -1;
	}
	tick = sh->txpp.tick;
	MLX5_ASSERT(tick);
	/* Convert delta to completions, round up. */
	mts = (mts + tick - 1) / tick;
	if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
		/* The mts is too far in the future. */
		__atomic_fetch_add(&sh->txpp.err_ts_future,
				   1, __ATOMIC_RELAXED);
		return -1;
	}
	mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
	ci += mts;
	ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
	return ci;
}

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
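
/*
 * Sketch (not part of the original header) of how the Rx datapath is expected
 * to combine these helpers: the CQE timestamp (`cqe_ts` below is assumed) is
 * converted when the realtime format is enabled, stored in the dynamic mbuf
 * field registered at rxq->timestamp_offset, and advertised via the dynamic
 * flag in rxq->timestamp_rx_flag:
 *
 *   uint64_t ts = cqe_ts;
 *   if (rxq->rt_timestamp)
 *           ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
 *   mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
 *   pkt->ol_flags |= rxq->timestamp_rx_flag;
 */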

/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffers and packets
		 * are copied regardless of the Rx packet size. Retry the
		 * allocation to get back to normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of the packet is smaller than mprq_max_memcpy_len.
	 * - The mempool for Multi-Packet RQ is out of buffers.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				++NB_SEGS(pkt);
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * The mlx5 device does not use the iova itself, but it is
		 * necessary in case the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			    len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}
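
/*
 * Sketch (not part of the original header) of how a caller may react to the
 * mlx5_rqx_code values returned above; the real handling lives in the Rx
 * burst routines of mlx5_rxtx.c:
 *
 *   enum mlx5_rqx_code rc;
 *
 *   rc = mprq_buf_to_pkt(rxq, pkt, len, buf, strd_idx, strd_cnt);
 *   if (rc == MLX5_RXQ_CODE_NOMBUF)
 *           ++rxq->stats.rx_nombuf;  // allocation failed, stop the burst
 *   else if (rc == MLX5_RXQ_CODE_DROPPED)
 *           ++rxq->stats.idropped;   // packet cannot be delivered
 */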

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */