xref: /dpdk/drivers/net/mlx4/mlx4_rxtx.h (revision a41f593f1bce27cd94eae0e85a8085c592b14b30)
182092c87SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
23d555728SAdrien Mazarguil  * Copyright 2017 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2017 Mellanox Technologies, Ltd
43d555728SAdrien Mazarguil  */
53d555728SAdrien Mazarguil 
63d555728SAdrien Mazarguil #ifndef MLX4_RXTX_H_
73d555728SAdrien Mazarguil #define MLX4_RXTX_H_
83d555728SAdrien Mazarguil 
93d555728SAdrien Mazarguil #include <stdint.h>
10078b8b45SAdrien Mazarguil #include <sys/queue.h>
113d555728SAdrien Mazarguil 
123d555728SAdrien Mazarguil /* Verbs headers do not support -pedantic. */
133d555728SAdrien Mazarguil #ifdef PEDANTIC
143d555728SAdrien Mazarguil #pragma GCC diagnostic ignored "-Wpedantic"
153d555728SAdrien Mazarguil #endif
16c3c977bbSMoti Haimovsky #include <infiniband/mlx4dv.h>
173d555728SAdrien Mazarguil #include <infiniband/verbs.h>
183d555728SAdrien Mazarguil #ifdef PEDANTIC
193d555728SAdrien Mazarguil #pragma GCC diagnostic error "-Wpedantic"
203d555728SAdrien Mazarguil #endif
213d555728SAdrien Mazarguil 
22df96fd0dSBruce Richardson #include <ethdev_driver.h>
233d555728SAdrien Mazarguil #include <rte_mbuf.h>
243d555728SAdrien Mazarguil #include <rte_mempool.h>
253d555728SAdrien Mazarguil 
263d555728SAdrien Mazarguil #include "mlx4.h"
27c3c977bbSMoti Haimovsky #include "mlx4_prm.h"
289797bfccSYongseok Koh #include "mlx4_mr.h"
293d555728SAdrien Mazarguil 
/** Rx queue counters. */
struct mlx4_rxq_stats {
	unsigned int idx; /**< Mapping index. */
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
	uint64_t idropped; /**< Total of packets dropped when Rx ring full. */
	uint64_t rx_nombuf; /**< Total of Rx mbuf allocation failures. */
};
383d555728SAdrien Mazarguil 
/** Rx queue descriptor. */
struct rxq {
	struct mlx4_priv *priv; /**< Back pointer to private data. */
	struct rte_mempool *mp; /**< Memory pool for allocations. */
	struct ibv_cq *cq; /**< Completion queue. */
	struct ibv_wq *wq; /**< Work queue. */
	struct ibv_comp_channel *channel; /**< Rx completion channel. */
	uint16_t rq_ci; /**< Saved RQ consumer index. */
	uint16_t port_id; /**< Port ID for incoming packets. */
	uint16_t sges_n; /**< Number of segments per packet (log2 value). */
	uint16_t elts_n; /**< Mbuf queue size (log2 value). */
	struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
	struct rte_mbuf *(*elts)[]; /**< Rx elements. */
	volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
	volatile uint32_t *rq_db; /**< RQ doorbell record. */
	uint32_t csum:1; /**< Enable checksum offloading. */
	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
	uint32_t crc_present:1; /**< CRC must be subtracted. */
	uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
	struct mlx4_cq mcq;  /**< Info for directly manipulating the CQ. */
	struct mlx4_rxq_stats stats; /**< Rx queue counters. */
	unsigned int socket; /**< CPU socket ID for allocations. */
	uint32_t usecnt; /**< Number of users relying on queue resources. */
	uint8_t data[]; /**< Remaining queue resources. */
};
643d555728SAdrien Mazarguil 
/** Shared flow target for Rx queues. */
struct mlx4_rss {
	LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
	struct mlx4_priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count for this object. */
	uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
	struct ibv_qp *qp; /**< Queue pair. */
	struct ibv_rwq_ind_table *ind; /**< Indirection table. */
	uint64_t fields; /**< Fields for RSS processing (Verbs format). */
	uint8_t key[MLX4_RSS_HASH_KEY_SIZE]; /**< Hash key to use. */
	uint16_t queues; /**< Number of target queues. */
	uint16_t queue_id[]; /**< Target queues. */
};
78078b8b45SAdrien Mazarguil 
/** Tx element. */
struct txq_elt {
	struct rte_mbuf *buf; /**< Buffer. */
	/* Anonymous union: each element tracks either its SQ WQE or an
	 * end-of-completion-burst marker, never both at once. */
	union {
		volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */
		volatile uint32_t *eocb; /**< End of completion burst. */
	};
};
873d555728SAdrien Mazarguil 
/** Tx queue counters. */
struct mlx4_txq_stats {
	unsigned int idx; /**< Mapping index. */
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
	uint64_t odropped; /**< Total number of packets failed to transmit. */
};
953d555728SAdrien Mazarguil 
/** Tx queue descriptor. */
struct txq {
	struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */
	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
	uint16_t port_id; /**< Port ID of device. */
	unsigned int elts_head; /**< Current index in (*elts)[]. */
	unsigned int elts_tail; /**< First element awaiting completion. */
	int elts_comp_cd; /**< Countdown for next completion. */
	unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
	unsigned int elts_n; /**< (*elts)[] length. */
	struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
	struct txq_elt (*elts)[]; /**< Tx elements. */
	struct mlx4_txq_stats stats; /**< Tx queue counters. */
	uint32_t max_inline; /**< Max inline send size. */
	uint32_t csum:1; /**< Enable checksum offloading. */
	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
	uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
	uint8_t *bounce_buf;
	/**< Memory used for storing the first DWORD of data TXBBs. */
	struct mlx4_priv *priv; /**< Back pointer to private data. */
	unsigned int socket; /**< CPU socket ID for allocations. */
	struct ibv_cq *cq; /**< Completion queue. */
	struct ibv_qp *qp; /**< Queue pair. */
	uint8_t data[]; /**< Remaining queue resources. */
};
1213d555728SAdrien Mazarguil 
/**
 * UAR register of a Tx queue, taken from the per-process private structure
 * and indexed by the queue's mapping index (stats.idx).
 * NOTE(review): per-process lookup presumably exists so secondary processes
 * use their own UAR mappings (see mlx4_tx_uar_init_secondary) -- confirm.
 */
#define MLX4_TX_BFREG(txq) \
		(MLX4_PROC_PRIV((txq)->port_id)->uar_table[(txq)->stats.idx])
12497d37d2cSYongseok Koh 
/* mlx4_rxq.c */

/* Default RSS hash key (MLX4_RSS_HASH_KEY_SIZE bytes). */
extern uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
int mlx4_rss_init(struct mlx4_priv *priv);
void mlx4_rss_deinit(struct mlx4_priv *priv);
struct mlx4_rss *mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
			      const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
			      uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
uint64_t mlx4_get_rx_port_offloads(struct mlx4_priv *priv);
uint64_t mlx4_get_rx_queue_offloads(struct mlx4_priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
1455b4c63bdSAdrien Mazarguil 
/* mlx4_rxtx.c */

/* Datapath burst handlers registered with ethdev. */
uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
1527f45cb82SAdrien Mazarguil 
/* mlx4_txq.c */

int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
162a2ce2121SAdrien Mazarguil 
/* mlx4_mr.c */

void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
/* Bottom-half (slow-path) LKey lookups backing the inline helpers below. */
uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb);
uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
			       struct rte_mempool *mp);
17031912d99SYongseok Koh 
17131912d99SYongseok Koh /**
17231912d99SYongseok Koh  * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
17331912d99SYongseok Koh  * cloned mbuf is allocated is returned instead.
17431912d99SYongseok Koh  *
17531912d99SYongseok Koh  * @param buf
17631912d99SYongseok Koh  *   Pointer to mbuf.
17731912d99SYongseok Koh  *
17831912d99SYongseok Koh  * @return
17931912d99SYongseok Koh  *   Memory pool where data is located for given mbuf.
18031912d99SYongseok Koh  */
18119487763SYongseok Koh static inline struct rte_mempool *
mlx4_mb2mp(struct rte_mbuf * buf)18231912d99SYongseok Koh mlx4_mb2mp(struct rte_mbuf *buf)
18331912d99SYongseok Koh {
184952f4cf5SYongseok Koh 	if (unlikely(RTE_MBUF_CLONED(buf)))
18531912d99SYongseok Koh 		return rte_mbuf_from_indirect(buf)->pool;
18631912d99SYongseok Koh 	return buf->pool;
18731912d99SYongseok Koh }
1889797bfccSYongseok Koh 
1899797bfccSYongseok Koh /**
1909797bfccSYongseok Koh  * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
1919797bfccSYongseok Koh  * as mempool is pre-configured and static.
1929797bfccSYongseok Koh  *
1939797bfccSYongseok Koh  * @param rxq
1949797bfccSYongseok Koh  *   Pointer to Rx queue structure.
1959797bfccSYongseok Koh  * @param addr
1969797bfccSYongseok Koh  *   Address to search.
1979797bfccSYongseok Koh  *
1989797bfccSYongseok Koh  * @return
1999797bfccSYongseok Koh  *   Searched LKey on success, UINT32_MAX on no match.
2009797bfccSYongseok Koh  */
2019797bfccSYongseok Koh static __rte_always_inline uint32_t
mlx4_rx_addr2mr(struct rxq * rxq,uintptr_t addr)2029797bfccSYongseok Koh mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
203326d2cdfSOphir Munk {
2049797bfccSYongseok Koh 	struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
2059797bfccSYongseok Koh 	uint32_t lkey;
2069797bfccSYongseok Koh 
2079797bfccSYongseok Koh 	/* Linear search on MR cache array. */
2089797bfccSYongseok Koh 	lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
2099797bfccSYongseok Koh 				    MLX4_MR_CACHE_N, addr);
2109797bfccSYongseok Koh 	if (likely(lkey != UINT32_MAX))
2119797bfccSYongseok Koh 		return lkey;
2129797bfccSYongseok Koh 	/* Take slower bottom-half (Binary Search) on miss. */
2139797bfccSYongseok Koh 	return mlx4_rx_addr2mr_bh(rxq, addr);
214326d2cdfSOphir Munk }
215326d2cdfSOphir Munk 
/** Query the Rx LKey of an mbuf from its buffer base address. */
#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
2179797bfccSYongseok Koh 
2189797bfccSYongseok Koh /**
2199797bfccSYongseok Koh  * Query LKey from a packet buffer for Tx. If not found, add the mempool.
2209797bfccSYongseok Koh  *
2219797bfccSYongseok Koh  * @param txq
2229797bfccSYongseok Koh  *   Pointer to Tx queue structure.
2239797bfccSYongseok Koh  * @param addr
2249797bfccSYongseok Koh  *   Address to search.
2259797bfccSYongseok Koh  *
2269797bfccSYongseok Koh  * @return
2279797bfccSYongseok Koh  *   Searched LKey on success, UINT32_MAX on no match.
2289797bfccSYongseok Koh  */
2299797bfccSYongseok Koh static __rte_always_inline uint32_t
mlx4_tx_mb2mr(struct txq * txq,struct rte_mbuf * mb)23019487763SYongseok Koh mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
2319797bfccSYongseok Koh {
2329797bfccSYongseok Koh 	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
23319487763SYongseok Koh 	uintptr_t addr = (uintptr_t)mb->buf_addr;
2349797bfccSYongseok Koh 	uint32_t lkey;
2359797bfccSYongseok Koh 
2369797bfccSYongseok Koh 	/* Check generation bit to see if there's any change on existing MRs. */
2379797bfccSYongseok Koh 	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
2389797bfccSYongseok Koh 		mlx4_mr_flush_local_cache(mr_ctrl);
2399797bfccSYongseok Koh 	/* Linear search on MR cache array. */
2409797bfccSYongseok Koh 	lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
2419797bfccSYongseok Koh 				    MLX4_MR_CACHE_N, addr);
2429797bfccSYongseok Koh 	if (likely(lkey != UINT32_MAX))
2439797bfccSYongseok Koh 		return lkey;
24419487763SYongseok Koh 	/* Take slower bottom-half on miss. */
24519487763SYongseok Koh 	return mlx4_tx_mb2mr_bh(txq, mb);
24631912d99SYongseok Koh }
2479797bfccSYongseok Koh 
2483d555728SAdrien Mazarguil #endif /* MLX4_RXTX_H_ */
249