xref: /dpdk/drivers/net/sfc/sfc_dp_rx.h (revision ba6a168a06581b5b3d523f984722a3e5f65bbb82)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2df1bfde4SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2017-2019 Solarflare Communications Inc.
5df1bfde4SAndrew Rybchenko  *
6df1bfde4SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
7df1bfde4SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
8df1bfde4SAndrew Rybchenko  */
9df1bfde4SAndrew Rybchenko 
10df1bfde4SAndrew Rybchenko #ifndef _SFC_DP_RX_H
11df1bfde4SAndrew Rybchenko #define _SFC_DP_RX_H
12df1bfde4SAndrew Rybchenko 
13df1bfde4SAndrew Rybchenko #include <rte_mempool.h>
14df96fd0dSBruce Richardson #include <ethdev_driver.h>
15df1bfde4SAndrew Rybchenko 
16df1bfde4SAndrew Rybchenko #include "sfc_dp.h"
173037e6cfSViacheslav Galaktionov #include "sfc_nic_dma_dp.h"
18df1bfde4SAndrew Rybchenko 
19df1bfde4SAndrew Rybchenko #ifdef __cplusplus
20df1bfde4SAndrew Rybchenko extern "C" {
21df1bfde4SAndrew Rybchenko #endif
22df1bfde4SAndrew Rybchenko 
/**
 * Generic receive queue information used on data path.
 * It must be kept as small as it is possible since it is built into
 * the structure used on datapath.
 */
struct sfc_dp_rxq {
	/** Generic datapath queue identification (declared in sfc_dp.h) */
	struct sfc_dp_queue	dpq;
};
31df1bfde4SAndrew Rybchenko 
/** Datapath receive queue descriptor number limitations */
struct sfc_dp_rx_hw_limits {
	/** Maximum number of Rx queue ring entries */
	unsigned int rxq_max_entries;
	/** Minimum number of Rx queue ring entries */
	unsigned int rxq_min_entries;
	/** Maximum number of event queue ring entries */
	unsigned int evq_max_entries;
	/** Minimum number of event queue ring entries */
	unsigned int evq_min_entries;
};
39048a0d1aSIgor Romanov 
/**
 * Datapath receive queue creation information.
 *
 * The structure is used just to pass information from control path to
 * datapath. It could be just function arguments, but it would be hardly
 * readable.
 */
struct sfc_dp_rx_qcreate_info {
	/** Memory pool to allocate Rx buffer from */
	struct rte_mempool	*refill_mb_pool;
	/** Maximum number of pushed Rx descriptors in the queue */
	unsigned int		max_fill_level;
	/** Minimum number of unused Rx descriptors to do refill */
	unsigned int		refill_threshold;
	/**
	 * Usable mbuf data space in accordance with alignment and
	 * padding requirements imposed by HW.
	 */
	unsigned int		buf_size;

	/**
	 * Maximum number of Rx descriptors completed in one Rx event.
	 * Just for sanity checks if datapath would like to do.
	 */
	unsigned int		batch_max;

	/** Pseudo-header size */
	unsigned int		prefix_size;

	/** Receive queue flags initializer (mask of SFC_RXQ_FLAG_*) */
	unsigned int		flags;
/** RSS hash delivery is requested on the queue */
#define SFC_RXQ_FLAG_RSS_HASH	0x1
/** Ingress m-port delivery is requested on the queue */
#define SFC_RXQ_FLAG_INGRESS_MPORT	0x2
/** Stripped VLAN TCI delivery is requested on the queue */
#define SFC_RXQ_FLAG_VLAN_STRIPPED_TCI	0x4

	/** Rx queue size */
	unsigned int		rxq_entries;
	/** DMA-mapped Rx descriptors ring */
	void			*rxq_hw_ring;

	/** Event queue index in hardware */
	unsigned int		evq_hw_index;
	/** Associated event queue size */
	unsigned int		evq_entries;
	/** Hardware event ring */
	void			*evq_hw_ring;

	/** The queue index in hardware (required to push right doorbell) */
	unsigned int		hw_index;
	/**
	 * Virtual address of the memory-mapped BAR to push Rx refill
	 * doorbell
	 */
	volatile void		*mem_bar;
	/** Function control window offset */
	efsys_dma_addr_t	fcw_offset;
	/** VI window size shift */
	unsigned int		vi_window_shift;

	/** Mask to extract user bits from Rx prefix mark field */
	uint32_t		user_mark_mask;

	/** NIC's DMA mapping information */
	const struct sfc_nic_dma_info	*nic_dma_info;
};
105df1bfde4SAndrew Rybchenko 
/**
 * Get Rx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);

/**
 * Test if an Rx datapath supports specific mempool ops.
 *
 * @param pool			The name of the pool operations to test.
 *
 * @return Check status.
 * @retval	0		Best mempool ops choice.
 * @retval	1		Mempool ops are supported.
 * @retval	-ENOTSUP	Mempool ops not supported.
 */
typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);

/**
 * Get size of receive and event queue rings by the number of Rx
 * descriptors and mempool configuration.
 *
 * @param nb_rx_desc		Number of Rx descriptors
 * @param limits		HW limits on Rx and event queue ring sizes
 * @param mb_pool		mbuf pool with Rx buffers
 * @param rxq_entries		Location for number of Rx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param rxq_max_fill_level	Location for maximum Rx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
					 struct sfc_dp_rx_hw_limits *limits,
					 struct rte_mempool *mb_pool,
					 unsigned int *rxq_entries,
					 unsigned int *evq_entries,
					 unsigned int *rxq_max_fill_level);

/**
 * Allocate and initialize datapath receive queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Receive queue information
 * @param dp_rxqp	Location for generic datapath receive queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_rx_qcreate_info *info,
				  struct sfc_dp_rxq **dp_rxqp);

/**
 * Free resources allocated for datapath receive queue.
 *
 * @param dp_rxq	Datapath receive queue to destroy
 */
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);

/**
 * Receive queue start callback.
 *
 * It handovers EvQ to the datapath.
 *
 * @param dp_rxq	Datapath receive queue to start
 * @param evq_read_ptr	Event queue read pointer handed over to the datapath
 * @param pinfo		Rx prefix layout to be used by the datapath
 */
typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int evq_read_ptr,
				 const efx_rx_prefix_layout_t *pinfo);

/**
 * Receive queue stop function called before flush.
 *
 * @param dp_rxq	Datapath receive queue to stop
 * @param evq_read_ptr	Location where the datapath returns its event
 *			queue read pointer (handed back on stop)
 */
typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int *evq_read_ptr);

/**
 * Receive event handler used during queue flush only.
 *
 * @param dp_rxq	Datapath receive queue
 * @param id		Rx descriptor identifier carried by the event
 */
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);

/**
 * Packed stream receive event handler used during queue flush only.
 *
 * @param dp_rxq	Datapath receive queue
 * @param id		Rx descriptor identifier carried by the event
 */
typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
				     unsigned int id);

/**
 * Receive queue purge function called after queue flush.
 *
 * Should be used to free unused receive buffers.
 */
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);

/**
 * Get packet types recognized/classified.
 *
 * @param tunnel_encaps		Supported tunnel encapsulations
 *				(presumably a bitmask -- confirm with callers)
 * @param no_of_elements	Location for the number of elements in
 *				the returned packet type array
 *
 * @return Pointer to the array of supported packet types.
 */
typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(uint32_t tunnel_encaps,
							    size_t *no_of_elements);

/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);

/** Check Rx descriptor status at the given offset within the queue */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
				       uint16_t offset);
/** Enable Rx interrupts */
typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);

/** Disable Rx interrupts */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);

/** Get number of pushed Rx buffers */
typedef unsigned int (sfc_dp_rx_get_pushed_t)(struct sfc_dp_rxq *dp_rxq);
218a9a238e9SIgor Romanov 
/** Receive datapath definition */
struct sfc_dp_rx {
	/** Generic datapath registration info (name, type, capabilities) */
	struct sfc_dp				dp;

	/** Mask of SFC_DP_RX_FEAT_* features supported by the datapath */
	unsigned int				features;
/** Datapath may be used from secondary processes */
#define SFC_DP_RX_FEAT_MULTI_PROCESS		0x1
/** Datapath supports flow FLAG action delivery */
#define SFC_DP_RX_FEAT_FLOW_FLAG		0x2
/** Datapath supports flow MARK action delivery */
#define SFC_DP_RX_FEAT_FLOW_MARK		0x4
/** Datapath supports Rx interrupts */
#define SFC_DP_RX_FEAT_INTR			0x8
/** Datapath supports extra statistics */
#define SFC_DP_RX_FEAT_STATS			0x10
	/**
	 * Rx offload capabilities supported by the datapath on device
	 * level only if HW/FW supports it.
	 */
	uint64_t				dev_offload_capa;
	/**
	 * Rx offload capabilities supported by the datapath per-queue
	 * if HW/FW supports it.
	 */
	uint64_t				queue_offload_capa;
	sfc_dp_rx_get_dev_info_t		*get_dev_info;
	sfc_dp_rx_pool_ops_supported_t		*pool_ops_supported;
	sfc_dp_rx_qsize_up_rings_t		*qsize_up_rings;
	sfc_dp_rx_qcreate_t			*qcreate;
	sfc_dp_rx_qdestroy_t			*qdestroy;
	sfc_dp_rx_qstart_t			*qstart;
	sfc_dp_rx_qstop_t			*qstop;
	sfc_dp_rx_qrx_ev_t			*qrx_ev;
	sfc_dp_rx_qrx_ps_ev_t			*qrx_ps_ev;
	sfc_dp_rx_qpurge_t			*qpurge;
	sfc_dp_rx_supported_ptypes_get_t	*supported_ptypes_get;
	sfc_dp_rx_qdesc_npending_t		*qdesc_npending;
	sfc_dp_rx_qdesc_status_t		*qdesc_status;
	sfc_dp_rx_intr_enable_t			*intr_enable;
	sfc_dp_rx_intr_disable_t		*intr_disable;
	sfc_dp_rx_get_pushed_t			*get_pushed;
	/** Packet receive burst entry point (installed in ethdev) */
	eth_rx_burst_t				pkt_burst;
};
257df1bfde4SAndrew Rybchenko 
258df1bfde4SAndrew Rybchenko static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_name(struct sfc_dp_list * head,const char * name)259df1bfde4SAndrew Rybchenko sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
260df1bfde4SAndrew Rybchenko {
261df1bfde4SAndrew Rybchenko 	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
262df1bfde4SAndrew Rybchenko 
263df1bfde4SAndrew Rybchenko 	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
264df1bfde4SAndrew Rybchenko }
265df1bfde4SAndrew Rybchenko 
266df1bfde4SAndrew Rybchenko static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_caps(struct sfc_dp_list * head,unsigned int avail_caps)267df1bfde4SAndrew Rybchenko sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
268df1bfde4SAndrew Rybchenko {
269df1bfde4SAndrew Rybchenko 	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
270df1bfde4SAndrew Rybchenko 
271df1bfde4SAndrew Rybchenko 	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
272df1bfde4SAndrew Rybchenko }
273df1bfde4SAndrew Rybchenko 
274f08d113dSAndrew Rybchenko static inline uint64_t
sfc_dp_rx_offload_capa(const struct sfc_dp_rx * dp_rx)275f08d113dSAndrew Rybchenko sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
276f08d113dSAndrew Rybchenko {
277f08d113dSAndrew Rybchenko 	return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
278f08d113dSAndrew Rybchenko }
279f08d113dSAndrew Rybchenko 
280b76e1b2cSAndrew Rybchenko /** Get Rx datapath ops by the datapath RxQ handle */
281b76e1b2cSAndrew Rybchenko const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
282b76e1b2cSAndrew Rybchenko 
283df1bfde4SAndrew Rybchenko extern struct sfc_dp_rx sfc_efx_rx;
284638bddc9SAndrew Rybchenko extern struct sfc_dp_rx sfc_ef10_rx;
285390f9b8dSAndrew Rybchenko extern struct sfc_dp_rx sfc_ef10_essb_rx;
286554644e3SAndrew Rybchenko extern struct sfc_dp_rx sfc_ef100_rx;
287df1bfde4SAndrew Rybchenko 
288df1bfde4SAndrew Rybchenko #ifdef __cplusplus
289df1bfde4SAndrew Rybchenko }
290df1bfde4SAndrew Rybchenko #endif
291df1bfde4SAndrew Rybchenko #endif /* _SFC_DP_RX_H */
292