/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_DP_RX_H
#define _SFC_DP_RX_H

#include <rte_mempool.h>
#include <ethdev_driver.h>

#include "sfc_dp.h"
#include "sfc_nic_dma_dp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic receive queue information used on data path.
 * It must be kept as small as possible since it is built into
 * the structure used on datapath.
 */
struct sfc_dp_rxq {
	struct sfc_dp_queue	dpq;
};
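
/*
 * A datapath embeds this generic part into its own queue structure and
 * recovers the specific type from the generic pointer with container_of().
 * A minimal sketch, assuming a hypothetical datapath queue type (the names
 * below are illustrative, not part of this header):
 *
 *	struct my_dp_rxq {
 *		struct sfc_dp_rxq	dp;		(generic part, kept small)
 *		unsigned int		read_ptr;
 *	};
 *
 *	static struct my_dp_rxq *
 *	my_dp_rxq_from_generic(struct sfc_dp_rxq *dp_rxq)
 *	{
 *		return container_of(dp_rxq, struct my_dp_rxq, dp);
 *	}
 */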

/** Datapath receive queue descriptor number limitations */
struct sfc_dp_rx_hw_limits {
	unsigned int rxq_max_entries;
	unsigned int rxq_min_entries;
	unsigned int evq_max_entries;
	unsigned int evq_min_entries;
};

/**
 * Datapath receive queue creation information.
 *
 * The structure is used just to pass information from control path to
 * datapath. It could be passed as plain function arguments, but that
 * would be hardly readable.
 */
struct sfc_dp_rx_qcreate_info {
	/** Memory pool to allocate Rx buffer from */
	struct rte_mempool *refill_mb_pool;
	/** Maximum number of pushed Rx descriptors in the queue */
	unsigned int max_fill_level;
	/** Minimum number of unused Rx descriptors to do refill */
	unsigned int refill_threshold;
	/**
	 * Usable mbuf data space in accordance with alignment and
	 * padding requirements imposed by HW.
	 */
	unsigned int buf_size;

	/**
	 * Maximum number of Rx descriptors completed in one Rx event.
	 * Used only for sanity checks, if the datapath chooses to do them.
	 */
	unsigned int batch_max;

	/** Pseudo-header size */
	unsigned int prefix_size;

	/** Receive queue flags initializer */
	unsigned int flags;
#define SFC_RXQ_FLAG_RSS_HASH		0x1
#define SFC_RXQ_FLAG_INGRESS_MPORT	0x2
#define SFC_RXQ_FLAG_VLAN_STRIPPED_TCI	0x4

	/** Rx queue size */
	unsigned int rxq_entries;
	/** DMA-mapped Rx descriptors ring */
	void *rxq_hw_ring;

	/** Event queue index in hardware */
	unsigned int evq_hw_index;
	/** Associated event queue size */
	unsigned int evq_entries;
	/** Hardware event ring */
	void *evq_hw_ring;

	/** The queue index in hardware (required to push right doorbell) */
	unsigned int hw_index;
	/**
	 * Virtual address of the memory-mapped BAR to push Rx refill
	 * doorbell
	 */
	volatile void *mem_bar;
	/** Function control window offset */
	efsys_dma_addr_t fcw_offset;
	/** VI window size shift */
	unsigned int vi_window_shift;

	/** Mask to extract user bits from Rx prefix mark field */
	uint32_t user_mark_mask;

	/** NIC's DMA mapping information */
	const struct sfc_nic_dma_info *nic_dma_info;
};

/**
 * Get Rx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);

/**
 * Test if an Rx datapath supports specific mempool ops.
 *
 * @param pool			The name of the pool operations to test.
 *
 * @return Check status.
 * @retval 0		Best mempool ops choice.
 * @retval 1		Mempool ops are supported.
 * @retval -ENOTSUP	Mempool ops not supported.
 */
typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
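
/*
 * A minimal sketch of a pool_ops_supported callback (illustrative only,
 * not the implementation of any datapath in this driver): report the
 * default ring-based mempool ops as the best choice and accept the rest.
 *
 *	static int
 *	my_dp_rx_pool_ops_supported(const char *pool)
 *	{
 *		if (strcmp(pool, "ring_mp_mc") == 0)
 *			return 0;
 *		return 1;
 *	}
 */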

/**
 * Get size of receive and event queue rings by the number of Rx
 * descriptors, hardware limitations and mempool configuration.
 *
 * @param nb_rx_desc		Number of Rx descriptors
 * @param limits		Hardware limits on the ring sizes
 * @param mb_pool		mbuf pool with Rx buffers
 * @param rxq_entries		Location for number of Rx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param rxq_max_fill_level	Location for maximum Rx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
					 struct sfc_dp_rx_hw_limits *limits,
					 struct rte_mempool *mb_pool,
					 unsigned int *rxq_entries,
					 unsigned int *evq_entries,
					 unsigned int *rxq_max_fill_level);
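
/*
 * A minimal sketch of a qsize_up_rings callback that simply clamps the
 * request to the hardware limits (illustrative only; a real datapath also
 * accounts for the mempool geometry, which is why mb_pool is unused here):
 *
 *	static int
 *	my_dp_rx_qsize_up_rings(uint16_t nb_rx_desc,
 *				struct sfc_dp_rx_hw_limits *limits,
 *				struct rte_mempool *mb_pool,
 *				unsigned int *rxq_entries,
 *				unsigned int *evq_entries,
 *				unsigned int *rxq_max_fill_level)
 *	{
 *		if (nb_rx_desc > limits->rxq_max_entries)
 *			return EINVAL;
 *		*rxq_entries = nb_rx_desc < limits->rxq_min_entries ?
 *			       limits->rxq_min_entries : nb_rx_desc;
 *		*evq_entries = *rxq_entries;
 *		*rxq_max_fill_level = *rxq_entries - 1;
 *		return 0;
 *	}
 */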

/**
 * Allocate and initialize datapath receive queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Receive queue information
 * @param dp_rxqp	Location for generic datapath receive queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_rx_qcreate_info *info,
				  struct sfc_dp_rxq **dp_rxqp);
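
/*
 * A trimmed sketch of a qcreate implementation (illustrative only; the
 * my_dp_rxq type is the hypothetical one from the sketch above): allocate
 * the queue on the requested socket, capture what is needed from the
 * creation info and hand the generic part back through dp_rxqp.
 *
 *	static int
 *	my_dp_rx_qcreate(uint16_t port_id, uint16_t queue_id,
 *			 const struct rte_pci_addr *pci_addr, int socket_id,
 *			 const struct sfc_dp_rx_qcreate_info *info,
 *			 struct sfc_dp_rxq **dp_rxqp)
 *	{
 *		struct my_dp_rxq *rxq;
 *
 *		rxq = rte_zmalloc_socket("my_dp_rxq", sizeof(*rxq),
 *					 RTE_CACHE_LINE_SIZE, socket_id);
 *		if (rxq == NULL)
 *			return ENOMEM;
 *
 *		sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
 *		*dp_rxqp = &rxq->dp;
 *		return 0;
 *	}
 */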

/**
 * Free resources allocated for datapath receive queue.
 */
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);

/**
 * Receive queue start callback.
 *
 * It hands over the EvQ to the datapath.
 */
typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int evq_read_ptr,
				 const efx_rx_prefix_layout_t *pinfo);

/**
 * Receive queue stop function called before flush.
 */
typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
				 unsigned int *evq_read_ptr);

/**
 * Receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);

/**
 * Packed stream receive event handler used during queue flush only.
 */
typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
				     unsigned int id);

/**
 * Receive queue purge function called after queue flush.
 *
 * Should be used to free unused receive buffers.
 */
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);

/** Get packet types recognized/classified */
typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(uint32_t tunnel_encaps,
							     size_t *no_of_elements);

/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);

/** Check Rx descriptor status */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
				       uint16_t offset);

/** Enable Rx interrupts */
typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);

/** Disable Rx interrupts */
typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);

/** Get number of pushed Rx buffers */
typedef unsigned int (sfc_dp_rx_get_pushed_t)(struct sfc_dp_rxq *dp_rxq);

/** Receive datapath definition */
struct sfc_dp_rx {
	struct sfc_dp			dp;

	unsigned int			features;
#define SFC_DP_RX_FEAT_MULTI_PROCESS	0x1
#define SFC_DP_RX_FEAT_FLOW_FLAG	0x2
#define SFC_DP_RX_FEAT_FLOW_MARK	0x4
#define SFC_DP_RX_FEAT_INTR		0x8
#define SFC_DP_RX_FEAT_STATS		0x10
	/**
	 * Rx offload capabilities supported by the datapath at the
	 * device level, provided that HW/FW supports them.
	 */
	uint64_t			dev_offload_capa;
	/**
	 * Rx offload capabilities supported by the datapath per queue,
	 * provided that HW/FW supports them.
	 */
	uint64_t			queue_offload_capa;
	sfc_dp_rx_get_dev_info_t	*get_dev_info;
	sfc_dp_rx_pool_ops_supported_t	*pool_ops_supported;
	sfc_dp_rx_qsize_up_rings_t	*qsize_up_rings;
	sfc_dp_rx_qcreate_t		*qcreate;
	sfc_dp_rx_qdestroy_t		*qdestroy;
	sfc_dp_rx_qstart_t		*qstart;
	sfc_dp_rx_qstop_t		*qstop;
	sfc_dp_rx_qrx_ev_t		*qrx_ev;
	sfc_dp_rx_qrx_ps_ev_t		*qrx_ps_ev;
	sfc_dp_rx_qpurge_t		*qpurge;
	sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
	sfc_dp_rx_qdesc_npending_t	*qdesc_npending;
	sfc_dp_rx_qdesc_status_t	*qdesc_status;
	sfc_dp_rx_intr_enable_t		*intr_enable;
	sfc_dp_rx_intr_disable_t	*intr_disable;
	sfc_dp_rx_get_pushed_t		*get_pushed;
	eth_rx_burst_t			pkt_burst;
};
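
/*
 * A datapath advertises itself by defining an instance of this structure
 * and registering it on the datapath list. A trimmed, illustrative sketch
 * (names and capability values are examples, not a real datapath):
 *
 *	struct sfc_dp_rx my_dp_rx = {
 *		.dp = {
 *			.name		= "my_dp",
 *			.type		= SFC_DP_RX,
 *		},
 *		.features		= SFC_DP_RX_FEAT_INTR,
 *		.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 *		.queue_offload_capa	= 0,
 *		.qsize_up_rings		= my_dp_rx_qsize_up_rings,
 *		.qcreate		= my_dp_rx_qcreate,
 *		.qdestroy		= my_dp_rx_qdestroy,
 *		.pkt_burst		= my_dp_rx_recv_pkts,
 *	};
 */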

static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}
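
/*
 * Typical lookup at device probe time (sketch; the sfc_dp_head list
 * variable and avail_caps value are assumptions of this example):
 *
 *	struct sfc_dp_rx *dp_rx;
 *
 *	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, "efx");
 *	if (dp_rx == NULL)
 *		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
 */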

static inline struct sfc_dp_rx *
sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}

static inline uint64_t
sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
{
	return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
}

/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);

extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
extern struct sfc_dp_rx sfc_ef10_essb_rx;
extern struct sfc_dp_rx sfc_ef100_rx;

#ifdef __cplusplus
}
#endif
#endif /* _SFC_DP_RX_H */