/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_DP_TX_H
#define _SFC_DP_TX_H

#include <rte_ethdev_driver.h>

#include "sfc_dp.h"
#include "sfc_debug.h"
#include "sfc_tso.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic transmit queue information used on data path.
 * It must be kept as small as possible since it is built into
 * the structure used on the datapath.
 */
struct sfc_dp_txq {
	struct sfc_dp_queue	dpq;
};

/** Datapath transmit queue descriptor number limitations */
struct sfc_dp_tx_hw_limits {
	unsigned int txq_max_entries;
	unsigned int txq_min_entries;
};

/**
 * Datapath transmit queue creation information.
 *
 * The structure is used just to pass information from the control path
 * to the datapath. It could be just function arguments, but that would
 * be hardly readable.
 */
struct sfc_dp_tx_qcreate_info {
	/** Maximum number of pushed Tx descriptors */
	unsigned int		max_fill_level;
	/** Minimum number of unused Tx descriptors to do reap */
	unsigned int		free_thresh;
	/** Offloads enabled on the transmit queue */
	uint64_t		offloads;
	/** Tx queue size */
	unsigned int		txq_entries;
	/** Maximum size of data in the DMA descriptor */
	uint16_t		dma_desc_size_max;
	/** DMA-mapped Tx descriptors ring */
	void			*txq_hw_ring;
	/** Associated event queue size */
	unsigned int		evq_entries;
	/** Hardware event ring */
	void			*evq_hw_ring;
	/** The queue index in hardware (needed to push the right doorbell) */
	unsigned int		hw_index;
	/** Virtual address of the memory-mapped BAR to push Tx doorbell */
	volatile void		*mem_bar;
	/** VI window size shift */
	unsigned int		vi_window_shift;
	/**
	 * Maximum offset in bytes into the packet at which the TCP header
	 * may start for the hardware to apply TSO packet edits.
	 */
	uint16_t		tso_tcp_header_offset_limit;
};

/**
 * Get Tx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);

/**
 * Get the size of the transmit and event queue rings by the number of Tx
 * descriptors.
 *
 * @param nb_tx_desc		Number of Tx descriptors
 * @param limits		Hardware limits on the queue ring sizes
 * @param txq_entries		Location for number of Tx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param txq_max_fill_level	Location for maximum Tx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
					 struct sfc_dp_tx_hw_limits *limits,
					 unsigned int *txq_entries,
					 unsigned int *evq_entries,
					 unsigned int *txq_max_fill_level);

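/*
 * A minimal sketch of such a callback (hypothetical, not an actual
 * implementation) that simply clamps the request to the hardware limits:
 *
 *	static int
 *	example_tx_qsize_up_rings(uint16_t nb_tx_desc,
 *				  struct sfc_dp_tx_hw_limits *limits,
 *				  unsigned int *txq_entries,
 *				  unsigned int *evq_entries,
 *				  unsigned int *txq_max_fill_level)
 *	{
 *		unsigned int entries = RTE_MAX((unsigned int)nb_tx_desc,
 *					       limits->txq_min_entries);
 *
 *		if (entries > limits->txq_max_entries)
 *			return EINVAL;
 *
 *		*txq_entries = entries;
 *		*evq_entries = entries;
 *		*txq_max_fill_level = entries - 1;
 *		return 0;
 *	}
 */
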
/**
 * Allocate and initialize datapath transmit queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Tx queue details wrapped in structure
 * @param dp_txqp	Location for generic datapath transmit queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_tx_qcreate_info *info,
				  struct sfc_dp_txq **dp_txqp);

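/*
 * A minimal usage sketch (hypothetical, not part of this header): the
 * control path fills sfc_dp_tx_qcreate_info and hands it over to the
 * chosen datapath; all right-hand side names are assumed control path
 * variables:
 *
 *	struct sfc_dp_tx_qcreate_info info = {
 *		.max_fill_level	= txq_max_fill_level,
 *		.free_thresh	= tx_conf->tx_free_thresh,
 *		.offloads	= offloads,
 *		.txq_entries	= txq_entries,
 *		.evq_entries	= evq_entries,
 *		.hw_index	= hw_index,
 *		.mem_bar	= mem_bar,
 *	};
 *	struct sfc_dp_txq *dp_txq;
 *	int rc;
 *
 *	rc = dp_tx->qcreate(port_id, queue_id, pci_addr, socket_id,
 *			    &info, &dp_txq);
 *
 * qcreate returns 0 on success or a positive errno, as documented above.
 */
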
/**
 * Free resources allocated for datapath transmit queue.
 */
typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);

/**
 * Transmit queue start callback.
 *
 * It hands over the EvQ to the datapath.
 */
typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int evq_read_ptr,
				 unsigned int txq_desc_index);

/**
 * Transmit queue stop function called before the queue flush.
 *
 * It returns the EvQ to the control path.
 */
typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int *evq_read_ptr);

/**
 * Transmit event handler used during queue flush only.
 */
typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);

/**
 * Transmit queue function called after the queue flush.
 */
typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);

/**
 * Check Tx descriptor status
 */
typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq,
				       uint16_t offset);

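/*
 * Presumably this backs rte_eth_tx_descriptor_status() (an assumption, not
 * stated in this header), so a usage sketch would be:
 *
 *	int status = dp_tx->qdesc_status(dp_txq, offset);
 *
 * where status is expected to be RTE_ETH_TX_DESC_FULL, RTE_ETH_TX_DESC_DONE,
 * RTE_ETH_TX_DESC_UNAVAIL or a negative errno.
 */
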
/** Transmit datapath definition */
struct sfc_dp_tx {
	struct sfc_dp			dp;

	unsigned int			features;
#define SFC_DP_TX_FEAT_MULTI_PROCESS	0x1
	/**
	 * Tx offload capabilities supported by the datapath at the device
	 * level, provided that HW/FW supports them.
	 */
	uint64_t			dev_offload_capa;
	/**
	 * Tx offload capabilities supported by the datapath on a per-queue
	 * basis, provided that HW/FW supports them.
	 */
	uint64_t			queue_offload_capa;
	sfc_dp_tx_get_dev_info_t	*get_dev_info;
	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
	sfc_dp_tx_qcreate_t		*qcreate;
	sfc_dp_tx_qdestroy_t		*qdestroy;
	sfc_dp_tx_qstart_t		*qstart;
	sfc_dp_tx_qstop_t		*qstop;
	sfc_dp_tx_qtx_ev_t		*qtx_ev;
	sfc_dp_tx_qreap_t		*qreap;
	sfc_dp_tx_qdesc_status_t	*qdesc_status;
	eth_tx_prep_t			pkt_prepare;
	eth_tx_burst_t			pkt_burst;
};

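/*
 * A sketch of a hypothetical datapath definition (the real instances,
 * e.g. sfc_efx_tx, are declared at the end of this header); the example_*
 * callbacks and the offload flags chosen here are assumptions:
 *
 *	struct sfc_dp_tx example_tx = {
 *		.dp = {
 *			.name		= "example",
 *			.type		= SFC_DP_TX,
 *		},
 *		.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 *		.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
 *		.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
 *					  DEV_TX_OFFLOAD_TCP_CKSUM,
 *		.qsize_up_rings		= example_tx_qsize_up_rings,
 *		.qcreate		= example_tx_qcreate,
 *		.qdestroy		= example_tx_qdestroy,
 *		.qstart			= example_tx_qstart,
 *		.qstop			= example_tx_qstop,
 *		.qreap			= example_tx_qreap,
 *		.qdesc_status		= example_tx_qdesc_status,
 *		.pkt_prepare		= example_tx_prepare_pkts,
 *		.pkt_burst		= example_tx_xmit_pkts,
 *	};
 */
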
static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

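/*
 * Selection sketch (hypothetical names for the list head, the requested
 * datapath name and the available capabilities): prefer a datapath
 * requested by name, otherwise pick one by capabilities:
 *
 *	struct sfc_dp_tx *dp_tx;
 *
 *	if (tx_name != NULL)
 *		dp_tx = sfc_dp_find_tx_by_name(&dp_head, tx_name);
 *	else
 *		dp_tx = sfc_dp_find_tx_by_caps(&dp_head, avail_caps);
 *	if (dp_tx == NULL)
 *		return ENOENT;
 */
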
/** Get Tx datapath ops by the datapath TxQ handle */
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);

static inline uint64_t
sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
{
	return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
}

static inline int
sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
			   uint32_t tso_tcp_header_offset_limit,
			   unsigned int max_fill_level,
			   unsigned int nb_tso_descs,
			   unsigned int nb_vlan_descs)
{
	unsigned int descs_required = m->nb_segs;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	int ret;

	ret = rte_validate_tx_offload(m);
	if (ret != 0) {
		/*
		 * rte_validate_tx_offload() returns a negative error code,
		 * but positive error codes are used inside the net/sfc PMD.
		 */
		SFC_ASSERT(ret < 0);
		return -ret;
	}
#endif

	if (m->ol_flags & PKT_TX_TCP_SEG) {
		unsigned int tcph_off = m->l2_len + m->l3_len;
		unsigned int header_len;

		switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		case 0:
			break;
		case PKT_TX_TUNNEL_VXLAN:
			/* FALLTHROUGH */
		case PKT_TX_TUNNEL_GENEVE:
			if (!(m->ol_flags &
			      (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
				return EINVAL;

			tcph_off += m->outer_l2_len + m->outer_l3_len;
		}

		header_len = tcph_off + m->l4_len;

		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
			return EINVAL;

		descs_required += nb_tso_descs;

		/*
		 * An extra descriptor is required when the packet header
		 * is separated from the remaining content of the first
		 * segment.
		 */
		if (rte_pktmbuf_data_len(m) > header_len) {
			descs_required++;
		} else if (rte_pktmbuf_data_len(m) < header_len &&
			 unlikely(header_len > SFC_TSOH_STD_LEN)) {
			/*
			 * Header linearization is required and
			 * the header is too big to be linearized.
			 */
			return EINVAL;
		}
	}

	/*
	 * The number of VLAN descriptors is added regardless of the
	 * requested VLAN offload since VLAN is sticky and sending a packet
	 * without VLAN insertion may require a VLAN descriptor to reset
	 * the sticky value to 0.
	 */
	descs_required += nb_vlan_descs;

	/*
	 * The max fill level must be sufficient to hold all the descriptors
	 * required to send the packet entirely.
	 */
	if (descs_required > max_fill_level)
		return ENOBUFS;

	return 0;
}

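/*
 * A minimal sketch (hypothetical, not an actual datapath) of a pkt_prepare
 * callback built on top of this helper; the example_txq fields and the
 * EXAMPLE_TSO_OPT_DESCS constant are assumptions:
 *
 *	static uint16_t
 *	example_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 *				uint16_t nb_pkts)
 *	{
 *		struct example_txq *txq = tx_queue;
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_pkts; i++) {
 *			int ret;
 *
 *			ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
 *					txq->tso_tcp_header_offset_limit,
 *					txq->max_fill_level,
 *					EXAMPLE_TSO_OPT_DESCS, 0);
 *			if (unlikely(ret != 0)) {
 *				rte_errno = ret;
 *				break;
 *			}
 *		}
 *
 *		return i;
 *	}
 */
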
extern struct sfc_dp_tx sfc_efx_tx;
extern struct sfc_dp_tx sfc_ef10_tx;
extern struct sfc_dp_tx sfc_ef10_simple_tx;

#ifdef __cplusplus
}
#endif
#endif /* _SFC_DP_TX_H */