xref: /dpdk/drivers/net/sfc/sfc_ef100_tx.c (revision 323b626a860c487e0efe7c3b5c56e62dde28f4e1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2018-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <stdbool.h>
11 
12 #include <rte_mbuf.h>
13 #include <rte_mbuf_dyn.h>
14 #include <rte_io.h>
15 #include <rte_net.h>
16 
17 #include "efx.h"
18 #include "efx_types.h"
19 #include "efx_regs.h"
20 #include "efx_regs_ef100.h"
21 
22 #include "sfc_debug.h"
23 #include "sfc_dp_tx.h"
24 #include "sfc_tweak.h"
25 #include "sfc_kvargs.h"
26 #include "sfc_ef100.h"
27 #include "sfc_nic_dma_dp.h"
28 
29 
30 #define sfc_ef100_tx_err(_txq, ...) \
31 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_txq)->dp.dpq, __VA_ARGS__)
32 
33 #define sfc_ef100_tx_debug(_txq, ...) \
34 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_txq)->dp.dpq, \
35 		   __VA_ARGS__)
36 
37 
38 /** Maximum length of the send descriptor data */
39 #define SFC_EF100_TX_SEND_DESC_LEN_MAX \
40 	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)
41 
42 /** Maximum length of the segment descriptor data */
43 #define SFC_EF100_TX_SEG_DESC_LEN_MAX \
44 	((1u << ESF_GZ_TX_SEG_LEN_WIDTH) - 1)
45 
46 /**
47  * Maximum number of descriptors/buffers in the Tx ring.
48  * It should guarantee that the corresponding event queue never overfills.
49  * EF100 native datapath uses event queue of the same size as Tx queue.
50  * Maximum number of events on datapath can be estimated as number of
51  * Tx queue entries (one event per Tx buffer in the worst case) plus
52  * Tx error and flush events.
53  */
54 #define SFC_EF100_TXQ_LIMIT(_ndesc) \
55 	((_ndesc) - 1 /* head must not step on tail */ - \
56 	 1 /* Tx error */ - 1 /* flush */)
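/*
 * Worked example: for a 1024-entry Tx ring the limit above is
 * 1024 - 1 - 1 - 1 = 1021 descriptors in flight, leaving room for the
 * head/tail gap and for the error and flush events.
 */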
57 
58 struct sfc_ef100_tx_sw_desc {
59 	struct rte_mbuf			*mbuf;
60 };
61 
62 struct sfc_ef100_txq {
63 	unsigned int			flags;
64 #define SFC_EF100_TXQ_STARTED		0x1
65 #define SFC_EF100_TXQ_NOT_RUNNING	0x2
66 #define SFC_EF100_TXQ_EXCEPTION		0x4
67 #define SFC_EF100_TXQ_NIC_DMA_MAP	0x8
68 
69 	unsigned int			ptr_mask;
70 	unsigned int			added;
71 	unsigned int			completed;
72 	unsigned int			max_fill_level;
73 	unsigned int			free_thresh;
74 	struct sfc_ef100_tx_sw_desc	*sw_ring;
75 	efx_oword_t			*txq_hw_ring;
76 	volatile void			*doorbell;
77 
78 	/* Completion/reap */
79 	unsigned int			evq_read_ptr;
80 	unsigned int			evq_phase_bit_shift;
81 	volatile efx_qword_t		*evq_hw_ring;
82 
83 	uint16_t			tso_tcp_header_offset_limit;
84 	uint16_t			tso_max_nb_header_descs;
85 	uint16_t			tso_max_header_len;
86 	uint16_t			tso_max_nb_payload_descs;
87 	uint32_t			tso_max_payload_len;
88 	uint32_t			tso_max_nb_outgoing_frames;
89 
90 	/* Datapath transmit queue anchor */
91 	struct sfc_dp_txq		dp;
92 
93 	const struct sfc_nic_dma_info	*nic_dma_info;
94 };
95 
96 static inline struct sfc_ef100_txq *
97 sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
98 {
99 	return container_of(dp_txq, struct sfc_ef100_txq, dp);
100 }
101 
102 static int
103 sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
104 			     struct rte_mbuf *m)
105 {
106 	size_t header_len = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
107 			     m->outer_l2_len + m->outer_l3_len : 0) +
108 			    m->l2_len + m->l3_len + m->l4_len;
109 	size_t payload_len = m->pkt_len - header_len;
110 	unsigned long mss_conformant_max_payload_len;
111 	unsigned int nb_payload_descs;
112 
113 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
114 	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
115 	case 0:
116 		/* FALLTHROUGH */
117 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
118 		/* FALLTHROUGH */
119 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
120 		break;
121 	default:
122 		return ENOTSUP;
123 	}
124 #endif
125 
126 	mss_conformant_max_payload_len =
127 		m->tso_segsz * txq->tso_max_nb_outgoing_frames;
128 
129 	/*
130 	 * We don't really need the exact number of payload segments;
131 	 * just use the total number of segments as an upper limit.
132 	 * In practice, the maximum number of payload segments is
133 	 * significantly bigger than the maximum number of header
134 	 * segments, so the total number of segments can be used to
135 	 * estimate the number of payload segments required.
136 	 */
137 	nb_payload_descs = m->nb_segs;
138 
139 	/*
140 	 * Carry out multiple independent checks using bitwise OR
141 	 * to avoid unnecessary conditional branching.
142 	 */
143 	if (unlikely((header_len > txq->tso_max_header_len) |
144 		     (nb_payload_descs > txq->tso_max_nb_payload_descs) |
145 		     (payload_len > txq->tso_max_payload_len) |
146 		     (payload_len > mss_conformant_max_payload_len) |
147 		     (m->pkt_len == header_len)))
148 		return EINVAL;
149 
150 	return 0;
151 }
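/*
 * Worked example (illustrative values, not taken from any datasheet):
 * with tso_segsz = 1400 and tso_max_nb_outgoing_frames = 32768, the
 * MSS-conformant limit is 1400 * 32768 = 45875200 bytes, so a larger
 * TSO payload is rejected with EINVAL even if it fits tso_max_payload_len.
 */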
152 
153 static uint16_t
154 sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
155 			  uint16_t nb_pkts)
156 {
157 	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
158 	uint16_t i;
159 
160 	for (i = 0; i < nb_pkts; i++) {
161 		struct rte_mbuf *m = tx_pkts[i];
162 		unsigned int max_nb_header_segs = 0;
163 		bool calc_phdr_cksum = false;
164 		int ret;
165 
166 		/*
167 		 * Partial checksum offload is used in the case of
168 		 * inner TCP/UDP checksum offload. It requires a
169 		 * pseudo-header checksum, which is calculated below,
170 		 * and it requires contiguous packet headers.
171 		 */
172 		if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
173 		    (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)) {
174 			calc_phdr_cksum = true;
175 			max_nb_header_segs = 1;
176 		} else if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
177 			max_nb_header_segs = txq->tso_max_nb_header_descs;
178 		}
179 
180 		ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,
181 					    txq->tso_tcp_header_offset_limit,
182 					    txq->max_fill_level, 1, 0);
183 		if (unlikely(ret != 0)) {
184 			rte_errno = ret;
185 			break;
186 		}
187 
188 		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
189 			ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
190 			if (unlikely(ret != 0)) {
191 				rte_errno = ret;
192 				break;
193 			}
194 		} else if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {
195 			rte_errno = EINVAL;
196 			break;
197 		}
198 
199 		if (calc_phdr_cksum) {
200 			/*
201 			 * Full checksum offload handles the IPv4 header
202 			 * checksum and does not require any assistance.
203 			 */
204 			ret = rte_net_intel_cksum_flags_prepare(m,
205 					m->ol_flags & ~RTE_MBUF_F_TX_IP_CKSUM);
206 			if (unlikely(ret != 0)) {
207 				rte_errno = -ret;
208 				break;
209 			}
210 		}
211 	}
212 
213 	return i;
214 }
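/*
 * Usage sketch (hypothetical application code; field values are just
 * examples): the prepare callback above expects TSO mbufs to carry the
 * header lengths and MSS that it validates, e.g.
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->tso_segsz = 1400;
 *	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
 *		       RTE_MBUF_F_TX_IP_CKSUM;
 *	if (rte_eth_tx_prepare(port_id, queue_id, &m, 1) == 1)
 *		(void)rte_eth_tx_burst(port_id, queue_id, &m, 1);
 */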
215 
216 static bool
217 sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
218 {
219 	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
220 
221 	/*
222 	 * The exception flag is set during reap. Reap is never done
223 	 * twice per packet burst get, and absence of the flag is
224 	 * checked on burst get entry.
225 	 */
226 	SFC_ASSERT((txq->flags & SFC_EF100_TXQ_EXCEPTION) == 0);
227 
228 	*ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
229 
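	/*
	 * An event is valid only if its phase bit matches the expected
	 * phase, which flips every time evq_read_ptr wraps around the
	 * event queue (evq_phase_bit_shift is log2 of the queue size).
	 */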
230 	if (!sfc_ef100_ev_present(ev,
231 			(txq->evq_read_ptr >> txq->evq_phase_bit_shift) & 1))
232 		return false;
233 
234 	if (unlikely(!sfc_ef100_ev_type_is(ev,
235 					   ESE_GZ_EF100_EV_TX_COMPLETION))) {
236 		/*
237 		 * Do not move read_ptr to keep the event for exception
238 		 * handling by the control path.
239 		 */
240 		txq->flags |= SFC_EF100_TXQ_EXCEPTION;
241 		sfc_ef100_tx_err(txq,
242 			"TxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
243 			txq->evq_read_ptr, txq->evq_read_ptr & txq->ptr_mask,
244 			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
245 			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
246 		return false;
247 	}
248 
249 	sfc_ef100_tx_debug(txq, "TxQ got event %08x:%08x at %u (%#x)",
250 			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
251 			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
252 			   txq->evq_read_ptr,
253 			   txq->evq_read_ptr & txq->ptr_mask);
254 
255 	txq->evq_read_ptr++;
256 	return true;
257 }
258 
259 static unsigned int
260 sfc_ef100_tx_process_events(struct sfc_ef100_txq *txq)
261 {
262 	unsigned int num_descs = 0;
263 	efx_qword_t tx_ev;
264 
265 	while (sfc_ef100_tx_get_event(txq, &tx_ev))
266 		num_descs += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);
267 
268 	return num_descs;
269 }
270 
271 static void
272 sfc_ef100_tx_reap_num_descs(struct sfc_ef100_txq *txq, unsigned int num_descs)
273 {
274 	if (num_descs > 0) {
275 		unsigned int completed = txq->completed;
276 		unsigned int pending = completed + num_descs;
277 		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
278 		unsigned int nb = 0;
279 
280 		do {
281 			struct sfc_ef100_tx_sw_desc *txd;
282 			struct rte_mbuf *m;
283 
284 			txd = &txq->sw_ring[completed & txq->ptr_mask];
285 			if (txd->mbuf == NULL)
286 				continue;
287 
288 			m = rte_pktmbuf_prefree_seg(txd->mbuf);
289 			if (m == NULL)
290 				continue;
291 
292 			txd->mbuf = NULL;
293 
294 			if (nb == RTE_DIM(bulk) ||
295 			    (nb != 0 && m->pool != bulk[0]->pool)) {
296 				rte_mempool_put_bulk(bulk[0]->pool,
297 						     (void *)bulk, nb);
298 				nb = 0;
299 			}
300 
301 			bulk[nb++] = m;
302 		} while (++completed != pending);
303 
304 		if (nb != 0)
305 			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
306 
307 		txq->completed = completed;
308 	}
309 }
310 
311 static void
312 sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
313 {
314 	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
315 }
316 
317 static void
318 sfc_ef100_tx_qdesc_prefix_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
319 {
320 	efx_mport_id_t *mport_id =
321 		RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset, efx_mport_id_t *);
322 
323 	EFX_POPULATE_OWORD_3(*tx_desc,
324 			ESF_GZ_TX_PREFIX_EGRESS_MPORT,
325 			mport_id->id,
326 			ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1,
327 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX);
328 }
329 
330 static uint8_t
331 sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
332 {
333 	uint8_t inner_l3;
334 
335 	switch (tx_tunnel) {
336 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
337 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
338 		break;
339 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
340 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
341 		break;
342 	default:
343 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
344 		break;
345 	}
346 	return inner_l3;
347 }
348 
349 static int
350 sfc_ef100_tx_map(const struct sfc_ef100_txq *txq, rte_iova_t iova, size_t len,
351 		 rte_iova_t *dma_addr)
352 {
353 	if ((txq->flags & SFC_EF100_TXQ_NIC_DMA_MAP) == 0) {
354 		*dma_addr = iova;
355 	} else {
356 		*dma_addr = sfc_nic_dma_map(txq->nic_dma_info, iova, len);
357 		if (unlikely(*dma_addr == RTE_BAD_IOVA))
358 			sfc_ef100_tx_err(txq, "failed to map DMA address on Tx");
359 	}
360 	return 0;
361 }
362 
363 static int
364 sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq,
365 			       const struct rte_mbuf *m, efx_oword_t *tx_desc)
366 {
367 	bool outer_l3;
368 	bool outer_l4;
369 	uint8_t inner_l3;
370 	uint8_t partial_en;
371 	uint16_t part_cksum_w;
372 	uint16_t l4_offset_w;
373 	rte_iova_t dma_addr;
374 	int rc;
375 
376 	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) == 0) {
377 		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
378 		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK);
379 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
380 		partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
381 		part_cksum_w = 0;
382 		l4_offset_w = 0;
383 	} else {
384 		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
385 		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
386 		inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &
387 							   RTE_MBUF_F_TX_TUNNEL_MASK);
388 
389 		switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
390 		case RTE_MBUF_F_TX_TCP_CKSUM:
391 			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
392 			part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
393 			break;
394 		case RTE_MBUF_F_TX_UDP_CKSUM:
395 			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
396 			part_cksum_w = offsetof(struct rte_udp_hdr,
397 						dgram_cksum) >> 1;
398 			break;
399 		default:
400 			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
401 			part_cksum_w = 0;
402 			break;
403 		}
404 		l4_offset_w = (m->outer_l2_len + m->outer_l3_len +
405 				m->l2_len + m->l3_len) >> 1;
406 	}
407 
408 	rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m),
409 			      rte_pktmbuf_data_len(m), &dma_addr);
410 	if (unlikely(rc != 0))
411 		return rc;
412 
413 	EFX_POPULATE_OWORD_10(*tx_desc,
414 			ESF_GZ_TX_SEND_ADDR, dma_addr,
415 			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
416 			ESF_GZ_TX_SEND_NUM_SEGS, m->nb_segs,
417 			ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, l4_offset_w,
418 			ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, part_cksum_w,
419 			ESF_GZ_TX_SEND_CSO_PARTIAL_EN, partial_en,
420 			ESF_GZ_TX_SEND_CSO_INNER_L3, inner_l3,
421 			ESF_GZ_TX_SEND_CSO_OUTER_L3, outer_l3,
422 			ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
423 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
424 
425 	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
426 		efx_oword_t tx_desc_extra_fields;
427 
428 		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
429 				ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
430 				ESF_GZ_TX_SEND_VLAN_INSERT_TCI, m->vlan_tci);
431 
432 		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
433 	}
434 
435 	return 0;
436 }
437 
438 static void
439 sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,
440 			      efx_oword_t *tx_desc)
441 {
442 	EFX_POPULATE_OWORD_3(*tx_desc,
443 			ESF_GZ_TX_SEG_ADDR, addr,
444 			ESF_GZ_TX_SEG_LEN, len,
445 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);
446 }
447 
448 static void
449 sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
450 			      uint16_t nb_header_descs,
451 			      uint16_t nb_payload_descs,
452 			      size_t header_len, size_t payload_len,
453 			      size_t outer_iph_off, size_t outer_udph_off,
454 			      size_t iph_off, size_t tcph_off,
455 			      efx_oword_t *tx_desc)
456 {
457 	efx_oword_t tx_desc_extra_fields;
458 	int ed_outer_udp_len = (outer_udph_off != 0) ? 1 : 0;
459 	int ed_outer_ip_len = (outer_iph_off != 0) ? 1 : 0;
460 	int ed_outer_ip_id = (outer_iph_off != 0) ?
461 		ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 : 0;
462 	/*
463 	 * If no tunnel encapsulation is present, then the ED_INNER
464 	 * fields should be used.
465 	 */
466 	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
467 	uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
468 					m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
469 
470 	EFX_POPULATE_OWORD_10(*tx_desc,
471 			ESF_GZ_TX_TSO_MSS, m->tso_segsz,
472 			ESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,
473 			ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,
474 			ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, ed_outer_ip_id,
475 			ESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,
476 			ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, ed_outer_ip_len,
477 			ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
478 			ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, ed_outer_udp_len,
479 			ESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,
480 			ESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);
481 
482 	EFX_POPULATE_OWORD_9(tx_desc_extra_fields,
483 			/*
484 			 * Outer offsets are required for outer IPv4 ID
485 			 * and length edits in the case of tunnel TSO.
486 			 */
487 			ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_iph_off >> 1,
488 			ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_udph_off >> 1,
489 			/*
490 			 * Inner offsets are required for inner IPv4 ID
491 			 * and IP length edits and partial checksum
492 			 * offload in the case of tunnel TSO.
493 			 */
494 			ESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,
495 			ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,
496 			ESF_GZ_TX_TSO_CSO_INNER_L4,
497 				inner_l3 != ESE_GZ_TX_DESC_CS_INNER_L3_OFF,
498 			ESF_GZ_TX_TSO_CSO_INNER_L3, inner_l3,
499 			/*
500 			 * Use outer full checksum offloads which do
501 			 * not require any extra information.
502 			 */
503 			ESF_GZ_TX_TSO_CSO_OUTER_L3, 1,
504 			ESF_GZ_TX_TSO_CSO_OUTER_L4, 1,
505 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);
506 
507 	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
508 
509 	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
510 		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
511 				ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
512 				ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
513 
514 		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
515 	}
516 }
517 
518 static inline void
519 sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
520 {
521 	efx_dword_t dword;
522 
523 	EFX_POPULATE_DWORD_1(dword, ERF_GZ_TX_RING_PIDX, added & txq->ptr_mask);
524 
525 	/* DMA sync to device is not required */
526 
527 	/*
528 	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
529 	 * operations (i.e. Tx and event descriptor updates) that precede
530 	 * the rte_io_wmb() call are visible to NIC before the STORE
531 	 * operations that follow it (i.e. doorbell write).
532 	 */
533 	rte_write32(dword.ed_u32[0], txq->doorbell);
534 	txq->dp.dpq.dbells++;
535 
536 	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
537 			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),
538 			   added);
539 }
540 
541 static unsigned int
542 sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
543 {
544 	unsigned int extra_descs = 0;
545 
546 /** Maximum length of mbuf segment data */
547 #define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
548 	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
549 
550 	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
551 		/* Tx TSO descriptor */
552 		extra_descs++;
553 		/*
554 		 * Extra Tx segment descriptor may be required if header
555 		 * An extra Tx segment descriptor may be required if the
556 		 * header ends in the middle of a segment.
557 		extra_descs++;
558 	} else {
559 		/*
560 		 * A non-TSO mbuf segment cannot be bigger than either the
561 		 * maximum segment length or the maximum packet length.
562 		 * Make sure that the first segment does not need
563 		 * fragmentation (a split into many Tx descriptors).
564 		 */
565 		RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
566 				 RTE_MIN_T(EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX, uint32_t));
567 	}
568 
569 	if (m->ol_flags & sfc_dp_mport_override) {
570 		/* Tx override prefix descriptor will be used */
571 		extra_descs++;
572 	}
573 
574 	/*
575 	 * Any segment of scattered packet cannot be bigger than maximum
576 	 * segment length. Make sure that subsequent segments do not need
577 	 * fragmentation (split into many Tx descriptors).
578 	 */
579 	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX < SFC_MBUF_SEG_LEN_MAX);
580 
581 	return m->nb_segs + extra_descs;
582 }
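/*
 * Worked example (hypothetical packet): a 3-segment TSO mbuf chain with
 * an egress m-port override is estimated above at 3 (segments) +
 * 1 (TSO descriptor) + 1 (possible header/payload split) +
 * 1 (prefix descriptor) = 6 Tx descriptors.
 */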
583 
584 static int
585 sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
586 		       struct rte_mbuf **m, unsigned int *added)
587 {
588 	struct rte_mbuf *m_seg = *m;
589 	unsigned int nb_hdr_descs;
590 	unsigned int nb_pld_descs;
591 	unsigned int seg_split = 0;
592 	unsigned int tso_desc_id;
593 	unsigned int id;
594 	size_t outer_iph_off;
595 	size_t outer_udph_off;
596 	size_t iph_off;
597 	size_t tcph_off;
598 	size_t header_len;
599 	size_t remaining_hdr_len;
600 	rte_iova_t dma_addr;
601 	int rc;
602 
603 	if (m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
604 		outer_iph_off = m_seg->outer_l2_len;
605 		outer_udph_off = outer_iph_off + m_seg->outer_l3_len;
606 	} else {
607 		outer_iph_off = 0;
608 		outer_udph_off = 0;
609 	}
610 	iph_off = outer_udph_off + m_seg->l2_len;
611 	tcph_off = iph_off + m_seg->l3_len;
612 	header_len = tcph_off + m_seg->l4_len;
613 
614 	/*
615 	 * Remember the ID of the TX_TSO descriptor to be filled in.
616 	 * We can't fill it in right now since we need to calculate
617 	 * the number of header and payload segments first and don't
618 	 * want to traverse the mbuf chain twice here.
619 	 */
620 	tso_desc_id = (*added)++ & txq->ptr_mask;
621 
622 	remaining_hdr_len = header_len;
623 	do {
624 		rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
625 				      rte_pktmbuf_data_len(m_seg), &dma_addr);
626 		if (unlikely(rc != 0))
627 			return rc;
628 
629 		id = (*added)++ & txq->ptr_mask;
630 		if (rte_pktmbuf_data_len(m_seg) <= remaining_hdr_len) {
631 			/* The segment is entirely a header segment */
632 			sfc_ef100_tx_qdesc_seg_create(dma_addr,
633 				rte_pktmbuf_data_len(m_seg),
634 				&txq->txq_hw_ring[id]);
635 			remaining_hdr_len -= rte_pktmbuf_data_len(m_seg);
636 		} else {
637 			/*
638 			 * The segment must be split into header and
639 			 * payload segments
640 			 */
641 			sfc_ef100_tx_qdesc_seg_create(dma_addr,
642 				remaining_hdr_len, &txq->txq_hw_ring[id]);
643 			txq->sw_ring[id].mbuf = NULL;
644 
645 			id = (*added)++ & txq->ptr_mask;
646 			sfc_ef100_tx_qdesc_seg_create(
647 				dma_addr + remaining_hdr_len,
648 				rte_pktmbuf_data_len(m_seg) - remaining_hdr_len,
649 				&txq->txq_hw_ring[id]);
650 			remaining_hdr_len = 0;
651 			seg_split = 1;
652 		}
653 		txq->sw_ring[id].mbuf = m_seg;
654 		m_seg = m_seg->next;
655 	} while (remaining_hdr_len > 0);
656 
657 	/*
658 	 * If a segment is split into header and payload segments, the
659 	 * added counter counts it twice, so correct for that here.
660 	 */
661 	nb_hdr_descs = ((id - tso_desc_id) & txq->ptr_mask) - seg_split;
662 	nb_pld_descs = (*m)->nb_segs - nb_hdr_descs + seg_split;
663 
664 	sfc_ef100_tx_qdesc_tso_create(*m, nb_hdr_descs, nb_pld_descs, header_len,
665 				      rte_pktmbuf_pkt_len(*m) - header_len,
666 				      outer_iph_off, outer_udph_off,
667 				      iph_off, tcph_off,
668 				      &txq->txq_hw_ring[tso_desc_id]);
669 
670 	*m = m_seg;
671 	return 0;
672 }
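/*
 * Worked example (illustrative lengths): with 14/20/20-byte L2/L3/L4
 * headers (header_len = 54) and a 1514-byte first segment, the loop
 * above emits a 54-byte header segment descriptor plus a 1460-byte
 * payload segment descriptor for that mbuf, and seg_split compensates
 * for that mbuf being counted twice in the header/payload totals.
 */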
673 
674 static uint16_t
675 sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
676 {
677 	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
678 	unsigned int added;
679 	unsigned int dma_desc_space;
680 	bool reap_done;
681 	struct rte_mbuf **pktp;
682 	struct rte_mbuf **pktp_end;
683 	rte_iova_t dma_addr;
684 	int rc;
685 
686 	if (unlikely(txq->flags &
687 		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
688 		return 0;
689 
690 	added = txq->added;
691 	dma_desc_space = txq->max_fill_level - (added - txq->completed);
692 
693 	reap_done = (dma_desc_space < txq->free_thresh);
694 	if (reap_done) {
695 		sfc_ef100_tx_reap(txq);
696 		dma_desc_space = txq->max_fill_level - (added - txq->completed);
697 	}
698 
699 	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
700 	     pktp != pktp_end;
701 	     ++pktp) {
702 		struct rte_mbuf *m_seg = *pktp;
703 		unsigned int pkt_start = added;
704 		unsigned int id;
705 
706 		if (likely(pktp + 1 != pktp_end))
707 			rte_mbuf_prefetch_part1(pktp[1]);
708 
709 		if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space) {
710 			if (reap_done)
711 				break;
712 
713 			/* Push already prepared descriptors before polling */
714 			if (added != txq->added) {
715 				sfc_ef100_tx_qpush(txq, added);
716 				txq->added = added;
717 			}
718 
719 			sfc_ef100_tx_reap(txq);
720 			reap_done = true;
721 			dma_desc_space = txq->max_fill_level -
722 				(added - txq->completed);
723 			if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space)
724 				break;
725 		}
726 
727 		if (m_seg->ol_flags & sfc_dp_mport_override) {
728 			id = added++ & txq->ptr_mask;
729 			sfc_ef100_tx_qdesc_prefix_create(m_seg,
730 							 &txq->txq_hw_ring[id]);
731 			txq->sw_ring[id].mbuf = NULL;
732 		}
733 
734 		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
735 			rc = sfc_ef100_xmit_tso_pkt(txq, &m_seg, &added);
736 		} else {
737 			id = added++ & txq->ptr_mask;
738 			rc = sfc_ef100_tx_qdesc_send_create(txq, m_seg,
739 							&txq->txq_hw_ring[id]);
740 
741 			/*
742 			 * rte_pktmbuf_free() is commonly used in DPDK to
743 			 * recycle packets: the function checks every
744 			 * segment's reference counter and returns the
745 			 * buffer to its pool whenever possible. However,
746 			 * freeing mbuf segments one by one may entail some
747 			 * performance decline. sfc_ef100_tx_reap() does the
748 			 * same job on its own and frees buffers in bulks
749 			 * (all mbufs within a bulk belong to the same
750 			 * pool). From this perspective, individual segment
751 			 * pointers must be associated with the
752 			 * corresponding SW descriptors independently so
753 			 * that a single loop on reap is sufficient to
754 			 * inspect all the buffers.
755 			 */
756 			txq->sw_ring[id].mbuf = m_seg;
757 			m_seg = m_seg->next;
758 		}
759 
760 		while (likely(rc == 0) && m_seg != NULL) {
761 			RTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >
762 					 SFC_EF100_TX_SEG_DESC_LEN_MAX);
763 
764 			id = added++ & txq->ptr_mask;
765 			rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
766 					      rte_pktmbuf_data_len(m_seg),
767 					      &dma_addr);
768 			sfc_ef100_tx_qdesc_seg_create(dma_addr,
769 					rte_pktmbuf_data_len(m_seg),
770 					&txq->txq_hw_ring[id]);
771 			txq->sw_ring[id].mbuf = m_seg;
772 			m_seg = m_seg->next;
773 		}
774 
775 		if (likely(rc == 0)) {
776 			dma_desc_space -= (added - pkt_start);
777 
778 			sfc_pkts_bytes_add(&txq->dp.dpq.stats, 1,
779 					   rte_pktmbuf_pkt_len(*pktp));
780 		} else {
781 			added = pkt_start;
782 		}
783 	}
784 
785 	if (likely(added != txq->added)) {
786 		sfc_ef100_tx_qpush(txq, added);
787 		txq->added = added;
788 	}
789 
790 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
791 	if (!reap_done)
792 		sfc_ef100_tx_reap(txq);
793 #endif
794 
795 	return pktp - &tx_pkts[0];
796 }
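/*
 * Usage sketch (hypothetical application loop): like any tx_pkt_burst
 * callback, sfc_ef100_xmit_pkts() may send fewer packets than requested
 * when descriptor space runs out, so callers typically retry the tail:
 *
 *	uint16_t sent = 0;
 *	while (sent < nb_pkts)
 *		sent += rte_eth_tx_burst(port_id, queue_id,
 *					 &pkts[sent], nb_pkts - sent);
 */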
797 
798 static sfc_dp_tx_get_dev_info_t sfc_ef100_get_dev_info;
799 static void
800 sfc_ef100_get_dev_info(struct rte_eth_dev_info *dev_info)
801 {
802 	/*
803 	 * The number of descriptors just defines the maximum number of
804 	 * pushed descriptors (the fill level).
805 	 */
806 	dev_info->tx_desc_lim.nb_min = 1;
807 	dev_info->tx_desc_lim.nb_align = 1;
808 }
809 
810 static sfc_dp_tx_qsize_up_rings_t sfc_ef100_tx_qsize_up_rings;
811 static int
812 sfc_ef100_tx_qsize_up_rings(uint16_t nb_tx_desc,
813 			   struct sfc_dp_tx_hw_limits *limits,
814 			   unsigned int *txq_entries,
815 			   unsigned int *evq_entries,
816 			   unsigned int *txq_max_fill_level)
817 {
818 	/*
819 	 * rte_ethdev API guarantees that the number meets min, max and
820 	 * alignment requirements.
821 	 */
822 	if (nb_tx_desc <= limits->txq_min_entries)
823 		*txq_entries = limits->txq_min_entries;
824 	else
825 		*txq_entries = rte_align32pow2(nb_tx_desc);
826 
827 	*evq_entries = *txq_entries;
828 
829 	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
830 				      SFC_EF100_TXQ_LIMIT(*evq_entries));
831 	return 0;
832 }
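/*
 * Worked example: for nb_tx_desc = 1000 (above the queue minimum),
 * txq_entries = rte_align32pow2(1000) = 1024, evq_entries = 1024 and
 * txq_max_fill_level = RTE_MIN(1000, SFC_EF100_TXQ_LIMIT(1024)) =
 * RTE_MIN(1000, 1021) = 1000.
 */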
833 
834 static sfc_dp_tx_qcreate_t sfc_ef100_tx_qcreate;
835 static int
836 sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,
837 		    const struct rte_pci_addr *pci_addr, int socket_id,
838 		    const struct sfc_dp_tx_qcreate_info *info,
839 		    struct sfc_dp_txq **dp_txqp)
840 {
841 	struct sfc_ef100_txq *txq;
842 	int rc;
843 
844 	rc = EINVAL;
845 	if (info->txq_entries != info->evq_entries)
846 		goto fail_bad_args;
847 
848 	rc = ENOMEM;
849 	txq = rte_zmalloc_socket("sfc-ef100-txq", sizeof(*txq),
850 				 RTE_CACHE_LINE_SIZE, socket_id);
851 	if (txq == NULL)
852 		goto fail_txq_alloc;
853 
854 	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
855 
856 	rc = ENOMEM;
857 	txq->sw_ring = rte_calloc_socket("sfc-ef100-txq-sw_ring",
858 					 info->txq_entries,
859 					 sizeof(*txq->sw_ring),
860 					 RTE_CACHE_LINE_SIZE, socket_id);
861 	if (txq->sw_ring == NULL)
862 		goto fail_sw_ring_alloc;
863 
864 	txq->flags = SFC_EF100_TXQ_NOT_RUNNING;
865 	txq->ptr_mask = info->txq_entries - 1;
866 	txq->max_fill_level = info->max_fill_level;
867 	txq->free_thresh = info->free_thresh;
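	/* The event phase bit flips on each EvQ wrap: shift is log2(entries) */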
868 	txq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
869 	txq->txq_hw_ring = info->txq_hw_ring;
870 	txq->doorbell = (volatile uint8_t *)info->mem_bar +
871 			ER_GZ_TX_RING_DOORBELL_OFST +
872 			(info->hw_index << info->vi_window_shift);
873 	txq->evq_hw_ring = info->evq_hw_ring;
874 
875 	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
876 	txq->tso_max_nb_header_descs = info->tso_max_nb_header_descs;
877 	txq->tso_max_header_len = info->tso_max_header_len;
878 	txq->tso_max_nb_payload_descs = info->tso_max_nb_payload_descs;
879 	txq->tso_max_payload_len = info->tso_max_payload_len;
880 	txq->tso_max_nb_outgoing_frames = info->tso_max_nb_outgoing_frames;
881 
882 	txq->nic_dma_info = info->nic_dma_info;
883 	if (txq->nic_dma_info->nb_regions > 0)
884 		txq->flags |= SFC_EF100_TXQ_NIC_DMA_MAP;
885 
886 	sfc_ef100_tx_debug(txq, "TxQ doorbell is %p", txq->doorbell);
887 
888 	*dp_txqp = &txq->dp;
889 	return 0;
890 
891 fail_sw_ring_alloc:
892 	rte_free(txq);
893 
894 fail_txq_alloc:
895 fail_bad_args:
896 	return rc;
897 }
898 
899 static sfc_dp_tx_qdestroy_t sfc_ef100_tx_qdestroy;
900 static void
901 sfc_ef100_tx_qdestroy(struct sfc_dp_txq *dp_txq)
902 {
903 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
904 
905 	rte_free(txq->sw_ring);
906 	rte_free(txq);
907 }
908 
909 static sfc_dp_tx_qstart_t sfc_ef100_tx_qstart;
910 static int
911 sfc_ef100_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
912 		   unsigned int txq_desc_index)
913 {
914 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
915 
916 	txq->evq_read_ptr = evq_read_ptr;
917 	txq->added = txq->completed = txq_desc_index;
918 
919 	txq->flags |= SFC_EF100_TXQ_STARTED;
920 	txq->flags &= ~(SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION);
921 
922 	return 0;
923 }
924 
925 static sfc_dp_tx_qstop_t sfc_ef100_tx_qstop;
926 static void
927 sfc_ef100_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
928 {
929 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
930 
931 	txq->flags |= SFC_EF100_TXQ_NOT_RUNNING;
932 
933 	*evq_read_ptr = txq->evq_read_ptr;
934 }
935 
936 static sfc_dp_tx_qtx_ev_t sfc_ef100_tx_qtx_ev;
937 static bool
938 sfc_ef100_tx_qtx_ev(struct sfc_dp_txq *dp_txq, unsigned int num_descs)
939 {
940 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
941 
942 	SFC_ASSERT(txq->flags & SFC_EF100_TXQ_NOT_RUNNING);
943 
944 	sfc_ef100_tx_reap_num_descs(txq, num_descs);
945 
946 	return false;
947 }
948 
949 static sfc_dp_tx_qreap_t sfc_ef100_tx_qreap;
950 static void
951 sfc_ef100_tx_qreap(struct sfc_dp_txq *dp_txq)
952 {
953 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
954 	unsigned int completed;
955 
956 	for (completed = txq->completed; completed != txq->added; ++completed) {
957 		struct sfc_ef100_tx_sw_desc *txd;
958 
959 		txd = &txq->sw_ring[completed & txq->ptr_mask];
960 		if (txd->mbuf != NULL) {
961 			rte_pktmbuf_free_seg(txd->mbuf);
962 			txd->mbuf = NULL;
963 		}
964 	}
965 
966 	txq->flags &= ~SFC_EF100_TXQ_STARTED;
967 }
968 
969 static unsigned int
970 sfc_ef100_tx_qdesc_npending(struct sfc_ef100_txq *txq)
971 {
972 	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
973 	unsigned int npending = 0;
974 	efx_qword_t tx_ev;
975 
976 	if (unlikely(txq->flags &
977 		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
978 		return 0;
979 
980 	while (sfc_ef100_tx_get_event(txq, &tx_ev))
981 		npending += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);
982 
983 	/*
984 	 * The function does not process events, so return the event queue
985 	 * read pointer to its original position to allow the events that
986 	 * were read to be processed later.
987 	 */
988 	txq->evq_read_ptr = evq_old_read_ptr;
989 
990 	return npending;
991 }
992 
993 static sfc_dp_tx_qdesc_status_t sfc_ef100_tx_qdesc_status;
994 static int
995 sfc_ef100_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
996 {
997 	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
998 	unsigned int pushed = txq->added - txq->completed;
999 
1000 	if (unlikely(offset > txq->ptr_mask))
1001 		return -EINVAL;
1002 
1003 	if (unlikely(offset >= txq->max_fill_level))
1004 		return RTE_ETH_TX_DESC_UNAVAIL;
1005 
1006 	return (offset >= pushed ||
1007 		offset < sfc_ef100_tx_qdesc_npending(txq)) ?
1008 		RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
1009 }
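/*
 * Usage sketch (hypothetical call): this callback backs
 * rte_eth_tx_descriptor_status(), e.g.
 *
 *	if (rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_TX_DESC_DONE)
 *		... the descriptor at that offset has been transmitted ...
 */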
1010 
1011 struct sfc_dp_tx sfc_ef100_tx = {
1012 	.dp = {
1013 		.name		= SFC_KVARG_DATAPATH_EF100,
1014 		.type		= SFC_DP_TX,
1015 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
1016 	},
1017 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
1018 				  SFC_DP_TX_FEAT_STATS,
1019 	.dev_offload_capa	= 0,
1020 	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1021 				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1022 				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1023 				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
1024 				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1025 				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1026 				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
1027 				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
1028 				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1029 				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
1030 	.get_dev_info		= sfc_ef100_get_dev_info,
1031 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
1032 	.qcreate		= sfc_ef100_tx_qcreate,
1033 	.qdestroy		= sfc_ef100_tx_qdestroy,
1034 	.qstart			= sfc_ef100_tx_qstart,
1035 	.qtx_ev			= sfc_ef100_tx_qtx_ev,
1036 	.qstop			= sfc_ef100_tx_qstop,
1037 	.qreap			= sfc_ef100_tx_qreap,
1038 	.qdesc_status		= sfc_ef100_tx_qdesc_status,
1039 	.pkt_prepare		= sfc_ef100_tx_prepare_pkts,
1040 	.pkt_burst		= sfc_ef100_xmit_pkts,
1041 };
1042