xref: /dpdk/drivers/net/sfc/sfc_ef10_tx.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <stdbool.h>
11 
12 #include <rte_mbuf.h>
13 #include <rte_io.h>
14 #include <rte_ip.h>
15 #include <rte_tcp.h>
16 
17 #include "efx.h"
18 #include "efx_types.h"
19 #include "efx_regs.h"
20 #include "efx_regs_ef10.h"
21 
22 #include "sfc_debug.h"
23 #include "sfc_dp_tx.h"
24 #include "sfc_tweak.h"
25 #include "sfc_kvargs.h"
26 #include "sfc_ef10.h"
27 #include "sfc_tso.h"
28 
29 #define sfc_ef10_tx_err(dpq, ...) \
30 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
31 
32 #define sfc_ef10_tx_info(dpq, ...) \
33 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)
34 
35 /** Maximum length of the DMA descriptor data */
36 #define SFC_EF10_TX_DMA_DESC_LEN_MAX \
37 	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
38 
39 /**
40  * Maximum number of descriptors/buffers in the Tx ring.
41  * It should guarantee that the corresponding event queue never overfills.
42  * EF10 native datapath uses event queue of the same size as Tx queue.
43  * Maximum number of events on datapath can be estimated as number of
44  * Tx queue entries (one event per Tx buffer in the worst case) plus
45  * Tx error and flush events.
46  */
47 #define SFC_EF10_TXQ_LIMIT(_ndesc) \
48 	((_ndesc) - 1 /* head must not step on tail */ - \
49 	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
50 	 1 /* Tx error */ - 1 /* flush */)
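/*
 * For example, SFC_EF10_TXQ_LIMIT(512) allows at most
 * 512 - 1 - (SFC_EF10_EV_PER_CACHE_LINE - 1) - 2 descriptors to be in
 * flight; assuming 8 events per 64-byte cache line, that is 502.
 */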
51 
52 struct sfc_ef10_tx_sw_desc {
53 	struct rte_mbuf			*mbuf;
54 };
55 
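/*
 * Note that the added and completed indices below are free-running
 * counters: they are wrapped to ring entries with ptr_mask only on
 * access, so their difference always gives the current fill level.
 */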
56 struct sfc_ef10_txq {
57 	unsigned int			flags;
58 #define SFC_EF10_TXQ_STARTED		0x1
59 #define SFC_EF10_TXQ_NOT_RUNNING	0x2
60 #define SFC_EF10_TXQ_EXCEPTION		0x4
61 
62 	unsigned int			ptr_mask;
63 	unsigned int			added;
64 	unsigned int			completed;
65 	unsigned int			max_fill_level;
66 	unsigned int			free_thresh;
67 	unsigned int			evq_read_ptr;
68 	struct sfc_ef10_tx_sw_desc	*sw_ring;
69 	efx_qword_t			*txq_hw_ring;
70 	volatile void			*doorbell;
71 	efx_qword_t			*evq_hw_ring;
72 	uint8_t				*tsoh;
73 	rte_iova_t			tsoh_iova;
74 	uint16_t			tso_tcp_header_offset_limit;
75 
76 	/* Datapath transmit queue anchor */
77 	struct sfc_dp_txq		dp;
78 };
79 
80 static inline struct sfc_ef10_txq *
81 sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
82 {
83 	return container_of(dp_txq, struct sfc_ef10_txq, dp);
84 }
85 
86 static bool
87 sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
88 {
89 	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
90 
91 	/*
92 	 * The exception flag is set when reap is done.
93 	 * Reap is never done twice per packet burst get, and absence of
94 	 * the flag is checked on burst get entry.
95 	 */
96 	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
97 
98 	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
99 
100 	if (!sfc_ef10_ev_present(*tx_ev))
101 		return false;
102 
103 	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
104 		     FSE_AZ_EV_CODE_TX_EV)) {
105 		/*
106 		 * Do not move read_ptr to keep the event for exception
107 		 * handling by the control path.
108 		 */
109 		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
110 		sfc_ef10_tx_err(&txq->dp.dpq,
111 				"TxQ exception at EvQ read ptr %#x",
112 				txq->evq_read_ptr);
113 		return false;
114 	}
115 
116 	txq->evq_read_ptr++;
117 	return true;
118 }
119 
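/*
 * Process pending Tx completion events and return the number of
 * descriptors completed since the previous reap. Each event carries
 * the index of the latest completed descriptor, so only the most
 * recent one matters.
 */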
120 static unsigned int
121 sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
122 {
123 	const unsigned int curr_done = txq->completed - 1;
124 	unsigned int anew_done = curr_done;
125 	efx_qword_t tx_ev;
126 
127 	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
128 		/*
129 		 * DROP_EVENT is internal to the NIC; software should
130 		 * never see it and, therefore, may ignore it.
131 		 */
132 
133 		/* Update the latest done descriptor */
134 		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
135 	}
136 	return (anew_done - curr_done) & txq->ptr_mask;
137 }
138 
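/*
 * Reap completed descriptors: free the associated mbufs in per-mempool
 * bulks, advance the completed index and clear the processed event
 * queue entries so that event presence is detected correctly on the
 * next ring wrap.
 */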
139 static void
140 sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
141 {
142 	const unsigned int old_read_ptr = txq->evq_read_ptr;
143 	const unsigned int ptr_mask = txq->ptr_mask;
144 	unsigned int completed = txq->completed;
145 	unsigned int pending = completed;
146 
147 	pending += sfc_ef10_tx_process_events(txq);
148 
149 	if (pending != completed) {
150 		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
151 		unsigned int nb = 0;
152 
153 		do {
154 			struct sfc_ef10_tx_sw_desc *txd;
155 			struct rte_mbuf *m;
156 
157 			txd = &txq->sw_ring[completed & ptr_mask];
158 			if (txd->mbuf == NULL)
159 				continue;
160 
161 			m = rte_pktmbuf_prefree_seg(txd->mbuf);
162 			txd->mbuf = NULL;
163 			if (m == NULL)
164 				continue;
165 
166 			if ((nb == RTE_DIM(bulk)) ||
167 			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
168 				rte_mempool_put_bulk(bulk[0]->pool,
169 						     (void *)bulk, nb);
170 				nb = 0;
171 			}
172 
173 			bulk[nb++] = m;
174 		} while (++completed != pending);
175 
176 		if (nb != 0)
177 			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
178 
179 		txq->completed = completed;
180 	}
181 
182 	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
183 			   txq->evq_read_ptr);
184 }
185 
186 static void
187 sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
188 			     efx_qword_t *edp)
189 {
190 	EFX_POPULATE_QWORD_4(*edp,
191 			     ESF_DZ_TX_KER_TYPE, 0,
192 			     ESF_DZ_TX_KER_CONT, !eop,
193 			     ESF_DZ_TX_KER_BYTE_CNT, size,
194 			     ESF_DZ_TX_KER_BUF_ADDR, addr);
195 }
196 
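/*
 * Write a pair of FATSOv2 option descriptors (variants A and B) which
 * carry the IP ID of the innermost header, the outer IP ID, the TCP
 * sequence number and the MSS.
 */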
197 static void
198 sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
199 			      unsigned int added, uint16_t ipv4_id,
200 			      uint16_t outer_ipv4_id, uint32_t tcp_seq,
201 			      uint16_t tcp_mss)
202 {
203 	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
204 			    ESF_DZ_TX_DESC_IS_OPT, 1,
205 			    ESF_DZ_TX_OPTION_TYPE,
206 			    ESE_DZ_TX_OPTION_DESC_TSO,
207 			    ESF_DZ_TX_TSO_OPTION_TYPE,
208 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
209 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
210 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
211 	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
212 			    ESF_DZ_TX_DESC_IS_OPT, 1,
213 			    ESF_DZ_TX_OPTION_TYPE,
214 			    ESE_DZ_TX_OPTION_DESC_TSO,
215 			    ESF_DZ_TX_TSO_OPTION_TYPE,
216 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
217 			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
218 			    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
219 }
220 
221 static inline void
222 sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
223 		  unsigned int pushed)
224 {
225 	efx_qword_t desc;
226 	efx_oword_t oword;
227 
228 	/*
229 	 * This improves performance by pushing a TX descriptor at the same
230 	 * time as the doorbell. The descriptor must be added to the TXQ,
231 	 * so that it can be used if the hardware decides not to use the pushed
232 	 * descriptor.
233 	 */
234 	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
235 	EFX_POPULATE_OWORD_3(oword,
236 		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
237 		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
238 		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
239 
240 	/* DMA sync to device is not required */
241 
242 	/*
243 	 * rte_io_wmb() guarantees that the STORE operations
244 	 * (i.e. Tx and event descriptor updates) that precede
245 	 * the rte_io_wmb() call are visible to the NIC before the STORE
246 	 * operations that follow it (i.e. the doorbell write).
247 	 */
248 	rte_io_wmb();
249 
250 	*(volatile efsys_uint128_t *)txq->doorbell = oword.eo_u128[0];
251 }
252 
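/*
 * Worst-case estimate of the number of DMA descriptors required to
 * send a non-TSO packet: one descriptor per segment plus extra
 * descriptors for segments longer than the DMA descriptor length limit.
 */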
253 static unsigned int
254 sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
255 {
256 	unsigned int extra_descs_per_seg;
257 	unsigned int extra_descs_per_pkt;
258 
259 	/*
260 	 * VLAN offload is not supported yet, so no extra descriptors
261 	 * are required for VLAN option descriptor.
262 	 */
263 
264 /** Maximum length of the mbuf segment data */
265 #define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
266 	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
267 
268 	/*
269 	 * Each segment is already counted once below.  So, calculate
270 	 * how many extra DMA descriptors may be required per segment in
271 	 * the worst case because of maximum DMA descriptor length limit.
272 	 * If the maximum segment length is less than or equal to the maximum DMA
273 	 * descriptor length, no extra DMA descriptors are required.
274 	 */
275 	extra_descs_per_seg =
276 		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
277 
278 /** Maximum length of the packet */
279 #define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
280 	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
281 
282 	/*
283 	 * One more limitation on the maximum number of extra DMA descriptors
284 	 * comes from slicing the entire packet because of the DMA descriptor
285 	 * length limit, taking into account that there is at least one segment
286 	 * already counted below (hence the rounded-down division of the
287 	 * maximum packet length minus one).
288 	 * TSO packets are handled by a dedicated function, so the packet
289 	 * length here is limited by the maximum PDU size.
290 	 */
291 	extra_descs_per_pkt =
292 		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
293 			 SFC_MBUF_PKT_LEN_MAX) - 1) /
294 		SFC_EF10_TX_DMA_DESC_LEN_MAX;
295 
296 	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
297 				    extra_descs_per_pkt);
298 }
299 
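/*
 * Push the descriptors prepared so far, reap completed descriptors
 * (at most once per burst) and recalculate the available descriptor
 * space. Returns true if the required number of descriptors fits in
 * the recalculated space.
 */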
300 static bool
301 sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
302 		  unsigned int needed_desc, unsigned int *dma_desc_space,
303 		  bool *reap_done)
304 {
305 	if (*reap_done)
306 		return false;
307 
308 	if (added != txq->added) {
309 		sfc_ef10_tx_qpush(txq, added, txq->added);
310 		txq->added = added;
311 	}
312 
313 	sfc_ef10_tx_reap(txq);
314 	*reap_done = true;
315 
316 	/*
317 	 * Recalculate DMA descriptor space since Tx reap may change
318 	 * the number of completed descriptors
319 	 */
320 	*dma_desc_space = txq->max_fill_level -
321 		(added - txq->completed);
322 
323 	return (needed_desc <= *dma_desc_space);
324 }
325 
326 static uint16_t
327 sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
328 		      uint16_t nb_pkts)
329 {
330 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
331 	uint16_t i;
332 
333 	for (i = 0; i < nb_pkts; i++) {
334 		struct rte_mbuf *m = tx_pkts[i];
335 		int ret;
336 
337 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
338 		/*
339 		 * In the non-TSO case, check that packet segments do not exceed
340 		 * the size limit. Perform the check in debug mode only since an
341 		 * MTU above 9K is not supported while the limit here is 16K-1.
342 		 */
343 		if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
344 			struct rte_mbuf *m_seg;
345 
346 			for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
347 				if (m_seg->data_len >
348 				    SFC_EF10_TX_DMA_DESC_LEN_MAX) {
349 					rte_errno = EINVAL;
350 					break;
351 				}
352 			}
353 		}
354 #endif
355 		ret = sfc_dp_tx_prepare_pkt(m, 0, SFC_TSOH_STD_LEN,
356 				txq->tso_tcp_header_offset_limit,
357 				txq->max_fill_level,
358 				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
359 		if (unlikely(ret != 0)) {
360 			rte_errno = ret;
361 			break;
362 		}
363 	}
364 
365 	return i;
366 }
367 
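/*
 * Transmit one TSO packet: emit the FATSOv2 option descriptor pair,
 * a separate DMA descriptor for the packet header (linearized into
 * the queue TSO header buffer if it spans several segments) and DMA
 * descriptors for the payload. Returns a positive errno on failure.
 */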
368 static int
369 sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
370 		      unsigned int *added, unsigned int *dma_desc_space,
371 		      bool *reap_done)
372 {
373 	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
374 			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
375 			 m_seg->l2_len;
376 	size_t tcph_off = iph_off + m_seg->l3_len;
377 	size_t header_len = tcph_off + m_seg->l4_len;
378 	/* Offset of the payload in the last segment that contains the header */
379 	size_t in_off = 0;
380 	const struct rte_tcp_hdr *th;
381 	uint16_t packet_id = 0;
382 	uint16_t outer_packet_id = 0;
383 	uint32_t sent_seq;
384 	uint8_t *hdr_addr;
385 	rte_iova_t hdr_iova;
386 	struct rte_mbuf *first_m_seg = m_seg;
387 	unsigned int pkt_start = *added;
388 	unsigned int needed_desc;
389 	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
390 	bool eop;
391 
392 	/*
393 	 * Preliminary estimation of required DMA descriptors, including an
394 	 * extra descriptor for the TSO header that is needed when the header
395 	 * is separated from the payload within one segment. It does not include
396 	 * extra descriptors that may appear when a big segment is split across
397 	 * several descriptors.
398 	 */
399 	needed_desc = m_seg->nb_segs +
400 			(unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM +
401 			(unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM;
402 
403 	if (needed_desc > *dma_desc_space &&
404 	    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
405 			       dma_desc_space, reap_done)) {
406 		/*
407 		 * If a future Tx reap may increase available DMA descriptor
408 		 * space, do not try to send the packet.
409 		 */
410 		if (txq->completed != pkt_start)
411 			return ENOSPC;
412 		/*
413 		 * Do not allow sending a packet if the maximum DMA
414 		 * descriptor space is not sufficient to hold TSO
415 		 * descriptors, header descriptor and at least 1
416 		 * segment descriptor.
417 		 */
418 		if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM +
419 				SFC_EF10_TSO_HDR_DESCS_NUM + 1)
420 			return EMSGSIZE;
421 	}
422 
423 	/* Check if the header is not fragmented */
424 	if (rte_pktmbuf_data_len(m_seg) >= header_len) {
425 		hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
426 		hdr_iova = rte_mbuf_data_iova(m_seg);
427 		if (rte_pktmbuf_data_len(m_seg) == header_len) {
428 			/* Cannot send a packet that consists only of header */
429 			if (unlikely(m_seg->next == NULL))
430 				return EMSGSIZE;
431 			/*
432 			 * Associate header mbuf with header descriptor
433 			 * which is located after TSO descriptors.
434 			 */
435 			txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) &
436 				     txq->ptr_mask].mbuf = m_seg;
437 			m_seg = m_seg->next;
438 			in_off = 0;
439 
440 			/*
441 			 * If there is no payload offset (payload starts at the
442 			 * beginning of a segment) then an extra descriptor for
443 			 * separated header is not needed.
444 			 */
445 			needed_desc--;
446 		} else {
447 			in_off = header_len;
448 		}
449 	} else {
450 		unsigned int copied_segs;
451 		unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
452 				SFC_TSOH_STD_LEN;
453 
454 		/*
455 		 * Discard a packet if header linearization is needed but
456 		 * the header is too big.
457 		 * Duplicate the Tx prepare check here to avoid memory
458 		 * corruption if Tx prepare is skipped.
459 		 */
460 		if (unlikely(header_len > SFC_TSOH_STD_LEN))
461 			return EMSGSIZE;
462 
463 		hdr_addr = txq->tsoh + hdr_addr_off;
464 		hdr_iova = txq->tsoh_iova + hdr_addr_off;
465 		copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
466 						     &m_seg, &in_off);
467 
468 		/* Cannot send a packet that consists only of header */
469 		if (unlikely(m_seg == NULL))
470 			return EMSGSIZE;
471 
472 		m_seg_to_free_up_to = m_seg;
473 		/*
474 		 * Reduce the number of needed descriptors by the number of
475 		 * segments that entirely consist of header data.
476 		 */
477 		needed_desc -= copied_segs;
478 
479 		/* Extra descriptor for separated header is not needed */
480 		if (in_off == 0)
481 			needed_desc--;
482 	}
483 
484 	/*
485 	 * 8000-series EF10 hardware requires that innermost IP length
486 	 * be greater than or equal to the value which each segment is
487 	 * supposed to have; otherwise, TCP checksum will be incorrect.
488 	 *
489 	 * The same concern applies to outer UDP datagram length field.
490 	 */
491 	switch (m_seg->ol_flags & PKT_TX_TUNNEL_MASK) {
492 	case PKT_TX_TUNNEL_VXLAN:
493 		/* FALLTHROUGH */
494 	case PKT_TX_TUNNEL_GENEVE:
495 		sfc_tso_outer_udp_fix_len(first_m_seg, hdr_addr);
496 		break;
497 	default:
498 		break;
499 	}
500 
501 	sfc_tso_innermost_ip_fix_len(first_m_seg, hdr_addr, iph_off);
502 
503 	/*
504 	 * Tx prepare has debug-only checks that offload flags are correctly
505 	 * filled in the TSO mbuf. Use zero IPID if there is no IPv4 flag.
506 	 * If the packet is still IPv4, HW will simply start from zero IPID.
507 	 */
508 	if (first_m_seg->ol_flags & PKT_TX_IPV4)
509 		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
510 
511 	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
512 		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
513 						first_m_seg->outer_l2_len);
514 
515 	th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
516 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
517 	sent_seq = rte_be_to_cpu_32(sent_seq);
518 
519 	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
520 			sent_seq, first_m_seg->tso_segsz);
521 	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
522 
523 	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
524 			&txq->txq_hw_ring[(*added) & txq->ptr_mask]);
525 	(*added)++;
526 
527 	do {
528 		rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
529 		unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
530 		unsigned int id;
531 
532 		next_frag += in_off;
533 		seg_len -= in_off;
534 		in_off = 0;
535 
536 		do {
537 			rte_iova_t frag_addr = next_frag;
538 			size_t frag_len;
539 
540 			frag_len = RTE_MIN(seg_len,
541 					   SFC_EF10_TX_DMA_DESC_LEN_MAX);
542 
543 			next_frag += frag_len;
544 			seg_len -= frag_len;
545 
546 			eop = (seg_len == 0 && m_seg->next == NULL);
547 
548 			id = (*added) & txq->ptr_mask;
549 			(*added)++;
550 
551 			/*
552 			 * Initially we assume that one DMA descriptor is needed
553 			 * for every segment. When the segment is split across
554 			 * several DMA descriptors, increase the estimation.
555 			 */
556 			needed_desc += (seg_len != 0);
557 
558 			/*
559 			 * Handle the case when no more descriptors can be
560 			 * added, but not all segments are processed yet.
561 			 */
562 			if (*added - pkt_start == *dma_desc_space &&
563 			    !eop &&
564 			    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
565 						dma_desc_space, reap_done)) {
566 				struct rte_mbuf *m;
567 				struct rte_mbuf *m_next;
568 
569 				if (txq->completed != pkt_start) {
570 					unsigned int i;
571 
572 					/*
573 					 * Reset mbuf associations with added
574 					 * descriptors.
575 					 */
576 					for (i = pkt_start; i != *added; i++) {
577 						id = i & txq->ptr_mask;
578 						txq->sw_ring[id].mbuf = NULL;
579 					}
580 					return ENOSPC;
581 				}
582 
583 				/* Free the segments that cannot be sent */
584 				for (m = m_seg->next; m != NULL; m = m_next) {
585 					m_next = m->next;
586 					rte_pktmbuf_free_seg(m);
587 				}
588 				eop = true;
589 				/* Ignore the rest of the segment */
590 				seg_len = 0;
591 			}
592 
593 			sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
594 					eop, &txq->txq_hw_ring[id]);
595 
596 		} while (seg_len != 0);
597 
598 		txq->sw_ring[id].mbuf = m_seg;
599 
600 		m_seg = m_seg->next;
601 	} while (!eop);
602 
603 	/*
604 	 * Free segments whose content was entirely copied to the TSO header
605 	 * memory space of the Tx queue
606 	 */
607 	for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
608 		struct rte_mbuf *seg_to_free = m_seg;
609 
610 		m_seg = m_seg->next;
611 		rte_pktmbuf_free_seg(seg_to_free);
612 	}
613 
614 	return 0;
615 }
616 
617 static uint16_t
618 sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
619 {
620 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
621 	unsigned int added;
622 	unsigned int dma_desc_space;
623 	bool reap_done;
624 	struct rte_mbuf **pktp;
625 	struct rte_mbuf **pktp_end;
626 
627 	if (unlikely(txq->flags &
628 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
629 		return 0;
630 
631 	added = txq->added;
632 	dma_desc_space = txq->max_fill_level - (added - txq->completed);
633 
634 	reap_done = (dma_desc_space < txq->free_thresh);
635 	if (reap_done) {
636 		sfc_ef10_tx_reap(txq);
637 		dma_desc_space = txq->max_fill_level - (added - txq->completed);
638 	}
639 
640 	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
641 	     pktp != pktp_end;
642 	     ++pktp) {
643 		struct rte_mbuf *m_seg = *pktp;
644 		unsigned int pkt_start = added;
645 		uint32_t pkt_len;
646 
647 		if (likely(pktp + 1 != pktp_end))
648 			rte_mbuf_prefetch_part1(pktp[1]);
649 
650 		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
651 			int rc;
652 
653 			rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
654 					&dma_desc_space, &reap_done);
655 			if (rc != 0) {
656 				added = pkt_start;
657 
658 				/* Packet can be sent in following xmit calls */
659 				if (likely(rc == ENOSPC))
660 					break;
661 
662 				/*
663 				 * The packet cannot be sent; report it to
664 				 * the caller as sent, but actually drop it
665 				 * and continue with the next packet.
666 				 */
667 				rte_pktmbuf_free(*pktp);
668 				continue;
669 			}
670 
671 			goto dma_desc_space_update;
672 		}
673 
674 		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
675 			if (reap_done)
676 				break;
677 
678 			/* Push already prepared descriptors before polling */
679 			if (added != txq->added) {
680 				sfc_ef10_tx_qpush(txq, added, txq->added);
681 				txq->added = added;
682 			}
683 
684 			sfc_ef10_tx_reap(txq);
685 			reap_done = true;
686 			dma_desc_space = txq->max_fill_level -
687 				(added - txq->completed);
688 			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
689 				break;
690 		}
691 
692 		pkt_len = m_seg->pkt_len;
693 		do {
694 			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
695 			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
696 			unsigned int id = added & txq->ptr_mask;
697 
698 			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
699 
700 			pkt_len -= seg_len;
701 
702 			sfc_ef10_tx_qdesc_dma_create(seg_addr,
703 				seg_len, (pkt_len == 0),
704 				&txq->txq_hw_ring[id]);
705 
706 			/*
707 			 * rte_pktmbuf_free() is commonly used in DPDK for
708 			 * recycling packets - the function checks every
709 			 * segment's reference counter and returns the
710 			 * buffer to its pool whenever possible;
711 			 * nevertheless, freeing mbuf segments one by one
712 			 * may entail some performance decline;
713 			 * for this reason, sfc_ef10_tx_reap() does the same
714 			 * job on its own and frees buffers in bulk (all mbufs
715 			 * within a bulk belong to the same pool);
716 			 * hence, individual segment pointers must be
717 			 * associated with the corresponding SW descriptors
718 			 * independently so that only one loop is sufficient
719 			 * on reap to inspect all the buffers
720 			 */
721 			txq->sw_ring[id].mbuf = m_seg;
722 
723 			++added;
724 
725 		} while ((m_seg = m_seg->next) != 0);
726 
727 dma_desc_space_update:
728 		dma_desc_space -= (added - pkt_start);
729 	}
730 
731 	if (likely(added != txq->added)) {
732 		sfc_ef10_tx_qpush(txq, added, txq->added);
733 		txq->added = added;
734 	}
735 
736 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
737 	if (!reap_done)
738 		sfc_ef10_tx_reap(txq);
739 #endif
740 
741 	return pktp - &tx_pkts[0];
742 }
743 
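/*
 * Simplified reap used by the ef10_simple datapath: fast-free
 * guarantees that mbufs have reference count 1 and belong to the same
 * mempool, so they may be returned in bulk without per-segment
 * prefree checks.
 */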
744 static void
745 sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
746 {
747 	const unsigned int old_read_ptr = txq->evq_read_ptr;
748 	const unsigned int ptr_mask = txq->ptr_mask;
749 	unsigned int completed = txq->completed;
750 	unsigned int pending = completed;
751 
752 	pending += sfc_ef10_tx_process_events(txq);
753 
754 	if (pending != completed) {
755 		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
756 		unsigned int nb = 0;
757 
758 		do {
759 			struct sfc_ef10_tx_sw_desc *txd;
760 
761 			txd = &txq->sw_ring[completed & ptr_mask];
762 
763 			if (nb == RTE_DIM(bulk)) {
764 				rte_mempool_put_bulk(bulk[0]->pool,
765 						     (void *)bulk, nb);
766 				nb = 0;
767 			}
768 
769 			bulk[nb++] = txd->mbuf;
770 		} while (++completed != pending);
771 
772 		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
773 
774 		txq->completed = completed;
775 	}
776 
777 	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
778 			   txq->evq_read_ptr);
779 }
780 
781 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
782 static uint16_t
783 sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
784 			     struct rte_mbuf **tx_pkts,
785 			     uint16_t nb_pkts)
786 {
787 	uint16_t i;
788 
789 	for (i = 0; i < nb_pkts; i++) {
790 		struct rte_mbuf *m = tx_pkts[i];
791 		int ret;
792 
793 		ret = rte_validate_tx_offload(m);
794 		if (unlikely(ret != 0)) {
795 			/*
796 			 * A negative error code is returned by
797 			 * rte_validate_tx_offload(), but positive error codes
798 			 * are used inside the net/sfc PMD.
799 			 */
800 			SFC_ASSERT(ret < 0);
801 			rte_errno = -ret;
802 			break;
803 		}
804 
805 		/* ef10_simple does not support TSO and VLAN insertion */
806 		if (unlikely(m->ol_flags &
807 			     (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
808 			rte_errno = ENOTSUP;
809 			break;
810 		}
811 
812 		/* ef10_simple does not support scattered packets */
813 		if (unlikely(m->nb_segs != 1)) {
814 			rte_errno = ENOTSUP;
815 			break;
816 		}
817 
818 		/*
819 		 * ef10_simple requires fast-free which ignores reference
820 		 * counters
821 		 */
822 		if (unlikely(rte_mbuf_refcnt_read(m) != 1)) {
823 			rte_errno = ENOTSUP;
824 			break;
825 		}
826 
827 		/* ef10_simple requires single pool for all packets */
828 		if (unlikely(m->pool != tx_pkts[0]->pool)) {
829 			rte_errno = ENOTSUP;
830 			break;
831 		}
832 	}
833 
834 	return i;
835 }
836 #endif
837 
838 static uint16_t
839 sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
840 			  uint16_t nb_pkts)
841 {
842 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
843 	unsigned int ptr_mask;
844 	unsigned int added;
845 	unsigned int dma_desc_space;
846 	bool reap_done;
847 	struct rte_mbuf **pktp;
848 	struct rte_mbuf **pktp_end;
849 
850 	if (unlikely(txq->flags &
851 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
852 		return 0;
853 
854 	ptr_mask = txq->ptr_mask;
855 	added = txq->added;
856 	dma_desc_space = txq->max_fill_level - (added - txq->completed);
857 
858 	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
859 	if (reap_done) {
860 		sfc_ef10_simple_tx_reap(txq);
861 		dma_desc_space = txq->max_fill_level - (added - txq->completed);
862 	}
863 
864 	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
865 	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
866 		struct rte_mbuf *pkt = *pktp;
867 		unsigned int id = added & ptr_mask;
868 
869 		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
870 			   SFC_EF10_TX_DMA_DESC_LEN_MAX);
871 
872 		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
873 					     rte_pktmbuf_data_len(pkt),
874 					     true, &txq->txq_hw_ring[id]);
875 
876 		txq->sw_ring[id].mbuf = pkt;
877 
878 		++added;
879 	}
880 
881 	if (likely(added != txq->added)) {
882 		sfc_ef10_tx_qpush(txq, added, txq->added);
883 		txq->added = added;
884 	}
885 
886 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
887 	if (!reap_done)
888 		sfc_ef10_simple_tx_reap(txq);
889 #endif
890 
891 	return pktp - &tx_pkts[0];
892 }
893 
894 static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
895 static void
896 sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
897 {
898 	/*
899 	 * The number of descriptors just defines the maximum number of pushed
900 	 * descriptors (the fill level).
901 	 */
902 	dev_info->tx_desc_lim.nb_min = 1;
903 	dev_info->tx_desc_lim.nb_align = 1;
904 }
905 
906 static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
907 static int
908 sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
909 			   struct sfc_dp_tx_hw_limits *limits,
910 			   unsigned int *txq_entries,
911 			   unsigned int *evq_entries,
912 			   unsigned int *txq_max_fill_level)
913 {
914 	/*
915 	 * rte_ethdev API guarantees that the number meets min, max and
916 	 * alignment requirements.
917 	 */
918 	if (nb_tx_desc <= limits->txq_min_entries)
919 		*txq_entries = limits->txq_min_entries;
920 	else
921 		*txq_entries = rte_align32pow2(nb_tx_desc);
922 
923 	*evq_entries = *txq_entries;
924 
925 	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
926 				      SFC_EF10_TXQ_LIMIT(*evq_entries));
927 	return 0;
928 }
929 
930 static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
931 static int
932 sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
933 		    const struct rte_pci_addr *pci_addr, int socket_id,
934 		    const struct sfc_dp_tx_qcreate_info *info,
935 		    struct sfc_dp_txq **dp_txqp)
936 {
937 	struct sfc_ef10_txq *txq;
938 	int rc;
939 
940 	rc = EINVAL;
941 	if (info->txq_entries != info->evq_entries)
942 		goto fail_bad_args;
943 
944 	rc = ENOMEM;
945 	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
946 				 RTE_CACHE_LINE_SIZE, socket_id);
947 	if (txq == NULL)
948 		goto fail_txq_alloc;
949 
950 	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
951 
952 	rc = ENOMEM;
953 	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
954 					 info->txq_entries,
955 					 sizeof(*txq->sw_ring),
956 					 RTE_CACHE_LINE_SIZE, socket_id);
957 	if (txq->sw_ring == NULL)
958 		goto fail_sw_ring_alloc;
959 
960 	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
961 			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
962 			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
963 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
964 					      info->txq_entries,
965 					      SFC_TSOH_STD_LEN,
966 					      RTE_CACHE_LINE_SIZE,
967 					      socket_id);
968 		if (txq->tsoh == NULL)
969 			goto fail_tsoh_alloc;
970 
971 		txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
972 	}
973 
974 	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
975 	txq->ptr_mask = info->txq_entries - 1;
976 	txq->max_fill_level = info->max_fill_level;
977 	txq->free_thresh = info->free_thresh;
978 	txq->txq_hw_ring = info->txq_hw_ring;
979 	txq->doorbell = (volatile uint8_t *)info->mem_bar +
980 			ER_DZ_TX_DESC_UPD_REG_OFST +
981 			(info->hw_index << info->vi_window_shift);
982 	txq->evq_hw_ring = info->evq_hw_ring;
983 	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
984 
985 	sfc_ef10_tx_info(&txq->dp.dpq, "TxQ doorbell is %p", txq->doorbell);
986 
987 	*dp_txqp = &txq->dp;
988 	return 0;
989 
990 fail_tsoh_alloc:
991 	rte_free(txq->sw_ring);
992 
993 fail_sw_ring_alloc:
994 	rte_free(txq);
995 
996 fail_txq_alloc:
997 fail_bad_args:
998 	return rc;
999 }
1000 
1001 static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
1002 static void
1003 sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
1004 {
1005 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1006 
1007 	rte_free(txq->tsoh);
1008 	rte_free(txq->sw_ring);
1009 	rte_free(txq);
1010 }
1011 
1012 static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
1013 static int
1014 sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
1015 		   unsigned int txq_desc_index)
1016 {
1017 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1018 
1019 	txq->evq_read_ptr = evq_read_ptr;
1020 	txq->added = txq->completed = txq_desc_index;
1021 
1022 	txq->flags |= SFC_EF10_TXQ_STARTED;
1023 	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
1024 
1025 	return 0;
1026 }
1027 
1028 static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
1029 static void
1030 sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
1031 {
1032 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1033 
1034 	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
1035 
1036 	*evq_read_ptr = txq->evq_read_ptr;
1037 }
1038 
1039 static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
1040 static bool
1041 sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
1042 {
1043 	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1044 
1045 	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
1046 
1047 	/*
1048 	 * It is safe to ignore Tx event since we reap all mbufs on
1049 	 * queue purge anyway.
1050 	 */
1051 
1052 	return false;
1053 }
1054 
1055 static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
1056 static void
1057 sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
1058 {
1059 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1060 	unsigned int completed;
1061 
1062 	for (completed = txq->completed; completed != txq->added; ++completed) {
1063 		struct sfc_ef10_tx_sw_desc *txd;
1064 
1065 		txd = &txq->sw_ring[completed & txq->ptr_mask];
1066 		if (txd->mbuf != NULL) {
1067 			rte_pktmbuf_free_seg(txd->mbuf);
1068 			txd->mbuf = NULL;
1069 		}
1070 	}
1071 
1072 	txq->flags &= ~SFC_EF10_TXQ_STARTED;
1073 }
1074 
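/*
 * Peek at pending Tx completion events to count descriptors which are
 * still in flight. The event queue read pointer is restored afterwards
 * so that the events remain to be processed by the regular reap.
 */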
1075 static unsigned int
1076 sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
1077 {
1078 	const unsigned int curr_done = txq->completed - 1;
1079 	unsigned int anew_done = curr_done;
1080 	efx_qword_t tx_ev;
1081 	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
1082 
1083 	if (unlikely(txq->flags &
1084 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
1085 		return 0;
1086 
1087 	while (sfc_ef10_tx_get_event(txq, &tx_ev))
1088 		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
1089 
1090 	/*
1091 	 * The function does not process events, so restore the event queue
1092 	 * read pointer to its original position to allow the events that
1093 	 * were read to be processed later.
1094 	 */
1095 	txq->evq_read_ptr = evq_old_read_ptr;
1096 
1097 	return (anew_done - curr_done) & txq->ptr_mask;
1098 }
1099 
1100 static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
1101 static int
1102 sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
1103 			 uint16_t offset)
1104 {
1105 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
1106 	unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);
1107 
1108 	if (unlikely(offset > txq->ptr_mask))
1109 		return -EINVAL;
1110 
1111 	if (unlikely(offset >= txq->max_fill_level))
1112 		return RTE_ETH_TX_DESC_UNAVAIL;
1113 
1114 	if (unlikely(offset < npending))
1115 		return RTE_ETH_TX_DESC_FULL;
1116 
1117 	return RTE_ETH_TX_DESC_DONE;
1118 }
1119 
1120 struct sfc_dp_tx sfc_ef10_tx = {
1121 	.dp = {
1122 		.name		= SFC_KVARG_DATAPATH_EF10,
1123 		.type		= SFC_DP_TX,
1124 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
1125 	},
1126 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
1127 	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
1128 	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
1129 				  DEV_TX_OFFLOAD_UDP_CKSUM |
1130 				  DEV_TX_OFFLOAD_TCP_CKSUM |
1131 				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1132 				  DEV_TX_OFFLOAD_TCP_TSO |
1133 				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1134 				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
1135 	.get_dev_info		= sfc_ef10_get_dev_info,
1136 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
1137 	.qcreate		= sfc_ef10_tx_qcreate,
1138 	.qdestroy		= sfc_ef10_tx_qdestroy,
1139 	.qstart			= sfc_ef10_tx_qstart,
1140 	.qtx_ev			= sfc_ef10_tx_qtx_ev,
1141 	.qstop			= sfc_ef10_tx_qstop,
1142 	.qreap			= sfc_ef10_tx_qreap,
1143 	.qdesc_status		= sfc_ef10_tx_qdesc_status,
1144 	.pkt_prepare		= sfc_ef10_prepare_pkts,
1145 	.pkt_burst		= sfc_ef10_xmit_pkts,
1146 };
1147 
1148 struct sfc_dp_tx sfc_ef10_simple_tx = {
1149 	.dp = {
1150 		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
1151 		.type		= SFC_DP_TX,
1152 	},
1153 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
1154 	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
1155 	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
1156 				  DEV_TX_OFFLOAD_UDP_CKSUM |
1157 				  DEV_TX_OFFLOAD_TCP_CKSUM |
1158 				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
1159 	.get_dev_info		= sfc_ef10_get_dev_info,
1160 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
1161 	.qcreate		= sfc_ef10_tx_qcreate,
1162 	.qdestroy		= sfc_ef10_tx_qdestroy,
1163 	.qstart			= sfc_ef10_tx_qstart,
1164 	.qtx_ev			= sfc_ef10_tx_qtx_ev,
1165 	.qstop			= sfc_ef10_tx_qstop,
1166 	.qreap			= sfc_ef10_tx_qreap,
1167 	.qdesc_status		= sfc_ef10_tx_qdesc_status,
1168 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
1169 	.pkt_prepare		= sfc_ef10_simple_prepare_pkts,
1170 #endif
1171 	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
1172 };
1173