/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
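
/*
 * Illustrative note (assuming ESF_DZ_TX_KER_BYTE_CNT_WIDTH is 14, as on
 * EF10 hardware): the macro above evaluates to (1u << 14) - 1 = 16383
 * bytes of data per DMA descriptor.
 */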

/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)
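
/*
 * Worked example (illustrative only): for a 1024-entry Tx queue, assuming
 * a 64-byte cache line so that SFC_EF10_EV_PER_CACHE_LINE is 8, the limit
 * is 1024 - 1 - 7 - 1 - 1 = 1014 usable descriptors.
 */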

struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef10_txq {
	unsigned int			flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	unsigned int			evq_read_ptr;
	struct sfc_ef10_tx_sw_desc	*sw_ring;
	efx_qword_t			*txq_hw_ring;
	volatile void			*doorbell;
	efx_qword_t			*evq_hw_ring;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};

static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}

static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set during reap when a non-Tx event is
	 * encountered. Reap is never done twice per packet burst get, and
	 * the absence of the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}

static unsigned int
sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}
	return (anew_done - curr_done) & txq->ptr_mask;
}
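
/*
 * Example of the wrap-safe arithmetic above (illustrative values): with
 * ptr_mask 0x3ff and txq->completed equal to 10, curr_done is 9; if the
 * last Tx event reports descriptor index 15, the function returns
 * (15 - 9) & 0x3ff = 6 newly completed descriptors. The masking keeps
 * the count correct when the descriptor index wraps around the ring.
 */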

static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			if ((nb == RTE_DIM(bulk)) ||
			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

static void
sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}

static inline void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a TX descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TXQ,
	 * so that it can be used if the hardware decides not to use the
	 * pushed descriptor.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations (i.e. Tx and
	 * event descriptor updates) that precede the call are visible to
	 * the NIC before the STORE operations that follow it (i.e. the
	 * doorbell write).
	 */
	rte_io_wmb();
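
	/*
	 * A single 16-byte MMIO store delivers both the new write pointer
	 * and the pushed descriptor to the NIC at once.
	 */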
	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
}

static unsigned int
sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs_per_seg;
	unsigned int extra_descs_per_pkt;

	/*
	 * VLAN offload is not supported yet, so no extra descriptors
	 * are required for the VLAN option descriptor.
	 */

/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * Each segment is already counted once below.  So, calculate
	 * how many extra DMA descriptors may be required per segment in
	 * the worst case because of the maximum DMA descriptor length
	 * limit.  If the maximum segment length is less than or equal to
	 * the maximum DMA descriptor length, no extra DMA descriptors
	 * are required.
	 */
	extra_descs_per_seg =
		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;

/** Maximum length of the packet */
#define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);

	/*
	 * One more limit on the number of extra DMA descriptors comes
	 * from slicing the entire packet because of the DMA descriptor
	 * length limit, taking into account that at least one segment
	 * is already counted below (hence the round-down division of
	 * the maximum packet length minus one).
	 * TSO is not supported yet, so the packet length is limited by
	 * the maximum PDU size.
	 */
	extra_descs_per_pkt =
		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
			 SFC_MBUF_PKT_LEN_MAX) - 1) /
		SFC_EF10_TX_DMA_DESC_LEN_MAX;

	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
				    extra_descs_per_pkt);
}
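
/*
 * Illustrative arithmetic (assuming a 14-bit DMA descriptor byte count
 * field, i.e. SFC_EF10_TX_DMA_DESC_LEN_MAX == 16383): a maximum-size
 * 65535-byte segment needs (65535 - 1) / 16383 = 4 extra descriptors.
 * Since EFX_MAC_PDU_MAX is well below the DMA descriptor length limit
 * while TSO is unsupported, extra_descs_per_pkt evaluates to 0 and the
 * function effectively returns m->nb_segs.
 */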

static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
			unsigned int id = added & txq->ptr_mask;

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK to
			 * recycle packets: the function checks every
			 * segment's reference counter and returns the
			 * buffer to its pool whenever possible.
			 * Nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline.
			 * Therefore, sfc_ef10_tx_reap() does the same job
			 * on its own and frees buffers in bulk (all mbufs
			 * within a bulk belong to the same pool).
			 * From this perspective, individual segment
			 * pointers must be associated with the
			 * corresponding SW descriptors independently so
			 * that a single loop on reap is sufficient to
			 * inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;

			++added;

		} while ((m_seg = m_seg->next) != NULL);

		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static void
sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (nb == RTE_DIM(bulk)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = txd->mbuf;
		} while (++completed != pending);

		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
	if (reap_done) {
		sfc_ef10_simple_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
		struct rte_mbuf *pkt = *pktp;
		unsigned int id = added & ptr_mask;

		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);

		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);

		txq->sw_ring[id].mbuf = pkt;

		++added;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_simple_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
static void
sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
			   unsigned int *txq_entries,
			   unsigned int *evq_entries,
			   unsigned int *txq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max
	 * and alignment requirements.
	 */
	if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
		*txq_entries = EFX_TXQ_MINNDESCS;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF10_TXQ_LIMIT(*evq_entries));
	return 0;
}
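
/*
 * Worked example (illustrative only): a request for 1000 descriptors is
 * rounded up to a 1024-entry hardware Tx ring with a matching 1024-entry
 * event queue; assuming SFC_EF10_EV_PER_CACHE_LINE is 8,
 * SFC_EF10_TXQ_LIMIT(1024) is 1014, so the maximum fill level is
 * RTE_MIN(1000, 1014) = 1000.
 */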

static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
	txq->evq_hw_ring = info->evq_hw_ring;

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef10_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}

static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
			 __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}

struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_SEG |
				  SFC_DP_TX_FEAT_MULTI_POOL |
				  SFC_DP_TX_FEAT_REFCNT |
				  SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_xmit_pkts,
};

struct sfc_dp_tx sfc_ef10_simple_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
		.type		= SFC_DP_TX,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
};