xref: /dpdk/drivers/net/sfc/sfc_ef10_tx.c (revision 23f3dac43237d5de18f9544c6e3f932c70c39e27)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
28b00f426SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
58b00f426SAndrew Rybchenko  *
68b00f426SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
78b00f426SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
88b00f426SAndrew Rybchenko  */
98b00f426SAndrew Rybchenko 
108b00f426SAndrew Rybchenko #include <stdbool.h>
118b00f426SAndrew Rybchenko 
128b00f426SAndrew Rybchenko #include <rte_mbuf.h>
138b00f426SAndrew Rybchenko #include <rte_io.h>
146bc985e4SIgor Romanov #include <rte_ip.h>
156bc985e4SIgor Romanov #include <rte_tcp.h>
168b00f426SAndrew Rybchenko 
178b00f426SAndrew Rybchenko #include "efx.h"
188b00f426SAndrew Rybchenko #include "efx_types.h"
198b00f426SAndrew Rybchenko #include "efx_regs.h"
208b00f426SAndrew Rybchenko #include "efx_regs_ef10.h"
218b00f426SAndrew Rybchenko 
221b0236e2SAndrew Rybchenko #include "sfc_debug.h"
238b00f426SAndrew Rybchenko #include "sfc_dp_tx.h"
248b00f426SAndrew Rybchenko #include "sfc_tweak.h"
258b00f426SAndrew Rybchenko #include "sfc_kvargs.h"
268b00f426SAndrew Rybchenko #include "sfc_ef10.h"
276bc985e4SIgor Romanov #include "sfc_tso.h"
288b00f426SAndrew Rybchenko 
298b00f426SAndrew Rybchenko #define sfc_ef10_tx_err(dpq, ...) \
308b00f426SAndrew Rybchenko 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
318b00f426SAndrew Rybchenko 
32e7fbf6f5SAndrew Rybchenko #define sfc_ef10_tx_info(dpq, ...) \
33e7fbf6f5SAndrew Rybchenko 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)
34e7fbf6f5SAndrew Rybchenko 
358b00f426SAndrew Rybchenko /** Maximum length of the DMA descriptor data */
368b00f426SAndrew Rybchenko #define SFC_EF10_TX_DMA_DESC_LEN_MAX \
378b00f426SAndrew Rybchenko 	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
388b00f426SAndrew Rybchenko 
398b00f426SAndrew Rybchenko /**
408b00f426SAndrew Rybchenko  * Maximum number of descriptors/buffers in the Tx ring.
418b00f426SAndrew Rybchenko  * It should guarantee that the corresponding event queue never overfills.
428b00f426SAndrew Rybchenko  * The EF10 native datapath uses an event queue of the same size as the Tx queue.
438b00f426SAndrew Rybchenko  * The maximum number of events on the datapath can be estimated as the number
448b00f426SAndrew Rybchenko  * of Tx queue entries (one event per Tx buffer in the worst case) plus
458b00f426SAndrew Rybchenko  * Tx error and flush events.
468b00f426SAndrew Rybchenko  */
478b00f426SAndrew Rybchenko #define SFC_EF10_TXQ_LIMIT(_ndesc) \
488b00f426SAndrew Rybchenko 	((_ndesc) - 1 /* head must not step on tail */ - \
498b00f426SAndrew Rybchenko 	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
508b00f426SAndrew Rybchenko 	 1 /* Tx error */ - 1 /* flush */)
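/*
 * For example, assuming a 64-byte cache line and 8-byte events
 * (i.e. SFC_EF10_EV_PER_CACHE_LINE == 8), a 1024-entry ring gives
 * SFC_EF10_TXQ_LIMIT(1024) == 1024 - 1 - 7 - 2 == 1014.
 */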
518b00f426SAndrew Rybchenko 
528b00f426SAndrew Rybchenko struct sfc_ef10_tx_sw_desc {
538b00f426SAndrew Rybchenko 	struct rte_mbuf			*mbuf;
548b00f426SAndrew Rybchenko };
558b00f426SAndrew Rybchenko 
568b00f426SAndrew Rybchenko struct sfc_ef10_txq {
578b00f426SAndrew Rybchenko 	unsigned int			flags;
588b00f426SAndrew Rybchenko #define SFC_EF10_TXQ_STARTED		0x1
598b00f426SAndrew Rybchenko #define SFC_EF10_TXQ_NOT_RUNNING	0x2
608b00f426SAndrew Rybchenko #define SFC_EF10_TXQ_EXCEPTION		0x4
618b00f426SAndrew Rybchenko 
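	/*
	 * 'added' and 'completed' below are free-running counters; ring
	 * positions are derived by masking them with 'ptr_mask'
	 * (the ring size minus one).
	 */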
628b00f426SAndrew Rybchenko 	unsigned int			ptr_mask;
638b00f426SAndrew Rybchenko 	unsigned int			added;
648b00f426SAndrew Rybchenko 	unsigned int			completed;
65eaab5d96SAndrew Rybchenko 	unsigned int			max_fill_level;
668b00f426SAndrew Rybchenko 	unsigned int			free_thresh;
678b00f426SAndrew Rybchenko 	unsigned int			evq_read_ptr;
688b00f426SAndrew Rybchenko 	struct sfc_ef10_tx_sw_desc	*sw_ring;
698b00f426SAndrew Rybchenko 	efx_qword_t			*txq_hw_ring;
708b00f426SAndrew Rybchenko 	volatile void			*doorbell;
718b00f426SAndrew Rybchenko 	efx_qword_t			*evq_hw_ring;
726bc985e4SIgor Romanov 	uint8_t				*tsoh;
736bc985e4SIgor Romanov 	rte_iova_t			tsoh_iova;
746bc985e4SIgor Romanov 	uint16_t			tso_tcp_header_offset_limit;
758b00f426SAndrew Rybchenko 
768b00f426SAndrew Rybchenko 	/* Datapath transmit queue anchor */
778b00f426SAndrew Rybchenko 	struct sfc_dp_txq		dp;
788b00f426SAndrew Rybchenko };
798b00f426SAndrew Rybchenko 
808b00f426SAndrew Rybchenko static inline struct sfc_ef10_txq *
818b00f426SAndrew Rybchenko sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
828b00f426SAndrew Rybchenko {
838b00f426SAndrew Rybchenko 	return container_of(dp_txq, struct sfc_ef10_txq, dp);
848b00f426SAndrew Rybchenko }
858b00f426SAndrew Rybchenko 
868b00f426SAndrew Rybchenko static bool
878b00f426SAndrew Rybchenko sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
888b00f426SAndrew Rybchenko {
898b00f426SAndrew Rybchenko 	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
908b00f426SAndrew Rybchenko 
918b00f426SAndrew Rybchenko 	/*
928b00f426SAndrew Rybchenko 	 * The exception flag is set when reap is done.
938b00f426SAndrew Rybchenko 	 * Reap is never done twice per transmit burst and absence of
948b00f426SAndrew Rybchenko 	 * the flag is checked on burst entry.
958b00f426SAndrew Rybchenko 	 */
968b00f426SAndrew Rybchenko 	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
978b00f426SAndrew Rybchenko 
988b00f426SAndrew Rybchenko 	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
998b00f426SAndrew Rybchenko 
1008b00f426SAndrew Rybchenko 	if (!sfc_ef10_ev_present(*tx_ev))
1018b00f426SAndrew Rybchenko 		return false;
1028b00f426SAndrew Rybchenko 
1038b00f426SAndrew Rybchenko 	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
1048b00f426SAndrew Rybchenko 		     FSE_AZ_EV_CODE_TX_EV)) {
1058b00f426SAndrew Rybchenko 		/*
1068b00f426SAndrew Rybchenko 		 * Do not move read_ptr to keep the event for exception
1078b00f426SAndrew Rybchenko 		 * handling by the control path.
1088b00f426SAndrew Rybchenko 		 */
1098b00f426SAndrew Rybchenko 		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
1108b00f426SAndrew Rybchenko 		sfc_ef10_tx_err(&txq->dp.dpq,
1118b00f426SAndrew Rybchenko 				"TxQ exception at EvQ read ptr %#x",
1128b00f426SAndrew Rybchenko 				txq->evq_read_ptr);
1138b00f426SAndrew Rybchenko 		return false;
1148b00f426SAndrew Rybchenko 	}
1158b00f426SAndrew Rybchenko 
1168b00f426SAndrew Rybchenko 	txq->evq_read_ptr++;
1178b00f426SAndrew Rybchenko 	return true;
1188b00f426SAndrew Rybchenko }
1198b00f426SAndrew Rybchenko 
12021233dd6SAndrew Rybchenko static unsigned int
12121233dd6SAndrew Rybchenko sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
1228b00f426SAndrew Rybchenko {
12321233dd6SAndrew Rybchenko 	const unsigned int curr_done = txq->completed - 1;
1248b00f426SAndrew Rybchenko 	unsigned int anew_done = curr_done;
1258b00f426SAndrew Rybchenko 	efx_qword_t tx_ev;
1268b00f426SAndrew Rybchenko 
1278b00f426SAndrew Rybchenko 	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
1288b00f426SAndrew Rybchenko 		/*
1298b00f426SAndrew Rybchenko 		 * DROP_EVENT is internal to the NIC; software should
1308b00f426SAndrew Rybchenko 		 * never see it and, therefore, may ignore it.
1318b00f426SAndrew Rybchenko 		 */
1328b00f426SAndrew Rybchenko 
1338b00f426SAndrew Rybchenko 		/* Update the latest done descriptor */
1348b00f426SAndrew Rybchenko 		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
1358b00f426SAndrew Rybchenko 	}
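	/*
	 * 'anew_done' is a ring index reported by the hardware while
	 * 'curr_done' is derived from the free-running 'completed' counter;
	 * masking the difference yields the number of descriptors completed
	 * since the last reap.
	 */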
13621233dd6SAndrew Rybchenko 	return (anew_done - curr_done) & txq->ptr_mask;
13721233dd6SAndrew Rybchenko }
13821233dd6SAndrew Rybchenko 
13921233dd6SAndrew Rybchenko static void
14021233dd6SAndrew Rybchenko sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
14121233dd6SAndrew Rybchenko {
14221233dd6SAndrew Rybchenko 	const unsigned int old_read_ptr = txq->evq_read_ptr;
14321233dd6SAndrew Rybchenko 	const unsigned int ptr_mask = txq->ptr_mask;
14421233dd6SAndrew Rybchenko 	unsigned int completed = txq->completed;
14521233dd6SAndrew Rybchenko 	unsigned int pending = completed;
14621233dd6SAndrew Rybchenko 
14721233dd6SAndrew Rybchenko 	pending += sfc_ef10_tx_process_events(txq);
1488b00f426SAndrew Rybchenko 
1498b00f426SAndrew Rybchenko 	if (pending != completed) {
150d3219543SIvan Malov 		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
151d3219543SIvan Malov 		unsigned int nb = 0;
152d3219543SIvan Malov 
1538b00f426SAndrew Rybchenko 		do {
1548b00f426SAndrew Rybchenko 			struct sfc_ef10_tx_sw_desc *txd;
155d3219543SIvan Malov 			struct rte_mbuf *m;
1568b00f426SAndrew Rybchenko 
1578b00f426SAndrew Rybchenko 			txd = &txq->sw_ring[completed & ptr_mask];
158d3219543SIvan Malov 			if (txd->mbuf == NULL)
159d3219543SIvan Malov 				continue;
1608b00f426SAndrew Rybchenko 
161d3219543SIvan Malov 			m = rte_pktmbuf_prefree_seg(txd->mbuf);
1628b00f426SAndrew Rybchenko 			txd->mbuf = NULL;
163d3219543SIvan Malov 			if (m == NULL)
164d3219543SIvan Malov 				continue;
165d3219543SIvan Malov 
166d3219543SIvan Malov 			if ((nb == RTE_DIM(bulk)) ||
167d3219543SIvan Malov 			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
168d3219543SIvan Malov 				rte_mempool_put_bulk(bulk[0]->pool,
169d3219543SIvan Malov 						     (void *)bulk, nb);
170d3219543SIvan Malov 				nb = 0;
1718b00f426SAndrew Rybchenko 			}
172d3219543SIvan Malov 
173d3219543SIvan Malov 			bulk[nb++] = m;
1748b00f426SAndrew Rybchenko 		} while (++completed != pending);
1758b00f426SAndrew Rybchenko 
176d3219543SIvan Malov 		if (nb != 0)
177d3219543SIvan Malov 			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
178d3219543SIvan Malov 
1798b00f426SAndrew Rybchenko 		txq->completed = completed;
1808b00f426SAndrew Rybchenko 	}
1818b00f426SAndrew Rybchenko 
1828b00f426SAndrew Rybchenko 	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
1838b00f426SAndrew Rybchenko 			   txq->evq_read_ptr);
1848b00f426SAndrew Rybchenko }
1858b00f426SAndrew Rybchenko 
1868b00f426SAndrew Rybchenko static void
187df6e0a06SSantosh Shukla sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
1888b00f426SAndrew Rybchenko 			     efx_qword_t *edp)
1898b00f426SAndrew Rybchenko {
1908b00f426SAndrew Rybchenko 	EFX_POPULATE_QWORD_4(*edp,
1918b00f426SAndrew Rybchenko 			     ESF_DZ_TX_KER_TYPE, 0,
1928b00f426SAndrew Rybchenko 			     ESF_DZ_TX_KER_CONT, !eop,
1938b00f426SAndrew Rybchenko 			     ESF_DZ_TX_KER_BYTE_CNT, size,
1948b00f426SAndrew Rybchenko 			     ESF_DZ_TX_KER_BUF_ADDR, addr);
1958b00f426SAndrew Rybchenko }
1968b00f426SAndrew Rybchenko 
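/*
 * Write the pair of FATSOv2 TSO option descriptors: the first (FATSO2A)
 * carries the IPv4 ID and TCP sequence number, the second (FATSO2B) carries
 * the TCP MSS and the outer IPv4 ID used for tunnel encapsulation.
 */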
1976bc985e4SIgor Romanov static void
1986bc985e4SIgor Romanov sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
1996bc985e4SIgor Romanov 			      unsigned int added, uint16_t ipv4_id,
2006bc985e4SIgor Romanov 			      uint16_t outer_ipv4_id, uint32_t tcp_seq,
2016bc985e4SIgor Romanov 			      uint16_t tcp_mss)
2026bc985e4SIgor Romanov {
2036bc985e4SIgor Romanov 	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
2046bc985e4SIgor Romanov 			    ESF_DZ_TX_DESC_IS_OPT, 1,
2056bc985e4SIgor Romanov 			    ESF_DZ_TX_OPTION_TYPE,
2066bc985e4SIgor Romanov 			    ESE_DZ_TX_OPTION_DESC_TSO,
2076bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_OPTION_TYPE,
2086bc985e4SIgor Romanov 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2096bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2106bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
2116bc985e4SIgor Romanov 	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
2126bc985e4SIgor Romanov 			    ESF_DZ_TX_DESC_IS_OPT, 1,
2136bc985e4SIgor Romanov 			    ESF_DZ_TX_OPTION_TYPE,
2146bc985e4SIgor Romanov 			    ESE_DZ_TX_OPTION_DESC_TSO,
2156bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_OPTION_TYPE,
2166bc985e4SIgor Romanov 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2176bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
2186bc985e4SIgor Romanov 			    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
2196bc985e4SIgor Romanov }
2206bc985e4SIgor Romanov 
2218b00f426SAndrew Rybchenko static inline void
2228b00f426SAndrew Rybchenko sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
2238b00f426SAndrew Rybchenko 		  unsigned int pushed)
2248b00f426SAndrew Rybchenko {
2258b00f426SAndrew Rybchenko 	efx_qword_t desc;
2268b00f426SAndrew Rybchenko 	efx_oword_t oword;
2278b00f426SAndrew Rybchenko 
2288b00f426SAndrew Rybchenko 	/*
2298b00f426SAndrew Rybchenko 	 * This improves performance by pushing a TX descriptor at the same
2308b00f426SAndrew Rybchenko 	 * time as the doorbell. The descriptor must be added to the TXQ,
2318b00f426SAndrew Rybchenko 	 * so that can be used if the hardware decides not to use the pushed
2328b00f426SAndrew Rybchenko 	 * so that it can be used if the hardware decides not to use the pushed
2338b00f426SAndrew Rybchenko 	 */
2348b00f426SAndrew Rybchenko 	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
2358b00f426SAndrew Rybchenko 	EFX_POPULATE_OWORD_3(oword,
2368b00f426SAndrew Rybchenko 		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
2378b00f426SAndrew Rybchenko 		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
2388b00f426SAndrew Rybchenko 		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
2398b00f426SAndrew Rybchenko 
2408b00f426SAndrew Rybchenko 	/* DMA sync to device is not required */
2418b00f426SAndrew Rybchenko 
2428b00f426SAndrew Rybchenko 	/*
2438b00f426SAndrew Rybchenko 	 * rte_io_wmb() guarantees that the STORE operations
2448b00f426SAndrew Rybchenko 	 * (i.e. Tx and event descriptor updates) that precede
2458b00f426SAndrew Rybchenko 	 * the rte_io_wmb() call are visible to the NIC before the STORE
2468b00f426SAndrew Rybchenko 	 * operations that follow it (i.e. the doorbell write).
2478b00f426SAndrew Rybchenko 	 */
2488b00f426SAndrew Rybchenko 	rte_io_wmb();
2498b00f426SAndrew Rybchenko 
25031113761SAndrew Rybchenko 	*(volatile efsys_uint128_t *)txq->doorbell = oword.eo_u128[0];
25150448dd3SAndrew Rybchenko 	txq->dp.dpq.dbells++;
2528b00f426SAndrew Rybchenko }
2538b00f426SAndrew Rybchenko 
254b6986271SAndrew Rybchenko static unsigned int
255b6986271SAndrew Rybchenko sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
256b6986271SAndrew Rybchenko {
257b6986271SAndrew Rybchenko 	unsigned int extra_descs_per_seg;
258b6986271SAndrew Rybchenko 	unsigned int extra_descs_per_pkt;
259b6986271SAndrew Rybchenko 
260b6986271SAndrew Rybchenko 	/*
261b6986271SAndrew Rybchenko 	 * VLAN offload is not supported yet, so no extra descriptors
262b6986271SAndrew Rybchenko 	 * are required for VLAN option descriptor.
263b6986271SAndrew Rybchenko 	 */
264b6986271SAndrew Rybchenko 
265b6986271SAndrew Rybchenko /** Maximum length of the mbuf segment data */
266b6986271SAndrew Rybchenko #define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
267b6986271SAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
268b6986271SAndrew Rybchenko 
269b6986271SAndrew Rybchenko 	/*
270b6986271SAndrew Rybchenko 	 * Each segment is already counted once below.  So, calculate
271b6986271SAndrew Rybchenko 	 * how many extra DMA descriptors may be required per segment in
272b6986271SAndrew Rybchenko 	 * the worst case because of maximum DMA descriptor length limit.
273b6986271SAndrew Rybchenko 	 * If the maximum segment length is less than or equal to the maximum
274b6986271SAndrew Rybchenko 	 * DMA descriptor length, no extra DMA descriptors are required.
275b6986271SAndrew Rybchenko 	 */
276b6986271SAndrew Rybchenko 	extra_descs_per_seg =
277b6986271SAndrew Rybchenko 		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
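	/*
	 * For example, assuming a 14-bit byte count field (a 16383-byte
	 * descriptor limit), a full 65535-byte segment may need up to
	 * 1 + 4 DMA descriptors.
	 */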
278b6986271SAndrew Rybchenko 
279b6986271SAndrew Rybchenko /** Maximum length of the packet */
280b6986271SAndrew Rybchenko #define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
281b6986271SAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
282b6986271SAndrew Rybchenko 
283b6986271SAndrew Rybchenko 	/*
284b6986271SAndrew Rybchenko 	 * One more limit on the number of extra DMA descriptors comes from
285b6986271SAndrew Rybchenko 	 * slicing the entire packet because of the DMA descriptor length
286b6986271SAndrew Rybchenko 	 * limit, taking into account that at least one segment is already
287b6986271SAndrew Rybchenko 	 * counted below (hence the round-down division of the maximum
288b6986271SAndrew Rybchenko 	 * packet length minus one).
289b6986271SAndrew Rybchenko 	 * TSO is not handled here, so the packet length is limited by the
290b6986271SAndrew Rybchenko 	 * maximum PDU size.
291b6986271SAndrew Rybchenko 	 */
292b6986271SAndrew Rybchenko 	extra_descs_per_pkt =
293b6986271SAndrew Rybchenko 		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
294b6986271SAndrew Rybchenko 			 SFC_MBUF_PKT_LEN_MAX) - 1) /
295b6986271SAndrew Rybchenko 		SFC_EF10_TX_DMA_DESC_LEN_MAX;
296b6986271SAndrew Rybchenko 
297b6986271SAndrew Rybchenko 	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
298b6986271SAndrew Rybchenko 				    extra_descs_per_pkt);
299b6986271SAndrew Rybchenko }
300b6986271SAndrew Rybchenko 
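/*
 * Push the descriptors added so far, reap completed descriptors (done at most
 * once per transmit burst) and report whether the requested number of
 * descriptors now fits into the recalculated free DMA descriptor space.
 */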
3016bc985e4SIgor Romanov static bool
3026bc985e4SIgor Romanov sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
3036bc985e4SIgor Romanov 		  unsigned int needed_desc, unsigned int *dma_desc_space,
3046bc985e4SIgor Romanov 		  bool *reap_done)
3056bc985e4SIgor Romanov {
3066bc985e4SIgor Romanov 	if (*reap_done)
3076bc985e4SIgor Romanov 		return false;
3086bc985e4SIgor Romanov 
3096bc985e4SIgor Romanov 	if (added != txq->added) {
3106bc985e4SIgor Romanov 		sfc_ef10_tx_qpush(txq, added, txq->added);
3116bc985e4SIgor Romanov 		txq->added = added;
3126bc985e4SIgor Romanov 	}
3136bc985e4SIgor Romanov 
3146bc985e4SIgor Romanov 	sfc_ef10_tx_reap(txq);
3156bc985e4SIgor Romanov 	*reap_done = true;
3166bc985e4SIgor Romanov 
3176bc985e4SIgor Romanov 	/*
3186bc985e4SIgor Romanov 	 * Recalculate DMA descriptor space since Tx reap may change
3196bc985e4SIgor Romanov 	 * the number of completed descriptors
3206bc985e4SIgor Romanov 	 */
3216bc985e4SIgor Romanov 	*dma_desc_space = txq->max_fill_level -
3226bc985e4SIgor Romanov 		(added - txq->completed);
3236bc985e4SIgor Romanov 
3246bc985e4SIgor Romanov 	return (needed_desc <= *dma_desc_space);
3256bc985e4SIgor Romanov }
3266bc985e4SIgor Romanov 
32767330d32SIgor Romanov static uint16_t
328a3895ef3SIgor Romanov sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
32967330d32SIgor Romanov 		      uint16_t nb_pkts)
33067330d32SIgor Romanov {
331a3895ef3SIgor Romanov 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
33267330d32SIgor Romanov 	uint16_t i;
33367330d32SIgor Romanov 
33467330d32SIgor Romanov 	for (i = 0; i < nb_pkts; i++) {
33567330d32SIgor Romanov 		struct rte_mbuf *m = tx_pkts[i];
33667330d32SIgor Romanov 		int ret;
33767330d32SIgor Romanov 
33867330d32SIgor Romanov #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
33967330d32SIgor Romanov 		/*
34067330d32SIgor Romanov 		 * In the non-TSO case, check that the packet's segments do not
34167330d32SIgor Romanov 		 * exceed the size limit. Perform the check in debug mode only,
34267330d32SIgor Romanov 		 * since an MTU above 9K is not supported while the limit here is 16K-1.
34367330d32SIgor Romanov 		 */
344daa02b5cSOlivier Matz 		if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
34567330d32SIgor Romanov 			struct rte_mbuf *m_seg;
34667330d32SIgor Romanov 
34767330d32SIgor Romanov 			for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
34867330d32SIgor Romanov 				if (m_seg->data_len >
34967330d32SIgor Romanov 				    SFC_EF10_TX_DMA_DESC_LEN_MAX) {
35067330d32SIgor Romanov 					rte_errno = EINVAL;
35167330d32SIgor Romanov 					break;
35267330d32SIgor Romanov 				}
35367330d32SIgor Romanov 			}
35467330d32SIgor Romanov 		}
35567330d32SIgor Romanov #endif
35638109b5bSIvan Malov 		ret = sfc_dp_tx_prepare_pkt(m, 0, SFC_TSOH_STD_LEN,
357f7a66f93SIgor Romanov 				txq->tso_tcp_header_offset_limit,
358f7a66f93SIgor Romanov 				txq->max_fill_level,
359f7a66f93SIgor Romanov 				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
36067330d32SIgor Romanov 		if (unlikely(ret != 0)) {
36167330d32SIgor Romanov 			rte_errno = ret;
36267330d32SIgor Romanov 			break;
36367330d32SIgor Romanov 		}
36467330d32SIgor Romanov 	}
36567330d32SIgor Romanov 
36667330d32SIgor Romanov 	return i;
36767330d32SIgor Romanov }
36867330d32SIgor Romanov 
3696bc985e4SIgor Romanov static int
3706bc985e4SIgor Romanov sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
3716bc985e4SIgor Romanov 		      unsigned int *added, unsigned int *dma_desc_space,
3726bc985e4SIgor Romanov 		      bool *reap_done)
3736bc985e4SIgor Romanov {
374daa02b5cSOlivier Matz 	size_t iph_off = ((m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
375c1ce2ba2SIvan Malov 			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
376c1ce2ba2SIvan Malov 			 m_seg->l2_len;
377c1ce2ba2SIvan Malov 	size_t tcph_off = iph_off + m_seg->l3_len;
378c1ce2ba2SIvan Malov 	size_t header_len = tcph_off + m_seg->l4_len;
3796bc985e4SIgor Romanov 	/* Offset of the payload in the last segment that contains the header */
3806bc985e4SIgor Romanov 	size_t in_off = 0;
381f41b5156SOlivier Matz 	const struct rte_tcp_hdr *th;
38267330d32SIgor Romanov 	uint16_t packet_id = 0;
383c1ce2ba2SIvan Malov 	uint16_t outer_packet_id = 0;
3846bc985e4SIgor Romanov 	uint32_t sent_seq;
3856bc985e4SIgor Romanov 	uint8_t *hdr_addr;
3866bc985e4SIgor Romanov 	rte_iova_t hdr_iova;
3876bc985e4SIgor Romanov 	struct rte_mbuf *first_m_seg = m_seg;
3886bc985e4SIgor Romanov 	unsigned int pkt_start = *added;
3896bc985e4SIgor Romanov 	unsigned int needed_desc;
3906bc985e4SIgor Romanov 	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
3916bc985e4SIgor Romanov 	bool eop;
3926bc985e4SIgor Romanov 
3936bc985e4SIgor Romanov 	/*
3946bc985e4SIgor Romanov 	 * Preliminary estimate of the required DMA descriptors, including an
3956bc985e4SIgor Romanov 	 * extra descriptor for the TSO header which is needed when the header
3966bc985e4SIgor Romanov 	 * is separated from the payload within one segment. It does not include
3976bc985e4SIgor Romanov 	 * extra descriptors that may appear when a big segment is split across
3986bc985e4SIgor Romanov 	 * several descriptors.
3996bc985e4SIgor Romanov 	 */
4006bc985e4SIgor Romanov 	needed_desc = m_seg->nb_segs +
40111c3712fSIgor Romanov 			(unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM +
40211c3712fSIgor Romanov 			(unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM;
4036bc985e4SIgor Romanov 
4046bc985e4SIgor Romanov 	if (needed_desc > *dma_desc_space &&
4056bc985e4SIgor Romanov 	    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
4066bc985e4SIgor Romanov 			       dma_desc_space, reap_done)) {
4076bc985e4SIgor Romanov 		/*
4086bc985e4SIgor Romanov 		 * If a future Tx reap may increase available DMA descriptor
4096bc985e4SIgor Romanov 		 * space, do not try to send the packet.
4106bc985e4SIgor Romanov 		 */
4116bc985e4SIgor Romanov 		if (txq->completed != pkt_start)
4126bc985e4SIgor Romanov 			return ENOSPC;
4136bc985e4SIgor Romanov 		/*
4146bc985e4SIgor Romanov 		 * Do not allow sending the packet if the maximum DMA
4156bc985e4SIgor Romanov 		 * descriptor space is not sufficient to hold the TSO
4166bc985e4SIgor Romanov 		 * option descriptors, the header descriptor and at least
4176bc985e4SIgor Romanov 		 * one segment descriptor.
4186bc985e4SIgor Romanov 		 */
41911c3712fSIgor Romanov 		if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM +
42011c3712fSIgor Romanov 				SFC_EF10_TSO_HDR_DESCS_NUM + 1)
4216bc985e4SIgor Romanov 			return EMSGSIZE;
4226bc985e4SIgor Romanov 	}
4236bc985e4SIgor Romanov 
4246bc985e4SIgor Romanov 	/* Check if the header is not fragmented */
4256bc985e4SIgor Romanov 	if (rte_pktmbuf_data_len(m_seg) >= header_len) {
4266bc985e4SIgor Romanov 		hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
4276bc985e4SIgor Romanov 		hdr_iova = rte_mbuf_data_iova(m_seg);
4286bc985e4SIgor Romanov 		if (rte_pktmbuf_data_len(m_seg) == header_len) {
429c2303617SIgor Romanov 			/* Cannot send a packet that consists only of header */
430c2303617SIgor Romanov 			if (unlikely(m_seg->next == NULL))
431c2303617SIgor Romanov 				return EMSGSIZE;
4326bc985e4SIgor Romanov 			/*
4336bc985e4SIgor Romanov 			 * Associate header mbuf with header descriptor
4346bc985e4SIgor Romanov 			 * which is located after TSO descriptors.
4356bc985e4SIgor Romanov 			 */
43611c3712fSIgor Romanov 			txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) &
4376bc985e4SIgor Romanov 				     txq->ptr_mask].mbuf = m_seg;
4386bc985e4SIgor Romanov 			m_seg = m_seg->next;
4396bc985e4SIgor Romanov 			in_off = 0;
4406bc985e4SIgor Romanov 
4416bc985e4SIgor Romanov 			/*
4426bc985e4SIgor Romanov 			 * If there is no payload offset (payload starts at the
4436bc985e4SIgor Romanov 			 * beginning of a segment) then an extra descriptor for
4446bc985e4SIgor Romanov 			 * separated header is not needed.
4456bc985e4SIgor Romanov 			 */
4466bc985e4SIgor Romanov 			needed_desc--;
4476bc985e4SIgor Romanov 		} else {
4486bc985e4SIgor Romanov 			in_off = header_len;
4496bc985e4SIgor Romanov 		}
4506bc985e4SIgor Romanov 	} else {
4516bc985e4SIgor Romanov 		unsigned int copied_segs;
4526bc985e4SIgor Romanov 		unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
4536bc985e4SIgor Romanov 				SFC_TSOH_STD_LEN;
4546bc985e4SIgor Romanov 
4553985802eSIgor Romanov 		/*
4563985802eSIgor Romanov 		 * Discard a packet if header linearization is needed but
4573985802eSIgor Romanov 		 * the header is too big.
4589b70500cSIgor Romanov 		 * Duplicate the Tx prepare check here to avoid memory
4599b70500cSIgor Romanov 		 * corruption if Tx prepare is skipped.
4603985802eSIgor Romanov 		 */
4613985802eSIgor Romanov 		if (unlikely(header_len > SFC_TSOH_STD_LEN))
4623985802eSIgor Romanov 			return EMSGSIZE;
4633985802eSIgor Romanov 
4646bc985e4SIgor Romanov 		hdr_addr = txq->tsoh + hdr_addr_off;
4656bc985e4SIgor Romanov 		hdr_iova = txq->tsoh_iova + hdr_addr_off;
4666bc985e4SIgor Romanov 		copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
4676bc985e4SIgor Romanov 						     &m_seg, &in_off);
4686bc985e4SIgor Romanov 
469c2303617SIgor Romanov 		/* Cannot send a packet that consists only of header */
470c2303617SIgor Romanov 		if (unlikely(m_seg == NULL))
471c2303617SIgor Romanov 			return EMSGSIZE;
472c2303617SIgor Romanov 
4736bc985e4SIgor Romanov 		m_seg_to_free_up_to = m_seg;
4746bc985e4SIgor Romanov 		/*
4756bc985e4SIgor Romanov 		 * Reduce the number of needed descriptors by the number of
4766bc985e4SIgor Romanov 		 * segments that entirely consist of header data.
4776bc985e4SIgor Romanov 		 */
4786bc985e4SIgor Romanov 		needed_desc -= copied_segs;
4796bc985e4SIgor Romanov 
4806bc985e4SIgor Romanov 		/* Extra descriptor for separated header is not needed */
4816bc985e4SIgor Romanov 		if (in_off == 0)
4826bc985e4SIgor Romanov 			needed_desc--;
4836bc985e4SIgor Romanov 	}
4846bc985e4SIgor Romanov 
48567330d32SIgor Romanov 	/*
486be56d20fSIvan Malov 	 * 8000-series EF10 hardware requires that innermost IP length
487be56d20fSIvan Malov 	 * be greater than or equal to the value which each segment is
488be56d20fSIvan Malov 	 * supposed to have; otherwise, TCP checksum will be incorrect.
489be56d20fSIvan Malov 	 *
490be56d20fSIvan Malov 	 * The same concern applies to outer UDP datagram length field.
491be56d20fSIvan Malov 	 */
492daa02b5cSOlivier Matz 	switch (m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
493daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
494be56d20fSIvan Malov 		/* FALLTHROUGH */
495daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
496be56d20fSIvan Malov 		sfc_tso_outer_udp_fix_len(first_m_seg, hdr_addr);
497be56d20fSIvan Malov 		break;
498be56d20fSIvan Malov 	default:
499be56d20fSIvan Malov 		break;
500be56d20fSIvan Malov 	}
501be56d20fSIvan Malov 
502be56d20fSIvan Malov 	sfc_tso_innermost_ip_fix_len(first_m_seg, hdr_addr, iph_off);
503be56d20fSIvan Malov 
504be56d20fSIvan Malov 	/*
50567330d32SIgor Romanov 	 * Tx prepare has debug-only checks that offload flags are correctly
506*23f3dac4SStephen Hemminger 	 * filled in TSO mbuf. Use zero IPID if there is no IPv4 flag.
50767330d32SIgor Romanov 	 * If the packet is still IPv4, HW will simply start from zero IPID.
50867330d32SIgor Romanov 	 */
509daa02b5cSOlivier Matz 	if (first_m_seg->ol_flags & RTE_MBUF_F_TX_IPV4)
51041ef1ad5SIvan Malov 		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
5116bc985e4SIgor Romanov 
512daa02b5cSOlivier Matz 	if (first_m_seg->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
513c1ce2ba2SIvan Malov 		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
514c1ce2ba2SIvan Malov 						first_m_seg->outer_l2_len);
515c1ce2ba2SIvan Malov 
516f41b5156SOlivier Matz 	th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
5176bc985e4SIgor Romanov 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
5186bc985e4SIgor Romanov 	sent_seq = rte_be_to_cpu_32(sent_seq);
5196bc985e4SIgor Romanov 
520c1ce2ba2SIvan Malov 	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
521c1ce2ba2SIvan Malov 			sent_seq, first_m_seg->tso_segsz);
52211c3712fSIgor Romanov 	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
5236bc985e4SIgor Romanov 
5246bc985e4SIgor Romanov 	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
5256bc985e4SIgor Romanov 			&txq->txq_hw_ring[(*added) & txq->ptr_mask]);
5266bc985e4SIgor Romanov 	(*added)++;
5276bc985e4SIgor Romanov 
5286bc985e4SIgor Romanov 	do {
5296bc985e4SIgor Romanov 		rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
5306bc985e4SIgor Romanov 		unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
5316bc985e4SIgor Romanov 		unsigned int id;
5326bc985e4SIgor Romanov 
5336bc985e4SIgor Romanov 		next_frag += in_off;
5346bc985e4SIgor Romanov 		seg_len -= in_off;
5356bc985e4SIgor Romanov 		in_off = 0;
5366bc985e4SIgor Romanov 
5376bc985e4SIgor Romanov 		do {
5386bc985e4SIgor Romanov 			rte_iova_t frag_addr = next_frag;
5396bc985e4SIgor Romanov 			size_t frag_len;
5406bc985e4SIgor Romanov 
5416bc985e4SIgor Romanov 			frag_len = RTE_MIN(seg_len,
5426bc985e4SIgor Romanov 					   SFC_EF10_TX_DMA_DESC_LEN_MAX);
5436bc985e4SIgor Romanov 
5446bc985e4SIgor Romanov 			next_frag += frag_len;
5456bc985e4SIgor Romanov 			seg_len -= frag_len;
5466bc985e4SIgor Romanov 
5476bc985e4SIgor Romanov 			eop = (seg_len == 0 && m_seg->next == NULL);
5486bc985e4SIgor Romanov 
5496bc985e4SIgor Romanov 			id = (*added) & txq->ptr_mask;
5506bc985e4SIgor Romanov 			(*added)++;
5516bc985e4SIgor Romanov 
5526bc985e4SIgor Romanov 			/*
5536bc985e4SIgor Romanov 			 * Initially we assume that one DMA descriptor is needed
5546bc985e4SIgor Romanov 			 * for every segment. When the segment is split across
5556bc985e4SIgor Romanov 			 * several DMA descriptors, increase the estimation.
5566bc985e4SIgor Romanov 			 */
5576bc985e4SIgor Romanov 			needed_desc += (seg_len != 0);
5586bc985e4SIgor Romanov 
5596bc985e4SIgor Romanov 			/*
5606bc985e4SIgor Romanov 			 * Handle the case when no more descriptors can be
5616bc985e4SIgor Romanov 			 * added, but not all segments have been processed.
5626bc985e4SIgor Romanov 			 */
5636bc985e4SIgor Romanov 			if (*added - pkt_start == *dma_desc_space &&
5646bc985e4SIgor Romanov 			    !eop &&
5656bc985e4SIgor Romanov 			    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
5666bc985e4SIgor Romanov 						dma_desc_space, reap_done)) {
5676bc985e4SIgor Romanov 				struct rte_mbuf *m;
5686bc985e4SIgor Romanov 				struct rte_mbuf *m_next;
5696bc985e4SIgor Romanov 
5706bc985e4SIgor Romanov 				if (txq->completed != pkt_start) {
5716bc985e4SIgor Romanov 					unsigned int i;
5726bc985e4SIgor Romanov 
5736bc985e4SIgor Romanov 					/*
5746bc985e4SIgor Romanov 					 * Reset mbuf associations with added
5756bc985e4SIgor Romanov 					 * descriptors.
5766bc985e4SIgor Romanov 					 */
5776bc985e4SIgor Romanov 					for (i = pkt_start; i != *added; i++) {
5786bc985e4SIgor Romanov 						id = i & txq->ptr_mask;
5796bc985e4SIgor Romanov 						txq->sw_ring[id].mbuf = NULL;
5806bc985e4SIgor Romanov 					}
5816bc985e4SIgor Romanov 					return ENOSPC;
5826bc985e4SIgor Romanov 				}
5836bc985e4SIgor Romanov 
5846bc985e4SIgor Romanov 				/* Free the segments that cannot be sent */
5856bc985e4SIgor Romanov 				for (m = m_seg->next; m != NULL; m = m_next) {
5866bc985e4SIgor Romanov 					m_next = m->next;
5876bc985e4SIgor Romanov 					rte_pktmbuf_free_seg(m);
5886bc985e4SIgor Romanov 				}
5896bc985e4SIgor Romanov 				eop = true;
5906bc985e4SIgor Romanov 				/* Ignore the rest of the segment */
5916bc985e4SIgor Romanov 				seg_len = 0;
5926bc985e4SIgor Romanov 			}
5936bc985e4SIgor Romanov 
5946bc985e4SIgor Romanov 			sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
5956bc985e4SIgor Romanov 					eop, &txq->txq_hw_ring[id]);
5966bc985e4SIgor Romanov 
5976bc985e4SIgor Romanov 		} while (seg_len != 0);
5986bc985e4SIgor Romanov 
5996bc985e4SIgor Romanov 		txq->sw_ring[id].mbuf = m_seg;
6006bc985e4SIgor Romanov 
6016bc985e4SIgor Romanov 		m_seg = m_seg->next;
6026bc985e4SIgor Romanov 	} while (!eop);
6036bc985e4SIgor Romanov 
6046bc985e4SIgor Romanov 	/*
6056bc985e4SIgor Romanov 	 * Free the segments whose content was entirely copied to the TSO header
6066bc985e4SIgor Romanov 	 * memory space of the Tx queue.
6076bc985e4SIgor Romanov 	 */
6086bc985e4SIgor Romanov 	for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
6096bc985e4SIgor Romanov 		struct rte_mbuf *seg_to_free = m_seg;
6106bc985e4SIgor Romanov 
6116bc985e4SIgor Romanov 		m_seg = m_seg->next;
6126bc985e4SIgor Romanov 		rte_pktmbuf_free_seg(seg_to_free);
6136bc985e4SIgor Romanov 	}
6146bc985e4SIgor Romanov 
6156bc985e4SIgor Romanov 	return 0;
6166bc985e4SIgor Romanov }
6176bc985e4SIgor Romanov 
6188b00f426SAndrew Rybchenko static uint16_t
6198b00f426SAndrew Rybchenko sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6208b00f426SAndrew Rybchenko {
6218b00f426SAndrew Rybchenko 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
6228b00f426SAndrew Rybchenko 	unsigned int added;
6238b00f426SAndrew Rybchenko 	unsigned int dma_desc_space;
6248b00f426SAndrew Rybchenko 	bool reap_done;
6258b00f426SAndrew Rybchenko 	struct rte_mbuf **pktp;
6268b00f426SAndrew Rybchenko 	struct rte_mbuf **pktp_end;
6278b00f426SAndrew Rybchenko 
6288b00f426SAndrew Rybchenko 	if (unlikely(txq->flags &
6298b00f426SAndrew Rybchenko 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
6308b00f426SAndrew Rybchenko 		return 0;
6318b00f426SAndrew Rybchenko 
6328b00f426SAndrew Rybchenko 	added = txq->added;
633eaab5d96SAndrew Rybchenko 	dma_desc_space = txq->max_fill_level - (added - txq->completed);
6348b00f426SAndrew Rybchenko 
6358b00f426SAndrew Rybchenko 	reap_done = (dma_desc_space < txq->free_thresh);
6368b00f426SAndrew Rybchenko 	if (reap_done) {
6378b00f426SAndrew Rybchenko 		sfc_ef10_tx_reap(txq);
638eaab5d96SAndrew Rybchenko 		dma_desc_space = txq->max_fill_level - (added - txq->completed);
6398b00f426SAndrew Rybchenko 	}
6408b00f426SAndrew Rybchenko 
6418b00f426SAndrew Rybchenko 	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
6428b00f426SAndrew Rybchenko 	     pktp != pktp_end;
6438b00f426SAndrew Rybchenko 	     ++pktp) {
6448b00f426SAndrew Rybchenko 		struct rte_mbuf *m_seg = *pktp;
6458b00f426SAndrew Rybchenko 		unsigned int pkt_start = added;
6468b00f426SAndrew Rybchenko 		uint32_t pkt_len;
6478b00f426SAndrew Rybchenko 
6488b00f426SAndrew Rybchenko 		if (likely(pktp + 1 != pktp_end))
6498b00f426SAndrew Rybchenko 			rte_mbuf_prefetch_part1(pktp[1]);
6508b00f426SAndrew Rybchenko 
651daa02b5cSOlivier Matz 		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
6526bc985e4SIgor Romanov 			int rc;
6536bc985e4SIgor Romanov 
6546bc985e4SIgor Romanov 			rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
6556bc985e4SIgor Romanov 					&dma_desc_space, &reap_done);
6566bc985e4SIgor Romanov 			if (rc != 0) {
6576bc985e4SIgor Romanov 				added = pkt_start;
6586bc985e4SIgor Romanov 
6596bc985e4SIgor Romanov 				/* Packet can be sent in following xmit calls */
6606bc985e4SIgor Romanov 				if (likely(rc == ENOSPC))
6616bc985e4SIgor Romanov 					break;
6626bc985e4SIgor Romanov 
6636bc985e4SIgor Romanov 				/*
6646bc985e4SIgor Romanov 				 * The packet cannot be sent; report it to the
6656bc985e4SIgor Romanov 				 * caller as sent, but actually drop it and
6666bc985e4SIgor Romanov 				 * continue with the next packet.
6676bc985e4SIgor Romanov 				 */
6686bc985e4SIgor Romanov 				rte_pktmbuf_free(*pktp);
6696bc985e4SIgor Romanov 				continue;
6706bc985e4SIgor Romanov 			}
6716bc985e4SIgor Romanov 
6726bc985e4SIgor Romanov 			goto dma_desc_space_update;
6736bc985e4SIgor Romanov 		}
6746bc985e4SIgor Romanov 
675b6986271SAndrew Rybchenko 		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
6768b00f426SAndrew Rybchenko 			if (reap_done)
6778b00f426SAndrew Rybchenko 				break;
6788b00f426SAndrew Rybchenko 
6798b00f426SAndrew Rybchenko 			/* Push already prepared descriptors before polling */
6808b00f426SAndrew Rybchenko 			if (added != txq->added) {
6818b00f426SAndrew Rybchenko 				sfc_ef10_tx_qpush(txq, added, txq->added);
6828b00f426SAndrew Rybchenko 				txq->added = added;
6838b00f426SAndrew Rybchenko 			}
6848b00f426SAndrew Rybchenko 
6858b00f426SAndrew Rybchenko 			sfc_ef10_tx_reap(txq);
6868b00f426SAndrew Rybchenko 			reap_done = true;
687eaab5d96SAndrew Rybchenko 			dma_desc_space = txq->max_fill_level -
6888b00f426SAndrew Rybchenko 				(added - txq->completed);
689b6986271SAndrew Rybchenko 			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
6908b00f426SAndrew Rybchenko 				break;
6918b00f426SAndrew Rybchenko 		}
6928b00f426SAndrew Rybchenko 
6938b00f426SAndrew Rybchenko 		pkt_len = m_seg->pkt_len;
6948b00f426SAndrew Rybchenko 		do {
695df6e0a06SSantosh Shukla 			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
6968b00f426SAndrew Rybchenko 			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
697eaab5d96SAndrew Rybchenko 			unsigned int id = added & txq->ptr_mask;
6988b00f426SAndrew Rybchenko 
6998b00f426SAndrew Rybchenko 			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
7008b00f426SAndrew Rybchenko 
7018b00f426SAndrew Rybchenko 			pkt_len -= seg_len;
7028b00f426SAndrew Rybchenko 
7038b00f426SAndrew Rybchenko 			sfc_ef10_tx_qdesc_dma_create(seg_addr,
7048b00f426SAndrew Rybchenko 				seg_len, (pkt_len == 0),
705d3219543SIvan Malov 				&txq->txq_hw_ring[id]);
706d3219543SIvan Malov 
707d3219543SIvan Malov 			/*
708d3219543SIvan Malov 			 * rte_pktmbuf_free() is commonly used in DPDK for
709d3219543SIvan Malov 			 * recycling packets: the function checks every
710d3219543SIvan Malov 			 * segment's reference counter and returns the
711d3219543SIvan Malov 			 * buffer to its pool whenever possible.
712d3219543SIvan Malov 			 * Nevertheless, freeing mbuf segments one by one
713d3219543SIvan Malov 			 * may entail some performance decline.
714d3219543SIvan Malov 			 * Instead, sfc_ef10_tx_reap() does the same job
715d3219543SIvan Malov 			 * on its own and frees buffers in bulks (all mbufs
716d3219543SIvan Malov 			 * within a bulk belong to the same pool).
717d3219543SIvan Malov 			 * To make that work, individual segment pointers
718d3219543SIvan Malov 			 * must be associated with the corresponding SW
719d3219543SIvan Malov 			 * descriptors independently, so that a single loop
720d3219543SIvan Malov 			 * on reap is sufficient to inspect all the buffers.
721d3219543SIvan Malov 			 */
722d3219543SIvan Malov 			txq->sw_ring[id].mbuf = m_seg;
723d3219543SIvan Malov 
7248b00f426SAndrew Rybchenko 			++added;
7258b00f426SAndrew Rybchenko 
7268b00f426SAndrew Rybchenko 		} while ((m_seg = m_seg->next) != 0);
7278b00f426SAndrew Rybchenko 
7286bc985e4SIgor Romanov dma_desc_space_update:
7298b00f426SAndrew Rybchenko 		dma_desc_space -= (added - pkt_start);
7308b00f426SAndrew Rybchenko 	}
7318b00f426SAndrew Rybchenko 
7328b00f426SAndrew Rybchenko 	if (likely(added != txq->added)) {
7338b00f426SAndrew Rybchenko 		sfc_ef10_tx_qpush(txq, added, txq->added);
7348b00f426SAndrew Rybchenko 		txq->added = added;
7358b00f426SAndrew Rybchenko 	}
7368b00f426SAndrew Rybchenko 
7378b00f426SAndrew Rybchenko #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
7388b00f426SAndrew Rybchenko 	if (!reap_done)
7398b00f426SAndrew Rybchenko 		sfc_ef10_tx_reap(txq);
7408b00f426SAndrew Rybchenko #endif
7418b00f426SAndrew Rybchenko 
7428b00f426SAndrew Rybchenko 	return pktp - &tx_pkts[0];
7438b00f426SAndrew Rybchenko }
7448b00f426SAndrew Rybchenko 
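/*
 * Reap for the ef10_simple datapath: fast-free is required there, so mbufs
 * are returned to the mempool directly without reference counting, and all
 * mbufs in a bulk are expected to originate from the same pool.
 */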
74521233dd6SAndrew Rybchenko static void
74621233dd6SAndrew Rybchenko sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
74721233dd6SAndrew Rybchenko {
74821233dd6SAndrew Rybchenko 	const unsigned int old_read_ptr = txq->evq_read_ptr;
74921233dd6SAndrew Rybchenko 	const unsigned int ptr_mask = txq->ptr_mask;
75021233dd6SAndrew Rybchenko 	unsigned int completed = txq->completed;
75121233dd6SAndrew Rybchenko 	unsigned int pending = completed;
75221233dd6SAndrew Rybchenko 
75321233dd6SAndrew Rybchenko 	pending += sfc_ef10_tx_process_events(txq);
75421233dd6SAndrew Rybchenko 
75521233dd6SAndrew Rybchenko 	if (pending != completed) {
756093e1afaSIvan Malov 		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
757093e1afaSIvan Malov 		unsigned int nb = 0;
758093e1afaSIvan Malov 
75921233dd6SAndrew Rybchenko 		do {
76021233dd6SAndrew Rybchenko 			struct sfc_ef10_tx_sw_desc *txd;
76121233dd6SAndrew Rybchenko 
76221233dd6SAndrew Rybchenko 			txd = &txq->sw_ring[completed & ptr_mask];
76321233dd6SAndrew Rybchenko 
764093e1afaSIvan Malov 			if (nb == RTE_DIM(bulk)) {
765093e1afaSIvan Malov 				rte_mempool_put_bulk(bulk[0]->pool,
766093e1afaSIvan Malov 						     (void *)bulk, nb);
767093e1afaSIvan Malov 				nb = 0;
768093e1afaSIvan Malov 			}
769093e1afaSIvan Malov 
770093e1afaSIvan Malov 			bulk[nb++] = txd->mbuf;
77121233dd6SAndrew Rybchenko 		} while (++completed != pending);
77221233dd6SAndrew Rybchenko 
773093e1afaSIvan Malov 		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
774093e1afaSIvan Malov 
77521233dd6SAndrew Rybchenko 		txq->completed = completed;
77621233dd6SAndrew Rybchenko 	}
77721233dd6SAndrew Rybchenko 
77821233dd6SAndrew Rybchenko 	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
77921233dd6SAndrew Rybchenko 			   txq->evq_read_ptr);
78021233dd6SAndrew Rybchenko }
78121233dd6SAndrew Rybchenko 
7828c27fa78SIgor Romanov #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
7838c27fa78SIgor Romanov static uint16_t
7848c27fa78SIgor Romanov sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
7858c27fa78SIgor Romanov 			     struct rte_mbuf **tx_pkts,
7868c27fa78SIgor Romanov 			     uint16_t nb_pkts)
7878c27fa78SIgor Romanov {
7888c27fa78SIgor Romanov 	uint16_t i;
7898c27fa78SIgor Romanov 
7908c27fa78SIgor Romanov 	for (i = 0; i < nb_pkts; i++) {
7918c27fa78SIgor Romanov 		struct rte_mbuf *m = tx_pkts[i];
7928c27fa78SIgor Romanov 		int ret;
7938c27fa78SIgor Romanov 
7948c27fa78SIgor Romanov 		ret = rte_validate_tx_offload(m);
7958c27fa78SIgor Romanov 		if (unlikely(ret != 0)) {
7968c27fa78SIgor Romanov 			/*
7978c27fa78SIgor Romanov 			 * A negative error code is returned by
7988c27fa78SIgor Romanov 			 * rte_validate_tx_offload(), but positive error codes
7998c27fa78SIgor Romanov 			 * are used inside the net/sfc PMD.
8008c27fa78SIgor Romanov 			 */
8018c27fa78SIgor Romanov 			SFC_ASSERT(ret < 0);
8028c27fa78SIgor Romanov 			rte_errno = -ret;
8038c27fa78SIgor Romanov 			break;
8048c27fa78SIgor Romanov 		}
8058c27fa78SIgor Romanov 
8068c27fa78SIgor Romanov 		/* ef10_simple does not support TSO and VLAN insertion */
8078c27fa78SIgor Romanov 		if (unlikely(m->ol_flags &
808daa02b5cSOlivier Matz 			     (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_VLAN))) {
8098c27fa78SIgor Romanov 			rte_errno = ENOTSUP;
8108c27fa78SIgor Romanov 			break;
8118c27fa78SIgor Romanov 		}
8128c27fa78SIgor Romanov 
8138c27fa78SIgor Romanov 		/* ef10_simple does not support scattered packets */
8148c27fa78SIgor Romanov 		if (unlikely(m->nb_segs != 1)) {
8158c27fa78SIgor Romanov 			rte_errno = ENOTSUP;
8168c27fa78SIgor Romanov 			break;
8178c27fa78SIgor Romanov 		}
8188c27fa78SIgor Romanov 
8198c27fa78SIgor Romanov 		/*
8208c27fa78SIgor Romanov 		 * ef10_simple requires fast-free which ignores reference
8218c27fa78SIgor Romanov 		 * counters
8228c27fa78SIgor Romanov 		 */
8238c27fa78SIgor Romanov 		if (unlikely(rte_mbuf_refcnt_read(m) != 1)) {
8248c27fa78SIgor Romanov 			rte_errno = ENOTSUP;
8258c27fa78SIgor Romanov 			break;
8268c27fa78SIgor Romanov 		}
8278c27fa78SIgor Romanov 
8288c27fa78SIgor Romanov 		/* ef10_simple requires single pool for all packets */
8298c27fa78SIgor Romanov 		if (unlikely(m->pool != tx_pkts[0]->pool)) {
8308c27fa78SIgor Romanov 			rte_errno = ENOTSUP;
8318c27fa78SIgor Romanov 			break;
8328c27fa78SIgor Romanov 		}
8338c27fa78SIgor Romanov 	}
8348c27fa78SIgor Romanov 
8358c27fa78SIgor Romanov 	return i;
8368c27fa78SIgor Romanov }
8378c27fa78SIgor Romanov #endif
83821233dd6SAndrew Rybchenko 
83956885200SAndrew Rybchenko static uint16_t
84056885200SAndrew Rybchenko sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
84156885200SAndrew Rybchenko 			  uint16_t nb_pkts)
84256885200SAndrew Rybchenko {
84356885200SAndrew Rybchenko 	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
84456885200SAndrew Rybchenko 	unsigned int ptr_mask;
84556885200SAndrew Rybchenko 	unsigned int added;
84656885200SAndrew Rybchenko 	unsigned int dma_desc_space;
84756885200SAndrew Rybchenko 	bool reap_done;
84856885200SAndrew Rybchenko 	struct rte_mbuf **pktp;
84956885200SAndrew Rybchenko 	struct rte_mbuf **pktp_end;
85056885200SAndrew Rybchenko 
85156885200SAndrew Rybchenko 	if (unlikely(txq->flags &
85256885200SAndrew Rybchenko 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
85356885200SAndrew Rybchenko 		return 0;
85456885200SAndrew Rybchenko 
85556885200SAndrew Rybchenko 	ptr_mask = txq->ptr_mask;
85656885200SAndrew Rybchenko 	added = txq->added;
857eaab5d96SAndrew Rybchenko 	dma_desc_space = txq->max_fill_level - (added - txq->completed);
85856885200SAndrew Rybchenko 
85956885200SAndrew Rybchenko 	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
86056885200SAndrew Rybchenko 	if (reap_done) {
86121233dd6SAndrew Rybchenko 		sfc_ef10_simple_tx_reap(txq);
862eaab5d96SAndrew Rybchenko 		dma_desc_space = txq->max_fill_level - (added - txq->completed);
86356885200SAndrew Rybchenko 	}
86456885200SAndrew Rybchenko 
86556885200SAndrew Rybchenko 	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
86656885200SAndrew Rybchenko 	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
86756885200SAndrew Rybchenko 		struct rte_mbuf *pkt = *pktp;
86856885200SAndrew Rybchenko 		unsigned int id = added & ptr_mask;
86956885200SAndrew Rybchenko 
87056885200SAndrew Rybchenko 		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
87156885200SAndrew Rybchenko 			   SFC_EF10_TX_DMA_DESC_LEN_MAX);
87256885200SAndrew Rybchenko 
873bfa9a8a4SThomas Monjalon 		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
87456885200SAndrew Rybchenko 					     rte_pktmbuf_data_len(pkt),
87556885200SAndrew Rybchenko 					     true, &txq->txq_hw_ring[id]);
87656885200SAndrew Rybchenko 
87756885200SAndrew Rybchenko 		txq->sw_ring[id].mbuf = pkt;
87856885200SAndrew Rybchenko 
87956885200SAndrew Rybchenko 		++added;
88056885200SAndrew Rybchenko 	}
88156885200SAndrew Rybchenko 
88256885200SAndrew Rybchenko 	if (likely(added != txq->added)) {
88356885200SAndrew Rybchenko 		sfc_ef10_tx_qpush(txq, added, txq->added);
88456885200SAndrew Rybchenko 		txq->added = added;
88556885200SAndrew Rybchenko 	}
88656885200SAndrew Rybchenko 
88756885200SAndrew Rybchenko #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
88856885200SAndrew Rybchenko 	if (!reap_done)
88921233dd6SAndrew Rybchenko 		sfc_ef10_simple_tx_reap(txq);
89056885200SAndrew Rybchenko #endif
89156885200SAndrew Rybchenko 
89256885200SAndrew Rybchenko 	return pktp - &tx_pkts[0];
89356885200SAndrew Rybchenko }
89456885200SAndrew Rybchenko 
895c7dadc9fSAndrew Rybchenko static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
896c7dadc9fSAndrew Rybchenko static void
897c7dadc9fSAndrew Rybchenko sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
898c7dadc9fSAndrew Rybchenko {
899c7dadc9fSAndrew Rybchenko 	/*
900c7dadc9fSAndrew Rybchenko 	 * The number of descriptors just defines the maximum number of pushed
901c7dadc9fSAndrew Rybchenko 	 * descriptors (the fill level).
902c7dadc9fSAndrew Rybchenko 	 */
903c7dadc9fSAndrew Rybchenko 	dev_info->tx_desc_lim.nb_min = 1;
904c7dadc9fSAndrew Rybchenko 	dev_info->tx_desc_lim.nb_align = 1;
905c7dadc9fSAndrew Rybchenko }
9068b00f426SAndrew Rybchenko 
907420efecbSAndrew Rybchenko static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
908420efecbSAndrew Rybchenko static int
909420efecbSAndrew Rybchenko sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
9109dbd28dfSIgor Romanov 			   struct sfc_dp_tx_hw_limits *limits,
911420efecbSAndrew Rybchenko 			   unsigned int *txq_entries,
912420efecbSAndrew Rybchenko 			   unsigned int *evq_entries,
913420efecbSAndrew Rybchenko 			   unsigned int *txq_max_fill_level)
914420efecbSAndrew Rybchenko {
915c7dadc9fSAndrew Rybchenko 	/*
916c7dadc9fSAndrew Rybchenko 	 * rte_ethdev API guarantees that the number meets min, max and
917c7dadc9fSAndrew Rybchenko 	 * alignment requirements.
918c7dadc9fSAndrew Rybchenko 	 */
9199dbd28dfSIgor Romanov 	if (nb_tx_desc <= limits->txq_min_entries)
9209dbd28dfSIgor Romanov 		*txq_entries = limits->txq_min_entries;
921c7dadc9fSAndrew Rybchenko 	else
922c7dadc9fSAndrew Rybchenko 		*txq_entries = rte_align32pow2(nb_tx_desc);
923c7dadc9fSAndrew Rybchenko 
924c7dadc9fSAndrew Rybchenko 	*evq_entries = *txq_entries;
925c7dadc9fSAndrew Rybchenko 
926c7dadc9fSAndrew Rybchenko 	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
927c7dadc9fSAndrew Rybchenko 				      SFC_EF10_TXQ_LIMIT(*evq_entries));
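	/*
	 * For example, a request for 1000 descriptors (assuming it exceeds
	 * limits->txq_min_entries) is rounded up to a 1024-entry Tx ring and
	 * EvQ, while the fill level stays capped at
	 * RTE_MIN(1000, SFC_EF10_TXQ_LIMIT(1024)).
	 */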
928420efecbSAndrew Rybchenko 	return 0;
929420efecbSAndrew Rybchenko }
930420efecbSAndrew Rybchenko 
9318b00f426SAndrew Rybchenko static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
9328b00f426SAndrew Rybchenko static int
9338b00f426SAndrew Rybchenko sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
9348b00f426SAndrew Rybchenko 		    const struct rte_pci_addr *pci_addr, int socket_id,
9358b00f426SAndrew Rybchenko 		    const struct sfc_dp_tx_qcreate_info *info,
9368b00f426SAndrew Rybchenko 		    struct sfc_dp_txq **dp_txqp)
9378b00f426SAndrew Rybchenko {
9388b00f426SAndrew Rybchenko 	struct sfc_ef10_txq *txq;
9398b00f426SAndrew Rybchenko 	int rc;
9408b00f426SAndrew Rybchenko 
9418b00f426SAndrew Rybchenko 	rc = EINVAL;
9428b00f426SAndrew Rybchenko 	if (info->txq_entries != info->evq_entries)
9438b00f426SAndrew Rybchenko 		goto fail_bad_args;
9448b00f426SAndrew Rybchenko 
9453037e6cfSViacheslav Galaktionov 	rc = ENOTSUP;
9463037e6cfSViacheslav Galaktionov 	if (info->nic_dma_info->nb_regions > 0)
9473037e6cfSViacheslav Galaktionov 		goto fail_nic_dma;
9483037e6cfSViacheslav Galaktionov 
9498b00f426SAndrew Rybchenko 	rc = ENOMEM;
9508b00f426SAndrew Rybchenko 	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
9518b00f426SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
9528b00f426SAndrew Rybchenko 	if (txq == NULL)
9538b00f426SAndrew Rybchenko 		goto fail_txq_alloc;
9548b00f426SAndrew Rybchenko 
9558b00f426SAndrew Rybchenko 	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
9568b00f426SAndrew Rybchenko 
9578b00f426SAndrew Rybchenko 	rc = ENOMEM;
9588b00f426SAndrew Rybchenko 	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
9598b00f426SAndrew Rybchenko 					 info->txq_entries,
9608b00f426SAndrew Rybchenko 					 sizeof(*txq->sw_ring),
9618b00f426SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
9628b00f426SAndrew Rybchenko 	if (txq->sw_ring == NULL)
9638b00f426SAndrew Rybchenko 		goto fail_sw_ring_alloc;
9648b00f426SAndrew Rybchenko 
965295968d1SFerruh Yigit 	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
966295968d1SFerruh Yigit 			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
967295968d1SFerruh Yigit 			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
9686bc985e4SIgor Romanov 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
9696bc985e4SIgor Romanov 					      info->txq_entries,
9706bc985e4SIgor Romanov 					      SFC_TSOH_STD_LEN,
9716bc985e4SIgor Romanov 					      RTE_CACHE_LINE_SIZE,
9726bc985e4SIgor Romanov 					      socket_id);
9736bc985e4SIgor Romanov 		if (txq->tsoh == NULL)
9746bc985e4SIgor Romanov 			goto fail_tsoh_alloc;
9756bc985e4SIgor Romanov 
9766bc985e4SIgor Romanov 		txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
9776bc985e4SIgor Romanov 	}
9786bc985e4SIgor Romanov 
9798b00f426SAndrew Rybchenko 	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
9808b00f426SAndrew Rybchenko 	txq->ptr_mask = info->txq_entries - 1;
981eaab5d96SAndrew Rybchenko 	txq->max_fill_level = info->max_fill_level;
9828b00f426SAndrew Rybchenko 	txq->free_thresh = info->free_thresh;
9838b00f426SAndrew Rybchenko 	txq->txq_hw_ring = info->txq_hw_ring;
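	/* The doorbell of each queue lives in its own VI window within the BAR. */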
9848b00f426SAndrew Rybchenko 	txq->doorbell = (volatile uint8_t *)info->mem_bar +
9858b00f426SAndrew Rybchenko 			ER_DZ_TX_DESC_UPD_REG_OFST +
986714bff55SAndrew Rybchenko 			(info->hw_index << info->vi_window_shift);
9878b00f426SAndrew Rybchenko 	txq->evq_hw_ring = info->evq_hw_ring;
9886bc985e4SIgor Romanov 	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
9898b00f426SAndrew Rybchenko 
990e7fbf6f5SAndrew Rybchenko 	sfc_ef10_tx_info(&txq->dp.dpq, "TxQ doorbell is %p", txq->doorbell);
991e7fbf6f5SAndrew Rybchenko 
9928b00f426SAndrew Rybchenko 	*dp_txqp = &txq->dp;
9938b00f426SAndrew Rybchenko 	return 0;
9948b00f426SAndrew Rybchenko 
9956bc985e4SIgor Romanov fail_tsoh_alloc:
9966bc985e4SIgor Romanov 	rte_free(txq->sw_ring);
9976bc985e4SIgor Romanov 
9988b00f426SAndrew Rybchenko fail_sw_ring_alloc:
9998b00f426SAndrew Rybchenko 	rte_free(txq);
10008b00f426SAndrew Rybchenko 
10018b00f426SAndrew Rybchenko fail_txq_alloc:
10023037e6cfSViacheslav Galaktionov fail_nic_dma:
10038b00f426SAndrew Rybchenko fail_bad_args:
10048b00f426SAndrew Rybchenko 	return rc;
10058b00f426SAndrew Rybchenko }
10068b00f426SAndrew Rybchenko 
10078b00f426SAndrew Rybchenko static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
10088b00f426SAndrew Rybchenko static void
10098b00f426SAndrew Rybchenko sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
10108b00f426SAndrew Rybchenko {
10118b00f426SAndrew Rybchenko 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
10128b00f426SAndrew Rybchenko 
10136bc985e4SIgor Romanov 	rte_free(txq->tsoh);
10148b00f426SAndrew Rybchenko 	rte_free(txq->sw_ring);
10158b00f426SAndrew Rybchenko 	rte_free(txq);
10168b00f426SAndrew Rybchenko }
10178b00f426SAndrew Rybchenko 
10188b00f426SAndrew Rybchenko static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
10198b00f426SAndrew Rybchenko static int
10208b00f426SAndrew Rybchenko sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
10218b00f426SAndrew Rybchenko 		   unsigned int txq_desc_index)
10228b00f426SAndrew Rybchenko {
10238b00f426SAndrew Rybchenko 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
10248b00f426SAndrew Rybchenko 
10258b00f426SAndrew Rybchenko 	txq->evq_read_ptr = evq_read_ptr;
10268b00f426SAndrew Rybchenko 	txq->added = txq->completed = txq_desc_index;
10278b00f426SAndrew Rybchenko 
10288b00f426SAndrew Rybchenko 	txq->flags |= SFC_EF10_TXQ_STARTED;
10298b00f426SAndrew Rybchenko 	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
10308b00f426SAndrew Rybchenko 
10318b00f426SAndrew Rybchenko 	return 0;
10328b00f426SAndrew Rybchenko }
10338b00f426SAndrew Rybchenko 
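/*
 * Stop the transmit queue datapath: mark the queue as not running and hand
 * the event queue read pointer back to the control path so that it can
 * continue event processing from the correct position.
 */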
10348b00f426SAndrew Rybchenko static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
10358b00f426SAndrew Rybchenko static void
10368b00f426SAndrew Rybchenko sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
10378b00f426SAndrew Rybchenko {
10388b00f426SAndrew Rybchenko 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
10398b00f426SAndrew Rybchenko 
10408b00f426SAndrew Rybchenko 	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
10418b00f426SAndrew Rybchenko 
10428b00f426SAndrew Rybchenko 	*evq_read_ptr = txq->evq_read_ptr;
10438b00f426SAndrew Rybchenko }
10448b00f426SAndrew Rybchenko 
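/*
 * Tx event callback invoked from control path event processing; it may only
 * be called once the datapath is not running (see the assertion below), so
 * no completion handling is required here.
 */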
10458b00f426SAndrew Rybchenko static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
10468b00f426SAndrew Rybchenko static bool
10478b00f426SAndrew Rybchenko sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
10488b00f426SAndrew Rybchenko {
10498b00f426SAndrew Rybchenko 	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
10508b00f426SAndrew Rybchenko 
10518b00f426SAndrew Rybchenko 	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
10528b00f426SAndrew Rybchenko 
10538b00f426SAndrew Rybchenko 	/*
10548b00f426SAndrew Rybchenko 	 * It is safe to ignore the Tx event since we reap all mbufs on
10558b00f426SAndrew Rybchenko 	 * queue purge anyway.
10568b00f426SAndrew Rybchenko 	 */
10578b00f426SAndrew Rybchenko 
10588b00f426SAndrew Rybchenko 	return false;
10598b00f426SAndrew Rybchenko }
10608b00f426SAndrew Rybchenko 
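/*
 * Reap the queue after stop: free every mbuf segment still referenced by
 * software descriptors in the [completed, added) range and drop the
 * "started" flag.
 */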
10618b00f426SAndrew Rybchenko static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
10628b00f426SAndrew Rybchenko static void
10638b00f426SAndrew Rybchenko sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
10648b00f426SAndrew Rybchenko {
10658b00f426SAndrew Rybchenko 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
10663fd7797fSAndrew Rybchenko 	unsigned int completed;
10678b00f426SAndrew Rybchenko 
10683fd7797fSAndrew Rybchenko 	for (completed = txq->completed; completed != txq->added; ++completed) {
10693fd7797fSAndrew Rybchenko 		struct sfc_ef10_tx_sw_desc *txd;
10703fd7797fSAndrew Rybchenko 
10713fd7797fSAndrew Rybchenko 		txd = &txq->sw_ring[completed & txq->ptr_mask];
10723fd7797fSAndrew Rybchenko 		if (txd->mbuf != NULL) {
1073d112a3ecSIvan Malov 			rte_pktmbuf_free_seg(txd->mbuf);
10743fd7797fSAndrew Rybchenko 			txd->mbuf = NULL;
10758b00f426SAndrew Rybchenko 		}
10768b00f426SAndrew Rybchenko 	}
10778b00f426SAndrew Rybchenko 
10788b00f426SAndrew Rybchenko 	txq->flags &= ~SFC_EF10_TXQ_STARTED;
10798b00f426SAndrew Rybchenko }
10808b00f426SAndrew Rybchenko 
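/*
 * Count Tx descriptors that are still pending completion by peeking at the
 * queued Tx events without consuming them.
 */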
10812631dcedSIgor Romanov static unsigned int
10822631dcedSIgor Romanov sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
10832631dcedSIgor Romanov {
10842631dcedSIgor Romanov 	const unsigned int curr_done = txq->completed - 1;
10852631dcedSIgor Romanov 	unsigned int anew_done = curr_done;
10862631dcedSIgor Romanov 	efx_qword_t tx_ev;
10872631dcedSIgor Romanov 	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
10882631dcedSIgor Romanov 
10892631dcedSIgor Romanov 	if (unlikely(txq->flags &
10902631dcedSIgor Romanov 		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
10912631dcedSIgor Romanov 		return 0;
10922631dcedSIgor Romanov 
10932631dcedSIgor Romanov 	while (sfc_ef10_tx_get_event(txq, &tx_ev))
10942631dcedSIgor Romanov 		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
10952631dcedSIgor Romanov 
10962631dcedSIgor Romanov 	/*
10972631dcedSIgor Romanov 	 * The function does not process events, so restore the event queue
10982631dcedSIgor Romanov 	 * read pointer to its original position to allow the events that
10992631dcedSIgor Romanov 	 * were read to be processed later.
11002631dcedSIgor Romanov 	 */
11012631dcedSIgor Romanov 	txq->evq_read_ptr = evq_old_read_ptr;
11022631dcedSIgor Romanov 
11032631dcedSIgor Romanov 	return (anew_done - curr_done) & txq->ptr_mask;
11042631dcedSIgor Romanov }
11052631dcedSIgor Romanov 
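/*
 * Report the status of the Tx descriptor at the given offset ahead of the
 * reaped (completed) position.
 *
 * Illustrative usage only (not part of this driver, placeholder variables):
 * applications normally reach this callback through the generic ethdev API,
 * e.g.
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *
 * where RTE_ETH_TX_DESC_DONE means the descriptor has been transmitted and
 * its slot may be reused, RTE_ETH_TX_DESC_FULL means it is still in flight,
 * and RTE_ETH_TX_DESC_UNAVAIL means the offset is beyond the fill level.
 */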
11067df6f854SIvan Malov static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
11077df6f854SIvan Malov static int
11082631dcedSIgor Romanov sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
11092631dcedSIgor Romanov 			 uint16_t offset)
11107df6f854SIvan Malov {
11112631dcedSIgor Romanov 	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
11122631dcedSIgor Romanov 	unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);
11132631dcedSIgor Romanov 
11142631dcedSIgor Romanov 	if (unlikely(offset > txq->ptr_mask))
11152631dcedSIgor Romanov 		return -EINVAL;
11162631dcedSIgor Romanov 
11172631dcedSIgor Romanov 	if (unlikely(offset >= txq->max_fill_level))
11182631dcedSIgor Romanov 		return RTE_ETH_TX_DESC_UNAVAIL;
11192631dcedSIgor Romanov 
11202631dcedSIgor Romanov 	if (unlikely(offset < npending))
11212631dcedSIgor Romanov 		return RTE_ETH_TX_DESC_FULL;
11222631dcedSIgor Romanov 
11232631dcedSIgor Romanov 	return RTE_ETH_TX_DESC_DONE;
11247df6f854SIvan Malov }
11257df6f854SIvan Malov 
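/*
 * Datapath operations for the full-featured EF10 native Tx datapath
 * (selected with the tx_datapath=ef10 device argument).
 */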
11268b00f426SAndrew Rybchenko struct sfc_dp_tx sfc_ef10_tx = {
11278b00f426SAndrew Rybchenko 	.dp = {
11288b00f426SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EF10,
11298b00f426SAndrew Rybchenko 		.type		= SFC_DP_TX,
11308b00f426SAndrew Rybchenko 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
11318b00f426SAndrew Rybchenko 	},
11329aa0afd1SAndrew Rybchenko 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
1133295968d1SFerruh Yigit 	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
1134295968d1SFerruh Yigit 	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1135295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1136295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1137295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1138295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
1139295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1140295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
1141c7dadc9fSAndrew Rybchenko 	.get_dev_info		= sfc_ef10_get_dev_info,
1142420efecbSAndrew Rybchenko 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
11438b00f426SAndrew Rybchenko 	.qcreate		= sfc_ef10_tx_qcreate,
11448b00f426SAndrew Rybchenko 	.qdestroy		= sfc_ef10_tx_qdestroy,
11458b00f426SAndrew Rybchenko 	.qstart			= sfc_ef10_tx_qstart,
11468b00f426SAndrew Rybchenko 	.qtx_ev			= sfc_ef10_tx_qtx_ev,
11478b00f426SAndrew Rybchenko 	.qstop			= sfc_ef10_tx_qstop,
11488b00f426SAndrew Rybchenko 	.qreap			= sfc_ef10_tx_qreap,
11497df6f854SIvan Malov 	.qdesc_status		= sfc_ef10_tx_qdesc_status,
115067330d32SIgor Romanov 	.pkt_prepare		= sfc_ef10_prepare_pkts,
11518b00f426SAndrew Rybchenko 	.pkt_burst		= sfc_ef10_xmit_pkts,
11528b00f426SAndrew Rybchenko };
115356885200SAndrew Rybchenko 
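/*
 * Datapath operations for the simplified EF10 Tx datapath
 * (tx_datapath=ef10_simple): no multi-segment or TSO support, but it can
 * offer RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE and a leaner transmit path.
 */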
115456885200SAndrew Rybchenko struct sfc_dp_tx sfc_ef10_simple_tx = {
115556885200SAndrew Rybchenko 	.dp = {
115656885200SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
115756885200SAndrew Rybchenko 		.type		= SFC_DP_TX,
115856885200SAndrew Rybchenko 	},
1159f28ede50SAndrew Rybchenko 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
1160295968d1SFerruh Yigit 	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
1161295968d1SFerruh Yigit 	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1162295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1163295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1164295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
1165c7dadc9fSAndrew Rybchenko 	.get_dev_info		= sfc_ef10_get_dev_info,
1166420efecbSAndrew Rybchenko 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
116756885200SAndrew Rybchenko 	.qcreate		= sfc_ef10_tx_qcreate,
116856885200SAndrew Rybchenko 	.qdestroy		= sfc_ef10_tx_qdestroy,
116956885200SAndrew Rybchenko 	.qstart			= sfc_ef10_tx_qstart,
117056885200SAndrew Rybchenko 	.qtx_ev			= sfc_ef10_tx_qtx_ev,
117156885200SAndrew Rybchenko 	.qstop			= sfc_ef10_tx_qstop,
117256885200SAndrew Rybchenko 	.qreap			= sfc_ef10_tx_qreap,
11737df6f854SIvan Malov 	.qdesc_status		= sfc_ef10_tx_qdesc_status,
11748c27fa78SIgor Romanov #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
11758c27fa78SIgor Romanov 	.pkt_prepare		= sfc_ef10_simple_prepare_pkts,
11768c27fa78SIgor Romanov #endif
117756885200SAndrew Rybchenko 	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
117856885200SAndrew Rybchenko };
1179