xref: /dpdk/drivers/net/sfc/sfc_tx.c (revision 24a491bb882a3269b9b4e2754e1d5b43d83b9821)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5a8ad8cf8SIvan Malov  *
6a8ad8cf8SIvan Malov  * This software was jointly developed between OKTET Labs (under contract
7a8ad8cf8SIvan Malov  * for Solarflare) and Solarflare Communications, Inc.
8a8ad8cf8SIvan Malov  */
9a8ad8cf8SIvan Malov 
10a8ad8cf8SIvan Malov #include "sfc.h"
11fed9aeb4SIvan Malov #include "sfc_debug.h"
12a8ad8cf8SIvan Malov #include "sfc_log.h"
13a8ad8cf8SIvan Malov #include "sfc_ev.h"
14a8ad8cf8SIvan Malov #include "sfc_tx.h"
15428c7dddSIvan Malov #include "sfc_tweak.h"
16dbdc8241SAndrew Rybchenko #include "sfc_kvargs.h"
17a8ad8cf8SIvan Malov 
18fed9aeb4SIvan Malov /*
19fed9aeb4SIvan Malov  * Maximum number of TX queue flush attempts in case of
20fed9aeb4SIvan Malov  * failure or flush timeout
21fed9aeb4SIvan Malov  */
22fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_ATTEMPTS		(3)
23fed9aeb4SIvan Malov 
24fed9aeb4SIvan Malov /*
25fed9aeb4SIvan Malov  * Time to wait between event queue polling attempts when waiting for TX
26fed9aeb4SIvan Malov  * queue flush done or flush failed events
27fed9aeb4SIvan Malov  */
28fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)
29fed9aeb4SIvan Malov 
30fed9aeb4SIvan Malov /*
31fed9aeb4SIvan Malov  * Maximum number of event queue polling attempts when waiting for TX queue
32fed9aeb4SIvan Malov  * flush done or flush failed events; it defines TX queue flush attempt timeout
33fed9aeb4SIvan Malov  * together with SFC_TX_QFLUSH_POLL_WAIT_MS
34fed9aeb4SIvan Malov  */
35fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
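/*
 * SFC_TX_QFLUSH_POLL_ATTEMPTS polls of SFC_TX_QFLUSH_POLL_WAIT_MS each
 * bound a single flush attempt by about 2 seconds; with up to
 * SFC_TX_QFLUSH_ATTEMPTS retries this gives the worst-case delay of
 * roughly 6 seconds mentioned in sfc_tx_qstop()
 */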
36fed9aeb4SIvan Malov 
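/*
 * Translate an ethdev Tx queue identifier into the driver-internal
 * software index and return the corresponding Tx queue info
 */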
37db980d26SIgor Romanov struct sfc_txq_info *
38db980d26SIgor Romanov sfc_txq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
39db980d26SIgor Romanov 			   sfc_ethdev_qid_t ethdev_qid)
40db980d26SIgor Romanov {
41db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
42db980d26SIgor Romanov 
43db980d26SIgor Romanov 	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_txq_count);
44db980d26SIgor Romanov 	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
45db980d26SIgor Romanov 
46db980d26SIgor Romanov 	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
47db980d26SIgor Romanov 	return &sas->txq_info[sw_index];
48db980d26SIgor Romanov }
49db980d26SIgor Romanov 
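/*
 * Build the set of Tx offloads which must not be advertised with the
 * current NIC configuration and return its complement; callers mask
 * the datapath capabilities with the result to get the offloads that
 * are actually supported
 */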
509aa0afd1SAndrew Rybchenko static uint64_t
519aa0afd1SAndrew Rybchenko sfc_tx_get_offload_mask(struct sfc_adapter *sa)
529aa0afd1SAndrew Rybchenko {
539aa0afd1SAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
549aa0afd1SAndrew Rybchenko 	uint64_t no_caps = 0;
559aa0afd1SAndrew Rybchenko 
569aa0afd1SAndrew Rybchenko 	if (!encp->enc_hw_tx_insert_vlan_enabled)
57295968d1SFerruh Yigit 		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
589aa0afd1SAndrew Rybchenko 
599aa0afd1SAndrew Rybchenko 	if (!encp->enc_tunnel_encapsulations_supported)
60295968d1SFerruh Yigit 		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
619aa0afd1SAndrew Rybchenko 
629aa0afd1SAndrew Rybchenko 	if (!sa->tso)
63295968d1SFerruh Yigit 		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
649aa0afd1SAndrew Rybchenko 
654a4c4f34SAndrew Rybchenko 	if (!sa->tso_encap ||
664a4c4f34SAndrew Rybchenko 	    (encp->enc_tunnel_encapsulations_supported &
674a4c4f34SAndrew Rybchenko 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
68295968d1SFerruh Yigit 		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
694a4c4f34SAndrew Rybchenko 
704a4c4f34SAndrew Rybchenko 	if (!sa->tso_encap ||
714a4c4f34SAndrew Rybchenko 	    (encp->enc_tunnel_encapsulations_supported &
724a4c4f34SAndrew Rybchenko 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
73295968d1SFerruh Yigit 		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
749aa0afd1SAndrew Rybchenko 
759aa0afd1SAndrew Rybchenko 	return ~no_caps;
769aa0afd1SAndrew Rybchenko }
779aa0afd1SAndrew Rybchenko 
7885069adeSIvan Malov uint64_t
7985069adeSIvan Malov sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
8085069adeSIvan Malov {
819aa0afd1SAndrew Rybchenko 	return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa);
82c78d280eSIvan Malov }
83c78d280eSIvan Malov 
84c78d280eSIvan Malov uint64_t
85c78d280eSIvan Malov sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
86c78d280eSIvan Malov {
879aa0afd1SAndrew Rybchenko 	return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa);
8885069adeSIvan Malov }
8985069adeSIvan Malov 
90a8ad8cf8SIvan Malov static int
91420efecbSAndrew Rybchenko sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
92a4996bd8SWei Dai 		   const struct rte_eth_txconf *tx_conf,
93a4996bd8SWei Dai 		   uint64_t offloads)
94b1b7ad93SIvan Malov {
95b1b7ad93SIvan Malov 	int rc = 0;
96b1b7ad93SIvan Malov 
97b1b7ad93SIvan Malov 	if (tx_conf->tx_rs_thresh != 0) {
98b1b7ad93SIvan Malov 		sfc_err(sa, "RS bit in transmit descriptor is not supported");
99b1b7ad93SIvan Malov 		rc = EINVAL;
100b1b7ad93SIvan Malov 	}
101b1b7ad93SIvan Malov 
102420efecbSAndrew Rybchenko 	if (tx_conf->tx_free_thresh > txq_max_fill_level) {
103b1b7ad93SIvan Malov 		sfc_err(sa,
10421f6411cSIvan Malov 			"TxQ free threshold too large: %u vs maximum %u",
105420efecbSAndrew Rybchenko 			tx_conf->tx_free_thresh, txq_max_fill_level);
106b1b7ad93SIvan Malov 		rc = EINVAL;
107b1b7ad93SIvan Malov 	}
108b1b7ad93SIvan Malov 
109b1b7ad93SIvan Malov 	if (tx_conf->tx_thresh.pthresh != 0 ||
110b1b7ad93SIvan Malov 	    tx_conf->tx_thresh.hthresh != 0 ||
111b1b7ad93SIvan Malov 	    tx_conf->tx_thresh.wthresh != 0) {
112048da253SAndrew Rybchenko 		sfc_warn(sa,
113b1b7ad93SIvan Malov 			"prefetch/host/writeback thresholds are not supported");
114b1b7ad93SIvan Malov 	}
115b1b7ad93SIvan Malov 
116b1b7ad93SIvan Malov 	/* We either perform both TCP and UDP offload, or no offload at all */
117295968d1SFerruh Yigit 	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
118295968d1SFerruh Yigit 	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
119b1b7ad93SIvan Malov 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
120b1b7ad93SIvan Malov 		rc = EINVAL;
121b1b7ad93SIvan Malov 	}
122b1b7ad93SIvan Malov 
123b1b7ad93SIvan Malov 	return rc;
124b1b7ad93SIvan Malov }
125b1b7ad93SIvan Malov 
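/*
 * Mark the Tx queue as flushed and clear the in-progress flag which
 * sfc_tx_qstop() polls while waiting for the flush to complete
 */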
126fed9aeb4SIvan Malov void
127561508daSAndrew Rybchenko sfc_tx_qflush_done(struct sfc_txq_info *txq_info)
128fed9aeb4SIvan Malov {
129561508daSAndrew Rybchenko 	txq_info->state |= SFC_TXQ_FLUSHED;
130561508daSAndrew Rybchenko 	txq_info->state &= ~SFC_TXQ_FLUSHING;
131fed9aeb4SIvan Malov }
132fed9aeb4SIvan Malov 
133b1b7ad93SIvan Malov int
134db980d26SIgor Romanov sfc_tx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
135b1b7ad93SIvan Malov 	     uint16_t nb_tx_desc, unsigned int socket_id,
136b1b7ad93SIvan Malov 	     const struct rte_eth_txconf *tx_conf)
137b1b7ad93SIvan Malov {
138db980d26SIgor Romanov 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
139db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
140676d11ffSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
141420efecbSAndrew Rybchenko 	unsigned int txq_entries;
142420efecbSAndrew Rybchenko 	unsigned int evq_entries;
143420efecbSAndrew Rybchenko 	unsigned int txq_max_fill_level;
144b1b7ad93SIvan Malov 	struct sfc_txq_info *txq_info;
145b1b7ad93SIvan Malov 	struct sfc_evq *evq;
146b1b7ad93SIvan Malov 	struct sfc_txq *txq;
147b1b7ad93SIvan Malov 	int rc = 0;
148dbdc8241SAndrew Rybchenko 	struct sfc_dp_tx_qcreate_info info;
149a4996bd8SWei Dai 	uint64_t offloads;
1509dbd28dfSIgor Romanov 	struct sfc_dp_tx_hw_limits hw_limits;
151b1b7ad93SIvan Malov 
152db980d26SIgor Romanov 	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
153db980d26SIgor Romanov 
154db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
155b1b7ad93SIvan Malov 
1569dbd28dfSIgor Romanov 	memset(&hw_limits, 0, sizeof(hw_limits));
1579dbd28dfSIgor Romanov 	hw_limits.txq_max_entries = sa->txq_max_entries;
1589dbd28dfSIgor Romanov 	hw_limits.txq_min_entries = sa->txq_min_entries;
1599dbd28dfSIgor Romanov 
1609dbd28dfSIgor Romanov 	rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &hw_limits,
1619dbd28dfSIgor Romanov 					    &txq_entries, &evq_entries,
1629dbd28dfSIgor Romanov 					    &txq_max_fill_level);
163420efecbSAndrew Rybchenko 	if (rc != 0)
164420efecbSAndrew Rybchenko 		goto fail_size_up_rings;
1659dbd28dfSIgor Romanov 	SFC_ASSERT(txq_entries >= sa->txq_min_entries);
166c7dadc9fSAndrew Rybchenko 	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
167c7dadc9fSAndrew Rybchenko 	SFC_ASSERT(txq_entries >= nb_tx_desc);
168c7dadc9fSAndrew Rybchenko 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
169420efecbSAndrew Rybchenko 
170db980d26SIgor Romanov 	offloads = tx_conf->offloads;
171db980d26SIgor Romanov 	/* Add device level Tx offloads if the queue is an ethdev Tx queue */
172db980d26SIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
173db980d26SIgor Romanov 		offloads |= sa->eth_dev->data->dev_conf.txmode.offloads;
174db980d26SIgor Romanov 
175a4996bd8SWei Dai 	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
176b1b7ad93SIvan Malov 	if (rc != 0)
177b1b7ad93SIvan Malov 		goto fail_bad_conf;
178b1b7ad93SIvan Malov 
179113a14a6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
180113a14a6SAndrew Rybchenko 	txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];
181b1b7ad93SIvan Malov 
182420efecbSAndrew Rybchenko 	txq_info->entries = txq_entries;
183b1b7ad93SIvan Malov 
1846caeec47SAndrew Rybchenko 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
185420efecbSAndrew Rybchenko 			  evq_entries, socket_id, &evq);
186b1b7ad93SIvan Malov 	if (rc != 0)
187b1b7ad93SIvan Malov 		goto fail_ev_qinit;
188b1b7ad93SIvan Malov 
18929e4237dSAndrew Rybchenko 	txq = &sa->txq_ctrl[sw_index];
190dbdc8241SAndrew Rybchenko 	txq->hw_index = sw_index;
191dbdc8241SAndrew Rybchenko 	txq->evq = evq;
192b57870f2SAndrew Rybchenko 	txq_info->free_thresh =
193dbdc8241SAndrew Rybchenko 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
194dbdc8241SAndrew Rybchenko 		SFC_TX_DEFAULT_FREE_THRESH;
195b57870f2SAndrew Rybchenko 	txq_info->offloads = offloads;
196dbdc8241SAndrew Rybchenko 
1973037e6cfSViacheslav Galaktionov 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_NIC_DMA_ADDR_TX_RING,
1985638b1f0SIgor Romanov 			   efx_txq_size(sa->nic, txq_info->entries),
199b1b7ad93SIvan Malov 			   socket_id, &txq->mem);
200b1b7ad93SIvan Malov 	if (rc != 0)
201b1b7ad93SIvan Malov 		goto fail_dma_alloc;
202b1b7ad93SIvan Malov 
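	/* Collect Tx queue creation parameters for the datapath-specific qcreate() */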
203dbdc8241SAndrew Rybchenko 	memset(&info, 0, sizeof(info));
204eaab5d96SAndrew Rybchenko 	info.max_fill_level = txq_max_fill_level;
205b57870f2SAndrew Rybchenko 	info.free_thresh = txq_info->free_thresh;
206a4996bd8SWei Dai 	info.offloads = offloads;
207dbdc8241SAndrew Rybchenko 	info.txq_entries = txq_info->entries;
208dbdc8241SAndrew Rybchenko 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
2098b00f426SAndrew Rybchenko 	info.txq_hw_ring = txq->mem.esm_base;
210420efecbSAndrew Rybchenko 	info.evq_entries = evq_entries;
2118b00f426SAndrew Rybchenko 	info.evq_hw_ring = evq->mem.esm_base;
2128b00f426SAndrew Rybchenko 	info.hw_index = txq->hw_index;
2138b00f426SAndrew Rybchenko 	info.mem_bar = sa->mem_bar.esb_base;
214714bff55SAndrew Rybchenko 	info.vi_window_shift = encp->enc_vi_window_shift;
2156bc985e4SIgor Romanov 	info.tso_tcp_header_offset_limit =
2166bc985e4SIgor Romanov 		encp->enc_tx_tso_tcp_header_offset_limit;
2174f936666SIvan Malov 	info.tso_max_nb_header_descs =
2184f936666SIvan Malov 		RTE_MIN(encp->enc_tx_tso_max_header_ndescs,
2194f936666SIvan Malov 			(uint32_t)UINT16_MAX);
2204f936666SIvan Malov 	info.tso_max_header_len =
2214f936666SIvan Malov 		RTE_MIN(encp->enc_tx_tso_max_header_length,
2224f936666SIvan Malov 			(uint32_t)UINT16_MAX);
2234f936666SIvan Malov 	info.tso_max_nb_payload_descs =
2244f936666SIvan Malov 		RTE_MIN(encp->enc_tx_tso_max_payload_ndescs,
2254f936666SIvan Malov 			(uint32_t)UINT16_MAX);
2264f936666SIvan Malov 	info.tso_max_payload_len = encp->enc_tx_tso_max_payload_length;
2274f936666SIvan Malov 	info.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes;
228b1b7ad93SIvan Malov 
2293037e6cfSViacheslav Galaktionov 	info.nic_dma_info = &sas->nic_dma_info;
2303037e6cfSViacheslav Galaktionov 
2315dec95e3SAndrew Rybchenko 	rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
232c0802544SFerruh Yigit 				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
23342ce2521SAndrew Rybchenko 				     socket_id, &info, &txq_info->dp);
234fec33d5bSIvan Malov 	if (rc != 0)
235dbdc8241SAndrew Rybchenko 		goto fail_dp_tx_qinit;
236dbdc8241SAndrew Rybchenko 
23742ce2521SAndrew Rybchenko 	evq->dp_txq = txq_info->dp;
238fec33d5bSIvan Malov 
239561508daSAndrew Rybchenko 	txq_info->state = SFC_TXQ_INITIALIZED;
240b1b7ad93SIvan Malov 
241c6a1d9b5SIvan Malov 	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
242b1b7ad93SIvan Malov 
243b1b7ad93SIvan Malov 	return 0;
244b1b7ad93SIvan Malov 
245dbdc8241SAndrew Rybchenko fail_dp_tx_qinit:
246b1b7ad93SIvan Malov 	sfc_dma_free(sa, &txq->mem);
247b1b7ad93SIvan Malov 
248b1b7ad93SIvan Malov fail_dma_alloc:
2496caeec47SAndrew Rybchenko 	sfc_ev_qfini(evq);
250b1b7ad93SIvan Malov 
251b1b7ad93SIvan Malov fail_ev_qinit:
252b1b7ad93SIvan Malov 	txq_info->entries = 0;
253b1b7ad93SIvan Malov 
254b1b7ad93SIvan Malov fail_bad_conf:
255420efecbSAndrew Rybchenko fail_size_up_rings:
256db980d26SIgor Romanov 	sfc_log_init(sa, "failed (TxQ = %d (internal %u), rc = %d)", ethdev_qid,
257db980d26SIgor Romanov 		     sw_index, rc);
258b1b7ad93SIvan Malov 	return rc;
259b1b7ad93SIvan Malov }
260b1b7ad93SIvan Malov 
261b1b7ad93SIvan Malov void
262db980d26SIgor Romanov sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
263b1b7ad93SIvan Malov {
264db980d26SIgor Romanov 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
265db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
266b1b7ad93SIvan Malov 	struct sfc_txq_info *txq_info;
267b1b7ad93SIvan Malov 	struct sfc_txq *txq;
268b1b7ad93SIvan Malov 
269db980d26SIgor Romanov 	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
270db980d26SIgor Romanov 
271db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
272b1b7ad93SIvan Malov 
273113a14a6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
274db980d26SIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
275db980d26SIgor Romanov 		sa->eth_dev->data->tx_queues[ethdev_qid] = NULL;
2765d138ef9SIgor Romanov 
277113a14a6SAndrew Rybchenko 	txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];
278b1b7ad93SIvan Malov 
279561508daSAndrew Rybchenko 	SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);
280b1b7ad93SIvan Malov 
28142ce2521SAndrew Rybchenko 	sa->priv.dp_tx->qdestroy(txq_info->dp);
28242ce2521SAndrew Rybchenko 	txq_info->dp = NULL;
283fec33d5bSIvan Malov 
28429e4237dSAndrew Rybchenko 	txq_info->state &= ~SFC_TXQ_INITIALIZED;
285b1b7ad93SIvan Malov 	txq_info->entries = 0;
286b1b7ad93SIvan Malov 
28729e4237dSAndrew Rybchenko 	txq = &sa->txq_ctrl[sw_index];
28829e4237dSAndrew Rybchenko 
289b1b7ad93SIvan Malov 	sfc_dma_free(sa, &txq->mem);
290940a34faSAndrew Rybchenko 
2916caeec47SAndrew Rybchenko 	sfc_ev_qfini(txq->evq);
2926caeec47SAndrew Rybchenko 	txq->evq = NULL;
293b1b7ad93SIvan Malov }
294b1b7ad93SIvan Malov 
29558d6f89aSIgor Romanov int
296db980d26SIgor Romanov sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
297a8ad8cf8SIvan Malov {
298db980d26SIgor Romanov 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
299db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
300db980d26SIgor Romanov 
301db980d26SIgor Romanov 	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
302db980d26SIgor Romanov 
303db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
304a8ad8cf8SIvan Malov 
305a8ad8cf8SIvan Malov 	return 0;
306a8ad8cf8SIvan Malov }
307a8ad8cf8SIvan Malov 
308dbf0f627SIvan Malov static int
309dbf0f627SIvan Malov sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
310dbf0f627SIvan Malov {
311*24a491bbSIvan Malov 	uint64_t dev_tx_offload_cap = sfc_tx_get_dev_offload_caps(sa);
312dbf0f627SIvan Malov 	int rc = 0;
313dbf0f627SIvan Malov 
314dbf0f627SIvan Malov 	switch (txmode->mq_mode) {
315295968d1SFerruh Yigit 	case RTE_ETH_MQ_TX_NONE:
316dbf0f627SIvan Malov 		break;
317dbf0f627SIvan Malov 	default:
318dbf0f627SIvan Malov 		sfc_err(sa, "Tx multi-queue mode %u not supported",
319dbf0f627SIvan Malov 			txmode->mq_mode);
320dbf0f627SIvan Malov 		rc = EINVAL;
321dbf0f627SIvan Malov 	}
322dbf0f627SIvan Malov 
323*24a491bbSIvan Malov 	if ((dev_tx_offload_cap & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 &&
324*24a491bbSIvan Malov 	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) == 0) {
325*24a491bbSIvan Malov 		sfc_err(sa, "There is no FAST_FREE flag in the attempted Tx mode configuration");
326*24a491bbSIvan Malov 		sfc_err(sa, "FAST_FREE is always active as per the current Tx datapath variant");
327*24a491bbSIvan Malov 		rc = EINVAL;
328*24a491bbSIvan Malov 	}
329*24a491bbSIvan Malov 
330dbf0f627SIvan Malov 	/*
331dbf0f627SIvan Malov 	 * These features are claimed to be i40e-specific,
332dbf0f627SIvan Malov 	 * but it does make sense to double-check their absence
333dbf0f627SIvan Malov 	 */
334dbf0f627SIvan Malov 	if (txmode->hw_vlan_reject_tagged) {
335dbf0f627SIvan Malov 		sfc_err(sa, "Rejecting tagged packets not supported");
336dbf0f627SIvan Malov 		rc = EINVAL;
337dbf0f627SIvan Malov 	}
338dbf0f627SIvan Malov 
339dbf0f627SIvan Malov 	if (txmode->hw_vlan_reject_untagged) {
340dbf0f627SIvan Malov 		sfc_err(sa, "Rejecting untagged packets not supported");
341dbf0f627SIvan Malov 		rc = EINVAL;
342dbf0f627SIvan Malov 	}
343dbf0f627SIvan Malov 
344dbf0f627SIvan Malov 	if (txmode->hw_vlan_insert_pvid) {
345dbf0f627SIvan Malov 		sfc_err(sa, "Port-based VLAN insertion not supported");
346dbf0f627SIvan Malov 		rc = EINVAL;
347dbf0f627SIvan Malov 	}
348dbf0f627SIvan Malov 
349dbf0f627SIvan Malov 	return rc;
350dbf0f627SIvan Malov }
351dbf0f627SIvan Malov 
35209a46bf9SAndrew Rybchenko /**
35309a46bf9SAndrew Rybchenko  * Destroy excess queues that are no longer needed after reconfiguration
35409a46bf9SAndrew Rybchenko  * or complete close.
35509a46bf9SAndrew Rybchenko  */
35609a46bf9SAndrew Rybchenko static void
35709a46bf9SAndrew Rybchenko sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
35809a46bf9SAndrew Rybchenko {
359113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
360db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
361db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
36209a46bf9SAndrew Rybchenko 
363db980d26SIgor Romanov 	SFC_ASSERT(nb_tx_queues <= sas->ethdev_txq_count);
36409a46bf9SAndrew Rybchenko 
365db980d26SIgor Romanov 	/*
366db980d26SIgor Romanov 	 * Finalize only ethdev queues since other ones are finalized only
3677be78d02SJosh Soref 	 * on device close and they may require additional deinitialization.
368db980d26SIgor Romanov 	 */
369db980d26SIgor Romanov 	ethdev_qid = sas->ethdev_txq_count;
370db980d26SIgor Romanov 	while (--ethdev_qid >= (int)nb_tx_queues) {
371db980d26SIgor Romanov 		struct sfc_txq_info *txq_info;
372db980d26SIgor Romanov 
373db980d26SIgor Romanov 		sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
374db980d26SIgor Romanov 		txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
375db980d26SIgor Romanov 		if (txq_info->state & SFC_TXQ_INITIALIZED)
37609a46bf9SAndrew Rybchenko 			sfc_tx_qfini(sa, sw_index);
37709a46bf9SAndrew Rybchenko 	}
37809a46bf9SAndrew Rybchenko 
379db980d26SIgor Romanov 	sas->ethdev_txq_count = nb_tx_queues;
38009a46bf9SAndrew Rybchenko }
38109a46bf9SAndrew Rybchenko 
382a8ad8cf8SIvan Malov int
383df64eaddSAndrew Rybchenko sfc_tx_configure(struct sfc_adapter *sa)
384a8ad8cf8SIvan Malov {
385113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
386676d11ffSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
387dbf0f627SIvan Malov 	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
38809a46bf9SAndrew Rybchenko 	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
389689a5674SIgor Romanov 	const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
390689a5674SIgor Romanov 	const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
39158d6f89aSIgor Romanov 	bool reconfigure;
392a8ad8cf8SIvan Malov 	int rc = 0;
393a8ad8cf8SIvan Malov 
39409a46bf9SAndrew Rybchenko 	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
395db980d26SIgor Romanov 		     nb_tx_queues, sas->ethdev_txq_count);
39609a46bf9SAndrew Rybchenko 
397676d11ffSAndrew Rybchenko 	/*
398676d11ffSAndrew Rybchenko 	 * The datapath implementation assumes absence of boundary
399676d11ffSAndrew Rybchenko 	 * limits on Tx DMA descriptors. Adding these checks to the
400676d11ffSAndrew Rybchenko 	 * datapath would simply make it slower.
401676d11ffSAndrew Rybchenko 	 */
402676d11ffSAndrew Rybchenko 	if (encp->enc_tx_dma_desc_boundary != 0) {
403676d11ffSAndrew Rybchenko 		rc = ENOTSUP;
404676d11ffSAndrew Rybchenko 		goto fail_tx_dma_desc_boundary;
405676d11ffSAndrew Rybchenko 	}
406676d11ffSAndrew Rybchenko 
407dbf0f627SIvan Malov 	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
408dbf0f627SIvan Malov 	if (rc != 0)
409dbf0f627SIvan Malov 		goto fail_check_mode;
410dbf0f627SIvan Malov 
411689a5674SIgor Romanov 	if (nb_txq_total == sas->txq_count)
41209a46bf9SAndrew Rybchenko 		goto done;
413a8ad8cf8SIvan Malov 
414113a14a6SAndrew Rybchenko 	if (sas->txq_info == NULL) {
41558d6f89aSIgor Romanov 		reconfigure = false;
416689a5674SIgor Romanov 		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
417113a14a6SAndrew Rybchenko 						  sizeof(sas->txq_info[0]), 0,
418a8ad8cf8SIvan Malov 						  sa->socket_id);
419113a14a6SAndrew Rybchenko 		if (sas->txq_info == NULL)
420a8ad8cf8SIvan Malov 			goto fail_txqs_alloc;
42129e4237dSAndrew Rybchenko 
42229e4237dSAndrew Rybchenko 		/*
42329e4237dSAndrew Rybchenko 		 * Allocate primary process only TxQ control from heap
42429e4237dSAndrew Rybchenko 		 * since it should not be shared.
42529e4237dSAndrew Rybchenko 		 */
42629e4237dSAndrew Rybchenko 		rc = ENOMEM;
427689a5674SIgor Romanov 		sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0]));
42829e4237dSAndrew Rybchenko 		if (sa->txq_ctrl == NULL)
42929e4237dSAndrew Rybchenko 			goto fail_txqs_ctrl_alloc;
43009a46bf9SAndrew Rybchenko 	} else {
43109a46bf9SAndrew Rybchenko 		struct sfc_txq_info *new_txq_info;
43229e4237dSAndrew Rybchenko 		struct sfc_txq *new_txq_ctrl;
433a8ad8cf8SIvan Malov 
43458d6f89aSIgor Romanov 		reconfigure = true;
43558d6f89aSIgor Romanov 
436db980d26SIgor Romanov 		if (nb_tx_queues < sas->ethdev_txq_count)
43709a46bf9SAndrew Rybchenko 			sfc_tx_fini_queues(sa, nb_tx_queues);
43809a46bf9SAndrew Rybchenko 
43909a46bf9SAndrew Rybchenko 		new_txq_info =
440113a14a6SAndrew Rybchenko 			rte_realloc(sas->txq_info,
441689a5674SIgor Romanov 				    nb_txq_total * sizeof(sas->txq_info[0]), 0);
442689a5674SIgor Romanov 		if (new_txq_info == NULL && nb_txq_total > 0)
44309a46bf9SAndrew Rybchenko 			goto fail_txqs_realloc;
44409a46bf9SAndrew Rybchenko 
44529e4237dSAndrew Rybchenko 		new_txq_ctrl = realloc(sa->txq_ctrl,
446689a5674SIgor Romanov 				       nb_txq_total * sizeof(sa->txq_ctrl[0]));
447689a5674SIgor Romanov 		if (new_txq_ctrl == NULL && nb_txq_total > 0)
44829e4237dSAndrew Rybchenko 			goto fail_txqs_ctrl_realloc;
44929e4237dSAndrew Rybchenko 
450113a14a6SAndrew Rybchenko 		sas->txq_info = new_txq_info;
45129e4237dSAndrew Rybchenko 		sa->txq_ctrl = new_txq_ctrl;
452689a5674SIgor Romanov 		if (nb_txq_total > sas->txq_count) {
453689a5674SIgor Romanov 			memset(&sas->txq_info[sas->txq_count], 0,
454689a5674SIgor Romanov 			       (nb_txq_total - sas->txq_count) *
455113a14a6SAndrew Rybchenko 			       sizeof(sas->txq_info[0]));
456689a5674SIgor Romanov 			memset(&sa->txq_ctrl[sas->txq_count], 0,
457689a5674SIgor Romanov 			       (nb_txq_total - sas->txq_count) *
45829e4237dSAndrew Rybchenko 			       sizeof(sa->txq_ctrl[0]));
45929e4237dSAndrew Rybchenko 		}
460a8ad8cf8SIvan Malov 	}
461a8ad8cf8SIvan Malov 
462db980d26SIgor Romanov 	while (sas->ethdev_txq_count < nb_tx_queues) {
463db980d26SIgor Romanov 		sfc_sw_index_t sw_index;
464db980d26SIgor Romanov 
465db980d26SIgor Romanov 		sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas,
466db980d26SIgor Romanov 				sas->ethdev_txq_count);
467db980d26SIgor Romanov 		rc = sfc_tx_qinit_info(sa, sw_index);
46809a46bf9SAndrew Rybchenko 		if (rc != 0)
46909a46bf9SAndrew Rybchenko 			goto fail_tx_qinit_info;
47009a46bf9SAndrew Rybchenko 
471db980d26SIgor Romanov 		sas->ethdev_txq_count++;
47209a46bf9SAndrew Rybchenko 	}
47309a46bf9SAndrew Rybchenko 
474689a5674SIgor Romanov 	sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues;
475db980d26SIgor Romanov 
47658d6f89aSIgor Romanov 	if (!reconfigure) {
47758d6f89aSIgor Romanov 		rc = sfc_repr_proxy_txq_init(sa);
47858d6f89aSIgor Romanov 		if (rc != 0)
47958d6f89aSIgor Romanov 			goto fail_repr_proxy_txq_init;
48058d6f89aSIgor Romanov 	}
48158d6f89aSIgor Romanov 
48209a46bf9SAndrew Rybchenko done:
483a8ad8cf8SIvan Malov 	return 0;
484a8ad8cf8SIvan Malov 
48558d6f89aSIgor Romanov fail_repr_proxy_txq_init:
486a8ad8cf8SIvan Malov fail_tx_qinit_info:
48729e4237dSAndrew Rybchenko fail_txqs_ctrl_realloc:
48809a46bf9SAndrew Rybchenko fail_txqs_realloc:
48929e4237dSAndrew Rybchenko fail_txqs_ctrl_alloc:
490a8ad8cf8SIvan Malov fail_txqs_alloc:
49109a46bf9SAndrew Rybchenko 	sfc_tx_close(sa);
492a8ad8cf8SIvan Malov 
493dbf0f627SIvan Malov fail_check_mode:
494676d11ffSAndrew Rybchenko fail_tx_dma_desc_boundary:
495a8ad8cf8SIvan Malov 	sfc_log_init(sa, "failed (rc = %d)", rc);
496a8ad8cf8SIvan Malov 	return rc;
497a8ad8cf8SIvan Malov }
498a8ad8cf8SIvan Malov 
499a8ad8cf8SIvan Malov void
500df64eaddSAndrew Rybchenko sfc_tx_close(struct sfc_adapter *sa)
501a8ad8cf8SIvan Malov {
50209a46bf9SAndrew Rybchenko 	sfc_tx_fini_queues(sa, 0);
50358d6f89aSIgor Romanov 	sfc_repr_proxy_txq_fini(sa);
504b1b7ad93SIvan Malov 
50529e4237dSAndrew Rybchenko 	free(sa->txq_ctrl);
50629e4237dSAndrew Rybchenko 	sa->txq_ctrl = NULL;
50729e4237dSAndrew Rybchenko 
508113a14a6SAndrew Rybchenko 	rte_free(sfc_sa2shared(sa)->txq_info);
509113a14a6SAndrew Rybchenko 	sfc_sa2shared(sa)->txq_info = NULL;
510a8ad8cf8SIvan Malov }
511fed9aeb4SIvan Malov 
512fed9aeb4SIvan Malov int
513db980d26SIgor Romanov sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
514fed9aeb4SIvan Malov {
515113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
516db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
517c78d280eSIvan Malov 	uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
518c78d280eSIvan Malov 				      sfc_tx_get_queue_offload_caps(sa);
519fed9aeb4SIvan Malov 	struct sfc_txq_info *txq_info;
520fed9aeb4SIvan Malov 	struct sfc_txq *txq;
521fed9aeb4SIvan Malov 	struct sfc_evq *evq;
522c78d280eSIvan Malov 	uint16_t flags = 0;
523fed9aeb4SIvan Malov 	unsigned int desc_index;
524fed9aeb4SIvan Malov 	int rc = 0;
525fed9aeb4SIvan Malov 
526db980d26SIgor Romanov 	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
527db980d26SIgor Romanov 
528db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
529fed9aeb4SIvan Malov 
530113a14a6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sas->txq_count);
531113a14a6SAndrew Rybchenko 	txq_info = &sas->txq_info[sw_index];
532fed9aeb4SIvan Malov 
533561508daSAndrew Rybchenko 	SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);
534fed9aeb4SIvan Malov 
53529e4237dSAndrew Rybchenko 	txq = &sa->txq_ctrl[sw_index];
536fed9aeb4SIvan Malov 	evq = txq->evq;
537fed9aeb4SIvan Malov 
538db980d26SIgor Romanov 	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_txq_sw_index(sa, sw_index));
539fed9aeb4SIvan Malov 	if (rc != 0)
540fed9aeb4SIvan Malov 		goto fail_ev_qstart;
541fed9aeb4SIvan Malov 
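	/* Map the requested DPDK Tx offloads to EFX_TXQ_* creation flags */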
542295968d1SFerruh Yigit 	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
543c78d280eSIvan Malov 		flags |= EFX_TXQ_CKSUM_IPV4;
544c1767d93SAndrew Rybchenko 
545295968d1SFerruh Yigit 	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
546c1767d93SAndrew Rybchenko 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
547fed9aeb4SIvan Malov 
548295968d1SFerruh Yigit 	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
549295968d1SFerruh Yigit 	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
550c78d280eSIvan Malov 		flags |= EFX_TXQ_CKSUM_TCPUDP;
551c1767d93SAndrew Rybchenko 
552295968d1SFerruh Yigit 		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
553c78d280eSIvan Malov 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
554fec33d5bSIvan Malov 	}
555fec33d5bSIvan Malov 
556295968d1SFerruh Yigit 	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
557295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
558295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
559c78d280eSIvan Malov 		flags |= EFX_TXQ_FATSOV2;
560c78d280eSIvan Malov 
5617928b0fdSAndrew Rybchenko 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
562fed9aeb4SIvan Malov 			    txq_info->entries, 0 /* not used on EF10 */,
563fed9aeb4SIvan Malov 			    flags, evq->common,
564fed9aeb4SIvan Malov 			    &txq->common, &desc_index);
565fec33d5bSIvan Malov 	if (rc != 0) {
566fec33d5bSIvan Malov 		if (sa->tso && (rc == ENOSPC))
567fec33d5bSIvan Malov 			sfc_err(sa, "ran out of TSO contexts");
568fec33d5bSIvan Malov 
569fed9aeb4SIvan Malov 		goto fail_tx_qcreate;
570fec33d5bSIvan Malov 	}
571fed9aeb4SIvan Malov 
572fed9aeb4SIvan Malov 	efx_tx_qenable(txq->common);
573fed9aeb4SIvan Malov 
574561508daSAndrew Rybchenko 	txq_info->state |= SFC_TXQ_STARTED;
575dbdc8241SAndrew Rybchenko 
57642ce2521SAndrew Rybchenko 	rc = sa->priv.dp_tx->qstart(txq_info->dp, evq->read_ptr, desc_index);
577dbdc8241SAndrew Rybchenko 	if (rc != 0)
578dbdc8241SAndrew Rybchenko 		goto fail_dp_qstart;
579fed9aeb4SIvan Malov 
580db980d26SIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
581db980d26SIgor Romanov 		struct rte_eth_dev_data *dev_data;
582db980d26SIgor Romanov 
583fed9aeb4SIvan Malov 		/*
584db980d26SIgor Romanov 		 * It seems to be used by DPDK for debug purposes only
585db980d26SIgor Romanov 		 * ('rte_ether').
586fed9aeb4SIvan Malov 		 */
587fed9aeb4SIvan Malov 		dev_data = sa->eth_dev->data;
588db980d26SIgor Romanov 		dev_data->tx_queue_state[ethdev_qid] =
589db980d26SIgor Romanov 			RTE_ETH_QUEUE_STATE_STARTED;
590db980d26SIgor Romanov 	}
591fed9aeb4SIvan Malov 
592fed9aeb4SIvan Malov 	return 0;
593fed9aeb4SIvan Malov 
594dbdc8241SAndrew Rybchenko fail_dp_qstart:
595561508daSAndrew Rybchenko 	txq_info->state = SFC_TXQ_INITIALIZED;
596dbdc8241SAndrew Rybchenko 	efx_tx_qdestroy(txq->common);
597dbdc8241SAndrew Rybchenko 
598fed9aeb4SIvan Malov fail_tx_qcreate:
5996caeec47SAndrew Rybchenko 	sfc_ev_qstop(evq);
600fed9aeb4SIvan Malov 
601fed9aeb4SIvan Malov fail_ev_qstart:
602fed9aeb4SIvan Malov 	return rc;
603fed9aeb4SIvan Malov }
604fed9aeb4SIvan Malov 
605fed9aeb4SIvan Malov void
606db980d26SIgor Romanov sfc_tx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
607fed9aeb4SIvan Malov {
608113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
609db980d26SIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
610fed9aeb4SIvan Malov 	struct sfc_txq_info *txq_info;
611fed9aeb4SIvan Malov 	struct sfc_txq *txq;
612fed9aeb4SIvan Malov 	unsigned int retry_count;
613fed9aeb4SIvan Malov 	unsigned int wait_count;
61469fbd1e9SAndrew Rybchenko 	int rc;
615fed9aeb4SIvan Malov 
616db980d26SIgor Romanov 	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
617db980d26SIgor Romanov 
618db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
619fed9aeb4SIvan Malov 
620113a14a6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sas->txq_count);
621113a14a6SAndrew Rybchenko 	txq_info = &sas->txq_info[sw_index];
622fed9aeb4SIvan Malov 
62329e4237dSAndrew Rybchenko 	if (txq_info->state == SFC_TXQ_INITIALIZED)
624c6a1d9b5SIvan Malov 		return;
625c6a1d9b5SIvan Malov 
626561508daSAndrew Rybchenko 	SFC_ASSERT(txq_info->state & SFC_TXQ_STARTED);
627fed9aeb4SIvan Malov 
62829e4237dSAndrew Rybchenko 	txq = &sa->txq_ctrl[sw_index];
62942ce2521SAndrew Rybchenko 	sa->priv.dp_tx->qstop(txq_info->dp, &txq->evq->read_ptr);
630fed9aeb4SIvan Malov 
631fed9aeb4SIvan Malov 	/*
632fed9aeb4SIvan Malov 	 * Retry TX queue flushing in case of flush failed or
633fed9aeb4SIvan Malov 	 * timeout; in the worst case it can delay for 6 seconds
634fed9aeb4SIvan Malov 	 */
635fed9aeb4SIvan Malov 	for (retry_count = 0;
636561508daSAndrew Rybchenko 	     ((txq_info->state & SFC_TXQ_FLUSHED) == 0) &&
637fed9aeb4SIvan Malov 	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
638fed9aeb4SIvan Malov 	     ++retry_count) {
63969fbd1e9SAndrew Rybchenko 		rc = efx_tx_qflush(txq->common);
64069fbd1e9SAndrew Rybchenko 		if (rc != 0) {
641561508daSAndrew Rybchenko 			txq_info->state |= (rc == EALREADY) ?
64269fbd1e9SAndrew Rybchenko 				SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
643fed9aeb4SIvan Malov 			break;
644fed9aeb4SIvan Malov 		}
645fed9aeb4SIvan Malov 
646fed9aeb4SIvan Malov 		/*
647fed9aeb4SIvan Malov 		 * Wait for TX queue flush done or flush failed event at least
648fed9aeb4SIvan Malov 		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
649fed9aeb4SIvan Malov 		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
650fed9aeb4SIvan Malov 		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
651fed9aeb4SIvan Malov 		 */
652fed9aeb4SIvan Malov 		wait_count = 0;
653fed9aeb4SIvan Malov 		do {
654fed9aeb4SIvan Malov 			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
655fed9aeb4SIvan Malov 			sfc_ev_qpoll(txq->evq);
656561508daSAndrew Rybchenko 		} while ((txq_info->state & SFC_TXQ_FLUSHING) &&
657fed9aeb4SIvan Malov 			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
658fed9aeb4SIvan Malov 
659561508daSAndrew Rybchenko 		if (txq_info->state & SFC_TXQ_FLUSHING)
660db980d26SIgor Romanov 			sfc_err(sa, "TxQ %d (internal %u) flush timed out",
661db980d26SIgor Romanov 				ethdev_qid, sw_index);
662fed9aeb4SIvan Malov 
663561508daSAndrew Rybchenko 		if (txq_info->state & SFC_TXQ_FLUSHED)
664db980d26SIgor Romanov 			sfc_notice(sa, "TxQ %d (internal %u) flushed",
665db980d26SIgor Romanov 				   ethdev_qid, sw_index);
666fed9aeb4SIvan Malov 	}
667fed9aeb4SIvan Malov 
66842ce2521SAndrew Rybchenko 	sa->priv.dp_tx->qreap(txq_info->dp);
669fed9aeb4SIvan Malov 
670561508daSAndrew Rybchenko 	txq_info->state = SFC_TXQ_INITIALIZED;
671fed9aeb4SIvan Malov 
672fed9aeb4SIvan Malov 	efx_tx_qdestroy(txq->common);
673fed9aeb4SIvan Malov 
6746caeec47SAndrew Rybchenko 	sfc_ev_qstop(txq->evq);
675fed9aeb4SIvan Malov 
676db980d26SIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
677db980d26SIgor Romanov 		struct rte_eth_dev_data *dev_data;
678db980d26SIgor Romanov 
679fed9aeb4SIvan Malov 		/*
680db980d26SIgor Romanov 		 * It seems to be used by DPDK for debug purposes only
681db980d26SIgor Romanov 		 * ('rte_ether')
682fed9aeb4SIvan Malov 		 */
683fed9aeb4SIvan Malov 		dev_data = sa->eth_dev->data;
684db980d26SIgor Romanov 		dev_data->tx_queue_state[ethdev_qid] =
685db980d26SIgor Romanov 			RTE_ETH_QUEUE_STATE_STOPPED;
686db980d26SIgor Romanov 	}
687fed9aeb4SIvan Malov }
688fed9aeb4SIvan Malov 
689fed9aeb4SIvan Malov int
690fed9aeb4SIvan Malov sfc_tx_start(struct sfc_adapter *sa)
691fed9aeb4SIvan Malov {
692113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
693c1ce2ba2SIvan Malov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
694db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
695fed9aeb4SIvan Malov 	int rc = 0;
696fed9aeb4SIvan Malov 
697db980d26SIgor Romanov 	sfc_log_init(sa, "txq_count = %u (internal %u)",
698db980d26SIgor Romanov 		     sas->ethdev_txq_count, sas->txq_count);
699fed9aeb4SIvan Malov 
700fec33d5bSIvan Malov 	if (sa->tso) {
7014f936666SIvan Malov 		if (!encp->enc_fw_assisted_tso_v2_enabled &&
7024f936666SIvan Malov 		    !encp->enc_tso_v3_enabled) {
703fec33d5bSIvan Malov 			sfc_warn(sa, "TSO support was unable to be restored");
704fec33d5bSIvan Malov 			sa->tso = B_FALSE;
705c1ce2ba2SIvan Malov 			sa->tso_encap = B_FALSE;
706fec33d5bSIvan Malov 		}
707fec33d5bSIvan Malov 	}
708fec33d5bSIvan Malov 
70977cb0071SIvan Malov 	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled &&
71077cb0071SIvan Malov 	    !encp->enc_tso_v3_enabled) {
711c1ce2ba2SIvan Malov 		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
712c1ce2ba2SIvan Malov 		sa->tso_encap = B_FALSE;
713c1ce2ba2SIvan Malov 	}
714c1ce2ba2SIvan Malov 
715fed9aeb4SIvan Malov 	rc = efx_tx_init(sa->nic);
716fed9aeb4SIvan Malov 	if (rc != 0)
717fed9aeb4SIvan Malov 		goto fail_efx_tx_init;
718fed9aeb4SIvan Malov 
719113a14a6SAndrew Rybchenko 	for (sw_index = 0; sw_index < sas->txq_count; ++sw_index) {
720113a14a6SAndrew Rybchenko 		if (sas->txq_info[sw_index].state == SFC_TXQ_INITIALIZED &&
721113a14a6SAndrew Rybchenko 		    (!(sas->txq_info[sw_index].deferred_start) ||
722113a14a6SAndrew Rybchenko 		     sas->txq_info[sw_index].deferred_started)) {
723fed9aeb4SIvan Malov 			rc = sfc_tx_qstart(sa, sw_index);
724fed9aeb4SIvan Malov 			if (rc != 0)
725fed9aeb4SIvan Malov 				goto fail_tx_qstart;
726fed9aeb4SIvan Malov 		}
727c6a1d9b5SIvan Malov 	}
728fed9aeb4SIvan Malov 
729fed9aeb4SIvan Malov 	return 0;
730fed9aeb4SIvan Malov 
731fed9aeb4SIvan Malov fail_tx_qstart:
732fed9aeb4SIvan Malov 	while (sw_index-- > 0)
733fed9aeb4SIvan Malov 		sfc_tx_qstop(sa, sw_index);
734fed9aeb4SIvan Malov 
735fed9aeb4SIvan Malov 	efx_tx_fini(sa->nic);
736fed9aeb4SIvan Malov 
737fed9aeb4SIvan Malov fail_efx_tx_init:
738fed9aeb4SIvan Malov 	sfc_log_init(sa, "failed (rc = %d)", rc);
739fed9aeb4SIvan Malov 	return rc;
740fed9aeb4SIvan Malov }
741fed9aeb4SIvan Malov 
742fed9aeb4SIvan Malov void
743fed9aeb4SIvan Malov sfc_tx_stop(struct sfc_adapter *sa)
744fed9aeb4SIvan Malov {
745113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
746db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
747fed9aeb4SIvan Malov 
748db980d26SIgor Romanov 	sfc_log_init(sa, "txq_count = %u (internal %u)",
749db980d26SIgor Romanov 		     sas->ethdev_txq_count, sas->txq_count);
750fed9aeb4SIvan Malov 
751113a14a6SAndrew Rybchenko 	sw_index = sas->txq_count;
752fed9aeb4SIvan Malov 	while (sw_index-- > 0) {
753113a14a6SAndrew Rybchenko 		if (sas->txq_info[sw_index].state & SFC_TXQ_STARTED)
754fed9aeb4SIvan Malov 			sfc_tx_qstop(sa, sw_index);
755fed9aeb4SIvan Malov 	}
756fed9aeb4SIvan Malov 
757fed9aeb4SIvan Malov 	efx_tx_fini(sa->nic);
758fed9aeb4SIvan Malov }
759428c7dddSIvan Malov 
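/*
 * Poll the event queue and free mbufs attached to descriptors which
 * have been reported as completed since the previous reap
 */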
760dbdc8241SAndrew Rybchenko static void
761dbdc8241SAndrew Rybchenko sfc_efx_tx_reap(struct sfc_efx_txq *txq)
762dbdc8241SAndrew Rybchenko {
763dbdc8241SAndrew Rybchenko 	unsigned int completed;
764dbdc8241SAndrew Rybchenko 
765dbdc8241SAndrew Rybchenko 	sfc_ev_qpoll(txq->evq);
766dbdc8241SAndrew Rybchenko 
767dbdc8241SAndrew Rybchenko 	for (completed = txq->completed;
768dbdc8241SAndrew Rybchenko 	     completed != txq->pending; completed++) {
769dbdc8241SAndrew Rybchenko 		struct sfc_efx_tx_sw_desc *txd;
770dbdc8241SAndrew Rybchenko 
771dbdc8241SAndrew Rybchenko 		txd = &txq->sw_ring[completed & txq->ptr_mask];
772dbdc8241SAndrew Rybchenko 
773dbdc8241SAndrew Rybchenko 		if (txd->mbuf != NULL) {
774dbdc8241SAndrew Rybchenko 			rte_pktmbuf_free(txd->mbuf);
775dbdc8241SAndrew Rybchenko 			txd->mbuf = NULL;
776dbdc8241SAndrew Rybchenko 		}
777dbdc8241SAndrew Rybchenko 	}
778dbdc8241SAndrew Rybchenko 
779dbdc8241SAndrew Rybchenko 	txq->completed = completed;
780dbdc8241SAndrew Rybchenko }
781dbdc8241SAndrew Rybchenko 
7827fd63681SIvan Malov /*
7837fd63681SIvan Malov  * The function is used to insert or update a VLAN tag;
7847fd63681SIvan Malov  * the firmware keeps per-TxQ state of the VLAN tag to insert
7857fd63681SIvan Malov  * (controlled by option descriptors), hence, if the tag of the
7867fd63681SIvan Malov  * packet to be sent differs from the one remembered by the firmware,
7877fd63681SIvan Malov  * the function updates it
7887fd63681SIvan Malov  */
7897fd63681SIvan Malov static unsigned int
790dbdc8241SAndrew Rybchenko sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
7917fd63681SIvan Malov 			    efx_desc_t **pend)
7927fd63681SIvan Malov {
793daa02b5cSOlivier Matz 	uint16_t this_tag = ((m->ol_flags & RTE_MBUF_F_TX_VLAN) ?
7947fd63681SIvan Malov 			     m->vlan_tci : 0);
7957fd63681SIvan Malov 
7967fd63681SIvan Malov 	if (this_tag == txq->hw_vlan_tci)
7977fd63681SIvan Malov 		return 0;
7987fd63681SIvan Malov 
7997fd63681SIvan Malov 	/*
8007fd63681SIvan Malov 	 * The expression inside SFC_ASSERT() is not desired to be checked in
8017fd63681SIvan Malov 	 * a non-debug build because it might be too expensive on the data path
8027fd63681SIvan Malov 	 */
8037fd63681SIvan Malov 	SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
8047fd63681SIvan Malov 
8057fd63681SIvan Malov 	efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
8067fd63681SIvan Malov 				    *pend);
8077fd63681SIvan Malov 	(*pend)++;
8087fd63681SIvan Malov 	txq->hw_vlan_tci = this_tag;
8097fd63681SIvan Malov 
8107fd63681SIvan Malov 	return 1;
8117fd63681SIvan Malov }
8127fd63681SIvan Malov 
813dbdc8241SAndrew Rybchenko static uint16_t
814a3895ef3SIgor Romanov sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
81507685524SIgor Romanov 		     uint16_t nb_pkts)
81607685524SIgor Romanov {
817a3895ef3SIgor Romanov 	struct sfc_dp_txq *dp_txq = tx_queue;
818a3895ef3SIgor Romanov 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
819a3895ef3SIgor Romanov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
82007685524SIgor Romanov 	uint16_t i;
82107685524SIgor Romanov 
82207685524SIgor Romanov 	for (i = 0; i < nb_pkts; i++) {
82307685524SIgor Romanov 		int ret;
82407685524SIgor Romanov 
825f7a66f93SIgor Romanov 		/*
826f7a66f93SIgor Romanov 		 * The EFX Tx datapath may need an extra VLAN descriptor if the
827f7a66f93SIgor Romanov 		 * packet requests VLAN insertion, regardless of whether the
828f7a66f93SIgor Romanov 		 * offload was requested or is supported.
829f7a66f93SIgor Romanov 		 */
83038109b5bSIvan Malov 		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
831f7a66f93SIgor Romanov 				encp->enc_tx_tso_tcp_header_offset_limit,
832f7a66f93SIgor Romanov 				txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
833f7a66f93SIgor Romanov 				1);
83407685524SIgor Romanov 		if (unlikely(ret != 0)) {
83507685524SIgor Romanov 			rte_errno = ret;
83607685524SIgor Romanov 			break;
83707685524SIgor Romanov 		}
83807685524SIgor Romanov 	}
83907685524SIgor Romanov 
84007685524SIgor Romanov 	return i;
84107685524SIgor Romanov }
84207685524SIgor Romanov 
84307685524SIgor Romanov static uint16_t
844dbdc8241SAndrew Rybchenko sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
845428c7dddSIvan Malov {
846dbdc8241SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
847dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
848428c7dddSIvan Malov 	unsigned int added = txq->added;
849428c7dddSIvan Malov 	unsigned int pushed = added;
850428c7dddSIvan Malov 	unsigned int pkts_sent = 0;
851428c7dddSIvan Malov 	efx_desc_t *pend = &txq->pend_desc[0];
852eaab5d96SAndrew Rybchenko 	const unsigned int hard_max_fill = txq->max_fill_level;
85321f6411cSIvan Malov 	const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
854428c7dddSIvan Malov 	unsigned int fill_level = added - txq->completed;
855428c7dddSIvan Malov 	boolean_t reap_done;
856428c7dddSIvan Malov 	int rc __rte_unused;
857428c7dddSIvan Malov 	struct rte_mbuf **pktp;
858428c7dddSIvan Malov 
859dbdc8241SAndrew Rybchenko 	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
860428c7dddSIvan Malov 		goto done;
861428c7dddSIvan Malov 
862428c7dddSIvan Malov 	/*
863428c7dddSIvan Malov 	 * If insufficient space for a single packet is present,
864428c7dddSIvan Malov 	 * we should reap; otherwise, we shouldn't do that all the time
865428c7dddSIvan Malov 	 * to avoid latency increase
866428c7dddSIvan Malov 	 */
867428c7dddSIvan Malov 	reap_done = (fill_level > soft_max_fill);
868428c7dddSIvan Malov 
869428c7dddSIvan Malov 	if (reap_done) {
870dbdc8241SAndrew Rybchenko 		sfc_efx_tx_reap(txq);
871428c7dddSIvan Malov 		/*
872428c7dddSIvan Malov 		 * Recalculate fill level since 'txq->completed'
873428c7dddSIvan Malov 		 * might have changed on reap
874428c7dddSIvan Malov 		 */
875428c7dddSIvan Malov 		fill_level = added - txq->completed;
876428c7dddSIvan Malov 	}
877428c7dddSIvan Malov 
878428c7dddSIvan Malov 	for (pkts_sent = 0, pktp = &tx_pkts[0];
879428c7dddSIvan Malov 	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
880428c7dddSIvan Malov 	     pkts_sent++, pktp++) {
881649885c0SIvan Malov 		uint16_t		hw_vlan_tci_prev = txq->hw_vlan_tci;
882428c7dddSIvan Malov 		struct rte_mbuf		*m_seg = *pktp;
883428c7dddSIvan Malov 		size_t			pkt_len = m_seg->pkt_len;
884428c7dddSIvan Malov 		unsigned int		pkt_descs = 0;
885fec33d5bSIvan Malov 		size_t			in_off = 0;
886428c7dddSIvan Malov 
8877fd63681SIvan Malov 		/*
8887fd63681SIvan Malov 		 * Here VLAN TCI is expected to be zero in case if no
889295968d1SFerruh Yigit 		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
8907fd63681SIvan Malov 		 * if the calling app ignores the absence of
891295968d1SFerruh Yigit 		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
8927fd63681SIvan Malov 		 * TX_ERROR will occur
8937fd63681SIvan Malov 		 */
894dbdc8241SAndrew Rybchenko 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
8957fd63681SIvan Malov 
896daa02b5cSOlivier Matz 		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
897fec33d5bSIvan Malov 			/*
898fec33d5bSIvan Malov 			 * We expect the caller to have set correct
899fec33d5bSIvan Malov 			 * 'pkt->l[2, 3, 4]_len' values
900fec33d5bSIvan Malov 			 */
901dbdc8241SAndrew Rybchenko 			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
902fec33d5bSIvan Malov 					   &pkt_descs, &pkt_len) != 0) {
903a3895ef3SIgor Romanov 				/* We may have reached this place if packet
904a3895ef3SIgor Romanov 				 * header linearization is needed but the
905a3895ef3SIgor Romanov 				 * header length is greater than
906a3895ef3SIgor Romanov 				 * SFC_TSOH_STD_LEN
907fec33d5bSIvan Malov 				 *
908fec33d5bSIvan Malov 				 * We will deceive RTE saying that we have sent
909fec33d5bSIvan Malov 				 * the packet, but we will actually drop it.
910fec33d5bSIvan Malov 				 * Hence, we should revert 'pend' to the
911fec33d5bSIvan Malov 				 * previous state (in case we have added
912fec33d5bSIvan Malov 				 * VLAN descriptor) and start processing
913fec33d5bSIvan Malov 				 * another packet. But the original
914fec33d5bSIvan Malov 				 * mbuf shouldn't be orphaned
915fec33d5bSIvan Malov 				 */
916fec33d5bSIvan Malov 				pend -= pkt_descs;
917649885c0SIvan Malov 				txq->hw_vlan_tci = hw_vlan_tci_prev;
918fec33d5bSIvan Malov 
919fec33d5bSIvan Malov 				rte_pktmbuf_free(*pktp);
920fec33d5bSIvan Malov 
921fec33d5bSIvan Malov 				continue;
922fec33d5bSIvan Malov 			}
923fec33d5bSIvan Malov 
924fec33d5bSIvan Malov 			/*
925fec33d5bSIvan Malov 			 * We've only added 2 FATSOv2 option descriptors
926fec33d5bSIvan Malov 			 * and 1 descriptor for the linearized packet header.
927fec33d5bSIvan Malov 			 * The outstanding work will be done in the same manner
928fec33d5bSIvan Malov 			 * as for the usual non-TSO path
929fec33d5bSIvan Malov 			 */
930fec33d5bSIvan Malov 		}
931fec33d5bSIvan Malov 
932428c7dddSIvan Malov 		for (; m_seg != NULL; m_seg = m_seg->next) {
933428c7dddSIvan Malov 			efsys_dma_addr_t	next_frag;
934428c7dddSIvan Malov 			size_t			seg_len;
935428c7dddSIvan Malov 
936428c7dddSIvan Malov 			seg_len = m_seg->data_len;
937bfa9a8a4SThomas Monjalon 			next_frag = rte_mbuf_data_iova(m_seg);
938428c7dddSIvan Malov 
939fec33d5bSIvan Malov 			/*
940fec33d5bSIvan Malov 			 * If we've started a TSO transaction a few steps earlier,
941fec33d5bSIvan Malov 			 * we'll skip packet header using an offset in the
942fec33d5bSIvan Malov 			 * current segment (which has been set to the
943fec33d5bSIvan Malov 			 * first one containing payload)
944fec33d5bSIvan Malov 			 */
945fec33d5bSIvan Malov 			seg_len -= in_off;
946fec33d5bSIvan Malov 			next_frag += in_off;
947fec33d5bSIvan Malov 			in_off = 0;
948fec33d5bSIvan Malov 
949428c7dddSIvan Malov 			do {
950428c7dddSIvan Malov 				efsys_dma_addr_t	frag_addr = next_frag;
951428c7dddSIvan Malov 				size_t			frag_len;
952428c7dddSIvan Malov 
953676d11ffSAndrew Rybchenko 				/*
954676d11ffSAndrew Rybchenko 				 * It is assumed here that there is no
955676d11ffSAndrew Rybchenko 				 * limitation on address boundary
956676d11ffSAndrew Rybchenko 				 * crossing by DMA descriptor.
957676d11ffSAndrew Rybchenko 				 */
958676d11ffSAndrew Rybchenko 				frag_len = MIN(seg_len, txq->dma_desc_size_max);
959676d11ffSAndrew Rybchenko 				next_frag += frag_len;
960428c7dddSIvan Malov 				seg_len -= frag_len;
961428c7dddSIvan Malov 				pkt_len -= frag_len;
962428c7dddSIvan Malov 
963428c7dddSIvan Malov 				efx_tx_qdesc_dma_create(txq->common,
964428c7dddSIvan Malov 							frag_addr, frag_len,
965428c7dddSIvan Malov 							(pkt_len == 0),
966428c7dddSIvan Malov 							pend++);
967428c7dddSIvan Malov 
968428c7dddSIvan Malov 				pkt_descs++;
969428c7dddSIvan Malov 			} while (seg_len != 0);
970428c7dddSIvan Malov 		}
971428c7dddSIvan Malov 
972428c7dddSIvan Malov 		added += pkt_descs;
973428c7dddSIvan Malov 
974428c7dddSIvan Malov 		fill_level += pkt_descs;
975428c7dddSIvan Malov 		if (unlikely(fill_level > hard_max_fill)) {
976428c7dddSIvan Malov 			/*
977428c7dddSIvan Malov 			 * Our estimation for maximum number of descriptors
978428c7dddSIvan Malov 			 * required to send a packet seems to be wrong.
979428c7dddSIvan Malov 			 * Try to reap (if we haven't yet).
980428c7dddSIvan Malov 			 */
981428c7dddSIvan Malov 			if (!reap_done) {
982dbdc8241SAndrew Rybchenko 				sfc_efx_tx_reap(txq);
983428c7dddSIvan Malov 				reap_done = B_TRUE;
984428c7dddSIvan Malov 				fill_level = added - txq->completed;
985428c7dddSIvan Malov 				if (fill_level > hard_max_fill) {
986428c7dddSIvan Malov 					pend -= pkt_descs;
987649885c0SIvan Malov 					txq->hw_vlan_tci = hw_vlan_tci_prev;
988428c7dddSIvan Malov 					break;
989428c7dddSIvan Malov 				}
990428c7dddSIvan Malov 			} else {
991428c7dddSIvan Malov 				pend -= pkt_descs;
992649885c0SIvan Malov 				txq->hw_vlan_tci = hw_vlan_tci_prev;
993428c7dddSIvan Malov 				break;
994428c7dddSIvan Malov 			}
995428c7dddSIvan Malov 		}
996428c7dddSIvan Malov 
997428c7dddSIvan Malov 		/* Assign mbuf to the last used desc */
998428c7dddSIvan Malov 		txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
999428c7dddSIvan Malov 	}
1000428c7dddSIvan Malov 
1001428c7dddSIvan Malov 	if (likely(pkts_sent > 0)) {
1002428c7dddSIvan Malov 		rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
1003428c7dddSIvan Malov 				       pend - &txq->pend_desc[0],
1004428c7dddSIvan Malov 				       txq->completed, &txq->added);
1005428c7dddSIvan Malov 		SFC_ASSERT(rc == 0);
1006428c7dddSIvan Malov 
1007fdd7719eSIvan Ilchenko 		if (likely(pushed != txq->added)) {
1008428c7dddSIvan Malov 			efx_tx_qpush(txq->common, txq->added, pushed);
100950448dd3SAndrew Rybchenko 			txq->dp.dpq.dbells++;
1010fdd7719eSIvan Ilchenko 		}
1011428c7dddSIvan Malov 	}
1012428c7dddSIvan Malov 
1013428c7dddSIvan Malov #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
1014428c7dddSIvan Malov 	if (!reap_done)
1015dbdc8241SAndrew Rybchenko 		sfc_efx_tx_reap(txq);
1016428c7dddSIvan Malov #endif
1017428c7dddSIvan Malov 
1018428c7dddSIvan Malov done:
1019428c7dddSIvan Malov 	return pkts_sent;
1020428c7dddSIvan Malov }
1021dbdc8241SAndrew Rybchenko 
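/*
 * Find the datapath Tx operations for a datapath Tx queue using the
 * port identifier stored in the generic datapath queue structure
 */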
10223cf4b9c2SAndrew Rybchenko const struct sfc_dp_tx *
10233cf4b9c2SAndrew Rybchenko sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq)
10243cf4b9c2SAndrew Rybchenko {
10253cf4b9c2SAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
10263cf4b9c2SAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
10273cf4b9c2SAndrew Rybchenko 	struct sfc_adapter_priv *sap;
10283cf4b9c2SAndrew Rybchenko 
10293cf4b9c2SAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
10303cf4b9c2SAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
10313cf4b9c2SAndrew Rybchenko 
10323cf4b9c2SAndrew Rybchenko 	sap = sfc_adapter_priv_by_eth_dev(eth_dev);
10333cf4b9c2SAndrew Rybchenko 
10343cf4b9c2SAndrew Rybchenko 	return sap->dp_tx;
10353cf4b9c2SAndrew Rybchenko }
10363cf4b9c2SAndrew Rybchenko 
1037561508daSAndrew Rybchenko struct sfc_txq_info *
1038561508daSAndrew Rybchenko sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq)
1039dbdc8241SAndrew Rybchenko {
1040dbdc8241SAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
1041dbdc8241SAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
1042113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas;
1043dbdc8241SAndrew Rybchenko 
1044dbdc8241SAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
1045dbdc8241SAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
1046dbdc8241SAndrew Rybchenko 
1047113a14a6SAndrew Rybchenko 	sas = sfc_adapter_shared_by_eth_dev(eth_dev);
1048dbdc8241SAndrew Rybchenko 
1049113a14a6SAndrew Rybchenko 	SFC_ASSERT(dpq->queue_id < sas->txq_count);
1050113a14a6SAndrew Rybchenko 	return &sas->txq_info[dpq->queue_id];
1051561508daSAndrew Rybchenko }
1052dbdc8241SAndrew Rybchenko 
1053561508daSAndrew Rybchenko struct sfc_txq *
1054561508daSAndrew Rybchenko sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
1055561508daSAndrew Rybchenko {
105629e4237dSAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
105729e4237dSAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
105829e4237dSAndrew Rybchenko 	struct sfc_adapter *sa;
1059561508daSAndrew Rybchenko 
106029e4237dSAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
106129e4237dSAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
1062561508daSAndrew Rybchenko 
10635313b441SAndrew Rybchenko 	sa = sfc_adapter_by_eth_dev(eth_dev);
106429e4237dSAndrew Rybchenko 
1065113a14a6SAndrew Rybchenko 	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->txq_count);
106629e4237dSAndrew Rybchenko 	return &sa->txq_ctrl[dpq->queue_id];
1067dbdc8241SAndrew Rybchenko }
1068dbdc8241SAndrew Rybchenko 
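/*
 * For the libefx-based datapath, Tx ring and event queue sizes match the
 * requested descriptor count and the maximum fill level is limited by
 * EFX_TXQ_LIMIT() of the ring size
 */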
1069420efecbSAndrew Rybchenko static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
1070420efecbSAndrew Rybchenko static int
1071420efecbSAndrew Rybchenko sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
10729dbd28dfSIgor Romanov 			  __rte_unused struct sfc_dp_tx_hw_limits *limits,
1073420efecbSAndrew Rybchenko 			  unsigned int *txq_entries,
1074420efecbSAndrew Rybchenko 			  unsigned int *evq_entries,
1075420efecbSAndrew Rybchenko 			  unsigned int *txq_max_fill_level)
1076420efecbSAndrew Rybchenko {
1077420efecbSAndrew Rybchenko 	*txq_entries = nb_tx_desc;
1078420efecbSAndrew Rybchenko 	*evq_entries = nb_tx_desc;
1079420efecbSAndrew Rybchenko 	*txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries);
1080420efecbSAndrew Rybchenko 	return 0;
1081420efecbSAndrew Rybchenko }
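/*
 * Editor's note (worked example under the assumptions visible above; the
 * 'limits' argument is unused by this implementation): the libefx datapath
 * sizes the TxQ and EvQ rings to exactly the requested descriptor count and
 * caps the fill level with EFX_TXQ_LIMIT() so the ring is never driven
 * completely full, e.g.:
 *
 *	unsigned int txq_entries, evq_entries, max_fill;
 *
 *	(void)sfc_efx_tx_qsize_up_rings(1024, NULL, &txq_entries,
 *					&evq_entries, &max_fill);
 *	txq_entries == 1024, evq_entries == 1024,
 *	max_fill == EFX_TXQ_LIMIT(1024), i.e. slightly below the ring size
 */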
1082420efecbSAndrew Rybchenko 
1083dbdc8241SAndrew Rybchenko static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
1084dbdc8241SAndrew Rybchenko static int
1085dbdc8241SAndrew Rybchenko sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
1086dbdc8241SAndrew Rybchenko 		   const struct rte_pci_addr *pci_addr,
1087dbdc8241SAndrew Rybchenko 		   int socket_id,
1088dbdc8241SAndrew Rybchenko 		   const struct sfc_dp_tx_qcreate_info *info,
1089dbdc8241SAndrew Rybchenko 		   struct sfc_dp_txq **dp_txqp)
1090dbdc8241SAndrew Rybchenko {
1091dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq;
1092dbdc8241SAndrew Rybchenko 	struct sfc_txq *ctrl_txq;
1093dbdc8241SAndrew Rybchenko 	int rc;
1094dbdc8241SAndrew Rybchenko 
10953037e6cfSViacheslav Galaktionov 	rc = ENOTSUP;
10963037e6cfSViacheslav Galaktionov 	if (info->nic_dma_info->nb_regions > 0)
10973037e6cfSViacheslav Galaktionov 		goto fail_nic_dma;
10983037e6cfSViacheslav Galaktionov 
1099dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
1100dbdc8241SAndrew Rybchenko 	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
1101dbdc8241SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
1102dbdc8241SAndrew Rybchenko 	if (txq == NULL)
1103dbdc8241SAndrew Rybchenko 		goto fail_txq_alloc;
1104dbdc8241SAndrew Rybchenko 
1105dbdc8241SAndrew Rybchenko 	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
1106dbdc8241SAndrew Rybchenko 
1107dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
1108dbdc8241SAndrew Rybchenko 	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
1109dbdc8241SAndrew Rybchenko 					   EFX_TXQ_LIMIT(info->txq_entries),
1110dbdc8241SAndrew Rybchenko 					   sizeof(*txq->pend_desc), 0,
1111dbdc8241SAndrew Rybchenko 					   socket_id);
1112dbdc8241SAndrew Rybchenko 	if (txq->pend_desc == NULL)
1113dbdc8241SAndrew Rybchenko 		goto fail_pend_desc_alloc;
1114dbdc8241SAndrew Rybchenko 
1115dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
1116dbdc8241SAndrew Rybchenko 	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
1117dbdc8241SAndrew Rybchenko 					 info->txq_entries,
1118dbdc8241SAndrew Rybchenko 					 sizeof(*txq->sw_ring),
1119dbdc8241SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
1120dbdc8241SAndrew Rybchenko 	if (txq->sw_ring == NULL)
1121dbdc8241SAndrew Rybchenko 		goto fail_sw_ring_alloc;
1122dbdc8241SAndrew Rybchenko 
1123dbdc8241SAndrew Rybchenko 	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
1124dbdc8241SAndrew Rybchenko 	if (ctrl_txq->evq->sa->tso) {
1125dbdc8241SAndrew Rybchenko 		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
1126dbdc8241SAndrew Rybchenko 						 info->txq_entries, socket_id);
1127dbdc8241SAndrew Rybchenko 		if (rc != 0)
1128dbdc8241SAndrew Rybchenko 			goto fail_alloc_tsoh_objs;
1129dbdc8241SAndrew Rybchenko 	}
1130dbdc8241SAndrew Rybchenko 
1131dbdc8241SAndrew Rybchenko 	txq->evq = ctrl_txq->evq;
1132dbdc8241SAndrew Rybchenko 	txq->ptr_mask = info->txq_entries - 1;
1133eaab5d96SAndrew Rybchenko 	txq->max_fill_level = info->max_fill_level;
1134dbdc8241SAndrew Rybchenko 	txq->free_thresh = info->free_thresh;
1135dbdc8241SAndrew Rybchenko 	txq->dma_desc_size_max = info->dma_desc_size_max;
1136dbdc8241SAndrew Rybchenko 
1137dbdc8241SAndrew Rybchenko 	*dp_txqp = &txq->dp;
1138dbdc8241SAndrew Rybchenko 	return 0;
1139dbdc8241SAndrew Rybchenko 
1140dbdc8241SAndrew Rybchenko fail_alloc_tsoh_objs:
1141dbdc8241SAndrew Rybchenko 	rte_free(txq->sw_ring);
1142dbdc8241SAndrew Rybchenko 
1143dbdc8241SAndrew Rybchenko fail_sw_ring_alloc:
1144dbdc8241SAndrew Rybchenko 	rte_free(txq->pend_desc);
1145dbdc8241SAndrew Rybchenko 
1146dbdc8241SAndrew Rybchenko fail_pend_desc_alloc:
1147dbdc8241SAndrew Rybchenko 	rte_free(txq);
1148dbdc8241SAndrew Rybchenko 
1149dbdc8241SAndrew Rybchenko fail_txq_alloc:
11503037e6cfSViacheslav Galaktionov fail_nic_dma:
1151dbdc8241SAndrew Rybchenko 	return rc;
1152dbdc8241SAndrew Rybchenko }
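/*
 * Editor's note (hedged summary): sfc_efx_tx_qcreate() follows the driver's
 * usual error-handling pattern: 'rc' holds a positive errno value, every
 * step that can fail has a matching 'fail_*' label, and the labels are laid
 * out in reverse allocation order so a failure unwinds exactly what has
 * been set up so far. Callers are expected to check the result and keep the
 * positive errno convention, e.g. (illustrative; the caller label below is
 * hypothetical):
 *
 *	rc = dp_tx->qcreate(port_id, queue_id, pci_addr, socket_id,
 *			    info, &dp_txq);
 *	if (rc != 0)
 *		goto fail_dp_tx_qcreate;
 */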
1153dbdc8241SAndrew Rybchenko 
1154dbdc8241SAndrew Rybchenko static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
1155dbdc8241SAndrew Rybchenko static void
1156dbdc8241SAndrew Rybchenko sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
1157dbdc8241SAndrew Rybchenko {
1158dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
1159dbdc8241SAndrew Rybchenko 
1160dbdc8241SAndrew Rybchenko 	sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
1161dbdc8241SAndrew Rybchenko 	rte_free(txq->sw_ring);
1162dbdc8241SAndrew Rybchenko 	rte_free(txq->pend_desc);
1163dbdc8241SAndrew Rybchenko 	rte_free(txq);
1164dbdc8241SAndrew Rybchenko }
1165dbdc8241SAndrew Rybchenko 
1166dbdc8241SAndrew Rybchenko static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
1167dbdc8241SAndrew Rybchenko static int
1168dbdc8241SAndrew Rybchenko sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
1169dbdc8241SAndrew Rybchenko 		  __rte_unused unsigned int evq_read_ptr,
1170dbdc8241SAndrew Rybchenko 		  unsigned int txq_desc_index)
1171dbdc8241SAndrew Rybchenko {
1172dbdc8241SAndrew Rybchenko 	/* libefx-based datapath is specific to libefx-based PMD */
1173dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
1174dbdc8241SAndrew Rybchenko 	struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
1175dbdc8241SAndrew Rybchenko 
1176dbdc8241SAndrew Rybchenko 	txq->common = ctrl_txq->common;
1177dbdc8241SAndrew Rybchenko 
1178dbdc8241SAndrew Rybchenko 	txq->pending = txq->completed = txq->added = txq_desc_index;
1179dbdc8241SAndrew Rybchenko 	txq->hw_vlan_tci = 0;
1180dbdc8241SAndrew Rybchenko 
1181dbdc8241SAndrew Rybchenko 	txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
1182dbdc8241SAndrew Rybchenko 
1183dbdc8241SAndrew Rybchenko 	return 0;
1184dbdc8241SAndrew Rybchenko }
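/*
 * Editor's note (hedged summary): for the libefx datapath the start
 * callback only mirrors control-path state: it caches the common TxQ
 * handle, seeds 'added'/'pending'/'completed' with the initial hardware
 * descriptor index supplied by the control path, clears the cached VLAN
 * TCI and marks the queue STARTED and RUNNING so the burst and descriptor
 * status callbacks will accept it.
 */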
1185dbdc8241SAndrew Rybchenko 
1186dbdc8241SAndrew Rybchenko static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
1187dbdc8241SAndrew Rybchenko static void
1188dbdc8241SAndrew Rybchenko sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
1189dbdc8241SAndrew Rybchenko 		 __rte_unused unsigned int *evq_read_ptr)
1190dbdc8241SAndrew Rybchenko {
1191dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
1192dbdc8241SAndrew Rybchenko 
1193dbdc8241SAndrew Rybchenko 	txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
1194dbdc8241SAndrew Rybchenko }
1195dbdc8241SAndrew Rybchenko 
1196dbdc8241SAndrew Rybchenko static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
1197dbdc8241SAndrew Rybchenko static void
1198dbdc8241SAndrew Rybchenko sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
1199dbdc8241SAndrew Rybchenko {
1200dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
1201dbdc8241SAndrew Rybchenko 	unsigned int txds;
1202dbdc8241SAndrew Rybchenko 
1203dbdc8241SAndrew Rybchenko 	sfc_efx_tx_reap(txq);
1204dbdc8241SAndrew Rybchenko 
1205dbdc8241SAndrew Rybchenko 	for (txds = 0; txds <= txq->ptr_mask; txds++) {
1206dbdc8241SAndrew Rybchenko 		if (txq->sw_ring[txds].mbuf != NULL) {
1207dbdc8241SAndrew Rybchenko 			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
1208dbdc8241SAndrew Rybchenko 			txq->sw_ring[txds].mbuf = NULL;
1209dbdc8241SAndrew Rybchenko 		}
1210dbdc8241SAndrew Rybchenko 	}
1211dbdc8241SAndrew Rybchenko 
1212dbdc8241SAndrew Rybchenko 	txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
1213dbdc8241SAndrew Rybchenko }
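/*
 * Editor's note (hedged summary): the reap callback first lets
 * sfc_efx_tx_reap() release descriptors the hardware has already
 * completed and then walks the entire software ring, freeing any mbufs
 * that are still attached, so a queue torn down through qreap cannot
 * leak packet buffers regardless of how much of the ring was in flight.
 */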
1214dbdc8241SAndrew Rybchenko 
12157df6f854SIvan Malov static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
12167df6f854SIvan Malov static int
12177df6f854SIvan Malov sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
12187df6f854SIvan Malov {
12197df6f854SIvan Malov 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
12207df6f854SIvan Malov 
12217df6f854SIvan Malov 	if (unlikely(offset > txq->ptr_mask))
12227df6f854SIvan Malov 		return -EINVAL;
12237df6f854SIvan Malov 
1224eaab5d96SAndrew Rybchenko 	if (unlikely(offset >= txq->max_fill_level))
12257df6f854SIvan Malov 		return RTE_ETH_TX_DESC_UNAVAIL;
12267df6f854SIvan Malov 
12277df6f854SIvan Malov 	/*
12287df6f854SIvan Malov 	 * Poll the EvQ to derive an up-to-date 'txq->pending' figure;
12297df6f854SIvan Malov 	 * the queue is required to be running, but the check is
12307df6f854SIvan Malov 	 * omitted here because the API design makes it the caller's
12317df6f854SIvan Malov 	 * duty to satisfy all preconditions
12327df6f854SIvan Malov 	 */
12337df6f854SIvan Malov 	SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
12347df6f854SIvan Malov 		   SFC_EFX_TXQ_FLAG_RUNNING);
12357df6f854SIvan Malov 	sfc_ev_qpoll(txq->evq);
12367df6f854SIvan Malov 
12377df6f854SIvan Malov 	/*
12387df6f854SIvan Malov 	 * Ring tail is 'txq->pending', and although descriptors
12397df6f854SIvan Malov 	 * between 'txq->completed' and 'txq->pending' are still
12407df6f854SIvan Malov 	 * in use by the driver, they should be reported as DONE
12417df6f854SIvan Malov 	 */
12427df6f854SIvan Malov 	if (unlikely(offset < (txq->added - txq->pending)))
12437df6f854SIvan Malov 		return RTE_ETH_TX_DESC_FULL;
12447df6f854SIvan Malov 
12457df6f854SIvan Malov 	/*
12467df6f854SIvan Malov 	 * There is no separate return value for unused descriptors;
12477df6f854SIvan Malov 	 * the latter will be reported as DONE because genuine DONE
12487df6f854SIvan Malov 	 * descriptors will be freed anyway in SW on the next burst
12497df6f854SIvan Malov 	 */
12507df6f854SIvan Malov 	return RTE_ETH_TX_DESC_DONE;
12517df6f854SIvan Malov }
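/*
 * Editor's note (illustrative usage; 'port_id' and 'queue_id' are
 * placeholders for a configured and started port): applications reach
 * this callback through the generic ethdev API:
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, 0);
 *
 *	st == RTE_ETH_TX_DESC_FULL	descriptor still held by driver/HW
 *	st == RTE_ETH_TX_DESC_DONE	completed (or never used)
 *	st == RTE_ETH_TX_DESC_UNAVAIL	offset beyond the max fill level
 */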
12527df6f854SIvan Malov 
1253dbdc8241SAndrew Rybchenko struct sfc_dp_tx sfc_efx_tx = {
1254dbdc8241SAndrew Rybchenko 	.dp = {
1255dbdc8241SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EFX,
1256dbdc8241SAndrew Rybchenko 		.type		= SFC_DP_TX,
1257849c2d91SAndrew Rybchenko 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
1258dbdc8241SAndrew Rybchenko 	},
12599aa0afd1SAndrew Rybchenko 	.features		= 0,
1260295968d1SFerruh Yigit 	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1261295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
1262295968d1SFerruh Yigit 	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1263295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1264295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1265295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1266295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
1267420efecbSAndrew Rybchenko 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
1268dbdc8241SAndrew Rybchenko 	.qcreate		= sfc_efx_tx_qcreate,
1269dbdc8241SAndrew Rybchenko 	.qdestroy		= sfc_efx_tx_qdestroy,
1270dbdc8241SAndrew Rybchenko 	.qstart			= sfc_efx_tx_qstart,
1271dbdc8241SAndrew Rybchenko 	.qstop			= sfc_efx_tx_qstop,
1272dbdc8241SAndrew Rybchenko 	.qreap			= sfc_efx_tx_qreap,
12737df6f854SIvan Malov 	.qdesc_status		= sfc_efx_tx_qdesc_status,
127407685524SIgor Romanov 	.pkt_prepare		= sfc_efx_prepare_pkts,
1275dbdc8241SAndrew Rybchenko 	.pkt_burst		= sfc_efx_xmit_pkts,
1276dbdc8241SAndrew Rybchenko };
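/*
 * Editor's note (hedged): sfc_efx_tx registers the libefx-based TX datapath
 * under the SFC_KVARG_DATAPATH_EFX name; per-device offloads (VLAN insert,
 * multi-segment) are advertised separately from per-queue offloads
 * (checksums, TSO). Assuming the driver's usual 'tx_datapath' devarg
 * applies, this implementation can be selected explicitly, e.g.:
 *
 *	dpdk-testpmd -a 0000:01:00.0,tx_datapath=efx -- -i
 */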
1277