xref: /dpdk/drivers/net/sfc/sfc_tx.c (revision dbdc82416b723b1f089bdcea99d5441016aa724d)
1a8ad8cf8SIvan Malov /*-
2244cfa79SAndrew Rybchenko  *   BSD LICENSE
3244cfa79SAndrew Rybchenko  *
4244cfa79SAndrew Rybchenko  * Copyright (c) 2016-2017 Solarflare Communications Inc.
5a8ad8cf8SIvan Malov  * All rights reserved.
6a8ad8cf8SIvan Malov  *
7a8ad8cf8SIvan Malov  * This software was jointly developed between OKTET Labs (under contract
8a8ad8cf8SIvan Malov  * for Solarflare) and Solarflare Communications, Inc.
9a8ad8cf8SIvan Malov  *
10a8ad8cf8SIvan Malov  * Redistribution and use in source and binary forms, with or without
11a8ad8cf8SIvan Malov  * modification, are permitted provided that the following conditions are met:
12a8ad8cf8SIvan Malov  *
13a8ad8cf8SIvan Malov  * 1. Redistributions of source code must retain the above copyright notice,
14a8ad8cf8SIvan Malov  *    this list of conditions and the following disclaimer.
15a8ad8cf8SIvan Malov  * 2. Redistributions in binary form must reproduce the above copyright notice,
16a8ad8cf8SIvan Malov  *    this list of conditions and the following disclaimer in the documentation
17a8ad8cf8SIvan Malov  *    and/or other materials provided with the distribution.
18a8ad8cf8SIvan Malov  *
19a8ad8cf8SIvan Malov  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20a8ad8cf8SIvan Malov  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21a8ad8cf8SIvan Malov  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22a8ad8cf8SIvan Malov  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23a8ad8cf8SIvan Malov  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24a8ad8cf8SIvan Malov  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25a8ad8cf8SIvan Malov  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26a8ad8cf8SIvan Malov  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27a8ad8cf8SIvan Malov  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28a8ad8cf8SIvan Malov  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29a8ad8cf8SIvan Malov  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30a8ad8cf8SIvan Malov  */
31a8ad8cf8SIvan Malov 
32a8ad8cf8SIvan Malov #include "sfc.h"
33fed9aeb4SIvan Malov #include "sfc_debug.h"
34a8ad8cf8SIvan Malov #include "sfc_log.h"
35a8ad8cf8SIvan Malov #include "sfc_ev.h"
36a8ad8cf8SIvan Malov #include "sfc_tx.h"
37428c7dddSIvan Malov #include "sfc_tweak.h"
38*dbdc8241SAndrew Rybchenko #include "sfc_kvargs.h"
39a8ad8cf8SIvan Malov 
40fed9aeb4SIvan Malov /*
41fed9aeb4SIvan Malov  * Maximum number of TX queue flush attempts in case of
42fed9aeb4SIvan Malov  * failure or flush timeout
43fed9aeb4SIvan Malov  */
44fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_ATTEMPTS		(3)
45fed9aeb4SIvan Malov 
46fed9aeb4SIvan Malov /*
47fed9aeb4SIvan Malov  * Time to wait between event queue polling attempts when waiting for TX
48fed9aeb4SIvan Malov  * queue flush done or flush failed events
49fed9aeb4SIvan Malov  */
50fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)
51fed9aeb4SIvan Malov 
52fed9aeb4SIvan Malov /*
53fed9aeb4SIvan Malov  * Maximum number of event queue polling attempts when waiting for TX queue
54fed9aeb4SIvan Malov  * flush done or flush failed events; it defines TX queue flush attempt timeout
55fed9aeb4SIvan Malov  * together with SFC_TX_QFLUSH_POLL_WAIT_MS
56fed9aeb4SIvan Malov  */
57fed9aeb4SIvan Malov #define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
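/*
 * Worked example with the values above: one flush attempt polls the event
 * queue up to SFC_TX_QFLUSH_POLL_ATTEMPTS (2000) times, sleeping
 * SFC_TX_QFLUSH_POLL_WAIT_MS (1 ms) between polls, i.e. up to ~2 seconds
 * per attempt; with SFC_TX_QFLUSH_ATTEMPTS (3) retries the worst-case
 * delay in sfc_tx_qstop() is therefore about 6 seconds.
 */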
58fed9aeb4SIvan Malov 
59a8ad8cf8SIvan Malov static int
6021f6411cSIvan Malov sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
61b1b7ad93SIvan Malov 		   const struct rte_eth_txconf *tx_conf)
62b1b7ad93SIvan Malov {
63b1b7ad93SIvan Malov 	unsigned int flags = tx_conf->txq_flags;
647fd63681SIvan Malov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
65b1b7ad93SIvan Malov 	int rc = 0;
66b1b7ad93SIvan Malov 
67b1b7ad93SIvan Malov 	if (tx_conf->tx_rs_thresh != 0) {
68b1b7ad93SIvan Malov 		sfc_err(sa, "RS bit in transmit descriptor is not supported");
69b1b7ad93SIvan Malov 		rc = EINVAL;
70b1b7ad93SIvan Malov 	}
71b1b7ad93SIvan Malov 
7221f6411cSIvan Malov 	if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
73b1b7ad93SIvan Malov 		sfc_err(sa,
7421f6411cSIvan Malov 			"TxQ free threshold too large: %u vs maximum %u",
7521f6411cSIvan Malov 			tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
76b1b7ad93SIvan Malov 		rc = EINVAL;
77b1b7ad93SIvan Malov 	}
78b1b7ad93SIvan Malov 
79b1b7ad93SIvan Malov 	if (tx_conf->tx_thresh.pthresh != 0 ||
80b1b7ad93SIvan Malov 	    tx_conf->tx_thresh.hthresh != 0 ||
81b1b7ad93SIvan Malov 	    tx_conf->tx_thresh.wthresh != 0) {
82b1b7ad93SIvan Malov 		sfc_err(sa,
83b1b7ad93SIvan Malov 			"prefetch/host/writeback thresholds are not supported");
84b1b7ad93SIvan Malov 		rc = EINVAL;
85b1b7ad93SIvan Malov 	}
86b1b7ad93SIvan Malov 
877fd63681SIvan Malov 	if (!encp->enc_hw_tx_insert_vlan_enabled &&
887fd63681SIvan Malov 	    (flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
89b1b7ad93SIvan Malov 		sfc_err(sa, "VLAN offload is not supported");
90b1b7ad93SIvan Malov 		rc = EINVAL;
91b1b7ad93SIvan Malov 	}
92b1b7ad93SIvan Malov 
93b1b7ad93SIvan Malov 	if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
94b1b7ad93SIvan Malov 		sfc_err(sa, "SCTP offload is not supported");
95b1b7ad93SIvan Malov 		rc = EINVAL;
96b1b7ad93SIvan Malov 	}
97b1b7ad93SIvan Malov 
98b1b7ad93SIvan Malov 	/* We either perform both TCP and UDP offload, or no offload at all */
99b1b7ad93SIvan Malov 	if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
100b1b7ad93SIvan Malov 	    ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
101b1b7ad93SIvan Malov 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
102b1b7ad93SIvan Malov 		rc = EINVAL;
103b1b7ad93SIvan Malov 	}
104b1b7ad93SIvan Malov 
105b1b7ad93SIvan Malov 	return rc;
106b1b7ad93SIvan Malov }
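/*
 * Illustrative example (not taken from a particular application): a
 * txq_flags value that passes the checks above on an adapter without
 * hardware VLAN insertion would disable VLAN and SCTP offloads and keep
 * the TCP/UDP checksum offload flags consistent, e.g.
 *	txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP |
 *		    ETH_TXQ_FLAGS_NOXSUMTCP | ETH_TXQ_FLAGS_NOXSUMUDP;
 * leaving both NOXSUMTCP and NOXSUMUDP cleared is equally valid; only a
 * mismatch between the two is rejected.
 */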
107b1b7ad93SIvan Malov 
108fed9aeb4SIvan Malov void
109fed9aeb4SIvan Malov sfc_tx_qflush_done(struct sfc_txq *txq)
110fed9aeb4SIvan Malov {
111fed9aeb4SIvan Malov 	txq->state |= SFC_TXQ_FLUSHED;
112fed9aeb4SIvan Malov 	txq->state &= ~SFC_TXQ_FLUSHING;
113fed9aeb4SIvan Malov }
114fed9aeb4SIvan Malov 
115b1b7ad93SIvan Malov int
116b1b7ad93SIvan Malov sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
117b1b7ad93SIvan Malov 	     uint16_t nb_tx_desc, unsigned int socket_id,
118b1b7ad93SIvan Malov 	     const struct rte_eth_txconf *tx_conf)
119b1b7ad93SIvan Malov {
120676d11ffSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
121b1b7ad93SIvan Malov 	struct sfc_txq_info *txq_info;
122b1b7ad93SIvan Malov 	struct sfc_evq *evq;
123b1b7ad93SIvan Malov 	struct sfc_txq *txq;
124b1b7ad93SIvan Malov 	unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
125b1b7ad93SIvan Malov 	int rc = 0;
126*dbdc8241SAndrew Rybchenko 	struct sfc_dp_tx_qcreate_info info;
127b1b7ad93SIvan Malov 
128b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
129b1b7ad93SIvan Malov 
13021f6411cSIvan Malov 	rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
131b1b7ad93SIvan Malov 	if (rc != 0)
132b1b7ad93SIvan Malov 		goto fail_bad_conf;
133b1b7ad93SIvan Malov 
134b1b7ad93SIvan Malov 	SFC_ASSERT(sw_index < sa->txq_count);
135b1b7ad93SIvan Malov 	txq_info = &sa->txq_info[sw_index];
136b1b7ad93SIvan Malov 
137b1b7ad93SIvan Malov 	SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
138b1b7ad93SIvan Malov 	txq_info->entries = nb_tx_desc;
139b1b7ad93SIvan Malov 
140b1b7ad93SIvan Malov 	rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);
141b1b7ad93SIvan Malov 	if (rc != 0)
142b1b7ad93SIvan Malov 		goto fail_ev_qinit;
143b1b7ad93SIvan Malov 
144b1b7ad93SIvan Malov 	evq = sa->evq_info[evq_index].evq;
145b1b7ad93SIvan Malov 
146b1b7ad93SIvan Malov 	rc = ENOMEM;
147b1b7ad93SIvan Malov 	txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
148b1b7ad93SIvan Malov 	if (txq == NULL)
149b1b7ad93SIvan Malov 		goto fail_txq_alloc;
150b1b7ad93SIvan Malov 
151*dbdc8241SAndrew Rybchenko 	txq_info->txq = txq;
152*dbdc8241SAndrew Rybchenko 
153*dbdc8241SAndrew Rybchenko 	txq->hw_index = sw_index;
154*dbdc8241SAndrew Rybchenko 	txq->evq = evq;
155*dbdc8241SAndrew Rybchenko 	txq->free_thresh =
156*dbdc8241SAndrew Rybchenko 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
157*dbdc8241SAndrew Rybchenko 		SFC_TX_DEFAULT_FREE_THRESH;
158*dbdc8241SAndrew Rybchenko 	txq->flags = tx_conf->txq_flags;
159*dbdc8241SAndrew Rybchenko 
160b1b7ad93SIvan Malov 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
161b1b7ad93SIvan Malov 			   socket_id, &txq->mem);
162b1b7ad93SIvan Malov 	if (rc != 0)
163b1b7ad93SIvan Malov 		goto fail_dma_alloc;
164b1b7ad93SIvan Malov 
165*dbdc8241SAndrew Rybchenko 	memset(&info, 0, sizeof(info));
166*dbdc8241SAndrew Rybchenko 	info.free_thresh = txq->free_thresh;
167*dbdc8241SAndrew Rybchenko 	info.flags = tx_conf->txq_flags;
168*dbdc8241SAndrew Rybchenko 	info.txq_entries = txq_info->entries;
169*dbdc8241SAndrew Rybchenko 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
170b1b7ad93SIvan Malov 
171*dbdc8241SAndrew Rybchenko 	rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
172*dbdc8241SAndrew Rybchenko 				&SFC_DEV_TO_PCI(sa->eth_dev)->addr,
173*dbdc8241SAndrew Rybchenko 				socket_id, &info, &txq->dp);
174fec33d5bSIvan Malov 	if (rc != 0)
175*dbdc8241SAndrew Rybchenko 		goto fail_dp_tx_qinit;
176*dbdc8241SAndrew Rybchenko 
177*dbdc8241SAndrew Rybchenko 	evq->dp_txq = txq->dp;
178fec33d5bSIvan Malov 
179b1b7ad93SIvan Malov 	txq->state = SFC_TXQ_INITIALIZED;
180b1b7ad93SIvan Malov 
181c6a1d9b5SIvan Malov 	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
182b1b7ad93SIvan Malov 
183b1b7ad93SIvan Malov 	return 0;
184b1b7ad93SIvan Malov 
185*dbdc8241SAndrew Rybchenko fail_dp_tx_qinit:
186b1b7ad93SIvan Malov 	sfc_dma_free(sa, &txq->mem);
187b1b7ad93SIvan Malov 
188b1b7ad93SIvan Malov fail_dma_alloc:
189*dbdc8241SAndrew Rybchenko 	txq_info->txq = NULL;
190b1b7ad93SIvan Malov 	rte_free(txq);
191b1b7ad93SIvan Malov 
192b1b7ad93SIvan Malov fail_txq_alloc:
193b1b7ad93SIvan Malov 	sfc_ev_qfini(sa, evq_index);
194b1b7ad93SIvan Malov 
195b1b7ad93SIvan Malov fail_ev_qinit:
196b1b7ad93SIvan Malov 	txq_info->entries = 0;
197b1b7ad93SIvan Malov 
198b1b7ad93SIvan Malov fail_bad_conf:
199b1b7ad93SIvan Malov 	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
200b1b7ad93SIvan Malov 	return rc;
201b1b7ad93SIvan Malov }
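/*
 * Note on the split above: the generic (control path) queue init allocates
 * the event queue and the descriptor ring DMA memory, while ring
 * bookkeeping is delegated to the chosen datapath via sa->dp_tx->qcreate();
 * the failure labels unwind these steps in reverse order of allocation.
 */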
202b1b7ad93SIvan Malov 
203b1b7ad93SIvan Malov void
204b1b7ad93SIvan Malov sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
205b1b7ad93SIvan Malov {
206b1b7ad93SIvan Malov 	struct sfc_txq_info *txq_info;
207b1b7ad93SIvan Malov 	struct sfc_txq *txq;
208b1b7ad93SIvan Malov 
209b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
210b1b7ad93SIvan Malov 
211b1b7ad93SIvan Malov 	SFC_ASSERT(sw_index < sa->txq_count);
212b1b7ad93SIvan Malov 	txq_info = &sa->txq_info[sw_index];
213b1b7ad93SIvan Malov 
214b1b7ad93SIvan Malov 	txq = txq_info->txq;
215b1b7ad93SIvan Malov 	SFC_ASSERT(txq != NULL);
216b1b7ad93SIvan Malov 	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
217b1b7ad93SIvan Malov 
218*dbdc8241SAndrew Rybchenko 	sa->dp_tx->qdestroy(txq->dp);
219*dbdc8241SAndrew Rybchenko 	txq->dp = NULL;
220fec33d5bSIvan Malov 
221b1b7ad93SIvan Malov 	txq_info->txq = NULL;
222b1b7ad93SIvan Malov 	txq_info->entries = 0;
223b1b7ad93SIvan Malov 
224b1b7ad93SIvan Malov 	sfc_dma_free(sa, &txq->mem);
225b1b7ad93SIvan Malov 	rte_free(txq);
226b1b7ad93SIvan Malov }
227b1b7ad93SIvan Malov 
228b1b7ad93SIvan Malov static int
229a8ad8cf8SIvan Malov sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
230a8ad8cf8SIvan Malov {
231a8ad8cf8SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
232a8ad8cf8SIvan Malov 
233a8ad8cf8SIvan Malov 	return 0;
234a8ad8cf8SIvan Malov }
235a8ad8cf8SIvan Malov 
236dbf0f627SIvan Malov static int
237dbf0f627SIvan Malov sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
238dbf0f627SIvan Malov {
239dbf0f627SIvan Malov 	int rc = 0;
240dbf0f627SIvan Malov 
241dbf0f627SIvan Malov 	switch (txmode->mq_mode) {
242dbf0f627SIvan Malov 	case ETH_MQ_TX_NONE:
243dbf0f627SIvan Malov 		break;
244dbf0f627SIvan Malov 	default:
245dbf0f627SIvan Malov 		sfc_err(sa, "Tx multi-queue mode %u not supported",
246dbf0f627SIvan Malov 			txmode->mq_mode);
247dbf0f627SIvan Malov 		rc = EINVAL;
248dbf0f627SIvan Malov 	}
249dbf0f627SIvan Malov 
250dbf0f627SIvan Malov 	/*
251dbf0f627SIvan Malov 	 * These features are claimed to be i40e-specific,
252dbf0f627SIvan Malov 	 * but it does make sense to double-check their absence
253dbf0f627SIvan Malov 	 */
254dbf0f627SIvan Malov 	if (txmode->hw_vlan_reject_tagged) {
255dbf0f627SIvan Malov 		sfc_err(sa, "Rejecting tagged packets not supported");
256dbf0f627SIvan Malov 		rc = EINVAL;
257dbf0f627SIvan Malov 	}
258dbf0f627SIvan Malov 
259dbf0f627SIvan Malov 	if (txmode->hw_vlan_reject_untagged) {
260dbf0f627SIvan Malov 		sfc_err(sa, "Rejecting untagged packets not supported");
261dbf0f627SIvan Malov 		rc = EINVAL;
262dbf0f627SIvan Malov 	}
263dbf0f627SIvan Malov 
264dbf0f627SIvan Malov 	if (txmode->hw_vlan_insert_pvid) {
265dbf0f627SIvan Malov 		sfc_err(sa, "Port-based VLAN insertion not supported");
266dbf0f627SIvan Malov 		rc = EINVAL;
267dbf0f627SIvan Malov 	}
268dbf0f627SIvan Malov 
269dbf0f627SIvan Malov 	return rc;
270dbf0f627SIvan Malov }
271dbf0f627SIvan Malov 
272a8ad8cf8SIvan Malov int
273a8ad8cf8SIvan Malov sfc_tx_init(struct sfc_adapter *sa)
274a8ad8cf8SIvan Malov {
275676d11ffSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
276dbf0f627SIvan Malov 	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
277a8ad8cf8SIvan Malov 	unsigned int sw_index;
278a8ad8cf8SIvan Malov 	int rc = 0;
279a8ad8cf8SIvan Malov 
280676d11ffSAndrew Rybchenko 	/*
281676d11ffSAndrew Rybchenko 	 * The datapath implementation assumes absence of boundary
282676d11ffSAndrew Rybchenko 	 * limits on Tx DMA descriptors. Adding such checks to the
283676d11ffSAndrew Rybchenko 	 * datapath would simply make it slower.
284676d11ffSAndrew Rybchenko 	 */
285676d11ffSAndrew Rybchenko 	if (encp->enc_tx_dma_desc_boundary != 0) {
286676d11ffSAndrew Rybchenko 		rc = ENOTSUP;
287676d11ffSAndrew Rybchenko 		goto fail_tx_dma_desc_boundary;
288676d11ffSAndrew Rybchenko 	}
289676d11ffSAndrew Rybchenko 
290dbf0f627SIvan Malov 	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
291dbf0f627SIvan Malov 	if (rc != 0)
292dbf0f627SIvan Malov 		goto fail_check_mode;
293dbf0f627SIvan Malov 
294a8ad8cf8SIvan Malov 	sa->txq_count = sa->eth_dev->data->nb_tx_queues;
295a8ad8cf8SIvan Malov 
296a8ad8cf8SIvan Malov 	sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
297a8ad8cf8SIvan Malov 					 sizeof(sa->txq_info[0]), 0,
298a8ad8cf8SIvan Malov 					 sa->socket_id);
299a8ad8cf8SIvan Malov 	if (sa->txq_info == NULL)
300a8ad8cf8SIvan Malov 		goto fail_txqs_alloc;
301a8ad8cf8SIvan Malov 
302a8ad8cf8SIvan Malov 	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
303a8ad8cf8SIvan Malov 		rc = sfc_tx_qinit_info(sa, sw_index);
304a8ad8cf8SIvan Malov 		if (rc != 0)
305a8ad8cf8SIvan Malov 			goto fail_tx_qinit_info;
306a8ad8cf8SIvan Malov 	}
307a8ad8cf8SIvan Malov 
308a8ad8cf8SIvan Malov 	return 0;
309a8ad8cf8SIvan Malov 
310a8ad8cf8SIvan Malov fail_tx_qinit_info:
311a8ad8cf8SIvan Malov 	rte_free(sa->txq_info);
312a8ad8cf8SIvan Malov 	sa->txq_info = NULL;
313a8ad8cf8SIvan Malov 
314a8ad8cf8SIvan Malov fail_txqs_alloc:
315a8ad8cf8SIvan Malov 	sa->txq_count = 0;
316a8ad8cf8SIvan Malov 
317dbf0f627SIvan Malov fail_check_mode:
318676d11ffSAndrew Rybchenko fail_tx_dma_desc_boundary:
319a8ad8cf8SIvan Malov 	sfc_log_init(sa, "failed (rc = %d)", rc);
320a8ad8cf8SIvan Malov 	return rc;
321a8ad8cf8SIvan Malov }
322a8ad8cf8SIvan Malov 
323a8ad8cf8SIvan Malov void
324a8ad8cf8SIvan Malov sfc_tx_fini(struct sfc_adapter *sa)
325a8ad8cf8SIvan Malov {
326b1b7ad93SIvan Malov 	int sw_index;
327b1b7ad93SIvan Malov 
328b1b7ad93SIvan Malov 	sw_index = sa->txq_count;
329b1b7ad93SIvan Malov 	while (--sw_index >= 0) {
330b1b7ad93SIvan Malov 		if (sa->txq_info[sw_index].txq != NULL)
331b1b7ad93SIvan Malov 			sfc_tx_qfini(sa, sw_index);
332b1b7ad93SIvan Malov 	}
333b1b7ad93SIvan Malov 
334a8ad8cf8SIvan Malov 	rte_free(sa->txq_info);
335a8ad8cf8SIvan Malov 	sa->txq_info = NULL;
336a8ad8cf8SIvan Malov 	sa->txq_count = 0;
337a8ad8cf8SIvan Malov }
338fed9aeb4SIvan Malov 
339fed9aeb4SIvan Malov int
340fed9aeb4SIvan Malov sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
341fed9aeb4SIvan Malov {
342fed9aeb4SIvan Malov 	struct rte_eth_dev_data *dev_data;
343fed9aeb4SIvan Malov 	struct sfc_txq_info *txq_info;
344fed9aeb4SIvan Malov 	struct sfc_txq *txq;
345fed9aeb4SIvan Malov 	struct sfc_evq *evq;
346fed9aeb4SIvan Malov 	uint16_t flags;
347fed9aeb4SIvan Malov 	unsigned int desc_index;
348fed9aeb4SIvan Malov 	int rc = 0;
349fed9aeb4SIvan Malov 
350fed9aeb4SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
351fed9aeb4SIvan Malov 
352fed9aeb4SIvan Malov 	SFC_ASSERT(sw_index < sa->txq_count);
353fed9aeb4SIvan Malov 	txq_info = &sa->txq_info[sw_index];
354fed9aeb4SIvan Malov 
355fed9aeb4SIvan Malov 	txq = txq_info->txq;
356fed9aeb4SIvan Malov 
357fed9aeb4SIvan Malov 	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
358fed9aeb4SIvan Malov 
359fed9aeb4SIvan Malov 	evq = txq->evq;
360fed9aeb4SIvan Malov 
361fed9aeb4SIvan Malov 	rc = sfc_ev_qstart(sa, evq->evq_index);
362fed9aeb4SIvan Malov 	if (rc != 0)
363fed9aeb4SIvan Malov 		goto fail_ev_qstart;
364fed9aeb4SIvan Malov 
365fed9aeb4SIvan Malov 	/*
366fed9aeb4SIvan Malov 	 * It seems that DPDK has no control for the IPv4 checksum offload,
367fed9aeb4SIvan Malov 	 * hence we always enable it here
368fed9aeb4SIvan Malov 	 */
369fed9aeb4SIvan Malov 	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
370fec33d5bSIvan Malov 	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
371fed9aeb4SIvan Malov 		flags = EFX_TXQ_CKSUM_IPV4;
372fec33d5bSIvan Malov 	} else {
373fed9aeb4SIvan Malov 		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
374fed9aeb4SIvan Malov 
375fec33d5bSIvan Malov 		if (sa->tso)
376fec33d5bSIvan Malov 			flags |= EFX_TXQ_FATSOV2;
377fec33d5bSIvan Malov 	}
378fec33d5bSIvan Malov 
379fed9aeb4SIvan Malov 	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
380fed9aeb4SIvan Malov 			    txq_info->entries, 0 /* not used on EF10 */,
381fed9aeb4SIvan Malov 			    flags, evq->common,
382fed9aeb4SIvan Malov 			    &txq->common, &desc_index);
383fec33d5bSIvan Malov 	if (rc != 0) {
384fec33d5bSIvan Malov 		if (sa->tso && (rc == ENOSPC))
385fec33d5bSIvan Malov 			sfc_err(sa, "ran out of TSO contexts");
386fec33d5bSIvan Malov 
387fed9aeb4SIvan Malov 		goto fail_tx_qcreate;
388fec33d5bSIvan Malov 	}
389fed9aeb4SIvan Malov 
390fed9aeb4SIvan Malov 	efx_tx_qenable(txq->common);
391fed9aeb4SIvan Malov 
392*dbdc8241SAndrew Rybchenko 	txq->state |= SFC_TXQ_STARTED;
393*dbdc8241SAndrew Rybchenko 
394*dbdc8241SAndrew Rybchenko 	rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
395*dbdc8241SAndrew Rybchenko 	if (rc != 0)
396*dbdc8241SAndrew Rybchenko 		goto fail_dp_qstart;
397fed9aeb4SIvan Malov 
398fed9aeb4SIvan Malov 	/*
399fed9aeb4SIvan Malov 	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
400fed9aeb4SIvan Malov 	 */
401fed9aeb4SIvan Malov 	dev_data = sa->eth_dev->data;
402fed9aeb4SIvan Malov 	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
403fed9aeb4SIvan Malov 
404fed9aeb4SIvan Malov 	return 0;
405fed9aeb4SIvan Malov 
406*dbdc8241SAndrew Rybchenko fail_dp_qstart:
407*dbdc8241SAndrew Rybchenko 	txq->state = SFC_TXQ_INITIALIZED;
408*dbdc8241SAndrew Rybchenko 	efx_tx_qdestroy(txq->common);
409*dbdc8241SAndrew Rybchenko 
410fed9aeb4SIvan Malov fail_tx_qcreate:
411fed9aeb4SIvan Malov 	sfc_ev_qstop(sa, evq->evq_index);
412fed9aeb4SIvan Malov 
413fed9aeb4SIvan Malov fail_ev_qstart:
414fed9aeb4SIvan Malov 	return rc;
415fed9aeb4SIvan Malov }
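/*
 * Summary of the checksum/TSO flags chosen above:
 *	NOXSUMTCP or NOXSUMUDP set	-> EFX_TXQ_CKSUM_IPV4 only
 *	both cleared			-> EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP
 *	both cleared and sa->tso	-> EFX_TXQ_FATSOV2 in addition
 * IPv4 checksum offload is always requested since DPDK provides no
 * per-queue control for it (see the comment above).
 */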
416fed9aeb4SIvan Malov 
417fed9aeb4SIvan Malov void
418fed9aeb4SIvan Malov sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
419fed9aeb4SIvan Malov {
420fed9aeb4SIvan Malov 	struct rte_eth_dev_data *dev_data;
421fed9aeb4SIvan Malov 	struct sfc_txq_info *txq_info;
422fed9aeb4SIvan Malov 	struct sfc_txq *txq;
423fed9aeb4SIvan Malov 	unsigned int retry_count;
424fed9aeb4SIvan Malov 	unsigned int wait_count;
425fed9aeb4SIvan Malov 
426fed9aeb4SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
427fed9aeb4SIvan Malov 
428fed9aeb4SIvan Malov 	SFC_ASSERT(sw_index < sa->txq_count);
429fed9aeb4SIvan Malov 	txq_info = &sa->txq_info[sw_index];
430fed9aeb4SIvan Malov 
431fed9aeb4SIvan Malov 	txq = txq_info->txq;
432fed9aeb4SIvan Malov 
433c6a1d9b5SIvan Malov 	if (txq->state == SFC_TXQ_INITIALIZED)
434c6a1d9b5SIvan Malov 		return;
435c6a1d9b5SIvan Malov 
436fed9aeb4SIvan Malov 	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
437fed9aeb4SIvan Malov 
438*dbdc8241SAndrew Rybchenko 	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
439fed9aeb4SIvan Malov 
440fed9aeb4SIvan Malov 	/*
441fed9aeb4SIvan Malov 	 * Retry TX queue flushing in case of flush failure or
442fed9aeb4SIvan Malov 	 * timeout; in the worst case it can delay for 6 seconds
443fed9aeb4SIvan Malov 	 */
444fed9aeb4SIvan Malov 	for (retry_count = 0;
445fed9aeb4SIvan Malov 	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
446fed9aeb4SIvan Malov 	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
447fed9aeb4SIvan Malov 	     ++retry_count) {
448fed9aeb4SIvan Malov 		if (efx_tx_qflush(txq->common) != 0) {
449fed9aeb4SIvan Malov 			txq->state |= SFC_TXQ_FLUSHING;
450fed9aeb4SIvan Malov 			break;
451fed9aeb4SIvan Malov 		}
452fed9aeb4SIvan Malov 
453fed9aeb4SIvan Malov 		/*
454fed9aeb4SIvan Malov 		 * Wait for the TX queue flush done or flush failed event for at
455fed9aeb4SIvan Malov 		 * least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and no more
456fed9aeb4SIvan Malov 		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
457fed9aeb4SIvan Malov 		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
458fed9aeb4SIvan Malov 		 */
459fed9aeb4SIvan Malov 		wait_count = 0;
460fed9aeb4SIvan Malov 		do {
461fed9aeb4SIvan Malov 			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
462fed9aeb4SIvan Malov 			sfc_ev_qpoll(txq->evq);
463fed9aeb4SIvan Malov 		} while ((txq->state & SFC_TXQ_FLUSHING) &&
464fed9aeb4SIvan Malov 			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
465fed9aeb4SIvan Malov 
466fed9aeb4SIvan Malov 		if (txq->state & SFC_TXQ_FLUSHING)
467fed9aeb4SIvan Malov 			sfc_err(sa, "TxQ %u flush timed out", sw_index);
468fed9aeb4SIvan Malov 
469fed9aeb4SIvan Malov 		if (txq->state & SFC_TXQ_FLUSHED)
470fed9aeb4SIvan Malov 			sfc_info(sa, "TxQ %u flushed", sw_index);
471fed9aeb4SIvan Malov 	}
472fed9aeb4SIvan Malov 
473*dbdc8241SAndrew Rybchenko 	sa->dp_tx->qreap(txq->dp);
474fed9aeb4SIvan Malov 
475fed9aeb4SIvan Malov 	txq->state = SFC_TXQ_INITIALIZED;
476fed9aeb4SIvan Malov 
477fed9aeb4SIvan Malov 	efx_tx_qdestroy(txq->common);
478fed9aeb4SIvan Malov 
479fed9aeb4SIvan Malov 	sfc_ev_qstop(sa, txq->evq->evq_index);
480fed9aeb4SIvan Malov 
481fed9aeb4SIvan Malov 	/*
482fed9aeb4SIvan Malov 	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
483fed9aeb4SIvan Malov 	 */
484fed9aeb4SIvan Malov 	dev_data = sa->eth_dev->data;
485fed9aeb4SIvan Malov 	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
486fed9aeb4SIvan Malov }
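/*
 * Whether or not the flush above succeeds, the datapath qreap() callback
 * releases the ring bookkeeping and frees any mbufs still attached to
 * descriptors (see sfc_efx_tx_qreap() below for the libefx datapath).
 */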
487fed9aeb4SIvan Malov 
488fed9aeb4SIvan Malov int
489fed9aeb4SIvan Malov sfc_tx_start(struct sfc_adapter *sa)
490fed9aeb4SIvan Malov {
491fed9aeb4SIvan Malov 	unsigned int sw_index;
492fed9aeb4SIvan Malov 	int rc = 0;
493fed9aeb4SIvan Malov 
494fed9aeb4SIvan Malov 	sfc_log_init(sa, "txq_count = %u", sa->txq_count);
495fed9aeb4SIvan Malov 
496fec33d5bSIvan Malov 	if (sa->tso) {
497fec33d5bSIvan Malov 		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
498fec33d5bSIvan Malov 			sfc_warn(sa, "TSO support was unable to be restored");
499fec33d5bSIvan Malov 			sa->tso = B_FALSE;
500fec33d5bSIvan Malov 		}
501fec33d5bSIvan Malov 	}
502fec33d5bSIvan Malov 
503fed9aeb4SIvan Malov 	rc = efx_tx_init(sa->nic);
504fed9aeb4SIvan Malov 	if (rc != 0)
505fed9aeb4SIvan Malov 		goto fail_efx_tx_init;
506fed9aeb4SIvan Malov 
507fed9aeb4SIvan Malov 	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
508c6a1d9b5SIvan Malov 		if (!(sa->txq_info[sw_index].deferred_start) ||
509c6a1d9b5SIvan Malov 		    sa->txq_info[sw_index].deferred_started) {
510fed9aeb4SIvan Malov 			rc = sfc_tx_qstart(sa, sw_index);
511fed9aeb4SIvan Malov 			if (rc != 0)
512fed9aeb4SIvan Malov 				goto fail_tx_qstart;
513fed9aeb4SIvan Malov 		}
514c6a1d9b5SIvan Malov 	}
515fed9aeb4SIvan Malov 
516fed9aeb4SIvan Malov 	return 0;
517fed9aeb4SIvan Malov 
518fed9aeb4SIvan Malov fail_tx_qstart:
519fed9aeb4SIvan Malov 	while (sw_index-- > 0)
520fed9aeb4SIvan Malov 		sfc_tx_qstop(sa, sw_index);
521fed9aeb4SIvan Malov 
522fed9aeb4SIvan Malov 	efx_tx_fini(sa->nic);
523fed9aeb4SIvan Malov 
524fed9aeb4SIvan Malov fail_efx_tx_init:
525fed9aeb4SIvan Malov 	sfc_log_init(sa, "failed (rc = %d)", rc);
526fed9aeb4SIvan Malov 	return rc;
527fed9aeb4SIvan Malov }
528fed9aeb4SIvan Malov 
529fed9aeb4SIvan Malov void
530fed9aeb4SIvan Malov sfc_tx_stop(struct sfc_adapter *sa)
531fed9aeb4SIvan Malov {
532fed9aeb4SIvan Malov 	unsigned int sw_index;
533fed9aeb4SIvan Malov 
534fed9aeb4SIvan Malov 	sfc_log_init(sa, "txq_count = %u", sa->txq_count);
535fed9aeb4SIvan Malov 
536fed9aeb4SIvan Malov 	sw_index = sa->txq_count;
537fed9aeb4SIvan Malov 	while (sw_index-- > 0) {
538fed9aeb4SIvan Malov 		if (sa->txq_info[sw_index].txq != NULL)
539fed9aeb4SIvan Malov 			sfc_tx_qstop(sa, sw_index);
540fed9aeb4SIvan Malov 	}
541fed9aeb4SIvan Malov 
542fed9aeb4SIvan Malov 	efx_tx_fini(sa->nic);
543fed9aeb4SIvan Malov }
544428c7dddSIvan Malov 
545*dbdc8241SAndrew Rybchenko static void
546*dbdc8241SAndrew Rybchenko sfc_efx_tx_reap(struct sfc_efx_txq *txq)
547*dbdc8241SAndrew Rybchenko {
548*dbdc8241SAndrew Rybchenko 	unsigned int completed;
549*dbdc8241SAndrew Rybchenko 
550*dbdc8241SAndrew Rybchenko 	sfc_ev_qpoll(txq->evq);
551*dbdc8241SAndrew Rybchenko 
552*dbdc8241SAndrew Rybchenko 	for (completed = txq->completed;
553*dbdc8241SAndrew Rybchenko 	     completed != txq->pending; completed++) {
554*dbdc8241SAndrew Rybchenko 		struct sfc_efx_tx_sw_desc *txd;
555*dbdc8241SAndrew Rybchenko 
556*dbdc8241SAndrew Rybchenko 		txd = &txq->sw_ring[completed & txq->ptr_mask];
557*dbdc8241SAndrew Rybchenko 
558*dbdc8241SAndrew Rybchenko 		if (txd->mbuf != NULL) {
559*dbdc8241SAndrew Rybchenko 			rte_pktmbuf_free(txd->mbuf);
560*dbdc8241SAndrew Rybchenko 			txd->mbuf = NULL;
561*dbdc8241SAndrew Rybchenko 		}
562*dbdc8241SAndrew Rybchenko 	}
563*dbdc8241SAndrew Rybchenko 
564*dbdc8241SAndrew Rybchenko 	txq->completed = completed;
565*dbdc8241SAndrew Rybchenko }
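/*
 * Note: 'completed', 'pending' and 'added' are free-running counters; a
 * ring slot is addressed as (counter & ptr_mask), which relies on the ring
 * size being a power of two (ptr_mask == txq_entries - 1, see
 * sfc_efx_tx_qcreate() below).
 */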
566*dbdc8241SAndrew Rybchenko 
5677fd63681SIvan Malov /*
5687fd63681SIvan Malov  * This function is used to insert or update a VLAN tag;
5697fd63681SIvan Malov  * the firmware keeps per-TxQ state of the VLAN tag to insert
5707fd63681SIvan Malov  * (controlled by option descriptors), hence, if the tag of the
5717fd63681SIvan Malov  * packet to be sent differs from the one remembered by the firmware,
5727fd63681SIvan Malov  * the function will update it
5737fd63681SIvan Malov  */
5747fd63681SIvan Malov static unsigned int
575*dbdc8241SAndrew Rybchenko sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
5767fd63681SIvan Malov 			    efx_desc_t **pend)
5777fd63681SIvan Malov {
5787fd63681SIvan Malov 	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
5797fd63681SIvan Malov 			     m->vlan_tci : 0);
5807fd63681SIvan Malov 
5817fd63681SIvan Malov 	if (this_tag == txq->hw_vlan_tci)
5827fd63681SIvan Malov 		return 0;
5837fd63681SIvan Malov 
5847fd63681SIvan Malov 	/*
5857fd63681SIvan Malov 	 * The expression inside SFC_ASSERT() is deliberately not checked in
5867fd63681SIvan Malov 	 * a non-debug build because it might be too expensive on the data path
5877fd63681SIvan Malov 	 */
5887fd63681SIvan Malov 	SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
5897fd63681SIvan Malov 
5907fd63681SIvan Malov 	efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
5917fd63681SIvan Malov 				    *pend);
5927fd63681SIvan Malov 	(*pend)++;
5937fd63681SIvan Malov 	txq->hw_vlan_tci = this_tag;
5947fd63681SIvan Malov 
5957fd63681SIvan Malov 	return 1;
5967fd63681SIvan Malov }
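/*
 * The return value above (0 or 1) is the number of option descriptors
 * written, so the caller adds it to 'pkt_descs' and accounts for it in
 * the ring fill level just like ordinary DMA descriptors.
 */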
5977fd63681SIvan Malov 
598*dbdc8241SAndrew Rybchenko static uint16_t
599*dbdc8241SAndrew Rybchenko sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
600428c7dddSIvan Malov {
601*dbdc8241SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
602*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
603428c7dddSIvan Malov 	unsigned int added = txq->added;
604428c7dddSIvan Malov 	unsigned int pushed = added;
605428c7dddSIvan Malov 	unsigned int pkts_sent = 0;
606428c7dddSIvan Malov 	efx_desc_t *pend = &txq->pend_desc[0];
607428c7dddSIvan Malov 	const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
60821f6411cSIvan Malov 	const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
609428c7dddSIvan Malov 	unsigned int fill_level = added - txq->completed;
610428c7dddSIvan Malov 	boolean_t reap_done;
611428c7dddSIvan Malov 	int rc __rte_unused;
612428c7dddSIvan Malov 	struct rte_mbuf **pktp;
613428c7dddSIvan Malov 
614*dbdc8241SAndrew Rybchenko 	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
615428c7dddSIvan Malov 		goto done;
616428c7dddSIvan Malov 
617428c7dddSIvan Malov 	/*
618428c7dddSIvan Malov 	 * If there is not enough space left even for a single packet,
619428c7dddSIvan Malov 	 * reap now; otherwise, avoid reaping on every call
620428c7dddSIvan Malov 	 * in order to keep latency low
621428c7dddSIvan Malov 	 */
622428c7dddSIvan Malov 	reap_done = (fill_level > soft_max_fill);
623428c7dddSIvan Malov 
624428c7dddSIvan Malov 	if (reap_done) {
625*dbdc8241SAndrew Rybchenko 		sfc_efx_tx_reap(txq);
626428c7dddSIvan Malov 		/*
627428c7dddSIvan Malov 		 * Recalculate fill level since 'txq->completed'
628428c7dddSIvan Malov 		 * might have changed on reap
629428c7dddSIvan Malov 		 */
630428c7dddSIvan Malov 		fill_level = added - txq->completed;
631428c7dddSIvan Malov 	}
632428c7dddSIvan Malov 
633428c7dddSIvan Malov 	for (pkts_sent = 0, pktp = &tx_pkts[0];
634428c7dddSIvan Malov 	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
635428c7dddSIvan Malov 	     pkts_sent++, pktp++) {
636428c7dddSIvan Malov 		struct rte_mbuf		*m_seg = *pktp;
637428c7dddSIvan Malov 		size_t			pkt_len = m_seg->pkt_len;
638428c7dddSIvan Malov 		unsigned int		pkt_descs = 0;
639fec33d5bSIvan Malov 		size_t			in_off = 0;
640428c7dddSIvan Malov 
6417fd63681SIvan Malov 		/*
6427fd63681SIvan Malov 		 * Here the VLAN TCI is expected to be zero if no
6437fd63681SIvan Malov 		 * DEV_TX_VLAN_OFFLOAD capability is advertised;
6447fd63681SIvan Malov 		 * if the calling app ignores the absence of
6457fd63681SIvan Malov 		 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
6467fd63681SIvan Malov 		 * TX_ERROR will occur
6477fd63681SIvan Malov 		 */
648*dbdc8241SAndrew Rybchenko 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
6497fd63681SIvan Malov 
650fec33d5bSIvan Malov 		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
651fec33d5bSIvan Malov 			/*
652fec33d5bSIvan Malov 			 * We expect the 'pkt->l[2, 3, 4]_len' values
653fec33d5bSIvan Malov 			 * to be set correctly by the caller
654fec33d5bSIvan Malov 			 */
655*dbdc8241SAndrew Rybchenko 			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
656fec33d5bSIvan Malov 					   &pkt_descs, &pkt_len) != 0) {
657fec33d5bSIvan Malov 				/* We may have reached this place for
658fec33d5bSIvan Malov 				 * one of the following reasons:
659fec33d5bSIvan Malov 				 *
660fec33d5bSIvan Malov 				 * 1) Packet header length is greater
661fec33d5bSIvan Malov 				 *    than SFC_TSOH_STD_LEN
662fec33d5bSIvan Malov 				 * 2) TCP header starts at more than
663fec33d5bSIvan Malov 				 *    208 bytes into the frame
664fec33d5bSIvan Malov 				 *
665fec33d5bSIvan Malov 				 * We will deceive RTE saying that we have sent
666fec33d5bSIvan Malov 				 * the packet, but we will actually drop it.
667fec33d5bSIvan Malov 				 * Hence, we should revert 'pend' to the
668fec33d5bSIvan Malov 				 * previous state (in case we have added
669fec33d5bSIvan Malov 				 * a VLAN descriptor) and start processing
670fec33d5bSIvan Malov 				 * the next packet. But the original
671fec33d5bSIvan Malov 				 * mbuf shouldn't be orphaned
672fec33d5bSIvan Malov 				 */
673fec33d5bSIvan Malov 				pend -= pkt_descs;
674fec33d5bSIvan Malov 
675fec33d5bSIvan Malov 				rte_pktmbuf_free(*pktp);
676fec33d5bSIvan Malov 
677fec33d5bSIvan Malov 				continue;
678fec33d5bSIvan Malov 			}
679fec33d5bSIvan Malov 
680fec33d5bSIvan Malov 			/*
681fec33d5bSIvan Malov 			 * We've only added 2 FATSOv2 option descriptors
682fec33d5bSIvan Malov 			 * and 1 descriptor for the linearized packet header.
683fec33d5bSIvan Malov 			 * The outstanding work will be done in the same manner
684fec33d5bSIvan Malov 			 * as for the usual non-TSO path
685fec33d5bSIvan Malov 			 */
686fec33d5bSIvan Malov 		}
687fec33d5bSIvan Malov 
688428c7dddSIvan Malov 		for (; m_seg != NULL; m_seg = m_seg->next) {
689428c7dddSIvan Malov 			efsys_dma_addr_t	next_frag;
690428c7dddSIvan Malov 			size_t			seg_len;
691428c7dddSIvan Malov 
692428c7dddSIvan Malov 			seg_len = m_seg->data_len;
693428c7dddSIvan Malov 			next_frag = rte_mbuf_data_dma_addr(m_seg);
694428c7dddSIvan Malov 
695fec33d5bSIvan Malov 			/*
696fec33d5bSIvan Malov 			 * If we've started a TSO transaction a few steps earlier,
697fec33d5bSIvan Malov 			 * we'll skip the packet header using an offset in the
698fec33d5bSIvan Malov 			 * current segment (which has been set to the
699fec33d5bSIvan Malov 			 * first one containing payload)
700fec33d5bSIvan Malov 			 */
701fec33d5bSIvan Malov 			seg_len -= in_off;
702fec33d5bSIvan Malov 			next_frag += in_off;
703fec33d5bSIvan Malov 			in_off = 0;
704fec33d5bSIvan Malov 
705428c7dddSIvan Malov 			do {
706428c7dddSIvan Malov 				efsys_dma_addr_t	frag_addr = next_frag;
707428c7dddSIvan Malov 				size_t			frag_len;
708428c7dddSIvan Malov 
709676d11ffSAndrew Rybchenko 				/*
710676d11ffSAndrew Rybchenko 				 * It is assumed here that there is no
711676d11ffSAndrew Rybchenko 				 * limitation on address boundary
712676d11ffSAndrew Rybchenko 				 * crossing by DMA descriptor.
713676d11ffSAndrew Rybchenko 				 */
714676d11ffSAndrew Rybchenko 				frag_len = MIN(seg_len, txq->dma_desc_size_max);
715676d11ffSAndrew Rybchenko 				next_frag += frag_len;
716428c7dddSIvan Malov 				seg_len -= frag_len;
717428c7dddSIvan Malov 				pkt_len -= frag_len;
718428c7dddSIvan Malov 
719428c7dddSIvan Malov 				efx_tx_qdesc_dma_create(txq->common,
720428c7dddSIvan Malov 							frag_addr, frag_len,
721428c7dddSIvan Malov 							(pkt_len == 0),
722428c7dddSIvan Malov 							pend++);
723428c7dddSIvan Malov 
724428c7dddSIvan Malov 				pkt_descs++;
725428c7dddSIvan Malov 			} while (seg_len != 0);
726428c7dddSIvan Malov 		}
727428c7dddSIvan Malov 
728428c7dddSIvan Malov 		added += pkt_descs;
729428c7dddSIvan Malov 
730428c7dddSIvan Malov 		fill_level += pkt_descs;
731428c7dddSIvan Malov 		if (unlikely(fill_level > hard_max_fill)) {
732428c7dddSIvan Malov 			/*
733428c7dddSIvan Malov 			 * Our estimate of the maximum number of descriptors
734428c7dddSIvan Malov 			 * required to send a packet seems to be wrong.
735428c7dddSIvan Malov 			 * Try to reap (if we haven't yet).
736428c7dddSIvan Malov 			 */
737428c7dddSIvan Malov 			if (!reap_done) {
738*dbdc8241SAndrew Rybchenko 				sfc_efx_tx_reap(txq);
739428c7dddSIvan Malov 				reap_done = B_TRUE;
740428c7dddSIvan Malov 				fill_level = added - txq->completed;
741428c7dddSIvan Malov 				if (fill_level > hard_max_fill) {
742428c7dddSIvan Malov 					pend -= pkt_descs;
743428c7dddSIvan Malov 					break;
744428c7dddSIvan Malov 				}
745428c7dddSIvan Malov 			} else {
746428c7dddSIvan Malov 				pend -= pkt_descs;
747428c7dddSIvan Malov 				break;
748428c7dddSIvan Malov 			}
749428c7dddSIvan Malov 		}
750428c7dddSIvan Malov 
751428c7dddSIvan Malov 		/* Assign mbuf to the last used desc */
752428c7dddSIvan Malov 		txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
753428c7dddSIvan Malov 	}
754428c7dddSIvan Malov 
755428c7dddSIvan Malov 	if (likely(pkts_sent > 0)) {
756428c7dddSIvan Malov 		rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
757428c7dddSIvan Malov 				       pend - &txq->pend_desc[0],
758428c7dddSIvan Malov 				       txq->completed, &txq->added);
759428c7dddSIvan Malov 		SFC_ASSERT(rc == 0);
760428c7dddSIvan Malov 
761428c7dddSIvan Malov 		if (likely(pushed != txq->added))
762428c7dddSIvan Malov 			efx_tx_qpush(txq->common, txq->added, pushed);
763428c7dddSIvan Malov 	}
764428c7dddSIvan Malov 
765428c7dddSIvan Malov #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
766428c7dddSIvan Malov 	if (!reap_done)
767*dbdc8241SAndrew Rybchenko 		sfc_efx_tx_reap(txq);
768428c7dddSIvan Malov #endif
769428c7dddSIvan Malov 
770428c7dddSIvan Malov done:
771428c7dddSIvan Malov 	return pkts_sent;
772428c7dddSIvan Malov }
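/*
 * Fill level bookkeeping used above, in short:
 *	fill_level    = added - completed	(descriptors in flight)
 *	hard_max_fill = EFX_TXQ_LIMIT(ring size)
 *	soft_max_fill = hard_max_fill - free_thresh
 * Reaping is done up front only when fill_level exceeds soft_max_fill and
 * again mid-burst if a packet pushes fill_level past hard_max_fill; e.g.
 * with a (hypothetical) free_thresh of 64, the up-front reap is triggered
 * only once fewer than 64 descriptors remain before the hard limit.
 */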
773*dbdc8241SAndrew Rybchenko 
774*dbdc8241SAndrew Rybchenko struct sfc_txq *
775*dbdc8241SAndrew Rybchenko sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
776*dbdc8241SAndrew Rybchenko {
777*dbdc8241SAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
778*dbdc8241SAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
779*dbdc8241SAndrew Rybchenko 	struct sfc_adapter *sa;
780*dbdc8241SAndrew Rybchenko 	struct sfc_txq *txq;
781*dbdc8241SAndrew Rybchenko 
782*dbdc8241SAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
783*dbdc8241SAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
784*dbdc8241SAndrew Rybchenko 
785*dbdc8241SAndrew Rybchenko 	sa = eth_dev->data->dev_private;
786*dbdc8241SAndrew Rybchenko 
787*dbdc8241SAndrew Rybchenko 	SFC_ASSERT(dpq->queue_id < sa->txq_count);
788*dbdc8241SAndrew Rybchenko 	txq = sa->txq_info[dpq->queue_id].txq;
789*dbdc8241SAndrew Rybchenko 
790*dbdc8241SAndrew Rybchenko 	SFC_ASSERT(txq != NULL);
791*dbdc8241SAndrew Rybchenko 	return txq;
792*dbdc8241SAndrew Rybchenko }
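/*
 * The reverse mapping above works because sfc_dp_queue_init() records the
 * port and queue IDs in the dpq at qcreate time, so a datapath queue can
 * find its control path counterpart through rte_eth_devices[] without any
 * extra state.
 */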
793*dbdc8241SAndrew Rybchenko 
794*dbdc8241SAndrew Rybchenko static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
795*dbdc8241SAndrew Rybchenko static int
796*dbdc8241SAndrew Rybchenko sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
797*dbdc8241SAndrew Rybchenko 		   const struct rte_pci_addr *pci_addr,
798*dbdc8241SAndrew Rybchenko 		   int socket_id,
799*dbdc8241SAndrew Rybchenko 		   const struct sfc_dp_tx_qcreate_info *info,
800*dbdc8241SAndrew Rybchenko 		   struct sfc_dp_txq **dp_txqp)
801*dbdc8241SAndrew Rybchenko {
802*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq;
803*dbdc8241SAndrew Rybchenko 	struct sfc_txq *ctrl_txq;
804*dbdc8241SAndrew Rybchenko 	int rc;
805*dbdc8241SAndrew Rybchenko 
806*dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
807*dbdc8241SAndrew Rybchenko 	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
808*dbdc8241SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
809*dbdc8241SAndrew Rybchenko 	if (txq == NULL)
810*dbdc8241SAndrew Rybchenko 		goto fail_txq_alloc;
811*dbdc8241SAndrew Rybchenko 
812*dbdc8241SAndrew Rybchenko 	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
813*dbdc8241SAndrew Rybchenko 
814*dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
815*dbdc8241SAndrew Rybchenko 	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
816*dbdc8241SAndrew Rybchenko 					   EFX_TXQ_LIMIT(info->txq_entries),
817*dbdc8241SAndrew Rybchenko 					   sizeof(*txq->pend_desc), 0,
818*dbdc8241SAndrew Rybchenko 					   socket_id);
819*dbdc8241SAndrew Rybchenko 	if (txq->pend_desc == NULL)
820*dbdc8241SAndrew Rybchenko 		goto fail_pend_desc_alloc;
821*dbdc8241SAndrew Rybchenko 
822*dbdc8241SAndrew Rybchenko 	rc = ENOMEM;
823*dbdc8241SAndrew Rybchenko 	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
824*dbdc8241SAndrew Rybchenko 					 info->txq_entries,
825*dbdc8241SAndrew Rybchenko 					 sizeof(*txq->sw_ring),
826*dbdc8241SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
827*dbdc8241SAndrew Rybchenko 	if (txq->sw_ring == NULL)
828*dbdc8241SAndrew Rybchenko 		goto fail_sw_ring_alloc;
829*dbdc8241SAndrew Rybchenko 
830*dbdc8241SAndrew Rybchenko 	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
831*dbdc8241SAndrew Rybchenko 	if (ctrl_txq->evq->sa->tso) {
832*dbdc8241SAndrew Rybchenko 		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
833*dbdc8241SAndrew Rybchenko 						 info->txq_entries, socket_id);
834*dbdc8241SAndrew Rybchenko 		if (rc != 0)
835*dbdc8241SAndrew Rybchenko 			goto fail_alloc_tsoh_objs;
836*dbdc8241SAndrew Rybchenko 	}
837*dbdc8241SAndrew Rybchenko 
838*dbdc8241SAndrew Rybchenko 	txq->evq = ctrl_txq->evq;
839*dbdc8241SAndrew Rybchenko 	txq->ptr_mask = info->txq_entries - 1;
840*dbdc8241SAndrew Rybchenko 	txq->free_thresh = info->free_thresh;
841*dbdc8241SAndrew Rybchenko 	txq->dma_desc_size_max = info->dma_desc_size_max;
842*dbdc8241SAndrew Rybchenko 
843*dbdc8241SAndrew Rybchenko 	*dp_txqp = &txq->dp;
844*dbdc8241SAndrew Rybchenko 	return 0;
845*dbdc8241SAndrew Rybchenko 
846*dbdc8241SAndrew Rybchenko fail_alloc_tsoh_objs:
847*dbdc8241SAndrew Rybchenko 	rte_free(txq->sw_ring);
848*dbdc8241SAndrew Rybchenko 
849*dbdc8241SAndrew Rybchenko fail_sw_ring_alloc:
850*dbdc8241SAndrew Rybchenko 	rte_free(txq->pend_desc);
851*dbdc8241SAndrew Rybchenko 
852*dbdc8241SAndrew Rybchenko fail_pend_desc_alloc:
853*dbdc8241SAndrew Rybchenko 	rte_free(txq);
854*dbdc8241SAndrew Rybchenko 
855*dbdc8241SAndrew Rybchenko fail_txq_alloc:
856*dbdc8241SAndrew Rybchenko 	return rc;
857*dbdc8241SAndrew Rybchenko }
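/*
 * Note: the per-entry TSO header buffers are allocated above only when the
 * adapter reports TSO support (ctrl_txq->evq->sa->tso); the TSO branch in
 * sfc_efx_xmit_pkts() presumably relies on them (via sfc_efx_tso_do()) for
 * linearized packet headers.
 */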
858*dbdc8241SAndrew Rybchenko 
859*dbdc8241SAndrew Rybchenko static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
860*dbdc8241SAndrew Rybchenko static void
861*dbdc8241SAndrew Rybchenko sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
862*dbdc8241SAndrew Rybchenko {
863*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
864*dbdc8241SAndrew Rybchenko 
865*dbdc8241SAndrew Rybchenko 	sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
866*dbdc8241SAndrew Rybchenko 	rte_free(txq->sw_ring);
867*dbdc8241SAndrew Rybchenko 	rte_free(txq->pend_desc);
868*dbdc8241SAndrew Rybchenko 	rte_free(txq);
869*dbdc8241SAndrew Rybchenko }
870*dbdc8241SAndrew Rybchenko 
871*dbdc8241SAndrew Rybchenko static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
872*dbdc8241SAndrew Rybchenko static int
873*dbdc8241SAndrew Rybchenko sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
874*dbdc8241SAndrew Rybchenko 		  __rte_unused unsigned int evq_read_ptr,
875*dbdc8241SAndrew Rybchenko 		  unsigned int txq_desc_index)
876*dbdc8241SAndrew Rybchenko {
877*dbdc8241SAndrew Rybchenko 	/* libefx-based datapath is specific to libefx-based PMD */
878*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
879*dbdc8241SAndrew Rybchenko 	struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
880*dbdc8241SAndrew Rybchenko 
881*dbdc8241SAndrew Rybchenko 	txq->common = ctrl_txq->common;
882*dbdc8241SAndrew Rybchenko 
883*dbdc8241SAndrew Rybchenko 	txq->pending = txq->completed = txq->added = txq_desc_index;
884*dbdc8241SAndrew Rybchenko 	txq->hw_vlan_tci = 0;
885*dbdc8241SAndrew Rybchenko 
886*dbdc8241SAndrew Rybchenko 	txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
887*dbdc8241SAndrew Rybchenko 
888*dbdc8241SAndrew Rybchenko 	return 0;
889*dbdc8241SAndrew Rybchenko }
890*dbdc8241SAndrew Rybchenko 
891*dbdc8241SAndrew Rybchenko static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
892*dbdc8241SAndrew Rybchenko static void
893*dbdc8241SAndrew Rybchenko sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
894*dbdc8241SAndrew Rybchenko 		 __rte_unused unsigned int *evq_read_ptr)
895*dbdc8241SAndrew Rybchenko {
896*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
897*dbdc8241SAndrew Rybchenko 
898*dbdc8241SAndrew Rybchenko 	txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
899*dbdc8241SAndrew Rybchenko }
900*dbdc8241SAndrew Rybchenko 
901*dbdc8241SAndrew Rybchenko static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
902*dbdc8241SAndrew Rybchenko static void
903*dbdc8241SAndrew Rybchenko sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
904*dbdc8241SAndrew Rybchenko {
905*dbdc8241SAndrew Rybchenko 	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
906*dbdc8241SAndrew Rybchenko 	unsigned int txds;
907*dbdc8241SAndrew Rybchenko 
908*dbdc8241SAndrew Rybchenko 	sfc_efx_tx_reap(txq);
909*dbdc8241SAndrew Rybchenko 
910*dbdc8241SAndrew Rybchenko 	for (txds = 0; txds <= txq->ptr_mask; txds++) {
911*dbdc8241SAndrew Rybchenko 		if (txq->sw_ring[txds].mbuf != NULL) {
912*dbdc8241SAndrew Rybchenko 			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
913*dbdc8241SAndrew Rybchenko 			txq->sw_ring[txds].mbuf = NULL;
914*dbdc8241SAndrew Rybchenko 		}
915*dbdc8241SAndrew Rybchenko 	}
916*dbdc8241SAndrew Rybchenko 
917*dbdc8241SAndrew Rybchenko 	txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
918*dbdc8241SAndrew Rybchenko }
919*dbdc8241SAndrew Rybchenko 
920*dbdc8241SAndrew Rybchenko struct sfc_dp_tx sfc_efx_tx = {
921*dbdc8241SAndrew Rybchenko 	.dp = {
922*dbdc8241SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EFX,
923*dbdc8241SAndrew Rybchenko 		.type		= SFC_DP_TX,
924*dbdc8241SAndrew Rybchenko 		.hw_fw_caps	= 0,
925*dbdc8241SAndrew Rybchenko 	},
926*dbdc8241SAndrew Rybchenko 	.qcreate		= sfc_efx_tx_qcreate,
927*dbdc8241SAndrew Rybchenko 	.qdestroy		= sfc_efx_tx_qdestroy,
928*dbdc8241SAndrew Rybchenko 	.qstart			= sfc_efx_tx_qstart,
929*dbdc8241SAndrew Rybchenko 	.qstop			= sfc_efx_tx_qstop,
930*dbdc8241SAndrew Rybchenko 	.qreap			= sfc_efx_tx_qreap,
931*dbdc8241SAndrew Rybchenko 	.pkt_burst		= sfc_efx_xmit_pkts,
932*dbdc8241SAndrew Rybchenko };
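/*
 * sfc_efx_tx describes the libefx (regular) Tx datapath: the control path
 * above calls it through the sa->dp_tx ops pointers (qcreate/qstart/qstop/
 * qreap/qdestroy), and .pkt_burst is the rte_eth_tx_burst() handler when
 * this datapath is in use. The SFC_KVARG_DATAPATH_EFX name presumably
 * allows selecting it via the corresponding device argument (see
 * sfc_kvargs.h).
 */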
933