xref: /dpdk/drivers/net/mlx5/mlx5_tx.c (revision 3cddeba0ca38b00c7dc646277484d08a4cb2d862)
179a876e3SMichael Baum /* SPDX-License-Identifier: BSD-3-Clause
279a876e3SMichael Baum  * Copyright 2021 6WIND S.A.
379a876e3SMichael Baum  * Copyright 2021 Mellanox Technologies, Ltd
479a876e3SMichael Baum  */
579a876e3SMichael Baum 
679a876e3SMichael Baum #include <stdint.h>
779a876e3SMichael Baum #include <string.h>
879a876e3SMichael Baum #include <stdlib.h>
979a876e3SMichael Baum 
1079a876e3SMichael Baum #include <rte_mbuf.h>
1179a876e3SMichael Baum #include <rte_mempool.h>
1279a876e3SMichael Baum #include <rte_prefetch.h>
1379a876e3SMichael Baum #include <rte_common.h>
1479a876e3SMichael Baum #include <rte_branch_prediction.h>
1579a876e3SMichael Baum #include <rte_ether.h>
1679a876e3SMichael Baum #include <rte_cycles.h>
1779a876e3SMichael Baum #include <rte_flow.h>
1879a876e3SMichael Baum 
1979a876e3SMichael Baum #include <mlx5_prm.h>
2079a876e3SMichael Baum #include <mlx5_common.h>
2179a876e3SMichael Baum 
2279a876e3SMichael Baum #include "mlx5_autoconf.h"
2379a876e3SMichael Baum #include "mlx5_defs.h"
2479a876e3SMichael Baum #include "mlx5.h"
2579a876e3SMichael Baum #include "mlx5_utils.h"
2679a876e3SMichael Baum #include "mlx5_rxtx.h"
2779a876e3SMichael Baum #include "mlx5_tx.h"
2879a876e3SMichael Baum 
2979a876e3SMichael Baum #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
3079a876e3SMichael Baum 
3179a876e3SMichael Baum /**
3279a876e3SMichael Baum  * Move QP from error state to running state and initialize indexes.
3379a876e3SMichael Baum  *
3479a876e3SMichael Baum  * @param txq_ctrl
3579a876e3SMichael Baum  *   Pointer to TX queue control structure.
3679a876e3SMichael Baum  *
3779a876e3SMichael Baum  * @return
3879a876e3SMichael Baum  *   0 on success, else -1.
3979a876e3SMichael Baum  */
4079a876e3SMichael Baum static int
4179a876e3SMichael Baum tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
4279a876e3SMichael Baum {
4379a876e3SMichael Baum 	struct mlx5_mp_arg_queue_state_modify sm = {
4479a876e3SMichael Baum 			.is_wq = 0,
4579a876e3SMichael Baum 			.queue_id = txq_ctrl->txq.idx,
4679a876e3SMichael Baum 	};
4779a876e3SMichael Baum 
4879a876e3SMichael Baum 	if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
4979a876e3SMichael Baum 		return -1;
5079a876e3SMichael Baum 	txq_ctrl->txq.wqe_ci = 0;
5179a876e3SMichael Baum 	txq_ctrl->txq.wqe_pi = 0;
5279a876e3SMichael Baum 	txq_ctrl->txq.elts_comp = 0;
5379a876e3SMichael Baum 	return 0;
5479a876e3SMichael Baum }
5579a876e3SMichael Baum 
5679a876e3SMichael Baum /* Return 1 if the error CQE is signed otherwise, sign it and return 0. */
5779a876e3SMichael Baum static int
58*3cddeba0SAlexander Kozyrev check_err_cqe_seen(volatile struct mlx5_error_cqe *err_cqe)
5979a876e3SMichael Baum {
6079a876e3SMichael Baum 	static const uint8_t magic[] = "seen";
6179a876e3SMichael Baum 	int ret = 1;
6279a876e3SMichael Baum 	unsigned int i;
6379a876e3SMichael Baum 
6479a876e3SMichael Baum 	for (i = 0; i < sizeof(magic); ++i)
6579a876e3SMichael Baum 		if (!ret || err_cqe->rsvd1[i] != magic[i]) {
6679a876e3SMichael Baum 			ret = 0;
6779a876e3SMichael Baum 			err_cqe->rsvd1[i] = magic[i];
6879a876e3SMichael Baum 		}
6979a876e3SMichael Baum 	return ret;
7079a876e3SMichael Baum }
7179a876e3SMichael Baum 
7279a876e3SMichael Baum /**
7379a876e3SMichael Baum  * Handle error CQE.
7479a876e3SMichael Baum  *
7579a876e3SMichael Baum  * @param txq
7679a876e3SMichael Baum  *   Pointer to TX queue structure.
7779a876e3SMichael Baum  * @param error_cqe
7879a876e3SMichael Baum  *   Pointer to the error CQE.
7979a876e3SMichael Baum  *
8079a876e3SMichael Baum  * @return
8179a876e3SMichael Baum  *   Negative value if queue recovery failed, otherwise
8279a876e3SMichael Baum  *   the error completion entry is handled successfully.
8379a876e3SMichael Baum  */
static int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
			 volatile struct mlx5_error_cqe *err_cqe)
{
	/* WR_FLUSH_ERR CQEs need no recovery here; anything else does. */
	if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
		/* Mask to wrap WQE indexes into the SQ ring size. */
		const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
		struct mlx5_txq_ctrl *txq_ctrl =
				container_of(txq, struct mlx5_txq_ctrl, txq);
		uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
		/* Nonzero if this very CQE was already processed before. */
		int seen = check_err_cqe_seen(err_cqe);

		/*
		 * Dump debug information once per error CQE, bounded by the
		 * configured maximum number of dump files.
		 */
		if (!seen && txq_ctrl->dump_file_n <
		    txq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u SQN = %u wqe_counter = %u "
			      "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
			      txq->cqe_s, txq->qp_num_8s >> 8,
			      rte_be_to_cpu_16(err_cqe->wqe_counter),
			      txq->wqe_ci, txq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
			      PORT_ID(txq_ctrl->priv), txq->idx,
			      txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
			/* Header record, then raw CQ and SQ ring contents. */
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
						    txq->cqes),
						    sizeof(struct mlx5_error_cqe) *
						    (1 << txq->cqe_n));
			mlx5_dump_debug_information(name, "MLX5 Error SQ:",
						    (const void *)((uintptr_t)
						    txq->wqes),
						    MLX5_WQE_SIZE *
						    (1 << txq->wqe_n));
			txq_ctrl->dump_file_n++;
		}
		if (!seen)
			/*
			 * Count errors in WQEs units.
			 * Later it can be improved to count error packets,
			 * for example, by SQ parsing to find how much packets
			 * should be counted for each WQE.
			 */
			txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
						new_wqe_pi) & wqe_m;
		if (tx_recover_qp(txq_ctrl)) {
			/* Recovering failed - retry later on the same WQE. */
			return -1;
		}
		/* Release all the remaining buffers. */
		txq_free_elts(txq_ctrl);
	}
	return 0;
}
13779a876e3SMichael Baum 
13879a876e3SMichael Baum /**
13979a876e3SMichael Baum  * Update completion queue consuming index via doorbell
14079a876e3SMichael Baum  * and flush the completed data buffers.
14179a876e3SMichael Baum  *
14279a876e3SMichael Baum  * @param txq
14379a876e3SMichael Baum  *   Pointer to TX queue structure.
14479a876e3SMichael Baum  * @param last_cqe
14579a876e3SMichael Baum  *   valid CQE pointer, if not NULL update txq->wqe_pi and flush the buffers.
14679a876e3SMichael Baum  * @param olx
14779a876e3SMichael Baum  *   Configured Tx offloads mask. It is fully defined at
14879a876e3SMichael Baum  *   compile time and may be used for optimization.
14979a876e3SMichael Baum  */
15079a876e3SMichael Baum static __rte_always_inline void
15179a876e3SMichael Baum mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
15279a876e3SMichael Baum 		   volatile struct mlx5_cqe *last_cqe,
15379a876e3SMichael Baum 		   unsigned int olx __rte_unused)
15479a876e3SMichael Baum {
15579a876e3SMichael Baum 	if (likely(last_cqe != NULL)) {
15679a876e3SMichael Baum 		uint16_t tail;
15779a876e3SMichael Baum 
15879a876e3SMichael Baum 		txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
15979a876e3SMichael Baum 		tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
16079a876e3SMichael Baum 		if (likely(tail != txq->elts_tail)) {
16179a876e3SMichael Baum 			mlx5_tx_free_elts(txq, tail, olx);
16279a876e3SMichael Baum 			MLX5_ASSERT(tail == txq->elts_tail);
16379a876e3SMichael Baum 		}
16479a876e3SMichael Baum 	}
16579a876e3SMichael Baum }
16679a876e3SMichael Baum 
16779a876e3SMichael Baum /**
16879a876e3SMichael Baum  * Manage TX completions. This routine checks the CQ for
16979a876e3SMichael Baum  * arrived CQEs, deduces the last accomplished WQE in SQ,
17079a876e3SMichael Baum  * updates SQ producing index and frees all completed mbufs.
17179a876e3SMichael Baum  *
17279a876e3SMichael Baum  * @param txq
17379a876e3SMichael Baum  *   Pointer to TX queue structure.
17479a876e3SMichael Baum  * @param olx
17579a876e3SMichael Baum  *   Configured Tx offloads mask. It is fully defined at
17679a876e3SMichael Baum  *   compile time and may be used for optimization.
17779a876e3SMichael Baum  *
17879a876e3SMichael Baum  * NOTE: not inlined intentionally, it makes tx_burst
17979a876e3SMichael Baum  * routine smaller, simple and faster - from experiments.
18079a876e3SMichael Baum  */
void
mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
			  unsigned int olx __rte_unused)
{
	/* Upper bound of CQEs handled in one call, see comment below. */
	unsigned int count = MLX5_TX_COMP_MAX_CQE;
	volatile struct mlx5_cqe *last_cqe = NULL;
	bool ring_doorbell = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
			/*
			 * Some error occurred, try to restart.
			 * We have no barrier after WQE related Doorbell
			 * written, make sure all writes are completed
			 * here, before we might perform SQ reset.
			 */
			rte_wmb();
			ret = mlx5_tx_error_cqe_handle
				(txq, (volatile struct mlx5_error_cqe *)cqe);
			if (unlikely(ret < 0)) {
				/*
				 * Some error occurred on queue error
				 * handling, we do not advance the index
				 * here, allowing to retry on next call.
				 */
				return;
			}
			/*
			 * We are going to fetch all entries with
			 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
			 * The send queue is supposed to be empty.
			 */
			ring_doorbell = true;
			++txq->cq_ci;
			/* Resynchronize cq_pi with cq_ci after the error. */
			txq->cq_pi = txq->cq_ci;
			/* No buffers to flush for an error completion. */
			last_cqe = NULL;
			continue;
		}
		/* Normal transmit completion. */
		MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		/* Stored WQE counter must match the one reported by HW. */
		MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
			    cqe->wqe_counter);
#endif
		/* Emit a trace point for this completion when tracing is on. */
		if (__rte_trace_point_fp_is_enabled()) {
			uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
			uint16_t wqe_id = rte_be_to_cpu_16(cqe->wqe_counter);

			if (txq->rt_timestamp)
				ts = mlx5_txpp_convert_rx_ts(NULL, ts);
			rte_pmd_mlx5_trace_tx_complete(txq->port_id, txq->idx,
						       wqe_id, ts);
		}
		ring_doorbell = true;
		++txq->cq_ci;
		last_cqe = cqe;
		/*
		 * We have to restrict the amount of processed CQEs
		 * in one tx_burst routine call. The CQ may be large
		 * and many CQEs may be updated by the NIC in one
		 * transaction. Buffers freeing is time consuming,
		 * multiple iterations may introduce significant latency.
		 */
		if (likely(--count == 0))
			break;
	} while (true);
	if (likely(ring_doorbell)) {
		/* Ring doorbell to notify hardware. */
		rte_compiler_barrier();
		*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
		/* Free mbufs completed up to the last valid CQE seen. */
		mlx5_tx_comp_flush(txq, last_cqe, olx);
	}
}
26479a876e3SMichael Baum 
26579a876e3SMichael Baum /**
26679a876e3SMichael Baum  * DPDK callback to check the status of a Tx descriptor.
26779a876e3SMichael Baum  *
26879a876e3SMichael Baum  * @param tx_queue
26979a876e3SMichael Baum  *   The Tx queue.
27079a876e3SMichael Baum  * @param[in] offset
27179a876e3SMichael Baum  *   The index of the descriptor in the ring.
27279a876e3SMichael Baum  *
27379a876e3SMichael Baum  * @return
27479a876e3SMichael Baum  *   The status of the Tx descriptor.
27579a876e3SMichael Baum  */
27679a876e3SMichael Baum int
27779a876e3SMichael Baum mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
27879a876e3SMichael Baum {
27979a876e3SMichael Baum 	struct mlx5_txq_data *__rte_restrict txq = tx_queue;
28079a876e3SMichael Baum 	uint16_t used;
28179a876e3SMichael Baum 
28279a876e3SMichael Baum 	mlx5_tx_handle_completion(txq, 0);
28379a876e3SMichael Baum 	used = txq->elts_head - txq->elts_tail;
28479a876e3SMichael Baum 	if (offset < used)
28579a876e3SMichael Baum 		return RTE_ETH_TX_DESC_FULL;
28679a876e3SMichael Baum 	return RTE_ETH_TX_DESC_DONE;
28779a876e3SMichael Baum }
28879a876e3SMichael Baum 
28979a876e3SMichael Baum /*
29079a876e3SMichael Baum  * Array of declared and compiled Tx burst function and corresponding
29179a876e3SMichael Baum  * supported offloads set. The array is used to select the Tx burst
29279a876e3SMichael Baum  * function for specified offloads set at Tx queue configuration time.
29379a876e3SMichael Baum  */
/*
 * Each MLX5_TXOFF_INFO(func, olx) entry expands (see the macro above) to
 * {mlx5_tx_burst_func, olx}: a compiled Tx burst routine paired with the
 * mask of offloads it supports. mlx5_select_tx_function() scans this table
 * and picks the routine covering the requested offloads with the fewest
 * extras.
 */
const struct {
	eth_tx_burst_t func; /* Tx burst routine. */
	unsigned int olx; /* Supported offloads mask (MLX5_TXOFF_CONFIG_*). */
} txoff_func[] = {
MLX5_TXOFF_INFO(full_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_empw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(md_empw,
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mt_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtsc_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sc_empw,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sci_empw,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(scv_empw,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sciv_empw,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(i_empw,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(v_empw,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(iv_empw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

/* Scheduling-on-timestamp (TXPP) capable variants below. */
MLX5_TXOFF_INFO(full_ts_nompw,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts_nompwi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full_ts_noi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_ts,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mdi_ts,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

/* Variants without Enhanced Multi-Packet Write below. */
MLX5_TXOFF_INFO(full,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(none,
		MLX5_TXOFF_CONFIG_NONE)

MLX5_TXOFF_INFO(md,
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mt,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtsc,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mti,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtiv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sc,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sci,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(scv,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sciv,
		MLX5_TXOFF_CONFIG_SWP |	MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(i,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(v,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(iv,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

/* Legacy Multi-Packet Write (MPW) descriptor-format variants below. */
MLX5_TXOFF_INFO(none_mpw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mci_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mc_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(i_mpw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)
};
48779a876e3SMichael Baum 
48879a876e3SMichael Baum /**
48979a876e3SMichael Baum  * Configure the Tx function to use. The routine checks configured
49079a876e3SMichael Baum  * Tx offloads for the device and selects appropriate Tx burst routine.
49179a876e3SMichael Baum  * There are multiple Tx burst routines compiled from the same template
49279a876e3SMichael Baum  * in the most optimal way for the dedicated Tx offloads set.
49379a876e3SMichael Baum  *
49479a876e3SMichael Baum  * @param dev
49579a876e3SMichael Baum  *   Pointer to private data structure.
49679a876e3SMichael Baum  *
49779a876e3SMichael Baum  * @return
49879a876e3SMichael Baum  *   Pointer to selected Tx burst function.
49979a876e3SMichael Baum  */
50079a876e3SMichael Baum eth_tx_burst_t
50179a876e3SMichael Baum mlx5_select_tx_function(struct rte_eth_dev *dev)
50279a876e3SMichael Baum {
50379a876e3SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
50445a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
50579a876e3SMichael Baum 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
50679a876e3SMichael Baum 	unsigned int diff = 0, olx = 0, i, m;
50779a876e3SMichael Baum 
50879a876e3SMichael Baum 	MLX5_ASSERT(priv);
509295968d1SFerruh Yigit 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
51079a876e3SMichael Baum 		/* We should support Multi-Segment Packets. */
51179a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_MULTI;
51279a876e3SMichael Baum 	}
513295968d1SFerruh Yigit 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
514295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
515295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
516295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
517295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
51879a876e3SMichael Baum 		/* We should support TCP Send Offload. */
51979a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_TSO;
52079a876e3SMichael Baum 	}
521295968d1SFerruh Yigit 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
522295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
523295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
52479a876e3SMichael Baum 		/* We should support Software Parser for Tunnels. */
52579a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_SWP;
52679a876e3SMichael Baum 	}
527295968d1SFerruh Yigit 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
528295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
529295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
530295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
53179a876e3SMichael Baum 		/* We should support IP/TCP/UDP Checksums. */
53279a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_CSUM;
53379a876e3SMichael Baum 	}
534295968d1SFerruh Yigit 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
53579a876e3SMichael Baum 		/* We should support VLAN insertion. */
53679a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_VLAN;
53779a876e3SMichael Baum 	}
538295968d1SFerruh Yigit 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
53979a876e3SMichael Baum 	    rte_mbuf_dynflag_lookup
54079a876e3SMichael Baum 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
54179a876e3SMichael Baum 	    rte_mbuf_dynfield_lookup
54279a876e3SMichael Baum 			(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
54379a876e3SMichael Baum 		/* Offload configured, dynamic entities registered. */
54479a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_TXPP;
54579a876e3SMichael Baum 	}
54679a876e3SMichael Baum 	if (priv->txqs_n && (*priv->txqs)[0]) {
54779a876e3SMichael Baum 		struct mlx5_txq_data *txd = (*priv->txqs)[0];
54879a876e3SMichael Baum 
54979a876e3SMichael Baum 		if (txd->inlen_send) {
55079a876e3SMichael Baum 			/*
55179a876e3SMichael Baum 			 * Check the data inline requirements. Data inline
55279a876e3SMichael Baum 			 * is enabled on per device basis, we can check
55379a876e3SMichael Baum 			 * the first Tx queue only.
55479a876e3SMichael Baum 			 *
55579a876e3SMichael Baum 			 * If device does not support VLAN insertion in WQE
55679a876e3SMichael Baum 			 * and some queues are requested to perform VLAN
55779a876e3SMichael Baum 			 * insertion offload than inline must be enabled.
55879a876e3SMichael Baum 			 */
55979a876e3SMichael Baum 			olx |= MLX5_TXOFF_CONFIG_INLINE;
56079a876e3SMichael Baum 		}
56179a876e3SMichael Baum 	}
56279a876e3SMichael Baum 	if (config->mps == MLX5_MPW_ENHANCED &&
56379a876e3SMichael Baum 	    config->txq_inline_min <= 0) {
56479a876e3SMichael Baum 		/*
56579a876e3SMichael Baum 		 * The NIC supports Enhanced Multi-Packet Write
56679a876e3SMichael Baum 		 * and does not require minimal inline data.
56779a876e3SMichael Baum 		 */
56879a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_EMPW;
56979a876e3SMichael Baum 	}
57079a876e3SMichael Baum 	if (rte_flow_dynf_metadata_avail()) {
57179a876e3SMichael Baum 		/* We should support Flow metadata. */
57279a876e3SMichael Baum 		olx |= MLX5_TXOFF_CONFIG_METADATA;
57379a876e3SMichael Baum 	}
57479a876e3SMichael Baum 	if (config->mps == MLX5_MPW) {
57579a876e3SMichael Baum 		/*
57679a876e3SMichael Baum 		 * The NIC supports Legacy Multi-Packet Write.
57779a876e3SMichael Baum 		 * The MLX5_TXOFF_CONFIG_MPW controls the descriptor building
57879a876e3SMichael Baum 		 * method in combination with MLX5_TXOFF_CONFIG_EMPW.
57979a876e3SMichael Baum 		 */
58079a876e3SMichael Baum 		if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
58179a876e3SMichael Baum 			     MLX5_TXOFF_CONFIG_SWP |
58279a876e3SMichael Baum 			     MLX5_TXOFF_CONFIG_VLAN |
58379a876e3SMichael Baum 			     MLX5_TXOFF_CONFIG_METADATA)))
58479a876e3SMichael Baum 			olx |= MLX5_TXOFF_CONFIG_EMPW |
58579a876e3SMichael Baum 			       MLX5_TXOFF_CONFIG_MPW;
58679a876e3SMichael Baum 	}
58779a876e3SMichael Baum 	/*
58879a876e3SMichael Baum 	 * Scan the routines table to find the minimal
58979a876e3SMichael Baum 	 * satisfying routine with requested offloads.
59079a876e3SMichael Baum 	 */
59179a876e3SMichael Baum 	m = RTE_DIM(txoff_func);
59279a876e3SMichael Baum 	for (i = 0; i < RTE_DIM(txoff_func); i++) {
59379a876e3SMichael Baum 		unsigned int tmp;
59479a876e3SMichael Baum 
59579a876e3SMichael Baum 		tmp = txoff_func[i].olx;
59679a876e3SMichael Baum 		if (tmp == olx) {
59779a876e3SMichael Baum 			/* Meets requested offloads exactly.*/
59879a876e3SMichael Baum 			m = i;
59979a876e3SMichael Baum 			break;
60079a876e3SMichael Baum 		}
60179a876e3SMichael Baum 		if ((tmp & olx) != olx) {
60279a876e3SMichael Baum 			/* Does not meet requested offloads at all. */
60379a876e3SMichael Baum 			continue;
60479a876e3SMichael Baum 		}
60579a876e3SMichael Baum 		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
60679a876e3SMichael Baum 			/* Do not enable legacy MPW if not configured. */
60779a876e3SMichael Baum 			continue;
60879a876e3SMichael Baum 		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
60979a876e3SMichael Baum 			/* Do not enable eMPW if not configured. */
61079a876e3SMichael Baum 			continue;
61179a876e3SMichael Baum 		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
61279a876e3SMichael Baum 			/* Do not enable inlining if not configured. */
61379a876e3SMichael Baum 			continue;
61479a876e3SMichael Baum 		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
61579a876e3SMichael Baum 			/* Do not enable scheduling if not configured. */
61679a876e3SMichael Baum 			continue;
61779a876e3SMichael Baum 		/*
61879a876e3SMichael Baum 		 * Some routine meets the requirements.
61979a876e3SMichael Baum 		 * Check whether it has minimal amount
62079a876e3SMichael Baum 		 * of not requested offloads.
62179a876e3SMichael Baum 		 */
622191128d7SDavid Marchand 		tmp = rte_popcount64(tmp & ~olx);
62379a876e3SMichael Baum 		if (m >= RTE_DIM(txoff_func) || tmp < diff) {
62479a876e3SMichael Baum 			/* First or better match, save and continue. */
62579a876e3SMichael Baum 			m = i;
62679a876e3SMichael Baum 			diff = tmp;
62779a876e3SMichael Baum 			continue;
62879a876e3SMichael Baum 		}
62979a876e3SMichael Baum 		if (tmp == diff) {
63079a876e3SMichael Baum 			tmp = txoff_func[i].olx ^ txoff_func[m].olx;
63179a876e3SMichael Baum 			if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
63279a876e3SMichael Baum 			    __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
63379a876e3SMichael Baum 				/* Lighter not requested offload. */
63479a876e3SMichael Baum 				m = i;
63579a876e3SMichael Baum 			}
63679a876e3SMichael Baum 		}
63779a876e3SMichael Baum 	}
63879a876e3SMichael Baum 	if (m >= RTE_DIM(txoff_func)) {
63979a876e3SMichael Baum 		DRV_LOG(DEBUG, "port %u has no selected Tx function"
64079a876e3SMichael Baum 			       " for requested offloads %04X",
64179a876e3SMichael Baum 				dev->data->port_id, olx);
64279a876e3SMichael Baum 		return NULL;
64379a876e3SMichael Baum 	}
64479a876e3SMichael Baum 	DRV_LOG(DEBUG, "port %u has selected Tx function"
64579a876e3SMichael Baum 		       " supporting offloads %04X/%04X",
64679a876e3SMichael Baum 			dev->data->port_id, olx, txoff_func[m].olx);
64779a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
64879a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tMULTI (multi segment)");
64979a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
65079a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tTSO   (TCP send offload)");
65179a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
65279a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tSWP   (software parser)");
65379a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
65479a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tCSUM  (checksum offload)");
65579a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
65679a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tINLIN (inline data)");
65779a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
65879a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
65979a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
66079a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
66179a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
66279a876e3SMichael Baum 		DRV_LOG(DEBUG, "\tMETAD (tx Scheduling)");
66379a876e3SMichael Baum 	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
66479a876e3SMichael Baum 		if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
66579a876e3SMichael Baum 			DRV_LOG(DEBUG, "\tMPW   (Legacy MPW)");
66679a876e3SMichael Baum 		else
66779a876e3SMichael Baum 			DRV_LOG(DEBUG, "\tEMPW  (Enhanced MPW)");
66879a876e3SMichael Baum 	}
66979a876e3SMichael Baum 	return txoff_func[m].func;
67079a876e3SMichael Baum }
67179a876e3SMichael Baum 
67279a876e3SMichael Baum /**
67379a876e3SMichael Baum  * DPDK callback to get the TX queue information.
67479a876e3SMichael Baum  *
67579a876e3SMichael Baum  * @param dev
67679a876e3SMichael Baum  *   Pointer to the device structure.
67779a876e3SMichael Baum  *
67879a876e3SMichael Baum  * @param tx_queue_id
67979a876e3SMichael Baum  *   Tx queue identifier.
68079a876e3SMichael Baum  *
68179a876e3SMichael Baum  * @param qinfo
68279a876e3SMichael Baum  *   Pointer to the TX queue information structure.
68379a876e3SMichael Baum  *
68479a876e3SMichael Baum  * @return
68579a876e3SMichael Baum  *   None.
68679a876e3SMichael Baum  */
68779a876e3SMichael Baum void
68879a876e3SMichael Baum mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
68979a876e3SMichael Baum 		  struct rte_eth_txq_info *qinfo)
69079a876e3SMichael Baum {
69179a876e3SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
69279a876e3SMichael Baum 	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
69379a876e3SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
69479a876e3SMichael Baum 			container_of(txq, struct mlx5_txq_ctrl, txq);
69579a876e3SMichael Baum 
69679a876e3SMichael Baum 	if (!txq)
69779a876e3SMichael Baum 		return;
69879a876e3SMichael Baum 	qinfo->nb_desc = txq->elts_s;
69979a876e3SMichael Baum 	qinfo->conf.tx_thresh.pthresh = 0;
70079a876e3SMichael Baum 	qinfo->conf.tx_thresh.hthresh = 0;
70179a876e3SMichael Baum 	qinfo->conf.tx_thresh.wthresh = 0;
70279a876e3SMichael Baum 	qinfo->conf.tx_rs_thresh = 0;
70379a876e3SMichael Baum 	qinfo->conf.tx_free_thresh = 0;
70479a876e3SMichael Baum 	qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
70579a876e3SMichael Baum 	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
70679a876e3SMichael Baum }
70779a876e3SMichael Baum 
70879a876e3SMichael Baum /**
70979a876e3SMichael Baum  * DPDK callback to get the TX packet burst mode information.
71079a876e3SMichael Baum  *
71179a876e3SMichael Baum  * @param dev
71279a876e3SMichael Baum  *   Pointer to the device structure.
71379a876e3SMichael Baum  *
71479a876e3SMichael Baum  * @param tx_queue_id
7157be78d02SJosh Soref  *   Tx queue identification.
71679a876e3SMichael Baum  *
71779a876e3SMichael Baum  * @param mode
71879a876e3SMichael Baum  *   Pointer to the burst mode information.
71979a876e3SMichael Baum  *
72079a876e3SMichael Baum  * @return
72179a876e3SMichael Baum  *   0 as success, -EINVAL as failure.
72279a876e3SMichael Baum  */
72379a876e3SMichael Baum int
72479a876e3SMichael Baum mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
72579a876e3SMichael Baum 		       uint16_t tx_queue_id,
72679a876e3SMichael Baum 		       struct rte_eth_burst_mode *mode)
72779a876e3SMichael Baum {
72879a876e3SMichael Baum 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
72979a876e3SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
73079a876e3SMichael Baum 	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
73179a876e3SMichael Baum 	unsigned int i, olx;
73279a876e3SMichael Baum 
73379a876e3SMichael Baum 	for (i = 0; i < RTE_DIM(txoff_func); i++) {
73479a876e3SMichael Baum 		if (pkt_burst == txoff_func[i].func) {
73579a876e3SMichael Baum 			olx = txoff_func[i].olx;
73679a876e3SMichael Baum 			snprintf(mode->info, sizeof(mode->info),
73779a876e3SMichael Baum 				 "%s%s%s%s%s%s%s%s%s%s",
73879a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
73979a876e3SMichael Baum 				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
74079a876e3SMichael Baum 				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
74179a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
74279a876e3SMichael Baum 				 " + MULTI" : "",
74379a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_TSO) ?
74479a876e3SMichael Baum 				 " + TSO" : "",
74579a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_SWP) ?
74679a876e3SMichael Baum 				 " + SWP" : "",
74779a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
74879a876e3SMichael Baum 				 "  + CSUM" : "",
74979a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
75079a876e3SMichael Baum 				 " + INLINE" : "",
75179a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
75279a876e3SMichael Baum 				 " + VLAN" : "",
75379a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
75479a876e3SMichael Baum 				 " + METADATA" : "",
75579a876e3SMichael Baum 				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
75679a876e3SMichael Baum 				 " + TXPP" : "",
75779a876e3SMichael Baum 				 (txq && txq->fast_free) ?
75879a876e3SMichael Baum 				 " + Fast Free" : "");
75979a876e3SMichael Baum 			return 0;
76079a876e3SMichael Baum 		}
76179a876e3SMichael Baum 	}
76279a876e3SMichael Baum 	return -EINVAL;
76379a876e3SMichael Baum }
7643dfa7877SKiran Vedere 
7653dfa7877SKiran Vedere /**
7663dfa7877SKiran Vedere  * Dump SQ/CQ Context to a file.
7673dfa7877SKiran Vedere  *
7683dfa7877SKiran Vedere  * @param[in] port_id
7693dfa7877SKiran Vedere  *   Port ID
7703dfa7877SKiran Vedere  * @param[in] queue_id
7713dfa7877SKiran Vedere  *   Queue ID
7723dfa7877SKiran Vedere  * @param[in] filename
7733dfa7877SKiran Vedere  *   Name of file to dump the Tx Queue Context
7743dfa7877SKiran Vedere  *
7753dfa7877SKiran Vedere  * @return
7763dfa7877SKiran Vedere  *   0 for success, non-zero value depending on failure.
7773dfa7877SKiran Vedere  *
7783dfa7877SKiran Vedere  */
7793dfa7877SKiran Vedere int rte_pmd_mlx5_txq_dump_contexts(uint16_t port_id, uint16_t queue_id, const char *filename)
7803dfa7877SKiran Vedere {
7813dfa7877SKiran Vedere 	struct rte_eth_dev *dev;
7823dfa7877SKiran Vedere 	struct mlx5_priv *priv;
7833dfa7877SKiran Vedere 	struct mlx5_txq_data *txq_data;
7843dfa7877SKiran Vedere 	struct mlx5_txq_ctrl *txq_ctrl;
7853dfa7877SKiran Vedere 	struct mlx5_txq_obj *txq_obj;
7863dfa7877SKiran Vedere 	struct mlx5_devx_sq *sq;
7873dfa7877SKiran Vedere 	struct mlx5_devx_cq *cq;
7883dfa7877SKiran Vedere 	struct mlx5_devx_obj *sq_devx_obj;
7893dfa7877SKiran Vedere 	struct mlx5_devx_obj *cq_devx_obj;
7903dfa7877SKiran Vedere 
7913dfa7877SKiran Vedere 	uint32_t sq_out[MLX5_ST_SZ_DW(query_sq_out)] = {0};
7923dfa7877SKiran Vedere 	uint32_t cq_out[MLX5_ST_SZ_DW(query_cq_out)] = {0};
7933dfa7877SKiran Vedere 
7943dfa7877SKiran Vedere 	int ret;
7953dfa7877SKiran Vedere 	FILE *fd;
7963dfa7877SKiran Vedere 	MKSTR(path, "./%s", filename);
7973dfa7877SKiran Vedere 
7983dfa7877SKiran Vedere 	if (!rte_eth_dev_is_valid_port(port_id))
7993dfa7877SKiran Vedere 		return -ENODEV;
8003dfa7877SKiran Vedere 
8013dfa7877SKiran Vedere 	if (rte_eth_tx_queue_is_valid(port_id, queue_id))
8023dfa7877SKiran Vedere 		return -EINVAL;
8033dfa7877SKiran Vedere 
8043dfa7877SKiran Vedere 	fd = fopen(path, "w");
8053dfa7877SKiran Vedere 	if (!fd) {
8063dfa7877SKiran Vedere 		rte_errno = errno;
8073dfa7877SKiran Vedere 		return -EIO;
8083dfa7877SKiran Vedere 	}
8093dfa7877SKiran Vedere 
8103dfa7877SKiran Vedere 	dev = &rte_eth_devices[port_id];
8113dfa7877SKiran Vedere 	priv = dev->data->dev_private;
8123dfa7877SKiran Vedere 	txq_data = (*priv->txqs)[queue_id];
8133dfa7877SKiran Vedere 	txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
8143dfa7877SKiran Vedere 	txq_obj = txq_ctrl->obj;
8153dfa7877SKiran Vedere 	sq = &txq_obj->sq_obj;
8163dfa7877SKiran Vedere 	cq = &txq_obj->cq_obj;
8173dfa7877SKiran Vedere 	sq_devx_obj = sq->sq;
8183dfa7877SKiran Vedere 	cq_devx_obj = cq->cq;
8193dfa7877SKiran Vedere 
8203dfa7877SKiran Vedere 	do {
8213dfa7877SKiran Vedere 		ret = mlx5_devx_cmd_query_sq(sq_devx_obj, sq_out, sizeof(sq_out));
8223dfa7877SKiran Vedere 		if (ret)
8233dfa7877SKiran Vedere 			break;
8243dfa7877SKiran Vedere 
8253dfa7877SKiran Vedere 		/* Dump sq query output to file */
8263dfa7877SKiran Vedere 		MKSTR(sq_headline, "SQ DevX ID = %u Port = %u Queue index = %u ",
8273dfa7877SKiran Vedere 					sq_devx_obj->id, port_id, queue_id);
8283dfa7877SKiran Vedere 		mlx5_dump_to_file(fd, NULL, sq_headline, 0);
8293dfa7877SKiran Vedere 		mlx5_dump_to_file(fd, "Query SQ Dump:",
8303dfa7877SKiran Vedere 					(const void *)((uintptr_t)sq_out),
8313dfa7877SKiran Vedere 					sizeof(sq_out));
8323dfa7877SKiran Vedere 
8333dfa7877SKiran Vedere 		ret = mlx5_devx_cmd_query_cq(cq_devx_obj, cq_out, sizeof(cq_out));
8343dfa7877SKiran Vedere 		if (ret)
8353dfa7877SKiran Vedere 			break;
8363dfa7877SKiran Vedere 
8373dfa7877SKiran Vedere 		/* Dump cq query output to file */
8383dfa7877SKiran Vedere 		MKSTR(cq_headline, "CQ DevX ID = %u Port = %u Queue index = %u ",
8393dfa7877SKiran Vedere 						cq_devx_obj->id, port_id, queue_id);
8403dfa7877SKiran Vedere 		mlx5_dump_to_file(fd, NULL, cq_headline, 0);
8413dfa7877SKiran Vedere 		mlx5_dump_to_file(fd, "Query CQ Dump:",
8423dfa7877SKiran Vedere 					(const void *)((uintptr_t)cq_out),
8433dfa7877SKiran Vedere 					sizeof(cq_out));
8443dfa7877SKiran Vedere 	} while (false);
8453dfa7877SKiran Vedere 
8463dfa7877SKiran Vedere 	fclose(fd);
8473dfa7877SKiran Vedere 	return ret;
8483dfa7877SKiran Vedere }
849