xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 4c3d7961d9002bb715a8ee76bcf464d633316d4c)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
22e22920bSAdrien Mazarguil  * Copyright 2015 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2015 Mellanox Technologies, Ltd
42e22920bSAdrien Mazarguil  */
52e22920bSAdrien Mazarguil 
62e22920bSAdrien Mazarguil #include <stddef.h>
72e22920bSAdrien Mazarguil #include <errno.h>
82e22920bSAdrien Mazarguil #include <string.h>
92e22920bSAdrien Mazarguil #include <stdint.h>
10f8b9a3baSXueming Li #include <unistd.h>
11843e7205SAli Alnubani #include <inttypes.h>
122e22920bSAdrien Mazarguil 
132e22920bSAdrien Mazarguil #include <rte_mbuf.h>
142e22920bSAdrien Mazarguil #include <rte_malloc.h>
15df96fd0dSBruce Richardson #include <ethdev_driver.h>
161f37cb2bSDavid Marchand #include <bus_pci_driver.h>
172e22920bSAdrien Mazarguil #include <rte_common.h>
182aba9fc7SOphir Munk #include <rte_eal_paging.h>
192e22920bSAdrien Mazarguil 
20e415f348SMatan Azrad #include <mlx5_common.h>
21b8dc6b0eSVu Pham #include <mlx5_common_mr.h>
22ac3fc732SSuanming Mou #include <mlx5_malloc.h>
237b4f1e6bSMatan Azrad 
241d88ba17SNélio Laranjeiro #include "mlx5_defs.h"
257b4f1e6bSMatan Azrad #include "mlx5_utils.h"
262e22920bSAdrien Mazarguil #include "mlx5.h"
27377b69fbSMichael Baum #include "mlx5_tx.h"
282e22920bSAdrien Mazarguil #include "mlx5_rxtx.h"
292e22920bSAdrien Mazarguil #include "mlx5_autoconf.h"
301944fbc3SSuanming Mou #include "mlx5_devx.h"
3126e1eaf2SDariusz Sosnowski #include "rte_pmd_mlx5.h"
3226e1eaf2SDariusz Sosnowski #include "mlx5_flow.h"
332e22920bSAdrien Mazarguil 
342e22920bSAdrien Mazarguil /**
352e22920bSAdrien Mazarguil  * Allocate TX queue elements.
362e22920bSAdrien Mazarguil  *
3721c8bb49SNélio Laranjeiro  * @param txq_ctrl
382e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
392e22920bSAdrien Mazarguil  */
406e78005aSNélio Laranjeiro void
416e78005aSNélio Laranjeiro txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
422e22920bSAdrien Mazarguil {
436e78005aSNélio Laranjeiro 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
442e22920bSAdrien Mazarguil 	unsigned int i;
452e22920bSAdrien Mazarguil 
461d88ba17SNélio Laranjeiro 	for (i = 0; (i != elts_n); ++i)
4738b4b397SViacheslav Ovsiienko 		txq_ctrl->txq.elts[i] = NULL;
48a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
49d5c900d1SYongseok Koh 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
5021c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_head = 0;
5121c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_tail = 0;
52c305090bSAdrien Mazarguil 	txq_ctrl->txq.elts_comp = 0;
532e22920bSAdrien Mazarguil }
542e22920bSAdrien Mazarguil 
552e22920bSAdrien Mazarguil /**
562e22920bSAdrien Mazarguil  * Free TX queue elements.
572e22920bSAdrien Mazarguil  *
5821c8bb49SNélio Laranjeiro  * @param txq_ctrl
592e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
602e22920bSAdrien Mazarguil  */
611fd9af05SViacheslav Ovsiienko void
62991b04f6SNélio Laranjeiro txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
632e22920bSAdrien Mazarguil {
648c819a69SYongseok Koh 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
658c819a69SYongseok Koh 	const uint16_t elts_m = elts_n - 1;
668c819a69SYongseok Koh 	uint16_t elts_head = txq_ctrl->txq.elts_head;
678c819a69SYongseok Koh 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
68712d1fb8STyler Retzlaff 	struct rte_mbuf *(*elts)[] = &txq_ctrl->txq.elts;
692e22920bSAdrien Mazarguil 
70a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
71d5c900d1SYongseok Koh 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
7221c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_head = 0;
7321c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_tail = 0;
74c305090bSAdrien Mazarguil 	txq_ctrl->txq.elts_comp = 0;
752e22920bSAdrien Mazarguil 
76b185e63fSAdrien Mazarguil 	while (elts_tail != elts_head) {
778c819a69SYongseok Koh 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
782e22920bSAdrien Mazarguil 
798e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(elt != NULL);
80c80711c3SYongseok Koh 		rte_pktmbuf_free_seg(elt);
810afacb04SAlexander Kozyrev #ifdef RTE_LIBRTE_MLX5_DEBUG
82b185e63fSAdrien Mazarguil 		/* Poisoning. */
838c819a69SYongseok Koh 		memset(&(*elts)[elts_tail & elts_m],
841d88ba17SNélio Laranjeiro 		       0x77,
858c819a69SYongseok Koh 		       sizeof((*elts)[elts_tail & elts_m]));
86b185e63fSAdrien Mazarguil #endif
878c819a69SYongseok Koh 		++elts_tail;
882e22920bSAdrien Mazarguil 	}
892e22920bSAdrien Mazarguil }
902e22920bSAdrien Mazarguil 
912e22920bSAdrien Mazarguil /**
92dbccb4cdSShahaf Shuler  * Returns the per-port supported offloads.
93dbccb4cdSShahaf Shuler  *
94af4f09f2SNélio Laranjeiro  * @param dev
95af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
96dbccb4cdSShahaf Shuler  *
97dbccb4cdSShahaf Shuler  * @return
98dbccb4cdSShahaf Shuler  *   Supported Tx offloads.
99dbccb4cdSShahaf Shuler  */
100dbccb4cdSShahaf Shuler uint64_t
101af4f09f2SNélio Laranjeiro mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
102dbccb4cdSShahaf Shuler {
103dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
104295968d1SFerruh Yigit 	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
105295968d1SFerruh Yigit 			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
10645a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
10787af0d1eSMichael Baum 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
108dbccb4cdSShahaf Shuler 
10987af0d1eSMichael Baum 	if (dev_cap->hw_csum)
110295968d1SFerruh Yigit 		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
111295968d1SFerruh Yigit 			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
112295968d1SFerruh Yigit 			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
11387af0d1eSMichael Baum 	if (dev_cap->tso)
114295968d1SFerruh Yigit 		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1152f5122dfSViacheslav Ovsiienko 	if (priv->sh->config.tx_pp ||
1162f5122dfSViacheslav Ovsiienko 	    priv->sh->cdev->config.hca_attr.wait_on_time)
117295968d1SFerruh Yigit 		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
11887af0d1eSMichael Baum 	if (dev_cap->swp) {
11987af0d1eSMichael Baum 		if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
120295968d1SFerruh Yigit 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
12187af0d1eSMichael Baum 		if (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP)
122295968d1SFerruh Yigit 			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
123295968d1SFerruh Yigit 				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
124e46821e9SShahaf Shuler 	}
12587af0d1eSMichael Baum 	if (dev_cap->tunnel_en) {
12687af0d1eSMichael Baum 		if (dev_cap->hw_csum)
127295968d1SFerruh Yigit 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
12887af0d1eSMichael Baum 		if (dev_cap->tso) {
12987af0d1eSMichael Baum 			if (dev_cap->tunnel_en &
130c1a320bfSTal Shnaiderman 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
131295968d1SFerruh Yigit 				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
13287af0d1eSMichael Baum 			if (dev_cap->tunnel_en &
133c1a320bfSTal Shnaiderman 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
134295968d1SFerruh Yigit 				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
13587af0d1eSMichael Baum 			if (dev_cap->tunnel_en &
136c1a320bfSTal Shnaiderman 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
137295968d1SFerruh Yigit 				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
138c1a320bfSTal Shnaiderman 		}
139dbccb4cdSShahaf Shuler 	}
1401d89c404SViacheslav Ovsiienko 	if (!config->mprq.enabled)
141295968d1SFerruh Yigit 		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
142dbccb4cdSShahaf Shuler 	return offloads;
143dbccb4cdSShahaf Shuler }
144dbccb4cdSShahaf Shuler 
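/*
 * Illustrative application-side sketch (not part of the driver): the
 * capability mask assembled above is exposed to applications through
 * rte_eth_dev_info_get() as dev_info.tx_offload_capa, e.g. to check
 * whether TSO may be requested at queue setup time. The port_id is a
 * placeholder.
 */
static __rte_unused int
example_port_supports_tso(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	return (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0;
}
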
145161d103bSViacheslav Ovsiienko /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
146161d103bSViacheslav Ovsiienko static void
147161d103bSViacheslav Ovsiienko txq_sync_cq(struct mlx5_txq_data *txq)
148161d103bSViacheslav Ovsiienko {
149161d103bSViacheslav Ovsiienko 	volatile struct mlx5_cqe *cqe;
150161d103bSViacheslav Ovsiienko 	int ret, i;
151161d103bSViacheslav Ovsiienko 
152161d103bSViacheslav Ovsiienko 	i = txq->cqe_s;
153161d103bSViacheslav Ovsiienko 	do {
154161d103bSViacheslav Ovsiienko 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
155161d103bSViacheslav Ovsiienko 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
156161d103bSViacheslav Ovsiienko 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
157161d103bSViacheslav Ovsiienko 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
158161d103bSViacheslav Ovsiienko 				/* No new CQEs in completion queue. */
159161d103bSViacheslav Ovsiienko 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
160161d103bSViacheslav Ovsiienko 				break;
161161d103bSViacheslav Ovsiienko 			}
162161d103bSViacheslav Ovsiienko 		}
163161d103bSViacheslav Ovsiienko 		++txq->cq_ci;
164161d103bSViacheslav Ovsiienko 	} while (--i);
165161d103bSViacheslav Ovsiienko 	/* Move all CQEs to HW ownership. */
166161d103bSViacheslav Ovsiienko 	for (i = 0; i < txq->cqe_s; i++) {
167161d103bSViacheslav Ovsiienko 		cqe = &txq->cqes[i];
168161d103bSViacheslav Ovsiienko 		cqe->op_own = MLX5_CQE_INVALIDATE;
169161d103bSViacheslav Ovsiienko 	}
170161d103bSViacheslav Ovsiienko 	/* Resync CQE and WQE (WQ in reset state). */
171f0f5d844SPhil Yang 	rte_io_wmb();
172161d103bSViacheslav Ovsiienko 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
17327b09507SViacheslav Ovsiienko 	txq->cq_pi = txq->cq_ci;
174f0f5d844SPhil Yang 	rte_io_wmb();
175161d103bSViacheslav Ovsiienko }
176161d103bSViacheslav Ovsiienko 
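/*
 * Note on the "index & mask" pattern above (and in txq_free_elts()):
 * queue sizes are powers of two and the mask is size - 1, so free-running
 * 16-bit counters such as cq_ci wrap onto the ring without a modulo.
 * Minimal illustrative helper, not used by the driver:
 */
static __rte_unused uint16_t
example_ring_slot(uint16_t index, uint16_t ring_size_pow2)
{
	/* ring_size_pow2 must be a power of two, e.g. 256: 300 & 255 == 44. */
	return index & (ring_size_pow2 - 1);
}
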
177161d103bSViacheslav Ovsiienko /**
178161d103bSViacheslav Ovsiienko  * Tx queue stop. Device queue goes to the idle state,
179161d103bSViacheslav Ovsiienko  * all involved mbufs are freed from elts/WQ.
180161d103bSViacheslav Ovsiienko  *
181161d103bSViacheslav Ovsiienko  * @param dev
182161d103bSViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
183161d103bSViacheslav Ovsiienko  * @param idx
184161d103bSViacheslav Ovsiienko  *   Tx queue index.
185161d103bSViacheslav Ovsiienko  *
186161d103bSViacheslav Ovsiienko  * @return
187161d103bSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
188161d103bSViacheslav Ovsiienko  */
189161d103bSViacheslav Ovsiienko int
190161d103bSViacheslav Ovsiienko mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
191161d103bSViacheslav Ovsiienko {
192161d103bSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
193161d103bSViacheslav Ovsiienko 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
194161d103bSViacheslav Ovsiienko 	struct mlx5_txq_ctrl *txq_ctrl =
195161d103bSViacheslav Ovsiienko 			container_of(txq, struct mlx5_txq_ctrl, txq);
196161d103bSViacheslav Ovsiienko 	int ret;
197161d103bSViacheslav Ovsiienko 
198161d103bSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
199161d103bSViacheslav Ovsiienko 	/* Move QP to RESET state. */
2005d9f3c3fSMichael Baum 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
2015d9f3c3fSMichael Baum 					   (uint8_t)priv->dev_port);
2025d9f3c3fSMichael Baum 	if (ret)
203161d103bSViacheslav Ovsiienko 		return ret;
204161d103bSViacheslav Ovsiienko 	/* Handle all send completions. */
205161d103bSViacheslav Ovsiienko 	txq_sync_cq(txq);
206161d103bSViacheslav Ovsiienko 	/* Free elts stored in the SQ. */
207161d103bSViacheslav Ovsiienko 	txq_free_elts(txq_ctrl);
208161d103bSViacheslav Ovsiienko 	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
209161d103bSViacheslav Ovsiienko 	txq->wqe_ci = txq->wqe_s;
210161d103bSViacheslav Ovsiienko 	txq->wqe_pi = 0;
211161d103bSViacheslav Ovsiienko 	txq->elts_comp = 0;
212161d103bSViacheslav Ovsiienko 	/* Set the actual queue state. */
213161d103bSViacheslav Ovsiienko 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
214161d103bSViacheslav Ovsiienko 	return 0;
215161d103bSViacheslav Ovsiienko }
216161d103bSViacheslav Ovsiienko 
217161d103bSViacheslav Ovsiienko /**
218161d103bSViacheslav Ovsiienko  * Tx queue stop. Device queue goes to the idle state,
219161d103bSViacheslav Ovsiienko  * all involved mbufs are freed from elts/WQ.
220161d103bSViacheslav Ovsiienko  *
221161d103bSViacheslav Ovsiienko  * @param dev
222161d103bSViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
223161d103bSViacheslav Ovsiienko  * @param idx
224161d103bSViacheslav Ovsiienko  *   Tx queue index.
225161d103bSViacheslav Ovsiienko  *
226161d103bSViacheslav Ovsiienko  * @return
227161d103bSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
228161d103bSViacheslav Ovsiienko  */
229161d103bSViacheslav Ovsiienko int
230161d103bSViacheslav Ovsiienko mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
231161d103bSViacheslav Ovsiienko {
232161d103bSViacheslav Ovsiienko 	int ret;
233161d103bSViacheslav Ovsiienko 
2348682e492SFerruh Yigit 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
235161d103bSViacheslav Ovsiienko 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
236161d103bSViacheslav Ovsiienko 		rte_errno = EINVAL;
237161d103bSViacheslav Ovsiienko 		return -EINVAL;
238161d103bSViacheslav Ovsiienko 	}
239161d103bSViacheslav Ovsiienko 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
240161d103bSViacheslav Ovsiienko 		return 0;
241161d103bSViacheslav Ovsiienko 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
242161d103bSViacheslav Ovsiienko 		ret = mlx5_mp_os_req_queue_control(dev, idx,
243161d103bSViacheslav Ovsiienko 						   MLX5_MP_REQ_QUEUE_TX_STOP);
244161d103bSViacheslav Ovsiienko 	} else {
245161d103bSViacheslav Ovsiienko 		ret = mlx5_tx_queue_stop_primary(dev, idx);
246161d103bSViacheslav Ovsiienko 	}
247161d103bSViacheslav Ovsiienko 	return ret;
248161d103bSViacheslav Ovsiienko }
249161d103bSViacheslav Ovsiienko 
250161d103bSViacheslav Ovsiienko /**
251161d103bSViacheslav Ovsiienko  * Tx queue start. Device queue goes to the ready state,
252161d103bSViacheslav Ovsiienko  * the queue counters are reset and transmission can resume.
253161d103bSViacheslav Ovsiienko  *
254161d103bSViacheslav Ovsiienko  * @param dev
255161d103bSViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
256161d103bSViacheslav Ovsiienko  * @param idx
257161d103bSViacheslav Ovsiienko  *   Tx queue index.
258161d103bSViacheslav Ovsiienko  *
259161d103bSViacheslav Ovsiienko  * @return
260161d103bSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
261161d103bSViacheslav Ovsiienko  */
262161d103bSViacheslav Ovsiienko int
263161d103bSViacheslav Ovsiienko mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
264161d103bSViacheslav Ovsiienko {
265161d103bSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
266161d103bSViacheslav Ovsiienko 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
267161d103bSViacheslav Ovsiienko 	struct mlx5_txq_ctrl *txq_ctrl =
268161d103bSViacheslav Ovsiienko 			container_of(txq, struct mlx5_txq_ctrl, txq);
269161d103bSViacheslav Ovsiienko 	int ret;
270161d103bSViacheslav Ovsiienko 
271161d103bSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
2725d9f3c3fSMichael Baum 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
27389f170c0SMatan Azrad 					   MLX5_TXQ_MOD_RST2RDY,
2745d9f3c3fSMichael Baum 					   (uint8_t)priv->dev_port);
2755d9f3c3fSMichael Baum 	if (ret)
276161d103bSViacheslav Ovsiienko 		return ret;
277161d103bSViacheslav Ovsiienko 	txq_ctrl->txq.wqe_ci = 0;
278161d103bSViacheslav Ovsiienko 	txq_ctrl->txq.wqe_pi = 0;
279161d103bSViacheslav Ovsiienko 	txq_ctrl->txq.elts_comp = 0;
280161d103bSViacheslav Ovsiienko 	/* Set the actual queue state. */
281161d103bSViacheslav Ovsiienko 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
282161d103bSViacheslav Ovsiienko 	return 0;
283161d103bSViacheslav Ovsiienko }
284161d103bSViacheslav Ovsiienko 
285161d103bSViacheslav Ovsiienko /**
286161d103bSViacheslav Ovsiienko  * Tx queue start. Device queue goes to the ready state,
287161d103bSViacheslav Ovsiienko  * the queue counters are reset and transmission can resume.
288161d103bSViacheslav Ovsiienko  *
289161d103bSViacheslav Ovsiienko  * @param dev
290161d103bSViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
291161d103bSViacheslav Ovsiienko  * @param idx
292161d103bSViacheslav Ovsiienko  *   Tx queue index.
293161d103bSViacheslav Ovsiienko  *
294161d103bSViacheslav Ovsiienko  * @return
295161d103bSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
296161d103bSViacheslav Ovsiienko  */
297161d103bSViacheslav Ovsiienko int
298161d103bSViacheslav Ovsiienko mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
299161d103bSViacheslav Ovsiienko {
300161d103bSViacheslav Ovsiienko 	int ret;
301161d103bSViacheslav Ovsiienko 
3028682e492SFerruh Yigit 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
303161d103bSViacheslav Ovsiienko 		DRV_LOG(ERR, "Hairpin queue can't be started");
304161d103bSViacheslav Ovsiienko 		rte_errno = EINVAL;
305161d103bSViacheslav Ovsiienko 		return -EINVAL;
306161d103bSViacheslav Ovsiienko 	}
307161d103bSViacheslav Ovsiienko 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
308161d103bSViacheslav Ovsiienko 		return 0;
309161d103bSViacheslav Ovsiienko 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
310161d103bSViacheslav Ovsiienko 		ret = mlx5_mp_os_req_queue_control(dev, idx,
311161d103bSViacheslav Ovsiienko 						   MLX5_MP_REQ_QUEUE_TX_START);
312161d103bSViacheslav Ovsiienko 	} else {
313161d103bSViacheslav Ovsiienko 		ret = mlx5_tx_queue_start_primary(dev, idx);
314161d103bSViacheslav Ovsiienko 	}
315161d103bSViacheslav Ovsiienko 	return ret;
316161d103bSViacheslav Ovsiienko }
317161d103bSViacheslav Ovsiienko 
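/*
 * Illustrative application-side usage (not part of the driver): the two
 * callbacks above back rte_eth_dev_tx_queue_stop() and
 * rte_eth_dev_tx_queue_start(). A queue can be cycled at runtime as
 * sketched below; port_id and queue_id are placeholders.
 */
static __rte_unused int
example_restart_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	/* Queue is idle here; its mbufs have been freed by the PMD. */
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}
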
318dbccb4cdSShahaf Shuler /**
319ae18a1aeSOri Kam  * Tx queue presetup checks.
3202e22920bSAdrien Mazarguil  *
3212e22920bSAdrien Mazarguil  * @param dev
3222e22920bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
3232e22920bSAdrien Mazarguil  * @param idx
324ae18a1aeSOri Kam  *   Tx queue index.
3252e22920bSAdrien Mazarguil  * @param desc
3262e22920bSAdrien Mazarguil  *   Number of descriptors to configure in queue.
3272e22920bSAdrien Mazarguil  *
3282e22920bSAdrien Mazarguil  * @return
329a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
3302e22920bSAdrien Mazarguil  */
331ae18a1aeSOri Kam static int
332e891b54aSAlexander Kozyrev mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
3332e22920bSAdrien Mazarguil {
334dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
3352e22920bSAdrien Mazarguil 
336*4c3d7961SIgor Gutorov 	if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) {
337*4c3d7961SIgor Gutorov 		DRV_LOG(ERR,
338*4c3d7961SIgor Gutorov 			"port %u number of descriptors requested for Tx queue"
339*4c3d7961SIgor Gutorov 			" %u is more than supported",
340*4c3d7961SIgor Gutorov 			dev->data->port_id, idx);
341*4c3d7961SIgor Gutorov 		rte_errno = EINVAL;
342*4c3d7961SIgor Gutorov 		return -EINVAL;
343*4c3d7961SIgor Gutorov 	}
344e891b54aSAlexander Kozyrev 	if (*desc <= MLX5_TX_COMP_THRESH) {
345a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING,
346a170a30dSNélio Laranjeiro 			"port %u number of descriptors requested for Tx queue"
347a170a30dSNélio Laranjeiro 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
348e891b54aSAlexander Kozyrev 			" instead of %u", dev->data->port_id, idx,
349e891b54aSAlexander Kozyrev 			MLX5_TX_COMP_THRESH + 1, *desc);
350e891b54aSAlexander Kozyrev 		*desc = MLX5_TX_COMP_THRESH + 1;
351c305090bSAdrien Mazarguil 	}
352e891b54aSAlexander Kozyrev 	if (!rte_is_power_of_2(*desc)) {
353e891b54aSAlexander Kozyrev 		*desc = 1 << log2above(*desc);
354a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING,
355a170a30dSNélio Laranjeiro 			"port %u increased number of descriptors in Tx queue"
356a170a30dSNélio Laranjeiro 			" %u to the next power of two (%d)",
357e891b54aSAlexander Kozyrev 			dev->data->port_id, idx, *desc);
3581d88ba17SNélio Laranjeiro 	}
359a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
360e891b54aSAlexander Kozyrev 		dev->data->port_id, idx, *desc);
3612e22920bSAdrien Mazarguil 	if (idx >= priv->txqs_n) {
362a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
3630f99970bSNélio Laranjeiro 			dev->data->port_id, idx, priv->txqs_n);
364a6d83b6aSNélio Laranjeiro 		rte_errno = EOVERFLOW;
365a6d83b6aSNélio Laranjeiro 		return -rte_errno;
3662e22920bSAdrien Mazarguil 	}
367af4f09f2SNélio Laranjeiro 	if (!mlx5_txq_releasable(dev, idx)) {
368a6d83b6aSNélio Laranjeiro 		rte_errno = EBUSY;
369a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u unable to release queue index %u",
3700f99970bSNélio Laranjeiro 			dev->data->port_id, idx);
371a6d83b6aSNélio Laranjeiro 		return -rte_errno;
372faf2667fSNélio Laranjeiro 	}
373af4f09f2SNélio Laranjeiro 	mlx5_txq_release(dev, idx);
374ae18a1aeSOri Kam 	return 0;
375ae18a1aeSOri Kam }
37617a57183SMichael Baum 
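/*
 * Minimal sketch of the power-of-two rounding applied above (illustrative
 * only; the driver additionally enforces the MLX5_TX_COMP_THRESH floor and
 * the HCA log_max_wq_sz ceiling). rte_align32pow2() from rte_common.h
 * performs the same rounding as the log2above() based code, e.g.
 * 1000 -> 1024.
 */
static __rte_unused uint32_t
example_round_tx_desc(uint32_t desc)
{
	if (!rte_is_power_of_2(desc))
		desc = rte_align32pow2(desc); /* e.g. 1000 -> 1024 */
	return desc;
}
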
377ae18a1aeSOri Kam /**
378ae18a1aeSOri Kam  * DPDK callback to configure a TX queue.
379ae18a1aeSOri Kam  *
380ae18a1aeSOri Kam  * @param dev
381ae18a1aeSOri Kam  *   Pointer to Ethernet device structure.
382ae18a1aeSOri Kam  * @param idx
383ae18a1aeSOri Kam  *   TX queue index.
384ae18a1aeSOri Kam  * @param desc
385ae18a1aeSOri Kam  *   Number of descriptors to configure in queue.
386ae18a1aeSOri Kam  * @param socket
387ae18a1aeSOri Kam  *   NUMA socket on which memory must be allocated.
388ae18a1aeSOri Kam  * @param[in] conf
389ae18a1aeSOri Kam  *   Thresholds parameters.
390ae18a1aeSOri Kam  *
391ae18a1aeSOri Kam  * @return
392ae18a1aeSOri Kam  *   0 on success, a negative errno value otherwise and rte_errno is set.
393ae18a1aeSOri Kam  */
394ae18a1aeSOri Kam int
395ae18a1aeSOri Kam mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
396ae18a1aeSOri Kam 		    unsigned int socket, const struct rte_eth_txconf *conf)
397ae18a1aeSOri Kam {
398ae18a1aeSOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
399ae18a1aeSOri Kam 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
400ae18a1aeSOri Kam 	struct mlx5_txq_ctrl *txq_ctrl =
401ae18a1aeSOri Kam 		container_of(txq, struct mlx5_txq_ctrl, txq);
402ae18a1aeSOri Kam 	int res;
403ae18a1aeSOri Kam 
404e891b54aSAlexander Kozyrev 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
405ae18a1aeSOri Kam 	if (res)
406ae18a1aeSOri Kam 		return res;
407af4f09f2SNélio Laranjeiro 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
4086e78005aSNélio Laranjeiro 	if (!txq_ctrl) {
409a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
4100f99970bSNélio Laranjeiro 			dev->data->port_id, idx);
411a6d83b6aSNélio Laranjeiro 		return -rte_errno;
4126e78005aSNélio Laranjeiro 	}
413a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
414a170a30dSNélio Laranjeiro 		dev->data->port_id, idx);
41521c8bb49SNélio Laranjeiro 	(*priv->txqs)[idx] = &txq_ctrl->txq;
416a6d83b6aSNélio Laranjeiro 	return 0;
4172e22920bSAdrien Mazarguil }
4182e22920bSAdrien Mazarguil 
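/*
 * Illustrative application-side sketch (not part of the driver): the
 * callback above is reached through rte_eth_tx_queue_setup(). Passing
 * NULL for the Tx configuration makes ethdev use the PMD defaults;
 * port_id, queue_id and the descriptor count are placeholders.
 */
static __rte_unused int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
				      rte_eth_dev_socket_id(port_id), NULL);
}
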
4192e22920bSAdrien Mazarguil /**
420ae18a1aeSOri Kam  * DPDK callback to configure a TX hairpin queue.
421ae18a1aeSOri Kam  *
422ae18a1aeSOri Kam  * @param dev
423ae18a1aeSOri Kam  *   Pointer to Ethernet device structure.
424ae18a1aeSOri Kam  * @param idx
425ae18a1aeSOri Kam  *   TX queue index.
426ae18a1aeSOri Kam  * @param desc
427ae18a1aeSOri Kam  *   Number of descriptors to configure in queue.
428ae18a1aeSOri Kam  * @param[in] hairpin_conf
429ae18a1aeSOri Kam  *   The hairpin binding configuration.
430ae18a1aeSOri Kam  *
431ae18a1aeSOri Kam  * @return
432ae18a1aeSOri Kam  *   0 on success, a negative errno value otherwise and rte_errno is set.
433ae18a1aeSOri Kam  */
434ae18a1aeSOri Kam int
435ae18a1aeSOri Kam mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
436ae18a1aeSOri Kam 			    uint16_t desc,
437ae18a1aeSOri Kam 			    const struct rte_eth_hairpin_conf *hairpin_conf)
438ae18a1aeSOri Kam {
439ae18a1aeSOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
440ae18a1aeSOri Kam 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
441ae18a1aeSOri Kam 	struct mlx5_txq_ctrl *txq_ctrl =
442ae18a1aeSOri Kam 		container_of(txq, struct mlx5_txq_ctrl, txq);
443ae18a1aeSOri Kam 	int res;
444ae18a1aeSOri Kam 
445e891b54aSAlexander Kozyrev 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
446ae18a1aeSOri Kam 	if (res)
447ae18a1aeSOri Kam 		return res;
4481a01264fSBing Zhao 	if (hairpin_conf->peer_count != 1) {
449ae18a1aeSOri Kam 		rte_errno = EINVAL;
4501a01264fSBing Zhao 		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
4511a01264fSBing Zhao 			" peer count is %u", dev->data->port_id,
4521a01264fSBing Zhao 			idx, hairpin_conf->peer_count);
453ae18a1aeSOri Kam 		return -rte_errno;
454ae18a1aeSOri Kam 	}
4551a01264fSBing Zhao 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
4561a01264fSBing Zhao 		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
4571a01264fSBing Zhao 			rte_errno = EINVAL;
4581a01264fSBing Zhao 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
4591a01264fSBing Zhao 				" index %u, Rx %u is larger than %u",
4601a01264fSBing Zhao 				dev->data->port_id, idx,
4611a01264fSBing Zhao 				hairpin_conf->peers[0].queue, priv->rxqs_n);
4621a01264fSBing Zhao 			return -rte_errno;
4631a01264fSBing Zhao 		}
4641a01264fSBing Zhao 	} else {
4651a01264fSBing Zhao 		if (hairpin_conf->manual_bind == 0 ||
4661a01264fSBing Zhao 		    hairpin_conf->tx_explicit == 0) {
4671a01264fSBing Zhao 			rte_errno = EINVAL;
4681a01264fSBing Zhao 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
4691a01264fSBing Zhao 				" index %u peer port %u with attributes %u %u",
4701a01264fSBing Zhao 				dev->data->port_id, idx,
4711a01264fSBing Zhao 				hairpin_conf->peers[0].port,
4721a01264fSBing Zhao 				hairpin_conf->manual_bind,
4731a01264fSBing Zhao 				hairpin_conf->tx_explicit);
4741a01264fSBing Zhao 			return -rte_errno;
4751a01264fSBing Zhao 		}
4761a01264fSBing Zhao 	}
477ae18a1aeSOri Kam 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc,	hairpin_conf);
478ae18a1aeSOri Kam 	if (!txq_ctrl) {
479ae18a1aeSOri Kam 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
480ae18a1aeSOri Kam 			dev->data->port_id, idx);
481ae18a1aeSOri Kam 		return -rte_errno;
482ae18a1aeSOri Kam 	}
483ae18a1aeSOri Kam 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
484ae18a1aeSOri Kam 		dev->data->port_id, idx);
485ae18a1aeSOri Kam 	(*priv->txqs)[idx] = &txq_ctrl->txq;
486161d103bSViacheslav Ovsiienko 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
487ae18a1aeSOri Kam 	return 0;
488ae18a1aeSOri Kam }
489ae18a1aeSOri Kam 
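/*
 * Illustrative application-side sketch (not part of the driver): a Tx
 * hairpin queue bound to Rx queue 0 of the same port, the single-port
 * case accepted above without manual_bind/tx_explicit. A full hairpin
 * path also needs the peer Rx hairpin queue to be configured; port_id,
 * queue index and descriptor count are placeholders.
 */
static __rte_unused int
example_setup_tx_hairpin(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		.peers[0] = { .port = port_id, .queue = 0 },
	};

	return rte_eth_tx_hairpin_queue_setup(port_id, queue_id, 32, &conf);
}
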
490ae18a1aeSOri Kam /**
4912e22920bSAdrien Mazarguil  * DPDK callback to release a TX queue.
4922e22920bSAdrien Mazarguil  *
4937483341aSXueming Li  * @param dev
4947483341aSXueming Li  *   Pointer to Ethernet device structure.
4957483341aSXueming Li  * @param qid
4967483341aSXueming Li  *   Transmit queue index.
4972e22920bSAdrien Mazarguil  */
4982e22920bSAdrien Mazarguil void
4997483341aSXueming Li mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
5002e22920bSAdrien Mazarguil {
5017483341aSXueming Li 	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
5022e22920bSAdrien Mazarguil 
5032e22920bSAdrien Mazarguil 	if (txq == NULL)
5042e22920bSAdrien Mazarguil 		return;
505a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
5067483341aSXueming Li 		dev->data->port_id, qid);
5077483341aSXueming Li 	mlx5_txq_release(dev, qid);
5082e22920bSAdrien Mazarguil }
509f8b9a3baSXueming Li 
510120dc4a7SYongseok Koh /**
511120dc4a7SYongseok Koh  * Remap UAR register of a Tx queue for secondary process.
512f8b9a3baSXueming Li  *
513120dc4a7SYongseok Koh  * Remapped address is stored in the table in the process private structure of
514120dc4a7SYongseok Koh  * the device, indexed by queue index.
515120dc4a7SYongseok Koh  *
516120dc4a7SYongseok Koh  * @param txq_ctrl
517120dc4a7SYongseok Koh  *   Pointer to Tx queue control structure.
518120dc4a7SYongseok Koh  * @param fd
519120dc4a7SYongseok Koh  *   Verbs file descriptor to map UAR pages.
520120dc4a7SYongseok Koh  *
521120dc4a7SYongseok Koh  * @return
522120dc4a7SYongseok Koh  *   0 on success, a negative errno value otherwise and rte_errno is set.
523120dc4a7SYongseok Koh  */
524120dc4a7SYongseok Koh static int
525120dc4a7SYongseok Koh txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
526120dc4a7SYongseok Koh {
527120dc4a7SYongseok Koh 	struct mlx5_priv *priv = txq_ctrl->priv;
528120dc4a7SYongseok Koh 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
529b6e9c33cSMichael Baum 	struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
530120dc4a7SYongseok Koh 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
531120dc4a7SYongseok Koh 	void *addr;
532120dc4a7SYongseok Koh 	uintptr_t uar_va;
533120dc4a7SYongseok Koh 	uintptr_t offset;
5342aba9fc7SOphir Munk 	const size_t page_size = rte_mem_page_size();
5352aba9fc7SOphir Munk 	if (page_size == (size_t)-1) {
5362aba9fc7SOphir Munk 		DRV_LOG(ERR, "Failed to get mem page size");
5372aba9fc7SOphir Munk 		rte_errno = ENOMEM;
5382aba9fc7SOphir Munk 		return -rte_errno;
5392aba9fc7SOphir Munk 	}
540120dc4a7SYongseok Koh 
541c06f77aeSMichael Baum 	if (txq_ctrl->is_hairpin)
542ae18a1aeSOri Kam 		return 0;
5438e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(ppriv);
544120dc4a7SYongseok Koh 	/*
545120dc4a7SYongseok Koh 	 * As in rdma-core, UARs are mapped with the granularity of the OS
546120dc4a7SYongseok Koh 	 * page size. Refer to the libmlx5 function mlx5_init_context().
547120dc4a7SYongseok Koh 	 */
5485dfa003dSMichael Baum 	uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx].db;
549120dc4a7SYongseok Koh 	offset = uar_va & (page_size - 1); /* Offset in page. */
5502aba9fc7SOphir Munk 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
5512aba9fc7SOphir Munk 			   fd, txq_ctrl->uar_mmap_offset);
5522aba9fc7SOphir Munk 	if (!addr) {
553b6e9c33cSMichael Baum 		DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
554120dc4a7SYongseok Koh 			txq->port_id, txq->idx);
555120dc4a7SYongseok Koh 		rte_errno = ENXIO;
556120dc4a7SYongseok Koh 		return -rte_errno;
557120dc4a7SYongseok Koh 	}
558120dc4a7SYongseok Koh 	addr = RTE_PTR_ADD(addr, offset);
5595dfa003dSMichael Baum 	ppriv->uar_table[txq->idx].db = addr;
5605dfa003dSMichael Baum #ifndef RTE_ARCH_64
5615dfa003dSMichael Baum 	ppriv->uar_table[txq->idx].sl_p =
5625dfa003dSMichael Baum 			primary_ppriv->uar_table[txq->idx].sl_p;
5635dfa003dSMichael Baum #endif
564120dc4a7SYongseok Koh 	return 0;
565120dc4a7SYongseok Koh }
566120dc4a7SYongseok Koh 
567120dc4a7SYongseok Koh /**
568120dc4a7SYongseok Koh  * Unmap UAR register of a Tx queue for secondary process.
569120dc4a7SYongseok Koh  *
570120dc4a7SYongseok Koh  * @param txq_ctrl
571120dc4a7SYongseok Koh  *   Pointer to Tx queue control structure.
572120dc4a7SYongseok Koh  */
573120dc4a7SYongseok Koh static void
574120dc4a7SYongseok Koh txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
575120dc4a7SYongseok Koh {
576120dc4a7SYongseok Koh 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
577120dc4a7SYongseok Koh 	void *addr;
5782aba9fc7SOphir Munk 	const size_t page_size = rte_mem_page_size();
5792aba9fc7SOphir Munk 	if (page_size == (size_t)-1) {
5802aba9fc7SOphir Munk 		DRV_LOG(ERR, "Failed to get mem page size");
5812aba9fc7SOphir Munk 		rte_errno = ENOMEM;
5822aba9fc7SOphir Munk 	}
583120dc4a7SYongseok Koh 
584c06f77aeSMichael Baum 	if (txq_ctrl->is_hairpin)
585ae18a1aeSOri Kam 		return;
5865dfa003dSMichael Baum 	addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
5872aba9fc7SOphir Munk 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
588120dc4a7SYongseok Koh }
589120dc4a7SYongseok Koh 
590120dc4a7SYongseok Koh /**
5912786b7bfSSuanming Mou  * Deinitialize Tx UAR registers for secondary process.
5922786b7bfSSuanming Mou  *
5932786b7bfSSuanming Mou  * @param dev
5942786b7bfSSuanming Mou  *   Pointer to Ethernet device.
5952786b7bfSSuanming Mou  */
5962786b7bfSSuanming Mou void
5972786b7bfSSuanming Mou mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
5982786b7bfSSuanming Mou {
59984a22cbcSSuanming Mou 	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
60084a22cbcSSuanming Mou 					dev->process_private;
60184a22cbcSSuanming Mou 	const size_t page_size = rte_mem_page_size();
60284a22cbcSSuanming Mou 	void *addr;
6032786b7bfSSuanming Mou 	unsigned int i;
6042786b7bfSSuanming Mou 
60584a22cbcSSuanming Mou 	if (page_size == (size_t)-1) {
60684a22cbcSSuanming Mou 		DRV_LOG(ERR, "Failed to get mem page size");
60784a22cbcSSuanming Mou 		return;
60884a22cbcSSuanming Mou 	}
6092786b7bfSSuanming Mou 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
61084a22cbcSSuanming Mou 	for (i = 0; i != ppriv->uar_table_sz; ++i) {
6115dfa003dSMichael Baum 		if (!ppriv->uar_table[i].db)
6122786b7bfSSuanming Mou 			continue;
6135dfa003dSMichael Baum 		addr = ppriv->uar_table[i].db;
61484a22cbcSSuanming Mou 		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
61584a22cbcSSuanming Mou 
6162786b7bfSSuanming Mou 	}
6172786b7bfSSuanming Mou }
6182786b7bfSSuanming Mou 
6192786b7bfSSuanming Mou /**
620120dc4a7SYongseok Koh  * Initialize Tx UAR registers for secondary process.
621120dc4a7SYongseok Koh  *
622120dc4a7SYongseok Koh  * @param dev
623af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
624f8b9a3baSXueming Li  * @param fd
625f8b9a3baSXueming Li  *   Verbs file descriptor to map UAR pages.
626f8b9a3baSXueming Li  *
627f8b9a3baSXueming Li  * @return
628a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
629f8b9a3baSXueming Li  */
630f8b9a3baSXueming Li int
631120dc4a7SYongseok Koh mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
632f8b9a3baSXueming Li {
633dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
634991b04f6SNélio Laranjeiro 	struct mlx5_txq_data *txq;
635991b04f6SNélio Laranjeiro 	struct mlx5_txq_ctrl *txq_ctrl;
636120dc4a7SYongseok Koh 	unsigned int i;
637120dc4a7SYongseok Koh 	int ret;
638f8b9a3baSXueming Li 
6398e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
640f8b9a3baSXueming Li 	for (i = 0; i != priv->txqs_n; ++i) {
641fbab400fSNélio Laranjeiro 		if (!(*priv->txqs)[i])
642fbab400fSNélio Laranjeiro 			continue;
643f8b9a3baSXueming Li 		txq = (*priv->txqs)[i];
644991b04f6SNélio Laranjeiro 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
645c06f77aeSMichael Baum 		if (txq_ctrl->is_hairpin)
646ae18a1aeSOri Kam 			continue;
6478e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(txq->idx == (uint16_t)i);
648120dc4a7SYongseok Koh 		ret = txq_uar_init_secondary(txq_ctrl, fd);
649120dc4a7SYongseok Koh 		if (ret)
650120dc4a7SYongseok Koh 			goto error;
6514a984153SXueming Li 	}
652f8b9a3baSXueming Li 	return 0;
653120dc4a7SYongseok Koh error:
654120dc4a7SYongseok Koh 	/* Rollback. */
655120dc4a7SYongseok Koh 	do {
656120dc4a7SYongseok Koh 		if (!(*priv->txqs)[i])
657120dc4a7SYongseok Koh 			continue;
658120dc4a7SYongseok Koh 		txq = (*priv->txqs)[i];
659120dc4a7SYongseok Koh 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
660120dc4a7SYongseok Koh 		txq_uar_uninit_secondary(txq_ctrl);
661120dc4a7SYongseok Koh 	} while (i--);
662120dc4a7SYongseok Koh 	return -rte_errno;
663f8b9a3baSXueming Li }
664faf2667fSNélio Laranjeiro 
665faf2667fSNélio Laranjeiro /**
666faf2667fSNélio Laranjeiro  * Verify that the Tx queue object list is empty.
667faf2667fSNélio Laranjeiro  *
668af4f09f2SNélio Laranjeiro  * @param dev
669af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
670faf2667fSNélio Laranjeiro  *
671fb732b0aSNélio Laranjeiro  * @return
672fb732b0aSNélio Laranjeiro  *   The number of objects not released.
673faf2667fSNélio Laranjeiro  */
674faf2667fSNélio Laranjeiro int
675894c4a8eSOri Kam mlx5_txq_obj_verify(struct rte_eth_dev *dev)
676faf2667fSNélio Laranjeiro {
677dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
678faf2667fSNélio Laranjeiro 	int ret = 0;
679894c4a8eSOri Kam 	struct mlx5_txq_obj *txq_obj;
680faf2667fSNélio Laranjeiro 
681894c4a8eSOri Kam 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
682a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
683894c4a8eSOri Kam 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
684faf2667fSNélio Laranjeiro 		++ret;
685faf2667fSNélio Laranjeiro 	}
686faf2667fSNélio Laranjeiro 	return ret;
687faf2667fSNélio Laranjeiro }
6886e78005aSNélio Laranjeiro 
6896e78005aSNélio Laranjeiro /**
69042280dd9SDekel Peled  * Calculate the total number of WQEBB for Tx queue.
691f6d9ab4eSYongseok Koh  *
692f6d9ab4eSYongseok Koh  * Simplified version of calc_sq_size() in rdma-core.
693f6d9ab4eSYongseok Koh  *
694f6d9ab4eSYongseok Koh  * @param txq_ctrl
695f6d9ab4eSYongseok Koh  *   Pointer to Tx queue control structure.
696f6d9ab4eSYongseok Koh  *
697f6d9ab4eSYongseok Koh  * @return
698f6d9ab4eSYongseok Koh  *   The number of WQEBB.
699f6d9ab4eSYongseok Koh  */
700f6d9ab4eSYongseok Koh static int
701f6d9ab4eSYongseok Koh txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
702f6d9ab4eSYongseok Koh {
703f6d9ab4eSYongseok Koh 	unsigned int wqe_size;
704f6d9ab4eSYongseok Koh 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
705f6d9ab4eSYongseok Koh 
70638b4b397SViacheslav Ovsiienko 	wqe_size = MLX5_WQE_CSEG_SIZE +
70738b4b397SViacheslav Ovsiienko 		   MLX5_WQE_ESEG_SIZE +
70838b4b397SViacheslav Ovsiienko 		   MLX5_WSEG_SIZE -
70938b4b397SViacheslav Ovsiienko 		   MLX5_ESEG_MIN_INLINE_SIZE +
71038b4b397SViacheslav Ovsiienko 		   txq_ctrl->max_inline_data;
711f6d9ab4eSYongseok Koh 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
712f6d9ab4eSYongseok Koh }
713f6d9ab4eSYongseok Koh 
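/*
 * Worked example of the computation above, with illustrative numbers
 * (the helper below is not used by the driver): for desc = 512 and an
 * assumed per-descriptor wqe_size of 96 bytes, 512 * 96 = 49152,
 * rte_align32pow2(49152) = 65536, and 65536 / MLX5_WQE_SIZE (64 bytes
 * per WQEBB) yields 1024 WQEBBs.
 */
static __rte_unused uint32_t
example_wqebb_cnt(uint32_t desc, uint32_t wqe_size)
{
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
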
714f6d9ab4eSYongseok Koh /**
715b53cd869SViacheslav Ovsiienko  * Calculate the maximal inline data size for Tx queue.
716b53cd869SViacheslav Ovsiienko  *
717b53cd869SViacheslav Ovsiienko  * @param txq_ctrl
718b53cd869SViacheslav Ovsiienko  *   Pointer to Tx queue control structure.
719b53cd869SViacheslav Ovsiienko  *
720b53cd869SViacheslav Ovsiienko  * @return
721b53cd869SViacheslav Ovsiienko  *   The maximal inline data size.
722b53cd869SViacheslav Ovsiienko  */
723b53cd869SViacheslav Ovsiienko static unsigned int
724b53cd869SViacheslav Ovsiienko txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
725b53cd869SViacheslav Ovsiienko {
726b53cd869SViacheslav Ovsiienko 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
727b53cd869SViacheslav Ovsiienko 	struct mlx5_priv *priv = txq_ctrl->priv;
728b53cd869SViacheslav Ovsiienko 	unsigned int wqe_size;
729b53cd869SViacheslav Ovsiienko 
73091d1cfafSMichael Baum 	wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
731b53cd869SViacheslav Ovsiienko 	if (!wqe_size)
732b53cd869SViacheslav Ovsiienko 		return 0;
733b53cd869SViacheslav Ovsiienko 	/*
734b53cd869SViacheslav Ovsiienko 	 * This calculation is derived from the source of
735b53cd869SViacheslav Ovsiienko 	 * mlx5_calc_send_wqe() in the rdma-core library.
736b53cd869SViacheslav Ovsiienko 	 */
737b53cd869SViacheslav Ovsiienko 	wqe_size = wqe_size * MLX5_WQE_SIZE -
738b53cd869SViacheslav Ovsiienko 		   MLX5_WQE_CSEG_SIZE -
739b53cd869SViacheslav Ovsiienko 		   MLX5_WQE_ESEG_SIZE -
740b53cd869SViacheslav Ovsiienko 		   MLX5_WSEG_SIZE -
741b53cd869SViacheslav Ovsiienko 		   MLX5_WSEG_SIZE +
742b53cd869SViacheslav Ovsiienko 		   MLX5_DSEG_MIN_INLINE_SIZE;
743b53cd869SViacheslav Ovsiienko 	return wqe_size;
744b53cd869SViacheslav Ovsiienko }
745b53cd869SViacheslav Ovsiienko 
746b53cd869SViacheslav Ovsiienko /**
7477fe24446SShahaf Shuler  * Set Tx queue parameters from device configuration.
7487fe24446SShahaf Shuler  *
7497fe24446SShahaf Shuler  * @param txq_ctrl
7507fe24446SShahaf Shuler  *   Pointer to Tx queue control structure.
7517fe24446SShahaf Shuler  */
7527fe24446SShahaf Shuler static void
7537fe24446SShahaf Shuler txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
7547fe24446SShahaf Shuler {
75538b4b397SViacheslav Ovsiienko 	struct mlx5_priv *priv = txq_ctrl->priv;
75645a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
75787af0d1eSMichael Baum 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
75838b4b397SViacheslav Ovsiienko 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
75938b4b397SViacheslav Ovsiienko 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
76038b4b397SViacheslav Ovsiienko 	unsigned int inlen_mode; /* Minimal required Inline data. */
76138b4b397SViacheslav Ovsiienko 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
76238b4b397SViacheslav Ovsiienko 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
763295968d1SFerruh Yigit 	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
764295968d1SFerruh Yigit 					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
765295968d1SFerruh Yigit 					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
766295968d1SFerruh Yigit 					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
767295968d1SFerruh Yigit 					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
76838b4b397SViacheslav Ovsiienko 	bool vlan_inline;
76938b4b397SViacheslav Ovsiienko 	unsigned int temp;
77038b4b397SViacheslav Ovsiienko 
7711d89c404SViacheslav Ovsiienko 	txq_ctrl->txq.fast_free =
772295968d1SFerruh Yigit 		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
773295968d1SFerruh Yigit 		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
7741d89c404SViacheslav Ovsiienko 		   !config->mprq.enabled);
77538b4b397SViacheslav Ovsiienko 	if (config->txqs_inline == MLX5_ARG_UNSET)
77638b4b397SViacheslav Ovsiienko 		txqs_inline =
77738b4b397SViacheslav Ovsiienko #if defined(RTE_ARCH_ARM64)
77856bb3c84SXueming Li 		(priv->pci_dev && priv->pci_dev->id.device_id ==
7790a9fff95SRaslan Darawsheh 			PCI_DEVICE_ID_MELLANOX_BLUEFIELD) ?
78038b4b397SViacheslav Ovsiienko 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
78138b4b397SViacheslav Ovsiienko #endif
78238b4b397SViacheslav Ovsiienko 			MLX5_INLINE_MAX_TXQS;
78338b4b397SViacheslav Ovsiienko 	else
78438b4b397SViacheslav Ovsiienko 		txqs_inline = (unsigned int)config->txqs_inline;
78538b4b397SViacheslav Ovsiienko 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
78638b4b397SViacheslav Ovsiienko 		     MLX5_SEND_DEF_INLINE_LEN :
78738b4b397SViacheslav Ovsiienko 		     (unsigned int)config->txq_inline_max;
78838b4b397SViacheslav Ovsiienko 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
78938b4b397SViacheslav Ovsiienko 		     MLX5_EMPW_DEF_INLINE_LEN :
79038b4b397SViacheslav Ovsiienko 		     (unsigned int)config->txq_inline_mpw;
79138b4b397SViacheslav Ovsiienko 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
79238b4b397SViacheslav Ovsiienko 		     0 : (unsigned int)config->txq_inline_min;
79382e75f83SViacheslav Ovsiienko 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
79438b4b397SViacheslav Ovsiienko 		inlen_empw = 0;
79538b4b397SViacheslav Ovsiienko 	/*
79638b4b397SViacheslav Ovsiienko 	 * If there is requested minimal amount of data to inline
79738b4b397SViacheslav Ovsiienko 	 * we MUST enable inlining. This is a case for ConnectX-4
79838b4b397SViacheslav Ovsiienko 	 * which usually requires L2 inlined for correct operating
799ee76bddcSThomas Monjalon 	 * and ConnectX-4 Lx which requires L2-L4 inlined to
80038b4b397SViacheslav Ovsiienko 	 * support E-Switch Flows.
80138b4b397SViacheslav Ovsiienko 	 */
80238b4b397SViacheslav Ovsiienko 	if (inlen_mode) {
80338b4b397SViacheslav Ovsiienko 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
80438b4b397SViacheslav Ovsiienko 			/*
80538b4b397SViacheslav Ovsiienko 			 * Optimize minimal inlining for single
80638b4b397SViacheslav Ovsiienko 			 * segment packets to fill one WQEBB
80738b4b397SViacheslav Ovsiienko 			 * without gaps.
80838b4b397SViacheslav Ovsiienko 			 */
80938b4b397SViacheslav Ovsiienko 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
81038b4b397SViacheslav Ovsiienko 		} else {
81138b4b397SViacheslav Ovsiienko 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
81238b4b397SViacheslav Ovsiienko 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
81338b4b397SViacheslav Ovsiienko 			       MLX5_ESEG_MIN_INLINE_SIZE;
81438b4b397SViacheslav Ovsiienko 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
81538b4b397SViacheslav Ovsiienko 		}
81638b4b397SViacheslav Ovsiienko 		if (temp != inlen_mode) {
81738b4b397SViacheslav Ovsiienko 			DRV_LOG(INFO,
81838b4b397SViacheslav Ovsiienko 				"port %u minimal required inline setting"
81938b4b397SViacheslav Ovsiienko 				" aligned from %u to %u",
82038b4b397SViacheslav Ovsiienko 				PORT_ID(priv), inlen_mode, temp);
82138b4b397SViacheslav Ovsiienko 			inlen_mode = temp;
82238b4b397SViacheslav Ovsiienko 		}
82338b4b397SViacheslav Ovsiienko 	}
82438b4b397SViacheslav Ovsiienko 	/*
82538b4b397SViacheslav Ovsiienko 	 * If port is configured to support VLAN insertion and device
82638b4b397SViacheslav Ovsiienko 	 * does not support this feature by HW (for NICs before ConnectX-5
82738b4b397SViacheslav Ovsiienko 	 * or in case of wqe_vlan_insert flag is not set) we must enable
82838b4b397SViacheslav Ovsiienko 	 * data inline on all queues because it is supported by single
82938b4b397SViacheslav Ovsiienko 	 * tx_burst routine.
83038b4b397SViacheslav Ovsiienko 	 */
83138b4b397SViacheslav Ovsiienko 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
832295968d1SFerruh Yigit 	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
83338b4b397SViacheslav Ovsiienko 		      !config->hw_vlan_insert;
83438b4b397SViacheslav Ovsiienko 	/*
83538b4b397SViacheslav Ovsiienko 	 * If there are few Tx queues it is prioritized
83638b4b397SViacheslav Ovsiienko 	 * to save CPU cycles and disable data inlining at all.
83738b4b397SViacheslav Ovsiienko 	 */
838c6f04856SViacheslav Ovsiienko 	if (inlen_send && priv->txqs_n >= txqs_inline) {
83938b4b397SViacheslav Ovsiienko 		/*
84038b4b397SViacheslav Ovsiienko 		 * The data sent with ordinal MLX5_OPCODE_SEND
84138b4b397SViacheslav Ovsiienko 		 * may be inlined in Ethernet Segment, align the
84238b4b397SViacheslav Ovsiienko 		 * length accordingly to fit entire WQEBBs.
84338b4b397SViacheslav Ovsiienko 		 */
844b53cd869SViacheslav Ovsiienko 		temp = RTE_MAX(inlen_send,
845b53cd869SViacheslav Ovsiienko 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
846b53cd869SViacheslav Ovsiienko 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
847b53cd869SViacheslav Ovsiienko 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
848b53cd869SViacheslav Ovsiienko 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
84938b4b397SViacheslav Ovsiienko 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
85038b4b397SViacheslav Ovsiienko 				     MLX5_ESEG_MIN_INLINE_SIZE -
85138b4b397SViacheslav Ovsiienko 				     MLX5_WQE_CSEG_SIZE -
85238b4b397SViacheslav Ovsiienko 				     MLX5_WQE_ESEG_SIZE -
85338b4b397SViacheslav Ovsiienko 				     MLX5_WQE_DSEG_SIZE * 2);
85438b4b397SViacheslav Ovsiienko 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
85538b4b397SViacheslav Ovsiienko 		temp = RTE_MAX(temp, inlen_mode);
85638b4b397SViacheslav Ovsiienko 		if (temp != inlen_send) {
85738b4b397SViacheslav Ovsiienko 			DRV_LOG(INFO,
85838b4b397SViacheslav Ovsiienko 				"port %u ordinary send inline setting"
85938b4b397SViacheslav Ovsiienko 				" aligned from %u to %u",
86038b4b397SViacheslav Ovsiienko 				PORT_ID(priv), inlen_send, temp);
86138b4b397SViacheslav Ovsiienko 			inlen_send = temp;
86238b4b397SViacheslav Ovsiienko 		}
86338b4b397SViacheslav Ovsiienko 		/*
86438b4b397SViacheslav Ovsiienko 		 * Not aligned to cache lines, but to WQEs.
86538b4b397SViacheslav Ovsiienko 		 * First bytes of data (initial alignment)
86638b4b397SViacheslav Ovsiienko 		 * is going to be copied explicitly at the
86738b4b397SViacheslav Ovsiienko 		 * beginning of inlining buffer in Ethernet
86838b4b397SViacheslav Ovsiienko 		 * Segment.
86938b4b397SViacheslav Ovsiienko 		 */
8708e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
8718e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
87238b4b397SViacheslav Ovsiienko 					  MLX5_ESEG_MIN_INLINE_SIZE -
87338b4b397SViacheslav Ovsiienko 					  MLX5_WQE_CSEG_SIZE -
87438b4b397SViacheslav Ovsiienko 					  MLX5_WQE_ESEG_SIZE -
87538b4b397SViacheslav Ovsiienko 					  MLX5_WQE_DSEG_SIZE * 2);
876c6f04856SViacheslav Ovsiienko 	} else if (inlen_mode) {
87738b4b397SViacheslav Ovsiienko 		/*
87838b4b397SViacheslav Ovsiienko 		 * If minimal inlining is requested we must
87938b4b397SViacheslav Ovsiienko 		 * enable inlining in general, despite the
880c6f04856SViacheslav Ovsiienko 		 * number of configured queues. Ignore the
881c6f04856SViacheslav Ovsiienko 		 * txq_inline_max devarg, this is not
882c6f04856SViacheslav Ovsiienko 		 * full-featured inline.
88338b4b397SViacheslav Ovsiienko 		 */
88438b4b397SViacheslav Ovsiienko 		inlen_send = inlen_mode;
885c6f04856SViacheslav Ovsiienko 		inlen_empw = 0;
886c6f04856SViacheslav Ovsiienko 	} else if (vlan_inline) {
88738b4b397SViacheslav Ovsiienko 		/*
888c6f04856SViacheslav Ovsiienko 		 * Hardware does not report offload for
889c6f04856SViacheslav Ovsiienko 		 * VLAN insertion, we must enable data inline
890c6f04856SViacheslav Ovsiienko 		 * to implement feature by software.
89138b4b397SViacheslav Ovsiienko 		 */
892c6f04856SViacheslav Ovsiienko 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
893c6f04856SViacheslav Ovsiienko 		inlen_empw = 0;
894c6f04856SViacheslav Ovsiienko 	} else {
895c6f04856SViacheslav Ovsiienko 		inlen_send = 0;
896c6f04856SViacheslav Ovsiienko 		inlen_empw = 0;
89738b4b397SViacheslav Ovsiienko 	}
89838b4b397SViacheslav Ovsiienko 	txq_ctrl->txq.inlen_send = inlen_send;
89938b4b397SViacheslav Ovsiienko 	txq_ctrl->txq.inlen_mode = inlen_mode;
90038b4b397SViacheslav Ovsiienko 	txq_ctrl->txq.inlen_empw = 0;
90138b4b397SViacheslav Ovsiienko 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
90238b4b397SViacheslav Ovsiienko 		/*
90338b4b397SViacheslav Ovsiienko 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
90438b4b397SViacheslav Ovsiienko 		 * may be inlined in Data Segment, align the
90538b4b397SViacheslav Ovsiienko 		 * length accordingly to fit entire WQEBBs.
90638b4b397SViacheslav Ovsiienko 		 */
907b53cd869SViacheslav Ovsiienko 		temp = RTE_MAX(inlen_empw,
908b53cd869SViacheslav Ovsiienko 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
909b53cd869SViacheslav Ovsiienko 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
910b53cd869SViacheslav Ovsiienko 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
911b53cd869SViacheslav Ovsiienko 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
91238b4b397SViacheslav Ovsiienko 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
91338b4b397SViacheslav Ovsiienko 				     MLX5_DSEG_MIN_INLINE_SIZE -
91438b4b397SViacheslav Ovsiienko 				     MLX5_WQE_CSEG_SIZE -
91538b4b397SViacheslav Ovsiienko 				     MLX5_WQE_ESEG_SIZE -
91638b4b397SViacheslav Ovsiienko 				     MLX5_WQE_DSEG_SIZE);
91738b4b397SViacheslav Ovsiienko 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
91838b4b397SViacheslav Ovsiienko 		if (temp != inlen_empw) {
91938b4b397SViacheslav Ovsiienko 			DRV_LOG(INFO,
92038b4b397SViacheslav Ovsiienko 				"port %u enhanced empw inline setting"
92138b4b397SViacheslav Ovsiienko 				" aligned from %u to %u",
92238b4b397SViacheslav Ovsiienko 				PORT_ID(priv), inlen_empw, temp);
92338b4b397SViacheslav Ovsiienko 			inlen_empw = temp;
92438b4b397SViacheslav Ovsiienko 		}
9258e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
9268e46d4e1SAlexander Kozyrev 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
92738b4b397SViacheslav Ovsiienko 					  MLX5_DSEG_MIN_INLINE_SIZE -
92838b4b397SViacheslav Ovsiienko 					  MLX5_WQE_CSEG_SIZE -
92938b4b397SViacheslav Ovsiienko 					  MLX5_WQE_ESEG_SIZE -
93038b4b397SViacheslav Ovsiienko 					  MLX5_WQE_DSEG_SIZE);
93138b4b397SViacheslav Ovsiienko 		txq_ctrl->txq.inlen_empw = inlen_empw;
93238b4b397SViacheslav Ovsiienko 	}
93338b4b397SViacheslav Ovsiienko 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
93438b4b397SViacheslav Ovsiienko 	if (tso) {
93538b4b397SViacheslav Ovsiienko 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
93638b4b397SViacheslav Ovsiienko 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
93738b4b397SViacheslav Ovsiienko 						    MLX5_MAX_TSO_HEADER);
93838b4b397SViacheslav Ovsiienko 		txq_ctrl->txq.tso_en = 1;
93938b4b397SViacheslav Ovsiienko 	}
940295968d1SFerruh Yigit 	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
94187af0d1eSMichael Baum 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
942295968d1SFerruh Yigit 	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
94387af0d1eSMichael Baum 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
944295968d1SFerruh Yigit 	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
94587af0d1eSMichael Baum 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
94687af0d1eSMichael Baum 	   (dev_cap->swp  & MLX5_SW_PARSING_TSO_CAP))
947c1a320bfSTal Shnaiderman 		txq_ctrl->txq.tunnel_en = 1;
948295968d1SFerruh Yigit 	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
949295968d1SFerruh Yigit 				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
95087af0d1eSMichael Baum 				  txq_ctrl->txq.offloads) && (dev_cap->swp &
951accf3cfcSTal Shnaiderman 				  MLX5_SW_PARSING_TSO_CAP)) |
952295968d1SFerruh Yigit 				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
95387af0d1eSMichael Baum 				 txq_ctrl->txq.offloads) && (dev_cap->swp &
954accf3cfcSTal Shnaiderman 				 MLX5_SW_PARSING_CSUM_CAP));
9557fe24446SShahaf Shuler }
9567fe24446SShahaf Shuler 
9577fe24446SShahaf Shuler /**
958b53cd869SViacheslav Ovsiienko  * Adjust Tx queue data inline parameters for large queue sizes.
959b53cd869SViacheslav Ovsiienko  * The data inline feature requires multiple WQEs to fit the packets,
960b53cd869SViacheslav Ovsiienko  * and if a large number of Tx descriptors is requested by the application
961b53cd869SViacheslav Ovsiienko  * the total WQE amount may exceed the hardware capabilities. If the
962b53cd869SViacheslav Ovsiienko  * default inline settings are used, we can try to adjust them to
963b53cd869SViacheslav Ovsiienko  * meet the hardware requirements without exceeding the queue size.
964b53cd869SViacheslav Ovsiienko  *
965b53cd869SViacheslav Ovsiienko  * @param txq_ctrl
966b53cd869SViacheslav Ovsiienko  *   Pointer to Tx queue control structure.
967b53cd869SViacheslav Ovsiienko  *
968b53cd869SViacheslav Ovsiienko  * @return
969b53cd869SViacheslav Ovsiienko  *   Zero on success, otherwise the parameters can not be adjusted.
970b53cd869SViacheslav Ovsiienko  */
971b53cd869SViacheslav Ovsiienko static int
972b53cd869SViacheslav Ovsiienko txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
973b53cd869SViacheslav Ovsiienko {
974b53cd869SViacheslav Ovsiienko 	struct mlx5_priv *priv = txq_ctrl->priv;
97545a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
976b53cd869SViacheslav Ovsiienko 	unsigned int max_inline;
977b53cd869SViacheslav Ovsiienko 
978b53cd869SViacheslav Ovsiienko 	max_inline = txq_calc_inline_max(txq_ctrl);
979b53cd869SViacheslav Ovsiienko 	if (!txq_ctrl->txq.inlen_send) {
980b53cd869SViacheslav Ovsiienko 		/*
981b53cd869SViacheslav Ovsiienko 		 * Inline data feature is not engaged at all.
982b53cd869SViacheslav Ovsiienko 		 * There is nothing to adjust.
983b53cd869SViacheslav Ovsiienko 		 */
984b53cd869SViacheslav Ovsiienko 		return 0;
985b53cd869SViacheslav Ovsiienko 	}
986b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->max_inline_data <= max_inline) {
987b53cd869SViacheslav Ovsiienko 		/*
988b53cd869SViacheslav Ovsiienko 		 * The requested inline data length does not
989b53cd869SViacheslav Ovsiienko 		 * exceed queue capabilities.
990b53cd869SViacheslav Ovsiienko 		 */
991b53cd869SViacheslav Ovsiienko 		return 0;
992b53cd869SViacheslav Ovsiienko 	}
993b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.inlen_mode > max_inline) {
994b53cd869SViacheslav Ovsiienko 		DRV_LOG(ERR,
995b53cd869SViacheslav Ovsiienko 			"minimal data inline requirements (%u) are not"
996b53cd869SViacheslav Ovsiienko 			" satisfied (%u) on port %u, try the smaller"
997b53cd869SViacheslav Ovsiienko 			" Tx queue size (%d)",
998b53cd869SViacheslav Ovsiienko 			txq_ctrl->txq.inlen_mode, max_inline,
99991d1cfafSMichael Baum 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1000b53cd869SViacheslav Ovsiienko 		goto error;
1001b53cd869SViacheslav Ovsiienko 	}
1002b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.inlen_send > max_inline &&
1003b53cd869SViacheslav Ovsiienko 	    config->txq_inline_max != MLX5_ARG_UNSET &&
1004b53cd869SViacheslav Ovsiienko 	    config->txq_inline_max > (int)max_inline) {
1005b53cd869SViacheslav Ovsiienko 		DRV_LOG(ERR,
1006b53cd869SViacheslav Ovsiienko 			"txq_inline_max requirements (%u) are not"
1007b53cd869SViacheslav Ovsiienko 			" satisfied (%u) on port %u, try a smaller"
1008b53cd869SViacheslav Ovsiienko 			" Tx queue size (%d)",
1009b53cd869SViacheslav Ovsiienko 			txq_ctrl->txq.inlen_send, max_inline,
101091d1cfafSMichael Baum 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1011b53cd869SViacheslav Ovsiienko 		goto error;
1012b53cd869SViacheslav Ovsiienko 	}
1013b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1014b53cd869SViacheslav Ovsiienko 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1015b53cd869SViacheslav Ovsiienko 	    config->txq_inline_mpw > (int)max_inline) {
1016b53cd869SViacheslav Ovsiienko 		DRV_LOG(ERR,
1017b53cd869SViacheslav Ovsiienko 			"txq_inline_mpw requirements (%u) are not"
1018b53cd869SViacheslav Ovsiienko 			" satisfied (%u) on port %u, try a smaller"
1019b53cd869SViacheslav Ovsiienko 			" Tx queue size (%d)",
1020b53cd869SViacheslav Ovsiienko 			txq_ctrl->txq.inlen_empw, max_inline,
102191d1cfafSMichael Baum 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1022b53cd869SViacheslav Ovsiienko 		goto error;
1023b53cd869SViacheslav Ovsiienko 	}
1024b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1025b53cd869SViacheslav Ovsiienko 		DRV_LOG(ERR,
1026b53cd869SViacheslav Ovsiienko 			"tso header inline requirements (%u) are not"
1027b53cd869SViacheslav Ovsiienko 			" satisfied (%u) on port %u, try a smaller"
1028b53cd869SViacheslav Ovsiienko 			" Tx queue size (%d)",
1029b53cd869SViacheslav Ovsiienko 			MLX5_MAX_TSO_HEADER, max_inline,
103091d1cfafSMichael Baum 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1031b53cd869SViacheslav Ovsiienko 		goto error;
1032b53cd869SViacheslav Ovsiienko 	}
1033b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.inlen_send > max_inline) {
1034b53cd869SViacheslav Ovsiienko 		DRV_LOG(WARNING,
1035b53cd869SViacheslav Ovsiienko 			"adjust txq_inline_max (%u->%u)"
1036b53cd869SViacheslav Ovsiienko 			" due to large Tx queue on port %u",
1037b53cd869SViacheslav Ovsiienko 			txq_ctrl->txq.inlen_send, max_inline,
1038b53cd869SViacheslav Ovsiienko 			priv->dev_data->port_id);
1039b53cd869SViacheslav Ovsiienko 		txq_ctrl->txq.inlen_send = max_inline;
1040b53cd869SViacheslav Ovsiienko 	}
1041b53cd869SViacheslav Ovsiienko 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1042b53cd869SViacheslav Ovsiienko 		DRV_LOG(WARNING,
1043b53cd869SViacheslav Ovsiienko 			"adjust txq_inline_mpw (%u->%u)"
1044b53cd869SViacheslav Ovsiienko 			" due to large Tx queue on port %u",
1045b53cd869SViacheslav Ovsiienko 			txq_ctrl->txq.inlen_empw, max_inline,
1046b53cd869SViacheslav Ovsiienko 			priv->dev_data->port_id);
1047b53cd869SViacheslav Ovsiienko 		txq_ctrl->txq.inlen_empw = max_inline;
1048b53cd869SViacheslav Ovsiienko 	}
1049b53cd869SViacheslav Ovsiienko 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1050b53cd869SViacheslav Ovsiienko 					    txq_ctrl->txq.inlen_empw);
10518e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
10528e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
10538e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
10548e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1055c4d314a3SViacheslav Ovsiienko 		    !txq_ctrl->txq.inlen_empw);
1056b53cd869SViacheslav Ovsiienko 	return 0;
1057b53cd869SViacheslav Ovsiienko error:
1058b53cd869SViacheslav Ovsiienko 	rte_errno = ENOMEM;
1059b53cd869SViacheslav Ovsiienko 	return -ENOMEM;
1060b53cd869SViacheslav Ovsiienko }
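
/*
 * Editor's illustrative sketch (not part of the driver; the example_ name
 * is hypothetical): the adjustment above amounts to clamping the "soft"
 * inline lengths (inlen_send/inlen_empw) to the per-queue maximum and
 * failing only when a hard requirement (inlen_mode, the TSO header size,
 * or an explicit devarg) cannot fit. A simplified equivalent:
 */
static int
example_clamp_inline(uint32_t *soft_inlen, uint32_t hard_min,
		     uint32_t max_inline)
{
	if (hard_min > max_inline) {
		/* An explicitly requested minimum cannot be satisfied. */
		return -ENOMEM;
	}
	if (*soft_inlen > max_inline) {
		/* Soft limit only - silently clamp to what the queue fits. */
		*soft_inlen = max_inline;
	}
	return 0;
}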
1061b53cd869SViacheslav Ovsiienko 
1062b53cd869SViacheslav Ovsiienko /**
10636e78005aSNélio Laranjeiro  * Create a DPDK Tx queue.
10646e78005aSNélio Laranjeiro  *
1065af4f09f2SNélio Laranjeiro  * @param dev
1066af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
10676e78005aSNélio Laranjeiro  * @param idx
10686e78005aSNélio Laranjeiro  *   TX queue index.
10696e78005aSNélio Laranjeiro  * @param desc
10706e78005aSNélio Laranjeiro  *   Number of descriptors to configure in queue.
10716e78005aSNélio Laranjeiro  * @param socket
10726e78005aSNélio Laranjeiro  *   NUMA socket on which memory must be allocated.
10736e78005aSNélio Laranjeiro  * @param[in] conf
10746e78005aSNélio Laranjeiro  *  Thresholds parameters.
10756e78005aSNélio Laranjeiro  *
10766e78005aSNélio Laranjeiro  * @return
1077a6d83b6aSNélio Laranjeiro  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
10786e78005aSNélio Laranjeiro  */
10796e78005aSNélio Laranjeiro struct mlx5_txq_ctrl *
1080af4f09f2SNélio Laranjeiro mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1081af4f09f2SNélio Laranjeiro 	     unsigned int socket, const struct rte_eth_txconf *conf)
10826e78005aSNélio Laranjeiro {
1083dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
10846e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *tmpl;
10856e78005aSNélio Laranjeiro 
1086ac3fc732SSuanming Mou 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1087ac3fc732SSuanming Mou 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1088a6d83b6aSNélio Laranjeiro 	if (!tmpl) {
1089a6d83b6aSNélio Laranjeiro 		rte_errno = ENOMEM;
10906e78005aSNélio Laranjeiro 		return NULL;
1091a6d83b6aSNélio Laranjeiro 	}
109271304b5cSMichael Baum 	if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
109371304b5cSMichael Baum 			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1094974f1e7eSYongseok Koh 		/* rte_errno is already set. */
1095974f1e7eSYongseok Koh 		goto error;
1096974f1e7eSYongseok Koh 	}
10978e46d4e1SAlexander Kozyrev 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1098a4996bd8SWei Dai 	tmpl->txq.offloads = conf->offloads |
1099a4996bd8SWei Dai 			     dev->data->dev_conf.txmode.offloads;
11006e78005aSNélio Laranjeiro 	tmpl->priv = priv;
1101a49b617bSOlivier Gournet 	tmpl->socket = socket;
11026e78005aSNélio Laranjeiro 	tmpl->txq.elts_n = log2above(desc);
110338b4b397SViacheslav Ovsiienko 	tmpl->txq.elts_s = desc;
110438b4b397SViacheslav Ovsiienko 	tmpl->txq.elts_m = desc - 1;
1105120dc4a7SYongseok Koh 	tmpl->txq.port_id = dev->data->port_id;
1106d5c900d1SYongseok Koh 	tmpl->txq.idx = idx;
11077fe24446SShahaf Shuler 	txq_set_params(tmpl);
1108b53cd869SViacheslav Ovsiienko 	if (txq_adjust_params(tmpl))
1109b53cd869SViacheslav Ovsiienko 		goto error;
1110f6d9ab4eSYongseok Koh 	if (txq_calc_wqebb_cnt(tmpl) >
111191d1cfafSMichael Baum 	    priv->sh->dev_cap.max_qp_wr) {
1112f6d9ab4eSYongseok Koh 		DRV_LOG(ERR,
1113f6d9ab4eSYongseok Koh 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1114f6d9ab4eSYongseok Koh 			" try smaller queue size",
1115f6d9ab4eSYongseok Koh 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
111691d1cfafSMichael Baum 			priv->sh->dev_cap.max_qp_wr);
1117f6d9ab4eSYongseok Koh 		rte_errno = ENOMEM;
1118f6d9ab4eSYongseok Koh 		goto error;
1119f6d9ab4eSYongseok Koh 	}
1120e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
1121c06f77aeSMichael Baum 	tmpl->is_hairpin = false;
11226e78005aSNélio Laranjeiro 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
11236e78005aSNélio Laranjeiro 	return tmpl;
1124974f1e7eSYongseok Koh error:
1125b689b00dSMichael Baum 	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1126ac3fc732SSuanming Mou 	mlx5_free(tmpl);
1127974f1e7eSYongseok Koh 	return NULL;
11286e78005aSNélio Laranjeiro }
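
/*
 * Editor's illustrative sketch (not part of the driver; names are
 * hypothetical): the single mlx5_malloc() call above sizes the control
 * structure together with the mbuf pointer ring, which works because the
 * ring is a trailing array of the embedded Tx queue data. A minimal
 * standalone illustration of the same layout pattern:
 */
struct example_txq {
	uint16_t elts_n;		/* log2 of the ring size */
	struct rte_mbuf *elts[];	/* flexible array, sized at allocation */
};

static struct example_txq *
example_txq_alloc(uint16_t desc, int socket)
{
	/* One allocation covers the header and the 'desc' ring entries. */
	return mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   sizeof(struct example_txq) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
}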
11296e78005aSNélio Laranjeiro 
11306e78005aSNélio Laranjeiro /**
1131ae18a1aeSOri Kam  * Create a DPDK Tx hairpin queue.
1132ae18a1aeSOri Kam  *
1133ae18a1aeSOri Kam  * @param dev
1134ae18a1aeSOri Kam  *   Pointer to Ethernet device.
1135ae18a1aeSOri Kam  * @param idx
1136ae18a1aeSOri Kam  *   TX queue index.
1137ae18a1aeSOri Kam  * @param desc
1138ae18a1aeSOri Kam  *   Number of descriptors to configure in queue.
1139ae18a1aeSOri Kam  * @param hairpin_conf
1140ae18a1aeSOri Kam  *  The hairpin configuration.
1141ae18a1aeSOri Kam  *
1142ae18a1aeSOri Kam  * @return
1143ae18a1aeSOri Kam  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1144ae18a1aeSOri Kam  */
1145ae18a1aeSOri Kam struct mlx5_txq_ctrl *
1146ae18a1aeSOri Kam mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1147ae18a1aeSOri Kam 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1148ae18a1aeSOri Kam {
1149ae18a1aeSOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
1150ae18a1aeSOri Kam 	struct mlx5_txq_ctrl *tmpl;
1151ae18a1aeSOri Kam 
1152ac3fc732SSuanming Mou 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1153ac3fc732SSuanming Mou 			   SOCKET_ID_ANY);
1154ae18a1aeSOri Kam 	if (!tmpl) {
1155ae18a1aeSOri Kam 		rte_errno = ENOMEM;
1156ae18a1aeSOri Kam 		return NULL;
1157ae18a1aeSOri Kam 	}
1158ae18a1aeSOri Kam 	tmpl->priv = priv;
1159ae18a1aeSOri Kam 	tmpl->socket = SOCKET_ID_ANY;
1160ae18a1aeSOri Kam 	tmpl->txq.elts_n = log2above(desc);
1161ae18a1aeSOri Kam 	tmpl->txq.port_id = dev->data->port_id;
1162ae18a1aeSOri Kam 	tmpl->txq.idx = idx;
1163ae18a1aeSOri Kam 	tmpl->hairpin_conf = *hairpin_conf;
1164c06f77aeSMichael Baum 	tmpl->is_hairpin = true;
1165e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
1166ae18a1aeSOri Kam 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1167ae18a1aeSOri Kam 	return tmpl;
1168ae18a1aeSOri Kam }
1169ae18a1aeSOri Kam 
1170ae18a1aeSOri Kam /**
11716e78005aSNélio Laranjeiro  * Get a Tx queue.
11726e78005aSNélio Laranjeiro  *
1173af4f09f2SNélio Laranjeiro  * @param dev
1174af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
11756e78005aSNélio Laranjeiro  * @param idx
11766e78005aSNélio Laranjeiro  *   TX queue index.
11776e78005aSNélio Laranjeiro  *
11786e78005aSNélio Laranjeiro  * @return
11796e78005aSNélio Laranjeiro  *   A pointer to the queue if it exists, NULL otherwise.
11806e78005aSNélio Laranjeiro  */
11816e78005aSNélio Laranjeiro struct mlx5_txq_ctrl *
1182af4f09f2SNélio Laranjeiro mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
11836e78005aSNélio Laranjeiro {
1184dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
118517a57183SMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
11866e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *ctrl = NULL;
11876e78005aSNélio Laranjeiro 
118817a57183SMichael Baum 	if (txq_data) {
118917a57183SMichael Baum 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1190e12a0166STyler Retzlaff 		rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
11916e78005aSNélio Laranjeiro 	}
11926e78005aSNélio Laranjeiro 	return ctrl;
11936e78005aSNélio Laranjeiro }
11946e78005aSNélio Laranjeiro 
11956e78005aSNélio Laranjeiro /**
11961944fbc3SSuanming Mou  * Get an external Tx queue.
11971944fbc3SSuanming Mou  *
11981944fbc3SSuanming Mou  * @param dev
11991944fbc3SSuanming Mou  *   Pointer to Ethernet device.
12001944fbc3SSuanming Mou  * @param idx
12011944fbc3SSuanming Mou  *   External Tx queue index.
12021944fbc3SSuanming Mou  *
12031944fbc3SSuanming Mou  * @return
12041944fbc3SSuanming Mou  *   A pointer to the queue if it exists, NULL otherwise.
12051944fbc3SSuanming Mou  */
12061944fbc3SSuanming Mou struct mlx5_external_q *
12071944fbc3SSuanming Mou mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx)
12081944fbc3SSuanming Mou {
12091944fbc3SSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
12101944fbc3SSuanming Mou 
12111944fbc3SSuanming Mou 	MLX5_ASSERT(mlx5_is_external_txq(dev, idx));
12121944fbc3SSuanming Mou 	return &priv->ext_txqs[idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
12131944fbc3SSuanming Mou }
12141944fbc3SSuanming Mou 
12151944fbc3SSuanming Mou /**
12161944fbc3SSuanming Mou  * Verify the external Tx Queue list is empty.
12171944fbc3SSuanming Mou  *
12181944fbc3SSuanming Mou  * @param dev
12191944fbc3SSuanming Mou  *   Pointer to Ethernet device.
12201944fbc3SSuanming Mou  *
12211944fbc3SSuanming Mou  * @return
12221944fbc3SSuanming Mou  *   The number of objects not released.
12231944fbc3SSuanming Mou  */
12241944fbc3SSuanming Mou int
12251944fbc3SSuanming Mou mlx5_ext_txq_verify(struct rte_eth_dev *dev)
12261944fbc3SSuanming Mou {
12271944fbc3SSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
12281944fbc3SSuanming Mou 	struct mlx5_external_q *txq;
12291944fbc3SSuanming Mou 	uint32_t i;
12301944fbc3SSuanming Mou 	int ret = 0;
12311944fbc3SSuanming Mou 
12321944fbc3SSuanming Mou 	if (priv->ext_txqs == NULL)
12331944fbc3SSuanming Mou 		return 0;
12341944fbc3SSuanming Mou 
12351944fbc3SSuanming Mou 	for (i = MLX5_EXTERNAL_TX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
12361944fbc3SSuanming Mou 		txq = mlx5_ext_txq_get(dev, i);
12371944fbc3SSuanming Mou 		if (txq->refcnt < 2)
12381944fbc3SSuanming Mou 			continue;
12391944fbc3SSuanming Mou 		DRV_LOG(DEBUG, "Port %u external TxQ %u still referenced.",
12401944fbc3SSuanming Mou 			dev->data->port_id, i);
12411944fbc3SSuanming Mou 		++ret;
12421944fbc3SSuanming Mou 	}
12431944fbc3SSuanming Mou 	return ret;
12441944fbc3SSuanming Mou }
12451944fbc3SSuanming Mou 
12461944fbc3SSuanming Mou /**
12476e78005aSNélio Laranjeiro  * Release a Tx queue.
12486e78005aSNélio Laranjeiro  *
1249af4f09f2SNélio Laranjeiro  * @param dev
1250af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
12516e78005aSNélio Laranjeiro  * @param idx
12526e78005aSNélio Laranjeiro  *   TX queue index.
12536e78005aSNélio Laranjeiro  *
12546e78005aSNélio Laranjeiro  * @return
1255925061b5SNélio Laranjeiro  *   1 while a reference on it exists, 0 when freed.
12566e78005aSNélio Laranjeiro  */
12576e78005aSNélio Laranjeiro int
1258af4f09f2SNélio Laranjeiro mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
12596e78005aSNélio Laranjeiro {
1260dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1261f49f4483SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl;
12626e78005aSNélio Laranjeiro 
126394e257ecSDmitry Kozlyuk 	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
12646e78005aSNélio Laranjeiro 		return 0;
1265f49f4483SMichael Baum 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1266e12a0166STyler Retzlaff 	if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
126717a57183SMichael Baum 		return 1;
1268f49f4483SMichael Baum 	if (txq_ctrl->obj) {
1269f49f4483SMichael Baum 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1270f49f4483SMichael Baum 		LIST_REMOVE(txq_ctrl->obj, next);
1271f49f4483SMichael Baum 		mlx5_free(txq_ctrl->obj);
1272f49f4483SMichael Baum 		txq_ctrl->obj = NULL;
127317a57183SMichael Baum 	}
1274c06f77aeSMichael Baum 	if (!txq_ctrl->is_hairpin) {
1275f49f4483SMichael Baum 		if (txq_ctrl->txq.fcqs) {
1276f49f4483SMichael Baum 			mlx5_free(txq_ctrl->txq.fcqs);
1277f49f4483SMichael Baum 			txq_ctrl->txq.fcqs = NULL;
1278f49f4483SMichael Baum 		}
1279f49f4483SMichael Baum 		txq_free_elts(txq_ctrl);
12809ab9d46aSMatan Azrad 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1281876b5d52SMatan Azrad 	}
1282e12a0166STyler Retzlaff 	if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
1283c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin)
12849ab9d46aSMatan Azrad 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1285f49f4483SMichael Baum 		LIST_REMOVE(txq_ctrl, next);
1286f49f4483SMichael Baum 		mlx5_free(txq_ctrl);
12876e78005aSNélio Laranjeiro 		(*priv->txqs)[idx] = NULL;
12889ab9d46aSMatan Azrad 	}
12896e78005aSNélio Laranjeiro 	return 0;
12906e78005aSNélio Laranjeiro }
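
/*
 * Editor's illustrative sketch (not part of the driver; the example_ name
 * is hypothetical): within the PMD, mlx5_txq_get() and mlx5_txq_release()
 * form a reference-counting pair, so a caller that looks a queue up must
 * release it on every path. A typical usage pattern:
 */
static int
example_use_txq(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, idx);

	if (txq_ctrl == NULL)
		return -EINVAL;
	/* ... work with txq_ctrl->txq here ... */
	mlx5_txq_release(dev, idx);
	return 0;
}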
12916e78005aSNélio Laranjeiro 
12926e78005aSNélio Laranjeiro /**
12936e78005aSNélio Laranjeiro  * Verify if the queue can be released.
12946e78005aSNélio Laranjeiro  *
1295af4f09f2SNélio Laranjeiro  * @param dev
1296af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
12976e78005aSNélio Laranjeiro  * @param idx
12986e78005aSNélio Laranjeiro  *   TX queue index.
12996e78005aSNélio Laranjeiro  *
13006e78005aSNélio Laranjeiro  * @return
13016e78005aSNélio Laranjeiro  *   1 if the queue can be released, 0 otherwise, -1 if the queue does not exist.
13026e78005aSNélio Laranjeiro  */
13036e78005aSNélio Laranjeiro int
1304af4f09f2SNélio Laranjeiro mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
13056e78005aSNélio Laranjeiro {
1306dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
13076e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *txq;
13086e78005aSNélio Laranjeiro 
13096e78005aSNélio Laranjeiro 	if (!(*priv->txqs)[idx])
13106e78005aSNélio Laranjeiro 		return -1;
13116e78005aSNélio Laranjeiro 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1312e12a0166STyler Retzlaff 	return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
13136e78005aSNélio Laranjeiro }
13146e78005aSNélio Laranjeiro 
13156e78005aSNélio Laranjeiro /**
13166e78005aSNélio Laranjeiro  * Verify the Tx Queue list is empty.
13176e78005aSNélio Laranjeiro  *
1318af4f09f2SNélio Laranjeiro  * @param dev
1319af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
13206e78005aSNélio Laranjeiro  *
1321fb732b0aSNélio Laranjeiro  * @return
1322fb732b0aSNélio Laranjeiro  *   The number of objects not released.
13236e78005aSNélio Laranjeiro  */
13246e78005aSNélio Laranjeiro int
1325af4f09f2SNélio Laranjeiro mlx5_txq_verify(struct rte_eth_dev *dev)
13266e78005aSNélio Laranjeiro {
1327dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1328d5c900d1SYongseok Koh 	struct mlx5_txq_ctrl *txq_ctrl;
13296e78005aSNélio Laranjeiro 	int ret = 0;
13306e78005aSNélio Laranjeiro 
1331d5c900d1SYongseok Koh 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1332a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1333d5c900d1SYongseok Koh 			dev->data->port_id, txq_ctrl->txq.idx);
13346e78005aSNélio Laranjeiro 		++ret;
13356e78005aSNélio Laranjeiro 	}
13366e78005aSNélio Laranjeiro 	return ret;
13376e78005aSNélio Laranjeiro }
13383172c471SViacheslav Ovsiienko 
133926e1eaf2SDariusz Sosnowski int
134026e1eaf2SDariusz Sosnowski mlx5_txq_get_sqn(struct mlx5_txq_ctrl *txq)
134126e1eaf2SDariusz Sosnowski {
134226e1eaf2SDariusz Sosnowski 	return txq->is_hairpin ? txq->obj->sq->id : txq->obj->sq_obj.sq->id;
134326e1eaf2SDariusz Sosnowski }
134426e1eaf2SDariusz Sosnowski 
134526e1eaf2SDariusz Sosnowski int
134626e1eaf2SDariusz Sosnowski rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
134726e1eaf2SDariusz Sosnowski {
134826e1eaf2SDariusz Sosnowski 	struct rte_eth_dev *dev;
134926e1eaf2SDariusz Sosnowski 	struct mlx5_priv *priv;
135026e1eaf2SDariusz Sosnowski 	uint32_t flow;
135126e1eaf2SDariusz Sosnowski 
135226e1eaf2SDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id)) {
135326e1eaf2SDariusz Sosnowski 		DRV_LOG(ERR, "There is no Ethernet device for port %u.",
135426e1eaf2SDariusz Sosnowski 			port_id);
135526e1eaf2SDariusz Sosnowski 		rte_errno = ENODEV;
135626e1eaf2SDariusz Sosnowski 		return -rte_errno;
135726e1eaf2SDariusz Sosnowski 	}
135826e1eaf2SDariusz Sosnowski 	dev = &rte_eth_devices[port_id];
135926e1eaf2SDariusz Sosnowski 	priv = dev->data->dev_private;
136026e1eaf2SDariusz Sosnowski 	if ((!priv->representor && !priv->master) ||
136126e1eaf2SDariusz Sosnowski 	    !priv->sh->config.dv_esw_en) {
136226e1eaf2SDariusz Sosnowski 		DRV_LOG(ERR, "Port %u must be a representor or master port in E-Switch mode.",
136326e1eaf2SDariusz Sosnowski 			port_id);
136426e1eaf2SDariusz Sosnowski 		rte_errno = EINVAL;
136526e1eaf2SDariusz Sosnowski 		return -rte_errno;
136626e1eaf2SDariusz Sosnowski 	}
136726e1eaf2SDariusz Sosnowski 	if (sq_num == 0) {
136826e1eaf2SDariusz Sosnowski 		DRV_LOG(ERR, "Invalid SQ number.");
136926e1eaf2SDariusz Sosnowski 		rte_errno = EINVAL;
137026e1eaf2SDariusz Sosnowski 		return -rte_errno;
137126e1eaf2SDariusz Sosnowski 	}
137226e1eaf2SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
137386f2907cSDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2) {
1374cf9a91c6SDariusz Sosnowski 		bool sq_miss_created = false;
1375cf9a91c6SDariusz Sosnowski 
1376cf9a91c6SDariusz Sosnowski 		if (priv->sh->config.fdb_def_rule) {
1377f37c184aSSuanming Mou 			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true))
137886f2907cSDariusz Sosnowski 				return -rte_errno;
1379cf9a91c6SDariusz Sosnowski 			sq_miss_created = true;
1380cf9a91c6SDariusz Sosnowski 		}
1381cf9a91c6SDariusz Sosnowski 
138286f2907cSDariusz Sosnowski 		if (priv->sh->config.repr_matching &&
1383f37c184aSSuanming Mou 		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) {
1384cf9a91c6SDariusz Sosnowski 			if (sq_miss_created)
138586f2907cSDariusz Sosnowski 				mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
138686f2907cSDariusz Sosnowski 			return -rte_errno;
138786f2907cSDariusz Sosnowski 		}
138886f2907cSDariusz Sosnowski 		return 0;
138986f2907cSDariusz Sosnowski 	}
139026e1eaf2SDariusz Sosnowski #endif
139126e1eaf2SDariusz Sosnowski 	flow = mlx5_flow_create_devx_sq_miss_flow(dev, sq_num);
139226e1eaf2SDariusz Sosnowski 	if (flow > 0)
139326e1eaf2SDariusz Sosnowski 		return 0;
139426e1eaf2SDariusz Sosnowski 	DRV_LOG(ERR, "Port %u failed to create default miss flow for SQ %u.",
139526e1eaf2SDariusz Sosnowski 		port_id, sq_num);
139626e1eaf2SDariusz Sosnowski 	return -rte_errno;
139726e1eaf2SDariusz Sosnowski }
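
/*
 * Editor's illustrative sketch (not part of the driver; application side,
 * the example_ name is hypothetical): a user that creates its own SQ
 * (e.g. through DevX, outside the PMD) calls the API above on the E-Switch
 * master or representor port so that packets sent from that SQ match the
 * default miss/representor-matching flows. Possible usage:
 */
static int
example_enable_external_sq(uint16_t port_id, uint32_t sq_num)
{
	int ret = rte_pmd_mlx5_external_sq_enable(port_id, sq_num);

	if (ret != 0)
		DRV_LOG(ERR, "cannot enable external SQ %u on port %u, rte_errno=%d",
			sq_num, port_id, rte_errno);
	return ret;
}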
139826e1eaf2SDariusz Sosnowski 
13993172c471SViacheslav Ovsiienko /**
14003172c471SViacheslav Ovsiienko  * Set the Tx queue dynamic timestamp (mask and offset)
14013172c471SViacheslav Ovsiienko  *
14023172c471SViacheslav Ovsiienko  * @param[in] dev
14033172c471SViacheslav Ovsiienko  *   Pointer to the Ethernet device structure.
14043172c471SViacheslav Ovsiienko  */
14053172c471SViacheslav Ovsiienko void
14063172c471SViacheslav Ovsiienko mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
14073172c471SViacheslav Ovsiienko {
14083172c471SViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
14093172c471SViacheslav Ovsiienko 	struct mlx5_dev_ctx_shared *sh = priv->sh;
14103172c471SViacheslav Ovsiienko 	struct mlx5_txq_data *data;
14113172c471SViacheslav Ovsiienko 	int off, nbit;
14123172c471SViacheslav Ovsiienko 	unsigned int i;
14133172c471SViacheslav Ovsiienko 	uint64_t mask = 0;
14142f5122dfSViacheslav Ovsiienko 	uint64_t ts_mask;
14153172c471SViacheslav Ovsiienko 
14162f5122dfSViacheslav Ovsiienko 	if (sh->dev_cap.rt_timestamp ||
14172f5122dfSViacheslav Ovsiienko 	    !sh->cdev->config.hca_attr.dev_freq_khz)
14182f5122dfSViacheslav Ovsiienko 		ts_mask = MLX5_TS_MASK_SECS << 32;
14192f5122dfSViacheslav Ovsiienko 	else
14202f5122dfSViacheslav Ovsiienko 		ts_mask = rte_align64pow2(MLX5_TS_MASK_SECS * 1000ull *
14212f5122dfSViacheslav Ovsiienko 				sh->cdev->config.hca_attr.dev_freq_khz);
14222f5122dfSViacheslav Ovsiienko 	ts_mask = rte_cpu_to_be_64(ts_mask - 1ull);
14233172c471SViacheslav Ovsiienko 	nbit = rte_mbuf_dynflag_lookup
14243172c471SViacheslav Ovsiienko 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
14253172c471SViacheslav Ovsiienko 	off = rte_mbuf_dynfield_lookup
14263172c471SViacheslav Ovsiienko 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
14272f5122dfSViacheslav Ovsiienko 	if (nbit >= 0 && off >= 0 &&
14282f5122dfSViacheslav Ovsiienko 	    (sh->txpp.refcnt || priv->sh->cdev->config.hca_attr.wait_on_time))
14293172c471SViacheslav Ovsiienko 		mask = 1ULL << nbit;
14303172c471SViacheslav Ovsiienko 	for (i = 0; i != priv->txqs_n; ++i) {
14313172c471SViacheslav Ovsiienko 		data = (*priv->txqs)[i];
14323172c471SViacheslav Ovsiienko 		if (!data)
14333172c471SViacheslav Ovsiienko 			continue;
14343172c471SViacheslav Ovsiienko 		data->sh = sh;
14353172c471SViacheslav Ovsiienko 		data->ts_mask = mask;
14363172c471SViacheslav Ovsiienko 		data->ts_offset = off;
14372f5122dfSViacheslav Ovsiienko 		data->rt_timestamp = sh->dev_cap.rt_timestamp;
14382f5122dfSViacheslav Ovsiienko 		data->rt_timemask = (data->offloads &
14392f5122dfSViacheslav Ovsiienko 				     RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) ?
14402f5122dfSViacheslav Ovsiienko 				     ts_mask : 0;
14413172c471SViacheslav Ovsiienko 	}
14423172c471SViacheslav Ovsiienko }
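
/*
 * Editor's illustrative sketch (not part of the driver; application side,
 * names prefixed example_ are hypothetical): the offsets resolved above are
 * the generic mbuf dynamic timestamp field and Tx timestamp flag, which an
 * application registers and fills in before calling rte_eth_tx_burst(),
 * assuming the RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP offload is enabled:
 */
static int example_ts_offset;		/* dynamic field offset */
static uint64_t example_ts_flag;	/* dynamic flag mask */

static int
example_register_tx_timestamp(void)
{
	/* Registers (or looks up) both the field and the flag. */
	return rte_mbuf_dyn_tx_timestamp_register(&example_ts_offset,
						  &example_ts_flag);
}

static void
example_schedule_mbuf(struct rte_mbuf *m, uint64_t when)
{
	/* Store the desired transmit time and mark the mbuf accordingly. */
	*RTE_MBUF_DYNFIELD(m, example_ts_offset, uint64_t *) = when;
	m->ol_flags |= example_ts_flag;
}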
1443ce306af6SJiawei Wang 
1444ce306af6SJiawei Wang int mlx5_count_aggr_ports(struct rte_eth_dev *dev)
1445ce306af6SJiawei Wang {
1446ce306af6SJiawei Wang 	struct mlx5_priv *priv = dev->data->dev_private;
1447ce306af6SJiawei Wang 
1448ce306af6SJiawei Wang 	return priv->sh->bond.n_port;
1449ce306af6SJiawei Wang }
1450ce306af6SJiawei Wang 
1451ce306af6SJiawei Wang int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1452ce306af6SJiawei Wang 			      uint8_t affinity)
1453ce306af6SJiawei Wang {
1454ce306af6SJiawei Wang 	struct mlx5_txq_ctrl *txq_ctrl;
1455ce306af6SJiawei Wang 	struct mlx5_txq_data *txq;
1456ce306af6SJiawei Wang 	struct mlx5_priv *priv;
1457ce306af6SJiawei Wang 
1458ce306af6SJiawei Wang 	priv = dev->data->dev_private;
1459bd80a1bcSJiawei Wang 	if (!mlx5_devx_obj_ops_en(priv->sh)) {
1460bd80a1bcSJiawei Wang 		DRV_LOG(ERR, "Tx affinity mapping isn't supported by Verbs API.");
1461bd80a1bcSJiawei Wang 		rte_errno = ENOTSUP;
1462bd80a1bcSJiawei Wang 		return -rte_errno;
1463bd80a1bcSJiawei Wang 	}
1464ce306af6SJiawei Wang 	txq = (*priv->txqs)[tx_queue_id];
1465ce306af6SJiawei Wang 	if (!txq)
1466ce306af6SJiawei Wang 		return -1;
1467ce306af6SJiawei Wang 	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
1468ce306af6SJiawei Wang 	if (tx_queue_id >= priv->txqs_n) {
1469ce306af6SJiawei Wang 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
1470ce306af6SJiawei Wang 			dev->data->port_id, tx_queue_id, priv->txqs_n);
1471ce306af6SJiawei Wang 		rte_errno = EOVERFLOW;
1472ce306af6SJiawei Wang 		return -rte_errno;
1473ce306af6SJiawei Wang 	}
1474ce306af6SJiawei Wang 	if (affinity > priv->num_lag_ports) {
1475ce306af6SJiawei Wang 		DRV_LOG(ERR, "port %u unable to setup Tx queue index %u:"
1476ce306af6SJiawei Wang 			" affinity %u exceeds the maximum %u", dev->data->port_id,
1477ce306af6SJiawei Wang 			tx_queue_id, affinity, priv->num_lag_ports);
1478ce306af6SJiawei Wang 		rte_errno = EINVAL;
1479ce306af6SJiawei Wang 		return -rte_errno;
1480ce306af6SJiawei Wang 	}
1481ce306af6SJiawei Wang 	DRV_LOG(DEBUG, "port %u configuring queue %u for aggregated affinity %u",
1482ce306af6SJiawei Wang 		dev->data->port_id, tx_queue_id, affinity);
1483ce306af6SJiawei Wang 	txq_ctrl->txq.tx_aggr_affinity = affinity;
1484ce306af6SJiawei Wang 	return 0;
1485ce306af6SJiawei Wang }
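
/*
 * Editor's illustrative sketch (not part of the driver; application side,
 * the example_ name is hypothetical): the two callbacks above back the
 * generic ethdev API for aggregated (hardware-bonded) ports. An application
 * typically queries the number of aggregated ports and then pins each Tx
 * queue to one of them after queue setup and before device start:
 */
static void
example_pin_tx_queues(uint16_t port_id, uint16_t nb_txq)
{
	int aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	uint16_t q;

	if (aggr_ports <= 0)
		return; /* Not an aggregated port, nothing to map. */
	for (q = 0; q < nb_txq; q++) {
		/* Affinity values start at 1; 0 leaves the choice to the PMD. */
		uint8_t affinity = (uint8_t)(q % aggr_ports + 1);

		if (rte_eth_dev_map_aggr_tx_affinity(port_id, q, affinity) != 0)
			DRV_LOG(WARNING, "cannot map Tx queue %u affinity", q);
	}
}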
14861944fbc3SSuanming Mou 
14871944fbc3SSuanming Mou /**
14881944fbc3SSuanming Mou  * Validate given external TxQ rte_flow index, and get pointer to concurrent
14891944fbc3SSuanming Mou  * external TxQ object to map/unmap.
14901944fbc3SSuanming Mou  *
14911944fbc3SSuanming Mou  * @param[in] port_id
14921944fbc3SSuanming Mou  *   The port identifier of the Ethernet device.
14931944fbc3SSuanming Mou  * @param[in] dpdk_idx
14941944fbc3SSuanming Mou  *   Tx Queue index in rte_flow.
14951944fbc3SSuanming Mou  *
14961944fbc3SSuanming Mou  * @return
14971944fbc3SSuanming Mou  *   Pointer to concurrent external TxQ on success,
14981944fbc3SSuanming Mou  *   NULL otherwise and rte_errno is set.
14991944fbc3SSuanming Mou  */
15001944fbc3SSuanming Mou static struct mlx5_external_q *
15011944fbc3SSuanming Mou mlx5_external_tx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
15021944fbc3SSuanming Mou {
15031944fbc3SSuanming Mou 	struct rte_eth_dev *dev;
15041944fbc3SSuanming Mou 	struct mlx5_priv *priv;
15051944fbc3SSuanming Mou 	int ret;
15061944fbc3SSuanming Mou 
15071944fbc3SSuanming Mou 	if (dpdk_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
15081944fbc3SSuanming Mou 		DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
15091944fbc3SSuanming Mou 			dpdk_idx, MLX5_EXTERNAL_TX_QUEUE_ID_MIN, UINT16_MAX);
15101944fbc3SSuanming Mou 		rte_errno = EINVAL;
15111944fbc3SSuanming Mou 		return NULL;
15121944fbc3SSuanming Mou 	}
15131944fbc3SSuanming Mou 	ret = mlx5_devx_extq_port_validate(port_id);
15141944fbc3SSuanming Mou 	if (unlikely(ret))
15151944fbc3SSuanming Mou 		return NULL;
15161944fbc3SSuanming Mou 	dev = &rte_eth_devices[port_id];
15171944fbc3SSuanming Mou 	priv = dev->data->dev_private;
15181944fbc3SSuanming Mou 	/*
15191944fbc3SSuanming Mou 	 * When user configures remote PD and CTX and device creates TxQ by
15201944fbc3SSuanming Mou 	 * DevX, external TxQs array is allocated.
15211944fbc3SSuanming Mou 	 */
15221944fbc3SSuanming Mou 	MLX5_ASSERT(priv->ext_txqs != NULL);
15231944fbc3SSuanming Mou 	return &priv->ext_txqs[dpdk_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
15241944fbc3SSuanming Mou }
15251944fbc3SSuanming Mou 
15261944fbc3SSuanming Mou int
15271944fbc3SSuanming Mou rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
15281944fbc3SSuanming Mou 				      uint32_t hw_idx)
15291944fbc3SSuanming Mou {
15301944fbc3SSuanming Mou 	struct mlx5_external_q *ext_txq;
15311944fbc3SSuanming Mou 	uint32_t unmapped = 0;
15321944fbc3SSuanming Mou 
15331944fbc3SSuanming Mou 	ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
15341944fbc3SSuanming Mou 	if (ext_txq == NULL)
15351944fbc3SSuanming Mou 		return -rte_errno;
15361944fbc3SSuanming Mou 	if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &unmapped, 1,
15371944fbc3SSuanming Mou 					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
15381944fbc3SSuanming Mou 		if (ext_txq->hw_id != hw_idx) {
15391944fbc3SSuanming Mou 			DRV_LOG(ERR, "Port %u external TxQ index %u "
15401944fbc3SSuanming Mou 				"is already mapped to HW index (requesting is "
15411944fbc3SSuanming Mou 				"%u, existing is %u).",
15421944fbc3SSuanming Mou 				port_id, dpdk_idx, hw_idx, ext_txq->hw_id);
15431944fbc3SSuanming Mou 			rte_errno = EEXIST;
15441944fbc3SSuanming Mou 			return -rte_errno;
15451944fbc3SSuanming Mou 		}
15461944fbc3SSuanming Mou 		DRV_LOG(WARNING, "Port %u external TxQ index %u "
15471944fbc3SSuanming Mou 			"is already mapped to the requested HW index (%u)",
15481944fbc3SSuanming Mou 			port_id, dpdk_idx, hw_idx);
15491944fbc3SSuanming Mou 
15501944fbc3SSuanming Mou 	} else {
15511944fbc3SSuanming Mou 		ext_txq->hw_id = hw_idx;
15521944fbc3SSuanming Mou 		DRV_LOG(DEBUG, "Port %u external TxQ index %u "
15531944fbc3SSuanming Mou 			"is successfully mapped to the requested HW index (%u)",
15541944fbc3SSuanming Mou 			port_id, dpdk_idx, hw_idx);
15551944fbc3SSuanming Mou 	}
15561944fbc3SSuanming Mou 	return 0;
15571944fbc3SSuanming Mou }
15581944fbc3SSuanming Mou 
15591944fbc3SSuanming Mou int
15601944fbc3SSuanming Mou rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
15611944fbc3SSuanming Mou {
15621944fbc3SSuanming Mou 	struct mlx5_external_q *ext_txq;
15631944fbc3SSuanming Mou 	uint32_t mapped = 1;
15641944fbc3SSuanming Mou 
15651944fbc3SSuanming Mou 	ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
15661944fbc3SSuanming Mou 	if (ext_txq == NULL)
15671944fbc3SSuanming Mou 		return -rte_errno;
15681944fbc3SSuanming Mou 	if (ext_txq->refcnt > 1) {
15691944fbc3SSuanming Mou 		DRV_LOG(ERR, "Port %u external TxQ index %u still referenced.",
15701944fbc3SSuanming Mou 			port_id, dpdk_idx);
15711944fbc3SSuanming Mou 		rte_errno = EINVAL;
15721944fbc3SSuanming Mou 		return -rte_errno;
15731944fbc3SSuanming Mou 	}
15741944fbc3SSuanming Mou 	if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &mapped, 0,
15751944fbc3SSuanming Mou 					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
15761944fbc3SSuanming Mou 		DRV_LOG(ERR, "Port %u external TxQ index %u doesn't exist.",
15771944fbc3SSuanming Mou 			port_id, dpdk_idx);
15781944fbc3SSuanming Mou 		rte_errno = EINVAL;
15791944fbc3SSuanming Mou 		return -rte_errno;
15801944fbc3SSuanming Mou 	}
15811944fbc3SSuanming Mou 	DRV_LOG(DEBUG,
15821944fbc3SSuanming Mou 		"Port %u external TxQ index %u is successfully unmapped.",
15831944fbc3SSuanming Mou 		port_id, dpdk_idx);
15841944fbc3SSuanming Mou 	return 0;
15851944fbc3SSuanming Mou }
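
/*
 * Editor's illustrative sketch (not part of the driver; application side,
 * the example_ name is hypothetical): an application that owns its own SQ
 * can expose it to rte_flow as an external Tx queue by mapping an index
 * from the reserved external range to the hardware SQ number, and must
 * unmap it before tearing the SQ down:
 */
static int
example_map_external_txq(uint16_t port_id, uint32_t sq_hw_id)
{
	/* Pick any index in the reserved external range. */
	uint16_t dpdk_idx = MLX5_EXTERNAL_TX_QUEUE_ID_MIN;
	int ret;

	ret = rte_pmd_mlx5_external_tx_queue_id_map(port_id, dpdk_idx, sq_hw_id);
	if (ret != 0)
		return ret;
	/* ... use dpdk_idx wherever rte_flow expects a Tx queue index ... */
	return rte_pmd_mlx5_external_tx_queue_id_unmap(port_id, dpdk_idx);
}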
1586