xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision a170a30d22a8c34c36541d0dd6bcc2fcc4c9ee2f)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
22e22920bSAdrien Mazarguil  * Copyright 2015 6WIND S.A.
32e22920bSAdrien Mazarguil  * Copyright 2015 Mellanox.
42e22920bSAdrien Mazarguil  */
52e22920bSAdrien Mazarguil 
62e22920bSAdrien Mazarguil #include <stddef.h>
72e22920bSAdrien Mazarguil #include <assert.h>
82e22920bSAdrien Mazarguil #include <errno.h>
92e22920bSAdrien Mazarguil #include <string.h>
102e22920bSAdrien Mazarguil #include <stdint.h>
11f8b9a3baSXueming Li #include <unistd.h>
12f8b9a3baSXueming Li #include <sys/mman.h>
132e22920bSAdrien Mazarguil 
142e22920bSAdrien Mazarguil /* Verbs header. */
152e22920bSAdrien Mazarguil /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
162e22920bSAdrien Mazarguil #ifdef PEDANTIC
17fc5b160fSBruce Richardson #pragma GCC diagnostic ignored "-Wpedantic"
182e22920bSAdrien Mazarguil #endif
192e22920bSAdrien Mazarguil #include <infiniband/verbs.h>
202e22920bSAdrien Mazarguil #ifdef PEDANTIC
21fc5b160fSBruce Richardson #pragma GCC diagnostic error "-Wpedantic"
222e22920bSAdrien Mazarguil #endif
232e22920bSAdrien Mazarguil 
242e22920bSAdrien Mazarguil #include <rte_mbuf.h>
252e22920bSAdrien Mazarguil #include <rte_malloc.h>
26ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h>
272e22920bSAdrien Mazarguil #include <rte_common.h>
282e22920bSAdrien Mazarguil 
292e22920bSAdrien Mazarguil #include "mlx5_utils.h"
301d88ba17SNélio Laranjeiro #include "mlx5_defs.h"
312e22920bSAdrien Mazarguil #include "mlx5.h"
322e22920bSAdrien Mazarguil #include "mlx5_rxtx.h"
332e22920bSAdrien Mazarguil #include "mlx5_autoconf.h"
340e83b8e5SNelio Laranjeiro #include "mlx5_glue.h"
352e22920bSAdrien Mazarguil 
362e22920bSAdrien Mazarguil /**
372e22920bSAdrien Mazarguil  * Allocate TX queue elements.
382e22920bSAdrien Mazarguil  *
3921c8bb49SNélio Laranjeiro  * @param txq_ctrl
402e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
412e22920bSAdrien Mazarguil  */
426e78005aSNélio Laranjeiro void
436e78005aSNélio Laranjeiro txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
442e22920bSAdrien Mazarguil {
456e78005aSNélio Laranjeiro 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
462e22920bSAdrien Mazarguil 	unsigned int i;
472e22920bSAdrien Mazarguil 
481d88ba17SNélio Laranjeiro 	for (i = 0; (i != elts_n); ++i)
491d88ba17SNélio Laranjeiro 		(*txq_ctrl->txq.elts)[i] = NULL;
50*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
510f99970bSNélio Laranjeiro 		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
5221c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_head = 0;
5321c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_tail = 0;
54c305090bSAdrien Mazarguil 	txq_ctrl->txq.elts_comp = 0;
552e22920bSAdrien Mazarguil }
562e22920bSAdrien Mazarguil 
572e22920bSAdrien Mazarguil /**
582e22920bSAdrien Mazarguil  * Free TX queue elements.
592e22920bSAdrien Mazarguil  *
6021c8bb49SNélio Laranjeiro  * @param txq_ctrl
612e22920bSAdrien Mazarguil  *   Pointer to TX queue structure.
622e22920bSAdrien Mazarguil  */
632e22920bSAdrien Mazarguil static void
64991b04f6SNélio Laranjeiro txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
652e22920bSAdrien Mazarguil {
668c819a69SYongseok Koh 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
678c819a69SYongseok Koh 	const uint16_t elts_m = elts_n - 1;
688c819a69SYongseok Koh 	uint16_t elts_head = txq_ctrl->txq.elts_head;
698c819a69SYongseok Koh 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
701d88ba17SNélio Laranjeiro 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
712e22920bSAdrien Mazarguil 
72*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
730f99970bSNélio Laranjeiro 		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
7421c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_head = 0;
7521c8bb49SNélio Laranjeiro 	txq_ctrl->txq.elts_tail = 0;
76c305090bSAdrien Mazarguil 	txq_ctrl->txq.elts_comp = 0;
772e22920bSAdrien Mazarguil 
78b185e63fSAdrien Mazarguil 	while (elts_tail != elts_head) {
798c819a69SYongseok Koh 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
802e22920bSAdrien Mazarguil 
811d88ba17SNélio Laranjeiro 		assert(elt != NULL);
82c80711c3SYongseok Koh 		rte_pktmbuf_free_seg(elt);
83b185e63fSAdrien Mazarguil #ifndef NDEBUG
84b185e63fSAdrien Mazarguil 		/* Poisoning. */
858c819a69SYongseok Koh 		memset(&(*elts)[elts_tail & elts_m],
861d88ba17SNélio Laranjeiro 		       0x77,
878c819a69SYongseok Koh 		       sizeof((*elts)[elts_tail & elts_m]));
88b185e63fSAdrien Mazarguil #endif
898c819a69SYongseok Koh 		++elts_tail;
902e22920bSAdrien Mazarguil 	}
912e22920bSAdrien Mazarguil }
922e22920bSAdrien Mazarguil 
932e22920bSAdrien Mazarguil /**
94dbccb4cdSShahaf Shuler  * Returns the per-port supported offloads.
95dbccb4cdSShahaf Shuler  *
96af4f09f2SNélio Laranjeiro  * @param dev
97af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
98dbccb4cdSShahaf Shuler  *
99dbccb4cdSShahaf Shuler  * @return
100dbccb4cdSShahaf Shuler  *   Supported Tx offloads.
101dbccb4cdSShahaf Shuler  */
102dbccb4cdSShahaf Shuler uint64_t
103af4f09f2SNélio Laranjeiro mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
104dbccb4cdSShahaf Shuler {
105af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
106dbccb4cdSShahaf Shuler 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
107dbccb4cdSShahaf Shuler 			     DEV_TX_OFFLOAD_VLAN_INSERT);
108dbccb4cdSShahaf Shuler 	struct mlx5_dev_config *config = &priv->config;
109dbccb4cdSShahaf Shuler 
110dbccb4cdSShahaf Shuler 	if (config->hw_csum)
111dbccb4cdSShahaf Shuler 		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
112dbccb4cdSShahaf Shuler 			     DEV_TX_OFFLOAD_UDP_CKSUM |
113dbccb4cdSShahaf Shuler 			     DEV_TX_OFFLOAD_TCP_CKSUM);
114dbccb4cdSShahaf Shuler 	if (config->tso)
115dbccb4cdSShahaf Shuler 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
116dbccb4cdSShahaf Shuler 	if (config->tunnel_en) {
117dbccb4cdSShahaf Shuler 		if (config->hw_csum)
118dbccb4cdSShahaf Shuler 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
119dbccb4cdSShahaf Shuler 		if (config->tso)
120dbccb4cdSShahaf Shuler 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
121dbccb4cdSShahaf Shuler 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
122dbccb4cdSShahaf Shuler 	}
123dbccb4cdSShahaf Shuler 	return offloads;
124dbccb4cdSShahaf Shuler }
125dbccb4cdSShahaf Shuler 
126dbccb4cdSShahaf Shuler /**
127dbccb4cdSShahaf Shuler  * Checks if the per-queue offload configuration is valid.
128dbccb4cdSShahaf Shuler  *
129af4f09f2SNélio Laranjeiro  * @param dev
130af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
131dbccb4cdSShahaf Shuler  * @param offloads
132dbccb4cdSShahaf Shuler  *   Per-queue offloads configuration.
133dbccb4cdSShahaf Shuler  *
134dbccb4cdSShahaf Shuler  * @return
135dbccb4cdSShahaf Shuler  *   1 if the configuration is valid, 0 otherwise.
136dbccb4cdSShahaf Shuler  */
137dbccb4cdSShahaf Shuler static int
138af4f09f2SNélio Laranjeiro mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
139dbccb4cdSShahaf Shuler {
140af4f09f2SNélio Laranjeiro 	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
141af4f09f2SNélio Laranjeiro 	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
142dbccb4cdSShahaf Shuler 
143dbccb4cdSShahaf Shuler 	/* There are no Tx offloads which are per queue. */
144dbccb4cdSShahaf Shuler 	if ((offloads & port_supp_offloads) != offloads)
145dbccb4cdSShahaf Shuler 		return 0;
146dbccb4cdSShahaf Shuler 	if ((port_offloads ^ offloads) & port_supp_offloads)
147dbccb4cdSShahaf Shuler 		return 0;
148dbccb4cdSShahaf Shuler 	return 1;
149dbccb4cdSShahaf Shuler }
150dbccb4cdSShahaf Shuler 
151dbccb4cdSShahaf Shuler /**
1522e22920bSAdrien Mazarguil  * DPDK callback to configure a TX queue.
1532e22920bSAdrien Mazarguil  *
1542e22920bSAdrien Mazarguil  * @param dev
1552e22920bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
1562e22920bSAdrien Mazarguil  * @param idx
1572e22920bSAdrien Mazarguil  *   TX queue index.
1582e22920bSAdrien Mazarguil  * @param desc
1592e22920bSAdrien Mazarguil  *   Number of descriptors to configure in queue.
1602e22920bSAdrien Mazarguil  * @param socket
1612e22920bSAdrien Mazarguil  *   NUMA socket on which memory must be allocated.
1622e22920bSAdrien Mazarguil  * @param[in] conf
1632e22920bSAdrien Mazarguil  *   Thresholds parameters.
1642e22920bSAdrien Mazarguil  *
1652e22920bSAdrien Mazarguil  * @return
166a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1672e22920bSAdrien Mazarguil  */
1682e22920bSAdrien Mazarguil int
1692e22920bSAdrien Mazarguil mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1702e22920bSAdrien Mazarguil 		    unsigned int socket, const struct rte_eth_txconf *conf)
1712e22920bSAdrien Mazarguil {
1722e22920bSAdrien Mazarguil 	struct priv *priv = dev->data->dev_private;
173991b04f6SNélio Laranjeiro 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
174991b04f6SNélio Laranjeiro 	struct mlx5_txq_ctrl *txq_ctrl =
175991b04f6SNélio Laranjeiro 		container_of(txq, struct mlx5_txq_ctrl, txq);
1762e22920bSAdrien Mazarguil 
177dbccb4cdSShahaf Shuler 	/*
178dbccb4cdSShahaf Shuler 	 * Don't verify port offloads for application which
179dbccb4cdSShahaf Shuler 	 * use the old API.
180dbccb4cdSShahaf Shuler 	 */
181dbccb4cdSShahaf Shuler 	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
182af4f09f2SNélio Laranjeiro 	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
183a6d83b6aSNélio Laranjeiro 		rte_errno = ENOTSUP;
184*a170a30dSNélio Laranjeiro 		DRV_LOG(ERR,
185*a170a30dSNélio Laranjeiro 			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
1860f99970bSNélio Laranjeiro 			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
1870f99970bSNélio Laranjeiro 			PRIx64,
1880f99970bSNélio Laranjeiro 			dev->data->port_id, conf->offloads,
189dbccb4cdSShahaf Shuler 			dev->data->dev_conf.txmode.offloads,
190af4f09f2SNélio Laranjeiro 			mlx5_get_tx_port_offloads(dev));
191a6d83b6aSNélio Laranjeiro 		return -rte_errno;
192dbccb4cdSShahaf Shuler 	}
193c305090bSAdrien Mazarguil 	if (desc <= MLX5_TX_COMP_THRESH) {
194*a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING,
195*a170a30dSNélio Laranjeiro 			"port %u number of descriptors requested for Tx queue"
196*a170a30dSNélio Laranjeiro 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
197*a170a30dSNélio Laranjeiro 			" instead of %u",
1980f99970bSNélio Laranjeiro 			dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
199c305090bSAdrien Mazarguil 		desc = MLX5_TX_COMP_THRESH + 1;
200c305090bSAdrien Mazarguil 	}
2011d88ba17SNélio Laranjeiro 	if (!rte_is_power_of_2(desc)) {
2021d88ba17SNélio Laranjeiro 		desc = 1 << log2above(desc);
203*a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING,
204*a170a30dSNélio Laranjeiro 			"port %u increased number of descriptors in Tx queue"
205*a170a30dSNélio Laranjeiro 			" %u to the next power of two (%d)",
2060f99970bSNélio Laranjeiro 			dev->data->port_id, idx, desc);
2071d88ba17SNélio Laranjeiro 	}
208*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
2090f99970bSNélio Laranjeiro 		dev->data->port_id, idx, desc);
2102e22920bSAdrien Mazarguil 	if (idx >= priv->txqs_n) {
211*a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
2120f99970bSNélio Laranjeiro 			dev->data->port_id, idx, priv->txqs_n);
213a6d83b6aSNélio Laranjeiro 		rte_errno = EOVERFLOW;
214a6d83b6aSNélio Laranjeiro 		return -rte_errno;
2152e22920bSAdrien Mazarguil 	}
216af4f09f2SNélio Laranjeiro 	if (!mlx5_txq_releasable(dev, idx)) {
217a6d83b6aSNélio Laranjeiro 		rte_errno = EBUSY;
218*a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u unable to release queue index %u",
2190f99970bSNélio Laranjeiro 			dev->data->port_id, idx);
220a6d83b6aSNélio Laranjeiro 		return -rte_errno;
221faf2667fSNélio Laranjeiro 	}
222af4f09f2SNélio Laranjeiro 	mlx5_txq_release(dev, idx);
223af4f09f2SNélio Laranjeiro 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
2246e78005aSNélio Laranjeiro 	if (!txq_ctrl) {
225*a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
2260f99970bSNélio Laranjeiro 			dev->data->port_id, idx);
227a6d83b6aSNélio Laranjeiro 		return -rte_errno;
2286e78005aSNélio Laranjeiro 	}
229*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
230*a170a30dSNélio Laranjeiro 		dev->data->port_id, idx);
23121c8bb49SNélio Laranjeiro 	(*priv->txqs)[idx] = &txq_ctrl->txq;
232a6d83b6aSNélio Laranjeiro 	return 0;
2332e22920bSAdrien Mazarguil }
2342e22920bSAdrien Mazarguil 
2352e22920bSAdrien Mazarguil /**
2362e22920bSAdrien Mazarguil  * DPDK callback to release a TX queue.
2372e22920bSAdrien Mazarguil  *
2382e22920bSAdrien Mazarguil  * @param dpdk_txq
2392e22920bSAdrien Mazarguil  *   Generic TX queue pointer.
2402e22920bSAdrien Mazarguil  */
2412e22920bSAdrien Mazarguil void
2422e22920bSAdrien Mazarguil mlx5_tx_queue_release(void *dpdk_txq)
2432e22920bSAdrien Mazarguil {
244991b04f6SNélio Laranjeiro 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
245991b04f6SNélio Laranjeiro 	struct mlx5_txq_ctrl *txq_ctrl;
2462e22920bSAdrien Mazarguil 	struct priv *priv;
2472e22920bSAdrien Mazarguil 	unsigned int i;
2482e22920bSAdrien Mazarguil 
2492e22920bSAdrien Mazarguil 	if (txq == NULL)
2502e22920bSAdrien Mazarguil 		return;
251991b04f6SNélio Laranjeiro 	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
2521d88ba17SNélio Laranjeiro 	priv = txq_ctrl->priv;
2532e22920bSAdrien Mazarguil 	for (i = 0; (i != priv->txqs_n); ++i)
2542e22920bSAdrien Mazarguil 		if ((*priv->txqs)[i] == txq) {
255a6d83b6aSNélio Laranjeiro 			mlx5_txq_release(priv->dev, i);
256*a170a30dSNélio Laranjeiro 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
2570f99970bSNélio Laranjeiro 				priv->dev->data->port_id, txq_ctrl->idx);
2582e22920bSAdrien Mazarguil 			break;
2592e22920bSAdrien Mazarguil 		}
2602e22920bSAdrien Mazarguil }
261f8b9a3baSXueming Li 
262f8b9a3baSXueming Li 
/**
 * Mmap TX UAR(HW doorbell) pages into reserved UAR address space.
 * Both primary and secondary process do mmap to make UAR address
 * aligned.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i, j;
	/* One slot per Tx queue; NOTE(review): VLA assumes txqs_n > 0 —
	 * confirm callers never reach here with zero queues.
	 */
	uintptr_t pages[priv->txqs_n];
	unsigned int pages_n = 0; /* Number of distinct pages mapped so far. */
	uintptr_t uar_va;
	uintptr_t off;
	void *addr;
	void *ret;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	int already_mapped;
	size_t page_size = sysconf(_SC_PAGESIZE);

	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
	/*
	 * As rdma-core, UARs are mapped in size of OS page size.
	 * Use aligned address to avoid duplicate mmap.
	 * Ref to libmlx5 function: mlx5_init_context()
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		assert(txq_ctrl->idx == (uint16_t)i);
		/* UAR addr form verbs used to find dup and offset in page. */
		uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
		off = uar_va & (page_size - 1); /* offset in page. */
		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
		/* Linear scan of already-mapped pages to skip duplicates. */
		already_mapped = 0;
		for (j = 0; j != pages_n; ++j) {
			if (pages[j] == uar_va) {
				already_mapped = 1;
				break;
			}
		}
		/* new address in reserved UAR address space. */
		addr = RTE_PTR_ADD(priv->uar_base,
				   uar_va & (MLX5_UAR_SIZE - 1));
		if (!already_mapped) {
			pages[pages_n++] = uar_va;
			/* fixed mmap to specified address in reserved
			 * address space.
			 */
			ret = mmap(addr, page_size,
				   PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
				   txq_ctrl->uar_mmap_offset);
			if (ret != addr) {
				/* fixed mmap have to return same address */
				DRV_LOG(ERR,
					"port %u call to mmap failed on UAR"
					" for txq %u",
					dev->data->port_id, txq_ctrl->idx);
				rte_errno = ENXIO;
				return -rte_errno;
			}
		}
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
			txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
		else
			/* Secondary process must land on the same address. */
			assert(txq_ctrl->txq.bf_reg ==
			       RTE_PTR_ADD((void *)addr, off));
	}
	return 0;
}
344faf2667fSNélio Laranjeiro 
345faf2667fSNélio Laranjeiro /**
3467fe24446SShahaf Shuler  * Check if the burst function is using eMPW.
3477fe24446SShahaf Shuler  *
3487fe24446SShahaf Shuler  * @param tx_pkt_burst
3497fe24446SShahaf Shuler  *   Tx burst function pointer.
3507fe24446SShahaf Shuler  *
3517fe24446SShahaf Shuler  * @return
3527fe24446SShahaf Shuler  *   1 if the burst function is using eMPW, 0 otherwise.
3537fe24446SShahaf Shuler  */
3547fe24446SShahaf Shuler static int
3557fe24446SShahaf Shuler is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
3567fe24446SShahaf Shuler {
3577fe24446SShahaf Shuler 	if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
3587fe24446SShahaf Shuler 	    tx_pkt_burst == mlx5_tx_burst_vec ||
3597fe24446SShahaf Shuler 	    tx_pkt_burst == mlx5_tx_burst_empw)
3607fe24446SShahaf Shuler 		return 1;
3617fe24446SShahaf Shuler 	return 0;
3627fe24446SShahaf Shuler }
3637fe24446SShahaf Shuler 
/**
 * Create the Tx queue Verbs object.
 *
 * Allocates the CQ and QP, walks the QP through INIT/RTR/RTS, then
 * extracts the direct-verbs (mlx5dv) layout needed by the data path.
 * On any failure the partially created Verbs resources are destroyed
 * via the goto-cleanup path and rte_errno is preserved.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array
 *
 * @return
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_ibv tmpl;
	struct mlx5_txq_ibv *txq_ibv;
	union {
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n;
	struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
	int ret = 0;

	assert(txq_data);
	/* Tag Verbs allocations so the alloc hooks know their owner. */
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		DRV_LOG(ERR,
			"port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
			dev->data->port_id);
		rte_errno = EINVAL;
		return NULL;
	}
	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
	/* MRs will be registered in mp2mr[] later. */
	attr.cq = (struct ibv_cq_init_attr_ex){
		.comp_mask = 0,
	};
	/* One CQE per completion threshold worth of descriptors, min 1. */
	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
	if (is_empw_burst_func(tx_pkt_burst))
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
	tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr =
				((priv->device_attr.orig_attr.max_qp_wr <
				  desc) ?
				 priv->device_attr.orig_attr.max_qp_wr :
				 desc),
			/*
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to affect
			 * too much memory. TX gather is not impacted by the
			 * priv->device_attr.max_sge limit and will still work
			 * properly.
			 */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/*
		 * Do *NOT* enable this, completions events are managed per
		 * Tx burst.
		 */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	};
	if (txq_data->max_inline)
		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		attr.init.max_tso_header = txq_ctrl->max_tso_header;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
				   (IBV_QP_STATE | IBV_QP_PORT));
	if (ret) {
		DRV_LOG(ERR,
			"port %u Tx queue %u QP state to IBV_QPS_INIT failed",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR,
			"port %u Tx queue %u QP state to IBV_QPS_RTR failed",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR,
			"port %u Tx queue %u QP state to IBV_QPS_RTS failed",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
				    txq_ctrl->socket);
	if (!txq_ibv) {
		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Query the direct-verbs layout of the CQ and QP. */
	obj.cq.in = tmpl.cq;
	obj.cq.out = &cq_info;
	obj.qp.in = tmpl.qp;
	obj.qp.out = &qp;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0) {
		rte_errno = errno;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Publish the data-path view of the queue. */
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_ctrl->bf_reg_orig = qp.bf.reg;
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	txq_data->cq_ci = 0;
#ifndef NDEBUG
	txq_data->cq_pi = 0;
#endif
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_ibv->qp = tmpl.qp;
	txq_ibv->cq = tmpl.cq;
	rte_atomic32_inc(&txq_ibv->refcnt);
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
	} else {
		DRV_LOG(ERR,
			"port %u failed to retrieve UAR info, invalid"
			" libmlx5.so",
			dev->data->port_id);
		rte_errno = EINVAL;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
		dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
	txq_ibv->txq_ctrl = txq_ctrl;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return txq_ibv;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl.cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
	if (tmpl.qp)
		claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}
572faf2667fSNélio Laranjeiro 
573faf2667fSNélio Laranjeiro /**
574faf2667fSNélio Laranjeiro  * Get an Tx queue Verbs object.
575faf2667fSNélio Laranjeiro  *
576af4f09f2SNélio Laranjeiro  * @param dev
577af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
578faf2667fSNélio Laranjeiro  * @param idx
579faf2667fSNélio Laranjeiro  *   Queue index in DPDK Rx queue array
580faf2667fSNélio Laranjeiro  *
581faf2667fSNélio Laranjeiro  * @return
582faf2667fSNélio Laranjeiro  *   The Verbs object if it exists.
583faf2667fSNélio Laranjeiro  */
584faf2667fSNélio Laranjeiro struct mlx5_txq_ibv *
585af4f09f2SNélio Laranjeiro mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
586faf2667fSNélio Laranjeiro {
587af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
588faf2667fSNélio Laranjeiro 	struct mlx5_txq_ctrl *txq_ctrl;
589faf2667fSNélio Laranjeiro 
590faf2667fSNélio Laranjeiro 	if (idx >= priv->txqs_n)
591faf2667fSNélio Laranjeiro 		return NULL;
592faf2667fSNélio Laranjeiro 	if (!(*priv->txqs)[idx])
593faf2667fSNélio Laranjeiro 		return NULL;
594faf2667fSNélio Laranjeiro 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
595faf2667fSNélio Laranjeiro 	if (txq_ctrl->ibv) {
596faf2667fSNélio Laranjeiro 		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
597*a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
5980f99970bSNélio Laranjeiro 			dev->data->port_id, txq_ctrl->idx,
599faf2667fSNélio Laranjeiro 		      rte_atomic32_read(&txq_ctrl->ibv->refcnt));
600faf2667fSNélio Laranjeiro 	}
601faf2667fSNélio Laranjeiro 	return txq_ctrl->ibv;
602faf2667fSNélio Laranjeiro }
603faf2667fSNélio Laranjeiro 
604faf2667fSNélio Laranjeiro /**
 * Release a Tx Verbs queue object.
606faf2667fSNélio Laranjeiro  *
607faf2667fSNélio Laranjeiro  * @param txq_ibv
608faf2667fSNélio Laranjeiro  *   Verbs Tx queue object.
609faf2667fSNélio Laranjeiro  *
610faf2667fSNélio Laranjeiro  * @return
611925061b5SNélio Laranjeiro  *   1 while a reference on it exists, 0 when freed.
612faf2667fSNélio Laranjeiro  */
613faf2667fSNélio Laranjeiro int
614af4f09f2SNélio Laranjeiro mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
615faf2667fSNélio Laranjeiro {
616faf2667fSNélio Laranjeiro 	assert(txq_ibv);
617*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
6180f99970bSNélio Laranjeiro 		txq_ibv->txq_ctrl->priv->dev->data->port_id,
6190f99970bSNélio Laranjeiro 		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
620faf2667fSNélio Laranjeiro 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
6210e83b8e5SNelio Laranjeiro 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
6220e83b8e5SNelio Laranjeiro 		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
623faf2667fSNélio Laranjeiro 		LIST_REMOVE(txq_ibv, next);
624faf2667fSNélio Laranjeiro 		rte_free(txq_ibv);
625faf2667fSNélio Laranjeiro 		return 0;
626faf2667fSNélio Laranjeiro 	}
627925061b5SNélio Laranjeiro 	return 1;
628faf2667fSNélio Laranjeiro }
629faf2667fSNélio Laranjeiro 
630faf2667fSNélio Laranjeiro /**
631faf2667fSNélio Laranjeiro  * Return true if a single reference exists on the object.
632faf2667fSNélio Laranjeiro  *
633faf2667fSNélio Laranjeiro  * @param txq_ibv
634faf2667fSNélio Laranjeiro  *   Verbs Tx queue object.
635faf2667fSNélio Laranjeiro  */
636faf2667fSNélio Laranjeiro int
637af4f09f2SNélio Laranjeiro mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
638faf2667fSNélio Laranjeiro {
639faf2667fSNélio Laranjeiro 	assert(txq_ibv);
640faf2667fSNélio Laranjeiro 	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
641faf2667fSNélio Laranjeiro }
642faf2667fSNélio Laranjeiro 
643faf2667fSNélio Laranjeiro /**
644faf2667fSNélio Laranjeiro  * Verify the Verbs Tx queue list is empty
645faf2667fSNélio Laranjeiro  *
646af4f09f2SNélio Laranjeiro  * @param dev
647af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
648faf2667fSNélio Laranjeiro  *
649fb732b0aSNélio Laranjeiro  * @return
 *   The number of objects not released.
651faf2667fSNélio Laranjeiro  */
652faf2667fSNélio Laranjeiro int
653af4f09f2SNélio Laranjeiro mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
654faf2667fSNélio Laranjeiro {
655af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
656faf2667fSNélio Laranjeiro 	int ret = 0;
657faf2667fSNélio Laranjeiro 	struct mlx5_txq_ibv *txq_ibv;
658faf2667fSNélio Laranjeiro 
659faf2667fSNélio Laranjeiro 	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
660*a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
661*a170a30dSNélio Laranjeiro 			dev->data->port_id, txq_ibv->txq_ctrl->idx);
662faf2667fSNélio Laranjeiro 		++ret;
663faf2667fSNélio Laranjeiro 	}
664faf2667fSNélio Laranjeiro 	return ret;
665faf2667fSNélio Laranjeiro }
6666e78005aSNélio Laranjeiro 
6676e78005aSNélio Laranjeiro /**
6687fe24446SShahaf Shuler  * Set Tx queue parameters from device configuration.
6697fe24446SShahaf Shuler  *
6707fe24446SShahaf Shuler  * @param txq_ctrl
6717fe24446SShahaf Shuler  *   Pointer to Tx queue control structure.
6727fe24446SShahaf Shuler  */
6737fe24446SShahaf Shuler static void
6747fe24446SShahaf Shuler txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
6757fe24446SShahaf Shuler {
6767fe24446SShahaf Shuler 	struct priv *priv = txq_ctrl->priv;
6777fe24446SShahaf Shuler 	struct mlx5_dev_config *config = &priv->config;
6787fe24446SShahaf Shuler 	const unsigned int max_tso_inline =
6797fe24446SShahaf Shuler 		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
6807fe24446SShahaf Shuler 		 RTE_CACHE_LINE_SIZE);
6817fe24446SShahaf Shuler 	unsigned int txq_inline;
6827fe24446SShahaf Shuler 	unsigned int txqs_inline;
6837fe24446SShahaf Shuler 	unsigned int inline_max_packet_sz;
684af4f09f2SNélio Laranjeiro 	eth_tx_burst_t tx_pkt_burst =
685af4f09f2SNélio Laranjeiro 		mlx5_select_tx_function(txq_ctrl->priv->dev);
6867fe24446SShahaf Shuler 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
687dbccb4cdSShahaf Shuler 	int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
6887fe24446SShahaf Shuler 
6897fe24446SShahaf Shuler 	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
6907fe24446SShahaf Shuler 		0 : config->txq_inline;
6917fe24446SShahaf Shuler 	txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
6927fe24446SShahaf Shuler 		0 : config->txqs_inline;
6937fe24446SShahaf Shuler 	inline_max_packet_sz =
6947fe24446SShahaf Shuler 		(config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
6957fe24446SShahaf Shuler 		0 : config->inline_max_packet_sz;
6967fe24446SShahaf Shuler 	if (is_empw_func) {
6977fe24446SShahaf Shuler 		if (config->txq_inline == MLX5_ARG_UNSET)
6987fe24446SShahaf Shuler 			txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
6997fe24446SShahaf Shuler 		if (config->txqs_inline == MLX5_ARG_UNSET)
7007fe24446SShahaf Shuler 			txqs_inline = MLX5_EMPW_MIN_TXQS;
7017fe24446SShahaf Shuler 		if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
7027fe24446SShahaf Shuler 			inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
7037fe24446SShahaf Shuler 		txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
7047fe24446SShahaf Shuler 		txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
7057fe24446SShahaf Shuler 	}
7067fe24446SShahaf Shuler 	if (txq_inline && priv->txqs_n >= txqs_inline) {
7077fe24446SShahaf Shuler 		unsigned int ds_cnt;
7087fe24446SShahaf Shuler 
7097fe24446SShahaf Shuler 		txq_ctrl->txq.max_inline =
7107fe24446SShahaf Shuler 			((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
7117fe24446SShahaf Shuler 			 RTE_CACHE_LINE_SIZE);
7127fe24446SShahaf Shuler 		if (is_empw_func) {
7137fe24446SShahaf Shuler 			/* To minimize the size of data set, avoid requesting
7147fe24446SShahaf Shuler 			 * too large WQ.
7157fe24446SShahaf Shuler 			 */
7167fe24446SShahaf Shuler 			txq_ctrl->max_inline_data =
7177fe24446SShahaf Shuler 				((RTE_MIN(txq_inline,
7187fe24446SShahaf Shuler 					  inline_max_packet_sz) +
7197fe24446SShahaf Shuler 				  (RTE_CACHE_LINE_SIZE - 1)) /
7207fe24446SShahaf Shuler 				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
721dbccb4cdSShahaf Shuler 		} else if (tso) {
7227fe24446SShahaf Shuler 			int inline_diff = txq_ctrl->txq.max_inline -
7237fe24446SShahaf Shuler 					  max_tso_inline;
7247fe24446SShahaf Shuler 
7257fe24446SShahaf Shuler 			/*
7267fe24446SShahaf Shuler 			 * Adjust inline value as Verbs aggregates
7277fe24446SShahaf Shuler 			 * tso_inline and txq_inline fields.
7287fe24446SShahaf Shuler 			 */
7297fe24446SShahaf Shuler 			txq_ctrl->max_inline_data = inline_diff > 0 ?
7307fe24446SShahaf Shuler 					       inline_diff *
7317fe24446SShahaf Shuler 					       RTE_CACHE_LINE_SIZE :
7327fe24446SShahaf Shuler 					       0;
7337fe24446SShahaf Shuler 		} else {
7347fe24446SShahaf Shuler 			txq_ctrl->max_inline_data =
7357fe24446SShahaf Shuler 				txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
7367fe24446SShahaf Shuler 		}
7377fe24446SShahaf Shuler 		/*
7387fe24446SShahaf Shuler 		 * Check if the inline size is too large in a way which
7397fe24446SShahaf Shuler 		 * can make the WQE DS to overflow.
7407fe24446SShahaf Shuler 		 * Considering in calculation:
7417fe24446SShahaf Shuler 		 *      WQE CTRL (1 DS)
7427fe24446SShahaf Shuler 		 *      WQE ETH  (1 DS)
7437fe24446SShahaf Shuler 		 *      Inline part (N DS)
7447fe24446SShahaf Shuler 		 */
7457fe24446SShahaf Shuler 		ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
7467fe24446SShahaf Shuler 		if (ds_cnt > MLX5_DSEG_MAX) {
7477fe24446SShahaf Shuler 			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
7487fe24446SShahaf Shuler 						  MLX5_WQE_DWORD_SIZE;
7497fe24446SShahaf Shuler 
7507fe24446SShahaf Shuler 			max_inline = max_inline - (max_inline %
7517fe24446SShahaf Shuler 						   RTE_CACHE_LINE_SIZE);
752*a170a30dSNélio Laranjeiro 			DRV_LOG(WARNING,
753*a170a30dSNélio Laranjeiro 				"port %u txq inline is too large (%d) setting"
754*a170a30dSNélio Laranjeiro 				" it to the maximum possible: %d\n",
755*a170a30dSNélio Laranjeiro 				priv->dev->data->port_id, txq_inline,
756*a170a30dSNélio Laranjeiro 				max_inline);
7577fe24446SShahaf Shuler 			txq_ctrl->txq.max_inline = max_inline /
7587fe24446SShahaf Shuler 						   RTE_CACHE_LINE_SIZE;
7597fe24446SShahaf Shuler 		}
7607fe24446SShahaf Shuler 	}
761dbccb4cdSShahaf Shuler 	if (tso) {
7627fe24446SShahaf Shuler 		txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
7637fe24446SShahaf Shuler 		txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
7647fe24446SShahaf Shuler 						   max_tso_inline);
7657fe24446SShahaf Shuler 		txq_ctrl->txq.tso_en = 1;
7667fe24446SShahaf Shuler 	}
7677fe24446SShahaf Shuler 	txq_ctrl->txq.tunnel_en = config->tunnel_en;
7687fe24446SShahaf Shuler }
7697fe24446SShahaf Shuler 
7707fe24446SShahaf Shuler /**
7716e78005aSNélio Laranjeiro  * Create a DPDK Tx queue.
7726e78005aSNélio Laranjeiro  *
773af4f09f2SNélio Laranjeiro  * @param dev
774af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
7756e78005aSNélio Laranjeiro  * @param idx
7766e78005aSNélio Laranjeiro  *   TX queue index.
7776e78005aSNélio Laranjeiro  * @param desc
7786e78005aSNélio Laranjeiro  *   Number of descriptors to configure in queue.
7796e78005aSNélio Laranjeiro  * @param socket
7806e78005aSNélio Laranjeiro  *   NUMA socket on which memory must be allocated.
7816e78005aSNélio Laranjeiro  * @param[in] conf
7826e78005aSNélio Laranjeiro  *  Thresholds parameters.
7836e78005aSNélio Laranjeiro  *
7846e78005aSNélio Laranjeiro  * @return
785a6d83b6aSNélio Laranjeiro  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
7866e78005aSNélio Laranjeiro  */
7876e78005aSNélio Laranjeiro struct mlx5_txq_ctrl *
788af4f09f2SNélio Laranjeiro mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
789af4f09f2SNélio Laranjeiro 	     unsigned int socket, const struct rte_eth_txconf *conf)
7906e78005aSNélio Laranjeiro {
791af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
7926e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *tmpl;
7936e78005aSNélio Laranjeiro 
7946e78005aSNélio Laranjeiro 	tmpl = rte_calloc_socket("TXQ", 1,
7956e78005aSNélio Laranjeiro 				 sizeof(*tmpl) +
7966e78005aSNélio Laranjeiro 				 desc * sizeof(struct rte_mbuf *),
7976e78005aSNélio Laranjeiro 				 0, socket);
798a6d83b6aSNélio Laranjeiro 	if (!tmpl) {
799a6d83b6aSNélio Laranjeiro 		rte_errno = ENOMEM;
8006e78005aSNélio Laranjeiro 		return NULL;
801a6d83b6aSNélio Laranjeiro 	}
8026e78005aSNélio Laranjeiro 	assert(desc > MLX5_TX_COMP_THRESH);
803dbccb4cdSShahaf Shuler 	tmpl->txq.offloads = conf->offloads;
8046e78005aSNélio Laranjeiro 	tmpl->priv = priv;
805a49b617bSOlivier Gournet 	tmpl->socket = socket;
8066e78005aSNélio Laranjeiro 	tmpl->txq.elts_n = log2above(desc);
8070f99970bSNélio Laranjeiro 	tmpl->idx = idx;
8087fe24446SShahaf Shuler 	txq_set_params(tmpl);
8096e78005aSNélio Laranjeiro 	/* MRs will be registered in mp2mr[] later. */
810*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
811*a170a30dSNélio Laranjeiro 		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
812*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
813*a170a30dSNélio Laranjeiro 		dev->data->port_id, priv->device_attr.orig_attr.max_sge);
8146e78005aSNélio Laranjeiro 	tmpl->txq.elts =
8156e78005aSNélio Laranjeiro 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
8166e78005aSNélio Laranjeiro 	tmpl->txq.stats.idx = idx;
8176e78005aSNélio Laranjeiro 	rte_atomic32_inc(&tmpl->refcnt);
818*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
8190f99970bSNélio Laranjeiro 		idx, rte_atomic32_read(&tmpl->refcnt));
8206e78005aSNélio Laranjeiro 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
8216e78005aSNélio Laranjeiro 	return tmpl;
8226e78005aSNélio Laranjeiro }
8236e78005aSNélio Laranjeiro 
8246e78005aSNélio Laranjeiro /**
8256e78005aSNélio Laranjeiro  * Get a Tx queue.
8266e78005aSNélio Laranjeiro  *
827af4f09f2SNélio Laranjeiro  * @param dev
828af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
8296e78005aSNélio Laranjeiro  * @param idx
8306e78005aSNélio Laranjeiro  *   TX queue index.
8316e78005aSNélio Laranjeiro  *
8326e78005aSNélio Laranjeiro  * @return
8336e78005aSNélio Laranjeiro  *   A pointer to the queue if it exists.
8346e78005aSNélio Laranjeiro  */
8356e78005aSNélio Laranjeiro struct mlx5_txq_ctrl *
836af4f09f2SNélio Laranjeiro mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
8376e78005aSNélio Laranjeiro {
838af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
8396e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *ctrl = NULL;
8406e78005aSNélio Laranjeiro 
8416e78005aSNélio Laranjeiro 	if ((*priv->txqs)[idx]) {
8426e78005aSNélio Laranjeiro 		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
8436e78005aSNélio Laranjeiro 				    txq);
8446e78005aSNélio Laranjeiro 		unsigned int i;
8456e78005aSNélio Laranjeiro 
846af4f09f2SNélio Laranjeiro 		mlx5_txq_ibv_get(dev, idx);
8476e78005aSNélio Laranjeiro 		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
84856f08e16SNélio Laranjeiro 			if (ctrl->txq.mp2mr[i])
84956f08e16SNélio Laranjeiro 				claim_nonzero
850af4f09f2SNélio Laranjeiro 					(mlx5_mr_get(dev,
85156f08e16SNélio Laranjeiro 						     ctrl->txq.mp2mr[i]->mp));
8526e78005aSNélio Laranjeiro 		}
8536e78005aSNélio Laranjeiro 		rte_atomic32_inc(&ctrl->refcnt);
854*a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
855*a170a30dSNélio Laranjeiro 			dev->data->port_id,
8560f99970bSNélio Laranjeiro 			ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
8576e78005aSNélio Laranjeiro 	}
8586e78005aSNélio Laranjeiro 	return ctrl;
8596e78005aSNélio Laranjeiro }
8606e78005aSNélio Laranjeiro 
8616e78005aSNélio Laranjeiro /**
8626e78005aSNélio Laranjeiro  * Release a Tx queue.
8636e78005aSNélio Laranjeiro  *
864af4f09f2SNélio Laranjeiro  * @param dev
865af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
8666e78005aSNélio Laranjeiro  * @param idx
8676e78005aSNélio Laranjeiro  *   TX queue index.
8686e78005aSNélio Laranjeiro  *
8696e78005aSNélio Laranjeiro  * @return
870925061b5SNélio Laranjeiro  *   1 while a reference on it exists, 0 when freed.
8716e78005aSNélio Laranjeiro  */
8726e78005aSNélio Laranjeiro int
873af4f09f2SNélio Laranjeiro mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
8746e78005aSNélio Laranjeiro {
875af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
8766e78005aSNélio Laranjeiro 	unsigned int i;
8776e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *txq;
8784a984153SXueming Li 	size_t page_size = sysconf(_SC_PAGESIZE);
8796e78005aSNélio Laranjeiro 
8806e78005aSNélio Laranjeiro 	if (!(*priv->txqs)[idx])
8816e78005aSNélio Laranjeiro 		return 0;
8826e78005aSNélio Laranjeiro 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
883*a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
8840f99970bSNélio Laranjeiro 		txq->idx, rte_atomic32_read(&txq->refcnt));
885925061b5SNélio Laranjeiro 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
8866e78005aSNélio Laranjeiro 		txq->ibv = NULL;
8876e78005aSNélio Laranjeiro 	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
8886e78005aSNélio Laranjeiro 		if (txq->txq.mp2mr[i]) {
889af4f09f2SNélio Laranjeiro 			mlx5_mr_release(txq->txq.mp2mr[i]);
8906e78005aSNélio Laranjeiro 			txq->txq.mp2mr[i] = NULL;
8916e78005aSNélio Laranjeiro 		}
8926e78005aSNélio Laranjeiro 	}
8934a984153SXueming Li 	if (priv->uar_base)
8944a984153SXueming Li 		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
8954a984153SXueming Li 		       page_size), page_size);
8966e78005aSNélio Laranjeiro 	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
8976e78005aSNélio Laranjeiro 		txq_free_elts(txq);
8986e78005aSNélio Laranjeiro 		LIST_REMOVE(txq, next);
8996e78005aSNélio Laranjeiro 		rte_free(txq);
9006e78005aSNélio Laranjeiro 		(*priv->txqs)[idx] = NULL;
9016e78005aSNélio Laranjeiro 		return 0;
9026e78005aSNélio Laranjeiro 	}
903925061b5SNélio Laranjeiro 	return 1;
9046e78005aSNélio Laranjeiro }
9056e78005aSNélio Laranjeiro 
9066e78005aSNélio Laranjeiro /**
9076e78005aSNélio Laranjeiro  * Verify if the queue can be released.
9086e78005aSNélio Laranjeiro  *
909af4f09f2SNélio Laranjeiro  * @param dev
910af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
9116e78005aSNélio Laranjeiro  * @param idx
9126e78005aSNélio Laranjeiro  *   TX queue index.
9136e78005aSNélio Laranjeiro  *
9146e78005aSNélio Laranjeiro  * @return
9156e78005aSNélio Laranjeiro  *   1 if the queue can be released.
9166e78005aSNélio Laranjeiro  */
9176e78005aSNélio Laranjeiro int
918af4f09f2SNélio Laranjeiro mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
9196e78005aSNélio Laranjeiro {
920af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
9216e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *txq;
9226e78005aSNélio Laranjeiro 
9236e78005aSNélio Laranjeiro 	if (!(*priv->txqs)[idx])
9246e78005aSNélio Laranjeiro 		return -1;
9256e78005aSNélio Laranjeiro 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
9266e78005aSNélio Laranjeiro 	return (rte_atomic32_read(&txq->refcnt) == 1);
9276e78005aSNélio Laranjeiro }
9286e78005aSNélio Laranjeiro 
9296e78005aSNélio Laranjeiro /**
9306e78005aSNélio Laranjeiro  * Verify the Tx Queue list is empty
9316e78005aSNélio Laranjeiro  *
932af4f09f2SNélio Laranjeiro  * @param dev
933af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
9346e78005aSNélio Laranjeiro  *
935fb732b0aSNélio Laranjeiro  * @return
 *   The number of objects not released.
9376e78005aSNélio Laranjeiro  */
9386e78005aSNélio Laranjeiro int
939af4f09f2SNélio Laranjeiro mlx5_txq_verify(struct rte_eth_dev *dev)
9406e78005aSNélio Laranjeiro {
941af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
9426e78005aSNélio Laranjeiro 	struct mlx5_txq_ctrl *txq;
9436e78005aSNélio Laranjeiro 	int ret = 0;
9446e78005aSNélio Laranjeiro 
9456e78005aSNélio Laranjeiro 	LIST_FOREACH(txq, &priv->txqsctrl, next) {
946*a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
9470f99970bSNélio Laranjeiro 			dev->data->port_id, txq->idx);
9486e78005aSNélio Laranjeiro 		++ret;
9496e78005aSNélio Laranjeiro 	}
9506e78005aSNélio Laranjeiro 	return ret;
9516e78005aSNélio Laranjeiro }
952