xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 9ad3a41ab2a10db0059e1decdbf3ec038f348e08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_bus_pci.h>
17 #include <rte_common.h>
18 #include <rte_eal_paging.h>
19 
20 #include <mlx5_common.h>
21 #include <mlx5_common_mr.h>
22 #include <mlx5_malloc.h>
23 
24 #include "mlx5_defs.h"
25 #include "mlx5_utils.h"
26 #include "mlx5.h"
27 #include "mlx5_tx.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_autoconf.h"
30 
31 /**
32  * Allocate TX queue elements.
33  *
34  * @param txq_ctrl
35  *   Pointer to TX queue structure.
36  */
37 void
38 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
39 {
40 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
41 	unsigned int i;
42 
43 	for (i = 0; (i != elts_n); ++i)
44 		txq_ctrl->txq.elts[i] = NULL;
45 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
46 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
47 	txq_ctrl->txq.elts_head = 0;
48 	txq_ctrl->txq.elts_tail = 0;
49 	txq_ctrl->txq.elts_comp = 0;
50 }
51 
52 /**
53  * Free TX queue elements.
54  *
55  * @param txq_ctrl
56  *   Pointer to TX queue structure.
57  */
58 void
59 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
60 {
61 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
62 	const uint16_t elts_m = elts_n - 1;
63 	uint16_t elts_head = txq_ctrl->txq.elts_head;
64 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
65 	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
66 
67 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
68 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
69 	txq_ctrl->txq.elts_head = 0;
70 	txq_ctrl->txq.elts_tail = 0;
71 	txq_ctrl->txq.elts_comp = 0;
72 
73 	while (elts_tail != elts_head) {
74 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
75 
76 		MLX5_ASSERT(elt != NULL);
77 		rte_pktmbuf_free_seg(elt);
78 #ifdef RTE_LIBRTE_MLX5_DEBUG
79 		/* Poisoning. */
80 		memset(&(*elts)[elts_tail & elts_m],
81 		       0x77,
82 		       sizeof((*elts)[elts_tail & elts_m]));
83 #endif
84 		++elts_tail;
85 	}
86 }
87 
88 /**
89  * Returns the per-port supported offloads.
90  *
91  * @param dev
92  *   Pointer to Ethernet device.
93  *
94  * @return
95  *   Supported Tx offloads.
96  */
97 uint64_t
98 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
99 {
100 	struct mlx5_priv *priv = dev->data->dev_private;
101 	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
102 			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
103 	struct mlx5_port_config *config = &priv->config;
104 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
105 
106 	if (dev_cap->hw_csum)
107 		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
108 			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
109 			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
110 	if (dev_cap->tso)
111 		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
112 	if (priv->sh->config.tx_pp)
113 		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
114 	if (dev_cap->swp) {
115 		if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
116 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
117 		if (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP)
118 			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
119 				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
120 	}
121 	if (dev_cap->tunnel_en) {
122 		if (dev_cap->hw_csum)
123 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
124 		if (dev_cap->tso) {
125 			if (dev_cap->tunnel_en &
126 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
127 				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
128 			if (dev_cap->tunnel_en &
129 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
130 				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
131 			if (dev_cap->tunnel_en &
132 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
133 				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
134 		}
135 	}
136 	if (!config->mprq.enabled)
137 		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
138 	return offloads;
139 }
140 
141 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
142 static void
143 txq_sync_cq(struct mlx5_txq_data *txq)
144 {
145 	volatile struct mlx5_cqe *cqe;
146 	int ret, i;
147 
148 	i = txq->cqe_s;
149 	do {
150 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
151 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
152 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
153 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
154 				/* No new CQEs in completion queue. */
155 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
156 				break;
157 			}
158 		}
159 		++txq->cq_ci;
160 	} while (--i);
161 	/* Move all CQEs to HW ownership. */
162 	for (i = 0; i < txq->cqe_s; i++) {
163 		cqe = &txq->cqes[i];
164 		cqe->op_own = MLX5_CQE_INVALIDATE;
165 	}
166 	/* Resync CQE and WQE (WQ in reset state). */
167 	rte_io_wmb();
168 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
169 	txq->cq_pi = txq->cq_ci;
170 	rte_io_wmb();
171 }
172 
173 /**
174  * Tx queue stop. Device queue goes to the idle state,
175  * all involved mbufs are freed from elts/WQ.
176  *
177  * @param dev
178  *   Pointer to Ethernet device structure.
179  * @param idx
180  *   Tx queue index.
181  *
182  * @return
183  *   0 on success, a negative errno value otherwise and rte_errno is set.
184  */
185 int
186 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
187 {
188 	struct mlx5_priv *priv = dev->data->dev_private;
189 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
190 	struct mlx5_txq_ctrl *txq_ctrl =
191 			container_of(txq, struct mlx5_txq_ctrl, txq);
192 	int ret;
193 
194 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
195 	/* Move QP to RESET state. */
196 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
197 					   (uint8_t)priv->dev_port);
198 	if (ret)
199 		return ret;
200 	/* Handle all send completions. */
201 	txq_sync_cq(txq);
202 	/* Free elts stored in the SQ. */
203 	txq_free_elts(txq_ctrl);
204 	/* Prevent writing new pkts to SQ by setting no free WQE.*/
205 	txq->wqe_ci = txq->wqe_s;
206 	txq->wqe_pi = 0;
207 	txq->elts_comp = 0;
208 	/* Set the actual queue state. */
209 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
210 	return 0;
211 }
212 
213 /**
214  * Tx queue stop. Device queue goes to the idle state,
215  * all involved mbufs are freed from elts/WQ.
216  *
217  * @param dev
218  *   Pointer to Ethernet device structure.
219  * @param idx
220  *   Tx queue index.
221  *
222  * @return
223  *   0 on success, a negative errno value otherwise and rte_errno is set.
224  */
225 int
226 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
227 {
228 	int ret;
229 
230 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
231 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
232 		rte_errno = EINVAL;
233 		return -EINVAL;
234 	}
235 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
236 		return 0;
237 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
238 		ret = mlx5_mp_os_req_queue_control(dev, idx,
239 						   MLX5_MP_REQ_QUEUE_TX_STOP);
240 	} else {
241 		ret = mlx5_tx_queue_stop_primary(dev, idx);
242 	}
243 	return ret;
244 }
245 
246 /**
247  * Rx queue start. Device queue goes to the ready state,
248  * all required mbufs are allocated and WQ is replenished.
249  *
250  * @param dev
251  *   Pointer to Ethernet device structure.
252  * @param idx
253  *   RX queue index.
254  *
255  * @return
256  *   0 on success, a negative errno value otherwise and rte_errno is set.
257  */
258 int
259 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
260 {
261 	struct mlx5_priv *priv = dev->data->dev_private;
262 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
263 	struct mlx5_txq_ctrl *txq_ctrl =
264 			container_of(txq, struct mlx5_txq_ctrl, txq);
265 	int ret;
266 
267 	MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
268 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
269 					   MLX5_TXQ_MOD_RST2RDY,
270 					   (uint8_t)priv->dev_port);
271 	if (ret)
272 		return ret;
273 	txq_ctrl->txq.wqe_ci = 0;
274 	txq_ctrl->txq.wqe_pi = 0;
275 	txq_ctrl->txq.elts_comp = 0;
276 	/* Set the actual queue state. */
277 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
278 	return 0;
279 }
280 
281 /**
282  * Rx queue start. Device queue goes to the ready state,
283  * all required mbufs are allocated and WQ is replenished.
284  *
285  * @param dev
286  *   Pointer to Ethernet device structure.
287  * @param idx
288  *   RX queue index.
289  *
290  * @return
291  *   0 on success, a negative errno value otherwise and rte_errno is set.
292  */
293 int
294 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
295 {
296 	int ret;
297 
298 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
299 		DRV_LOG(ERR, "Hairpin queue can't be started");
300 		rte_errno = EINVAL;
301 		return -EINVAL;
302 	}
303 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
304 		return 0;
305 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
306 		ret = mlx5_mp_os_req_queue_control(dev, idx,
307 						   MLX5_MP_REQ_QUEUE_TX_START);
308 	} else {
309 		ret = mlx5_tx_queue_start_primary(dev, idx);
310 	}
311 	return ret;
312 }
313 
314 /**
315  * Tx queue presetup checks.
316  *
317  * @param dev
318  *   Pointer to Ethernet device structure.
319  * @param idx
320  *   Tx queue index.
321  * @param desc
322  *   Number of descriptors to configure in queue.
323  *
324  * @return
325  *   0 on success, a negative errno value otherwise and rte_errno is set.
326  */
327 static int
328 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
329 {
330 	struct mlx5_priv *priv = dev->data->dev_private;
331 
332 	if (*desc <= MLX5_TX_COMP_THRESH) {
333 		DRV_LOG(WARNING,
334 			"port %u number of descriptors requested for Tx queue"
335 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
336 			" instead of %u", dev->data->port_id, idx,
337 			MLX5_TX_COMP_THRESH + 1, *desc);
338 		*desc = MLX5_TX_COMP_THRESH + 1;
339 	}
340 	if (!rte_is_power_of_2(*desc)) {
341 		*desc = 1 << log2above(*desc);
342 		DRV_LOG(WARNING,
343 			"port %u increased number of descriptors in Tx queue"
344 			" %u to the next power of two (%d)",
345 			dev->data->port_id, idx, *desc);
346 	}
347 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
348 		dev->data->port_id, idx, *desc);
349 	if (idx >= priv->txqs_n) {
350 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
351 			dev->data->port_id, idx, priv->txqs_n);
352 		rte_errno = EOVERFLOW;
353 		return -rte_errno;
354 	}
355 	if (!mlx5_txq_releasable(dev, idx)) {
356 		rte_errno = EBUSY;
357 		DRV_LOG(ERR, "port %u unable to release queue index %u",
358 			dev->data->port_id, idx);
359 		return -rte_errno;
360 	}
361 	mlx5_txq_release(dev, idx);
362 	return 0;
363 }
364 
365 /**
366  * DPDK callback to configure a TX queue.
367  *
368  * @param dev
369  *   Pointer to Ethernet device structure.
370  * @param idx
371  *   TX queue index.
372  * @param desc
373  *   Number of descriptors to configure in queue.
374  * @param socket
375  *   NUMA socket on which memory must be allocated.
376  * @param[in] conf
377  *   Thresholds parameters.
378  *
379  * @return
380  *   0 on success, a negative errno value otherwise and rte_errno is set.
381  */
382 int
383 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
384 		    unsigned int socket, const struct rte_eth_txconf *conf)
385 {
386 	struct mlx5_priv *priv = dev->data->dev_private;
387 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
388 	struct mlx5_txq_ctrl *txq_ctrl =
389 		container_of(txq, struct mlx5_txq_ctrl, txq);
390 	int res;
391 
392 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
393 	if (res)
394 		return res;
395 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
396 	if (!txq_ctrl) {
397 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
398 			dev->data->port_id, idx);
399 		return -rte_errno;
400 	}
401 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
402 		dev->data->port_id, idx);
403 	(*priv->txqs)[idx] = &txq_ctrl->txq;
404 	return 0;
405 }
406 
407 /**
408  * DPDK callback to configure a TX hairpin queue.
409  *
410  * @param dev
411  *   Pointer to Ethernet device structure.
412  * @param idx
413  *   TX queue index.
414  * @param desc
415  *   Number of descriptors to configure in queue.
416  * @param[in] hairpin_conf
417  *   The hairpin binding configuration.
418  *
419  * @return
420  *   0 on success, a negative errno value otherwise and rte_errno is set.
421  */
422 int
423 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
424 			    uint16_t desc,
425 			    const struct rte_eth_hairpin_conf *hairpin_conf)
426 {
427 	struct mlx5_priv *priv = dev->data->dev_private;
428 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
429 	struct mlx5_txq_ctrl *txq_ctrl =
430 		container_of(txq, struct mlx5_txq_ctrl, txq);
431 	int res;
432 
433 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
434 	if (res)
435 		return res;
436 	if (hairpin_conf->peer_count != 1) {
437 		rte_errno = EINVAL;
438 		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
439 			" peer count is %u", dev->data->port_id,
440 			idx, hairpin_conf->peer_count);
441 		return -rte_errno;
442 	}
443 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
444 		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
445 			rte_errno = EINVAL;
446 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
447 				" index %u, Rx %u is larger than %u",
448 				dev->data->port_id, idx,
449 				hairpin_conf->peers[0].queue, priv->txqs_n);
450 			return -rte_errno;
451 		}
452 	} else {
453 		if (hairpin_conf->manual_bind == 0 ||
454 		    hairpin_conf->tx_explicit == 0) {
455 			rte_errno = EINVAL;
456 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
457 				" index %u peer port %u with attributes %u %u",
458 				dev->data->port_id, idx,
459 				hairpin_conf->peers[0].port,
460 				hairpin_conf->manual_bind,
461 				hairpin_conf->tx_explicit);
462 			return -rte_errno;
463 		}
464 	}
465 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc,	hairpin_conf);
466 	if (!txq_ctrl) {
467 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
468 			dev->data->port_id, idx);
469 		return -rte_errno;
470 	}
471 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
472 		dev->data->port_id, idx);
473 	(*priv->txqs)[idx] = &txq_ctrl->txq;
474 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
475 	return 0;
476 }
477 
478 /**
479  * DPDK callback to release a TX queue.
480  *
481  * @param dev
482  *   Pointer to Ethernet device structure.
483  * @param qid
484  *   Transmit queue index.
485  */
486 void
487 mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
488 {
489 	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
490 
491 	if (txq == NULL)
492 		return;
493 	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
494 		dev->data->port_id, qid);
495 	mlx5_txq_release(dev, qid);
496 }
497 
498 /**
499  * Remap UAR register of a Tx queue for secondary process.
500  *
501  * Remapped address is stored at the table in the process private structure of
502  * the device, indexed by queue index.
503  *
504  * @param txq_ctrl
505  *   Pointer to Tx queue control structure.
506  * @param fd
507  *   Verbs file descriptor to map UAR pages.
508  *
509  * @return
510  *   0 on success, a negative errno value otherwise and rte_errno is set.
511  */
512 static int
513 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
514 {
515 	struct mlx5_priv *priv = txq_ctrl->priv;
516 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
517 	struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
518 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
519 	void *addr;
520 	uintptr_t uar_va;
521 	uintptr_t offset;
522 	const size_t page_size = rte_mem_page_size();
523 	if (page_size == (size_t)-1) {
524 		DRV_LOG(ERR, "Failed to get mem page size");
525 		rte_errno = ENOMEM;
526 		return -rte_errno;
527 	}
528 
529 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
530 		return 0;
531 	MLX5_ASSERT(ppriv);
532 	/*
533 	 * As rdma-core, UARs are mapped in size of OS page
534 	 * size. Ref to libmlx5 function: mlx5_init_context()
535 	 */
536 	uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx].db;
537 	offset = uar_va & (page_size - 1); /* Offset in page. */
538 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
539 			   fd, txq_ctrl->uar_mmap_offset);
540 	if (!addr) {
541 		DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
542 			txq->port_id, txq->idx);
543 		rte_errno = ENXIO;
544 		return -rte_errno;
545 	}
546 	addr = RTE_PTR_ADD(addr, offset);
547 	ppriv->uar_table[txq->idx].db = addr;
548 #ifndef RTE_ARCH_64
549 	ppriv->uar_table[txq->idx].sl_p =
550 			primary_ppriv->uar_table[txq->idx].sl_p;
551 #endif
552 	return 0;
553 }
554 
555 /**
556  * Unmap UAR register of a Tx queue for secondary process.
557  *
558  * @param txq_ctrl
559  *   Pointer to Tx queue control structure.
560  */
561 static void
562 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
563 {
564 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
565 	void *addr;
566 	const size_t page_size = rte_mem_page_size();
567 	if (page_size == (size_t)-1) {
568 		DRV_LOG(ERR, "Failed to get mem page size");
569 		rte_errno = ENOMEM;
570 	}
571 
572 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
573 		return;
574 	addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
575 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
576 }
577 
578 /**
579  * Deinitialize Tx UAR registers for secondary process.
580  *
581  * @param dev
582  *   Pointer to Ethernet device.
583  */
584 void
585 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
586 {
587 	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
588 					dev->process_private;
589 	const size_t page_size = rte_mem_page_size();
590 	void *addr;
591 	unsigned int i;
592 
593 	if (page_size == (size_t)-1) {
594 		DRV_LOG(ERR, "Failed to get mem page size");
595 		return;
596 	}
597 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
598 	for (i = 0; i != ppriv->uar_table_sz; ++i) {
599 		if (!ppriv->uar_table[i].db)
600 			continue;
601 		addr = ppriv->uar_table[i].db;
602 		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
603 
604 	}
605 }
606 
607 /**
608  * Initialize Tx UAR registers for secondary process.
609  *
610  * @param dev
611  *   Pointer to Ethernet device.
612  * @param fd
613  *   Verbs file descriptor to map UAR pages.
614  *
615  * @return
616  *   0 on success, a negative errno value otherwise and rte_errno is set.
617  */
618 int
619 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
620 {
621 	struct mlx5_priv *priv = dev->data->dev_private;
622 	struct mlx5_txq_data *txq;
623 	struct mlx5_txq_ctrl *txq_ctrl;
624 	unsigned int i;
625 	int ret;
626 
627 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
628 	for (i = 0; i != priv->txqs_n; ++i) {
629 		if (!(*priv->txqs)[i])
630 			continue;
631 		txq = (*priv->txqs)[i];
632 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
633 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
634 			continue;
635 		MLX5_ASSERT(txq->idx == (uint16_t)i);
636 		ret = txq_uar_init_secondary(txq_ctrl, fd);
637 		if (ret)
638 			goto error;
639 	}
640 	return 0;
641 error:
642 	/* Rollback. */
643 	do {
644 		if (!(*priv->txqs)[i])
645 			continue;
646 		txq = (*priv->txqs)[i];
647 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
648 		txq_uar_uninit_secondary(txq_ctrl);
649 	} while (i--);
650 	return -rte_errno;
651 }
652 
653 /**
654  * Verify the Verbs Tx queue list is empty
655  *
656  * @param dev
657  *   Pointer to Ethernet device.
658  *
659  * @return
660  *   The number of object not released.
661  */
662 int
663 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
664 {
665 	struct mlx5_priv *priv = dev->data->dev_private;
666 	int ret = 0;
667 	struct mlx5_txq_obj *txq_obj;
668 
669 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
670 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
671 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
672 		++ret;
673 	}
674 	return ret;
675 }
676 
677 /**
678  * Calculate the total number of WQEBB for Tx queue.
679  *
680  * Simplified version of calc_sq_size() in rdma-core.
681  *
682  * @param txq_ctrl
683  *   Pointer to Tx queue control structure.
684  *
685  * @return
686  *   The number of WQEBB.
687  */
688 static int
689 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
690 {
691 	unsigned int wqe_size;
692 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
693 
694 	wqe_size = MLX5_WQE_CSEG_SIZE +
695 		   MLX5_WQE_ESEG_SIZE +
696 		   MLX5_WSEG_SIZE -
697 		   MLX5_ESEG_MIN_INLINE_SIZE +
698 		   txq_ctrl->max_inline_data;
699 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
700 }
701 
702 /**
703  * Calculate the maximal inline data size for Tx queue.
704  *
705  * @param txq_ctrl
706  *   Pointer to Tx queue control structure.
707  *
708  * @return
709  *   The maximal inline data size.
710  */
711 static unsigned int
712 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
713 {
714 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
715 	struct mlx5_priv *priv = txq_ctrl->priv;
716 	unsigned int wqe_size;
717 
718 	wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
719 	if (!wqe_size)
720 		return 0;
721 	/*
722 	 * This calculation is derived from tthe source of
723 	 * mlx5_calc_send_wqe() in rdma_core library.
724 	 */
725 	wqe_size = wqe_size * MLX5_WQE_SIZE -
726 		   MLX5_WQE_CSEG_SIZE -
727 		   MLX5_WQE_ESEG_SIZE -
728 		   MLX5_WSEG_SIZE -
729 		   MLX5_WSEG_SIZE +
730 		   MLX5_DSEG_MIN_INLINE_SIZE;
731 	return wqe_size;
732 }
733 
734 /**
735  * Set Tx queue parameters from device configuration.
736  *
737  * @param txq_ctrl
738  *   Pointer to Tx queue control structure.
739  */
740 static void
741 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
742 {
743 	struct mlx5_priv *priv = txq_ctrl->priv;
744 	struct mlx5_port_config *config = &priv->config;
745 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
746 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
747 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
748 	unsigned int inlen_mode; /* Minimal required Inline data. */
749 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
750 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
751 	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
752 					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
753 					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
754 					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
755 					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
756 	bool vlan_inline;
757 	unsigned int temp;
758 
759 	txq_ctrl->txq.fast_free =
760 		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
761 		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
762 		   !config->mprq.enabled);
763 	if (config->txqs_inline == MLX5_ARG_UNSET)
764 		txqs_inline =
765 #if defined(RTE_ARCH_ARM64)
766 		(priv->pci_dev && priv->pci_dev->id.device_id ==
767 			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
768 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
769 #endif
770 			MLX5_INLINE_MAX_TXQS;
771 	else
772 		txqs_inline = (unsigned int)config->txqs_inline;
773 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
774 		     MLX5_SEND_DEF_INLINE_LEN :
775 		     (unsigned int)config->txq_inline_max;
776 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
777 		     MLX5_EMPW_DEF_INLINE_LEN :
778 		     (unsigned int)config->txq_inline_mpw;
779 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
780 		     0 : (unsigned int)config->txq_inline_min;
781 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
782 		inlen_empw = 0;
783 	/*
784 	 * If there is requested minimal amount of data to inline
785 	 * we MUST enable inlining. This is a case for ConnectX-4
786 	 * which usually requires L2 inlined for correct operating
787 	 * and ConnectX-4 Lx which requires L2-L4 inlined to
788 	 * support E-Switch Flows.
789 	 */
790 	if (inlen_mode) {
791 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
792 			/*
793 			 * Optimize minimal inlining for single
794 			 * segment packets to fill one WQEBB
795 			 * without gaps.
796 			 */
797 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
798 		} else {
799 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
800 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
801 			       MLX5_ESEG_MIN_INLINE_SIZE;
802 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
803 		}
804 		if (temp != inlen_mode) {
805 			DRV_LOG(INFO,
806 				"port %u minimal required inline setting"
807 				" aligned from %u to %u",
808 				PORT_ID(priv), inlen_mode, temp);
809 			inlen_mode = temp;
810 		}
811 	}
812 	/*
813 	 * If port is configured to support VLAN insertion and device
814 	 * does not support this feature by HW (for NICs before ConnectX-5
815 	 * or in case of wqe_vlan_insert flag is not set) we must enable
816 	 * data inline on all queues because it is supported by single
817 	 * tx_burst routine.
818 	 */
819 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
820 	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
821 		      !config->hw_vlan_insert;
822 	/*
823 	 * If there are few Tx queues it is prioritized
824 	 * to save CPU cycles and disable data inlining at all.
825 	 */
826 	if (inlen_send && priv->txqs_n >= txqs_inline) {
827 		/*
828 		 * The data sent with ordinal MLX5_OPCODE_SEND
829 		 * may be inlined in Ethernet Segment, align the
830 		 * length accordingly to fit entire WQEBBs.
831 		 */
832 		temp = RTE_MAX(inlen_send,
833 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
834 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
835 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
836 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
837 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
838 				     MLX5_ESEG_MIN_INLINE_SIZE -
839 				     MLX5_WQE_CSEG_SIZE -
840 				     MLX5_WQE_ESEG_SIZE -
841 				     MLX5_WQE_DSEG_SIZE * 2);
842 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
843 		temp = RTE_MAX(temp, inlen_mode);
844 		if (temp != inlen_send) {
845 			DRV_LOG(INFO,
846 				"port %u ordinary send inline setting"
847 				" aligned from %u to %u",
848 				PORT_ID(priv), inlen_send, temp);
849 			inlen_send = temp;
850 		}
851 		/*
852 		 * Not aligned to cache lines, but to WQEs.
853 		 * First bytes of data (initial alignment)
854 		 * is going to be copied explicitly at the
855 		 * beginning of inlining buffer in Ethernet
856 		 * Segment.
857 		 */
858 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
859 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
860 					  MLX5_ESEG_MIN_INLINE_SIZE -
861 					  MLX5_WQE_CSEG_SIZE -
862 					  MLX5_WQE_ESEG_SIZE -
863 					  MLX5_WQE_DSEG_SIZE * 2);
864 	} else if (inlen_mode) {
865 		/*
866 		 * If minimal inlining is requested we must
867 		 * enable inlining in general, despite the
868 		 * number of configured queues. Ignore the
869 		 * txq_inline_max devarg, this is not
870 		 * full-featured inline.
871 		 */
872 		inlen_send = inlen_mode;
873 		inlen_empw = 0;
874 	} else if (vlan_inline) {
875 		/*
876 		 * Hardware does not report offload for
877 		 * VLAN insertion, we must enable data inline
878 		 * to implement feature by software.
879 		 */
880 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
881 		inlen_empw = 0;
882 	} else {
883 		inlen_send = 0;
884 		inlen_empw = 0;
885 	}
886 	txq_ctrl->txq.inlen_send = inlen_send;
887 	txq_ctrl->txq.inlen_mode = inlen_mode;
888 	txq_ctrl->txq.inlen_empw = 0;
889 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
890 		/*
891 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
892 		 * may be inlined in Data Segment, align the
893 		 * length accordingly to fit entire WQEBBs.
894 		 */
895 		temp = RTE_MAX(inlen_empw,
896 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
897 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
898 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
899 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
900 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
901 				     MLX5_DSEG_MIN_INLINE_SIZE -
902 				     MLX5_WQE_CSEG_SIZE -
903 				     MLX5_WQE_ESEG_SIZE -
904 				     MLX5_WQE_DSEG_SIZE);
905 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
906 		if (temp != inlen_empw) {
907 			DRV_LOG(INFO,
908 				"port %u enhanced empw inline setting"
909 				" aligned from %u to %u",
910 				PORT_ID(priv), inlen_empw, temp);
911 			inlen_empw = temp;
912 		}
913 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
914 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
915 					  MLX5_DSEG_MIN_INLINE_SIZE -
916 					  MLX5_WQE_CSEG_SIZE -
917 					  MLX5_WQE_ESEG_SIZE -
918 					  MLX5_WQE_DSEG_SIZE);
919 		txq_ctrl->txq.inlen_empw = inlen_empw;
920 	}
921 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
922 	if (tso) {
923 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
924 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
925 						    MLX5_MAX_TSO_HEADER);
926 		txq_ctrl->txq.tso_en = 1;
927 	}
928 	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
929 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
930 	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
931 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
932 	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
933 	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
934 	   (dev_cap->swp  & MLX5_SW_PARSING_TSO_CAP))
935 		txq_ctrl->txq.tunnel_en = 1;
936 	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
937 				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
938 				  txq_ctrl->txq.offloads) && (dev_cap->swp &
939 				  MLX5_SW_PARSING_TSO_CAP)) |
940 				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
941 				 txq_ctrl->txq.offloads) && (dev_cap->swp &
942 				 MLX5_SW_PARSING_CSUM_CAP));
943 }
944 
945 /**
946  * Adjust Tx queue data inline parameters for large queue sizes.
947  * The data inline feature requires multiple WQEs to fit the packets,
948  * and if the large amount of Tx descriptors is requested by application
949  * the total WQE amount may exceed the hardware capabilities. If the
950  * default inline setting are used we can try to adjust these ones and
951  * meet the hardware requirements and not exceed the queue size.
952  *
953  * @param txq_ctrl
954  *   Pointer to Tx queue control structure.
955  *
956  * @return
957  *   Zero on success, otherwise the parameters can not be adjusted.
958  */
959 static int
960 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
961 {
962 	struct mlx5_priv *priv = txq_ctrl->priv;
963 	struct mlx5_port_config *config = &priv->config;
964 	unsigned int max_inline;
965 
966 	max_inline = txq_calc_inline_max(txq_ctrl);
967 	if (!txq_ctrl->txq.inlen_send) {
968 		/*
969 		 * Inline data feature is not engaged at all.
970 		 * There is nothing to adjust.
971 		 */
972 		return 0;
973 	}
974 	if (txq_ctrl->max_inline_data <= max_inline) {
975 		/*
976 		 * The requested inline data length does not
977 		 * exceed queue capabilities.
978 		 */
979 		return 0;
980 	}
981 	if (txq_ctrl->txq.inlen_mode > max_inline) {
982 		DRV_LOG(ERR,
983 			"minimal data inline requirements (%u) are not"
984 			" satisfied (%u) on port %u, try the smaller"
985 			" Tx queue size (%d)",
986 			txq_ctrl->txq.inlen_mode, max_inline,
987 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
988 		goto error;
989 	}
990 	if (txq_ctrl->txq.inlen_send > max_inline &&
991 	    config->txq_inline_max != MLX5_ARG_UNSET &&
992 	    config->txq_inline_max > (int)max_inline) {
993 		DRV_LOG(ERR,
994 			"txq_inline_max requirements (%u) are not"
995 			" satisfied (%u) on port %u, try the smaller"
996 			" Tx queue size (%d)",
997 			txq_ctrl->txq.inlen_send, max_inline,
998 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
999 		goto error;
1000 	}
1001 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1002 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1003 	    config->txq_inline_mpw > (int)max_inline) {
1004 		DRV_LOG(ERR,
1005 			"txq_inline_mpw requirements (%u) are not"
1006 			" satisfied (%u) on port %u, try the smaller"
1007 			" Tx queue size (%d)",
1008 			txq_ctrl->txq.inlen_empw, max_inline,
1009 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1010 		goto error;
1011 	}
1012 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1013 		DRV_LOG(ERR,
1014 			"tso header inline requirements (%u) are not"
1015 			" satisfied (%u) on port %u, try the smaller"
1016 			" Tx queue size (%d)",
1017 			MLX5_MAX_TSO_HEADER, max_inline,
1018 			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1019 		goto error;
1020 	}
1021 	if (txq_ctrl->txq.inlen_send > max_inline) {
1022 		DRV_LOG(WARNING,
1023 			"adjust txq_inline_max (%u->%u)"
1024 			" due to large Tx queue on port %u",
1025 			txq_ctrl->txq.inlen_send, max_inline,
1026 			priv->dev_data->port_id);
1027 		txq_ctrl->txq.inlen_send = max_inline;
1028 	}
1029 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1030 		DRV_LOG(WARNING,
1031 			"adjust txq_inline_mpw (%u->%u)"
1032 			"due to large Tx queue on port %u",
1033 			txq_ctrl->txq.inlen_empw, max_inline,
1034 			priv->dev_data->port_id);
1035 		txq_ctrl->txq.inlen_empw = max_inline;
1036 	}
1037 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1038 					    txq_ctrl->txq.inlen_empw);
1039 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1040 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1041 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1042 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1043 		    !txq_ctrl->txq.inlen_empw);
1044 	return 0;
1045 error:
1046 	rte_errno = ENOMEM;
1047 	return -ENOMEM;
1048 }
1049 
1050 /**
1051  * Create a DPDK Tx queue.
1052  *
1053  * @param dev
1054  *   Pointer to Ethernet device.
1055  * @param idx
1056  *   TX queue index.
1057  * @param desc
1058  *   Number of descriptors to configure in queue.
1059  * @param socket
1060  *   NUMA socket on which memory must be allocated.
1061  * @param[in] conf
1062  *  Thresholds parameters.
1063  *
1064  * @return
1065  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1066  */
1067 struct mlx5_txq_ctrl *
1068 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1069 	     unsigned int socket, const struct rte_eth_txconf *conf)
1070 {
1071 	struct mlx5_priv *priv = dev->data->dev_private;
1072 	struct mlx5_txq_ctrl *tmpl;
1073 
1074 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1075 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1076 	if (!tmpl) {
1077 		rte_errno = ENOMEM;
1078 		return NULL;
1079 	}
1080 	if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
1081 			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1082 		/* rte_errno is already set. */
1083 		goto error;
1084 	}
1085 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1086 	tmpl->txq.offloads = conf->offloads |
1087 			     dev->data->dev_conf.txmode.offloads;
1088 	tmpl->priv = priv;
1089 	tmpl->socket = socket;
1090 	tmpl->txq.elts_n = log2above(desc);
1091 	tmpl->txq.elts_s = desc;
1092 	tmpl->txq.elts_m = desc - 1;
1093 	tmpl->txq.port_id = dev->data->port_id;
1094 	tmpl->txq.idx = idx;
1095 	txq_set_params(tmpl);
1096 	if (txq_adjust_params(tmpl))
1097 		goto error;
1098 	if (txq_calc_wqebb_cnt(tmpl) >
1099 	    priv->sh->dev_cap.max_qp_wr) {
1100 		DRV_LOG(ERR,
1101 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1102 			" try smaller queue size",
1103 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1104 			priv->sh->dev_cap.max_qp_wr);
1105 		rte_errno = ENOMEM;
1106 		goto error;
1107 	}
1108 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1109 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1110 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1111 	return tmpl;
1112 error:
1113 	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1114 	mlx5_free(tmpl);
1115 	return NULL;
1116 }
1117 
1118 /**
1119  * Create a DPDK Tx hairpin queue.
1120  *
1121  * @param dev
1122  *   Pointer to Ethernet device.
1123  * @param idx
1124  *   TX queue index.
1125  * @param desc
1126  *   Number of descriptors to configure in queue.
1127  * @param hairpin_conf
1128  *  The hairpin configuration.
1129  *
1130  * @return
1131  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1132  */
1133 struct mlx5_txq_ctrl *
1134 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1135 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1136 {
1137 	struct mlx5_priv *priv = dev->data->dev_private;
1138 	struct mlx5_txq_ctrl *tmpl;
1139 
1140 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1141 			   SOCKET_ID_ANY);
1142 	if (!tmpl) {
1143 		rte_errno = ENOMEM;
1144 		return NULL;
1145 	}
1146 	tmpl->priv = priv;
1147 	tmpl->socket = SOCKET_ID_ANY;
1148 	tmpl->txq.elts_n = log2above(desc);
1149 	tmpl->txq.port_id = dev->data->port_id;
1150 	tmpl->txq.idx = idx;
1151 	tmpl->hairpin_conf = *hairpin_conf;
1152 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1153 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1154 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1155 	return tmpl;
1156 }
1157 
1158 /**
1159  * Get a Tx queue.
1160  *
1161  * @param dev
1162  *   Pointer to Ethernet device.
1163  * @param idx
1164  *   TX queue index.
1165  *
1166  * @return
1167  *   A pointer to the queue if it exists.
1168  */
1169 struct mlx5_txq_ctrl *
1170 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1171 {
1172 	struct mlx5_priv *priv = dev->data->dev_private;
1173 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1174 	struct mlx5_txq_ctrl *ctrl = NULL;
1175 
1176 	if (txq_data) {
1177 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1178 		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1179 	}
1180 	return ctrl;
1181 }
1182 
1183 /**
1184  * Release a Tx queue.
1185  *
1186  * @param dev
1187  *   Pointer to Ethernet device.
1188  * @param idx
1189  *   TX queue index.
1190  *
1191  * @return
1192  *   1 while a reference on it exists, 0 when freed.
1193  */
1194 int
1195 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1196 {
1197 	struct mlx5_priv *priv = dev->data->dev_private;
1198 	struct mlx5_txq_ctrl *txq_ctrl;
1199 
1200 	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
1201 		return 0;
1202 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1203 	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1204 		return 1;
1205 	if (txq_ctrl->obj) {
1206 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1207 		LIST_REMOVE(txq_ctrl->obj, next);
1208 		mlx5_free(txq_ctrl->obj);
1209 		txq_ctrl->obj = NULL;
1210 	}
1211 	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1212 		if (txq_ctrl->txq.fcqs) {
1213 			mlx5_free(txq_ctrl->txq.fcqs);
1214 			txq_ctrl->txq.fcqs = NULL;
1215 		}
1216 		txq_free_elts(txq_ctrl);
1217 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1218 	}
1219 	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1220 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1221 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1222 		LIST_REMOVE(txq_ctrl, next);
1223 		mlx5_free(txq_ctrl);
1224 		(*priv->txqs)[idx] = NULL;
1225 	}
1226 	return 0;
1227 }
1228 
1229 /**
1230  * Verify if the queue can be released.
1231  *
1232  * @param dev
1233  *   Pointer to Ethernet device.
1234  * @param idx
1235  *   TX queue index.
1236  *
1237  * @return
1238  *   1 if the queue can be released.
1239  */
1240 int
1241 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1242 {
1243 	struct mlx5_priv *priv = dev->data->dev_private;
1244 	struct mlx5_txq_ctrl *txq;
1245 
1246 	if (!(*priv->txqs)[idx])
1247 		return -1;
1248 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1249 	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1250 }
1251 
1252 /**
1253  * Verify the Tx Queue list is empty
1254  *
1255  * @param dev
1256  *   Pointer to Ethernet device.
1257  *
1258  * @return
1259  *   The number of object not released.
1260  */
1261 int
1262 mlx5_txq_verify(struct rte_eth_dev *dev)
1263 {
1264 	struct mlx5_priv *priv = dev->data->dev_private;
1265 	struct mlx5_txq_ctrl *txq_ctrl;
1266 	int ret = 0;
1267 
1268 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1269 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1270 			dev->data->port_id, txq_ctrl->txq.idx);
1271 		++ret;
1272 	}
1273 	return ret;
1274 }
1275 
1276 /**
1277  * Set the Tx queue dynamic timestamp (mask and offset)
1278  *
1279  * @param[in] dev
1280  *   Pointer to the Ethernet device structure.
1281  */
1282 void
1283 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1284 {
1285 	struct mlx5_priv *priv = dev->data->dev_private;
1286 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1287 	struct mlx5_txq_data *data;
1288 	int off, nbit;
1289 	unsigned int i;
1290 	uint64_t mask = 0;
1291 
1292 	nbit = rte_mbuf_dynflag_lookup
1293 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1294 	off = rte_mbuf_dynfield_lookup
1295 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1296 	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1297 		mask = 1ULL << nbit;
1298 	for (i = 0; i != priv->txqs_n; ++i) {
1299 		data = (*priv->txqs)[i];
1300 		if (!data)
1301 			continue;
1302 		data->sh = sh;
1303 		data->ts_mask = mask;
1304 		data->ts_offset = off;
1305 	}
1306 }
1307