xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 03ab51eafda992874a48c392ca66ffb577fe2b71)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_bus_pci.h>
17 #include <rte_common.h>
18 #include <rte_eal_paging.h>
19 
20 #include <mlx5_common.h>
21 #include <mlx5_common_mr.h>
22 #include <mlx5_malloc.h>
23 
24 #include "mlx5_defs.h"
25 #include "mlx5_utils.h"
26 #include "mlx5.h"
27 #include "mlx5_tx.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_autoconf.h"
30 
31 /**
32  * Allocate TX queue elements.
33  *
34  * @param txq_ctrl
35  *   Pointer to TX queue structure.
36  */
37 void
38 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
39 {
40 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
41 	unsigned int i;
42 
43 	for (i = 0; (i != elts_n); ++i)
44 		txq_ctrl->txq.elts[i] = NULL;
45 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
46 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
47 	txq_ctrl->txq.elts_head = 0;
48 	txq_ctrl->txq.elts_tail = 0;
49 	txq_ctrl->txq.elts_comp = 0;
50 }
51 
52 /**
53  * Free TX queue elements.
54  *
55  * @param txq_ctrl
56  *   Pointer to TX queue structure.
57  */
58 void
59 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
60 {
61 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
62 	const uint16_t elts_m = elts_n - 1;
63 	uint16_t elts_head = txq_ctrl->txq.elts_head;
64 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
65 	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
66 
67 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
68 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
69 	txq_ctrl->txq.elts_head = 0;
70 	txq_ctrl->txq.elts_tail = 0;
71 	txq_ctrl->txq.elts_comp = 0;
72 
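	/*
	 * Free the mbufs still queued between tail and head, using the
	 * index mask to wrap around the elements ring.
	 */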
73 	while (elts_tail != elts_head) {
74 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
75 
76 		MLX5_ASSERT(elt != NULL);
77 		rte_pktmbuf_free_seg(elt);
78 #ifdef RTE_LIBRTE_MLX5_DEBUG
79 		/* Poisoning. */
80 		memset(&(*elts)[elts_tail & elts_m],
81 		       0x77,
82 		       sizeof((*elts)[elts_tail & elts_m]));
83 #endif
84 		++elts_tail;
85 	}
86 }
87 
88 /**
89  * Returns the per-port supported offloads.
90  *
91  * @param dev
92  *   Pointer to Ethernet device.
93  *
94  * @return
95  *   Supported Tx offloads.
96  */
97 uint64_t
98 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
99 {
100 	struct mlx5_priv *priv = dev->data->dev_private;
101 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
102 			     DEV_TX_OFFLOAD_VLAN_INSERT);
103 	struct mlx5_dev_config *config = &priv->config;
104 
105 	if (config->hw_csum)
106 		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
107 			     DEV_TX_OFFLOAD_UDP_CKSUM |
108 			     DEV_TX_OFFLOAD_TCP_CKSUM);
109 	if (config->tso)
110 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
111 	if (config->tx_pp)
112 		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
113 	if (config->swp) {
114 		if (config->hw_csum)
115 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
116 		if (config->tso)
117 			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
118 				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
119 	}
120 	if (config->tunnel_en) {
121 		if (config->hw_csum)
122 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
123 		if (config->tso)
124 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
125 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
126 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
127 	}
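	/*
	 * Fast mbuf free is not advertised when MPRQ is enabled
	 * (MPRQ mbufs presumably carry attached external buffers,
	 * which fast free cannot handle).
	 */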
128 	if (!config->mprq.enabled)
129 		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
130 	return offloads;
131 }
132 
133 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
134 static void
135 txq_sync_cq(struct mlx5_txq_data *txq)
136 {
137 	volatile struct mlx5_cqe *cqe;
138 	int ret, i;
139 
140 	i = txq->cqe_s;
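	/*
	 * Consume CQEs that are owned by software or flagged as errors
	 * and stop at the first CQE still owned by hardware; at most
	 * cqe_s entries are scanned.
	 */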
141 	do {
142 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
143 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
144 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
145 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
146 				/* No new CQEs in completion queue. */
147 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
148 				break;
149 			}
150 		}
151 		++txq->cq_ci;
152 	} while (--i);
153 	/* Move all CQEs to HW ownership. */
154 	for (i = 0; i < txq->cqe_s; i++) {
155 		cqe = &txq->cqes[i];
156 		cqe->op_own = MLX5_CQE_INVALIDATE;
157 	}
158 	/* Resync CQE and WQE (WQ in reset state). */
159 	rte_io_wmb();
160 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
161 	txq->cq_pi = txq->cq_ci;
162 	rte_io_wmb();
163 }
164 
165 /**
166  * Tx queue stop. Device queue goes to the idle state,
167  * all involved mbufs are freed from elts/WQ.
168  *
169  * @param dev
170  *   Pointer to Ethernet device structure.
171  * @param idx
172  *   Tx queue index.
173  *
174  * @return
175  *   0 on success, a negative errno value otherwise and rte_errno is set.
176  */
177 int
178 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
179 {
180 	struct mlx5_priv *priv = dev->data->dev_private;
181 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
182 	struct mlx5_txq_ctrl *txq_ctrl =
183 			container_of(txq, struct mlx5_txq_ctrl, txq);
184 	int ret;
185 
186 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
187 	/* Move QP to RESET state. */
188 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
189 					   (uint8_t)priv->dev_port);
190 	if (ret)
191 		return ret;
192 	/* Handle all send completions. */
193 	txq_sync_cq(txq);
194 	/* Free elts stored in the SQ. */
195 	txq_free_elts(txq_ctrl);
196 	/* Prevent writing new pkts to SQ by setting no free WQE. */
197 	txq->wqe_ci = txq->wqe_s;
198 	txq->wqe_pi = 0;
199 	txq->elts_comp = 0;
200 	/* Set the actual queue state. */
201 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
202 	return 0;
203 }
204 
205 /**
206  * Tx queue stop. Device queue goes to the idle state,
207  * all involved mbufs are freed from elts/WQ.
208  *
209  * @param dev
210  *   Pointer to Ethernet device structure.
211  * @param idx
212  *   Tx queue index.
213  *
214  * @return
215  *   0 on success, a negative errno value otherwise and rte_errno is set.
216  */
217 int
218 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
219 {
220 	int ret;
221 
222 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
223 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
224 		rte_errno = EINVAL;
225 		return -EINVAL;
226 	}
227 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
228 		return 0;
229 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
230 		ret = mlx5_mp_os_req_queue_control(dev, idx,
231 						   MLX5_MP_REQ_QUEUE_TX_STOP);
232 	} else {
233 		ret = mlx5_tx_queue_stop_primary(dev, idx);
234 	}
235 	return ret;
236 }
237 
238 /**
239  * Tx queue start. Device queue goes to the ready state
240  * and transmission on the queue can be resumed.
241  *
242  * @param dev
243  *   Pointer to Ethernet device structure.
244  * @param idx
245  *   Tx queue index.
246  *
247  * @return
248  *   0 on success, a negative errno value otherwise and rte_errno is set.
249  */
250 int
251 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
252 {
253 	struct mlx5_priv *priv = dev->data->dev_private;
254 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
255 	struct mlx5_txq_ctrl *txq_ctrl =
256 			container_of(txq, struct mlx5_txq_ctrl, txq);
257 	int ret;
258 
259 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
260 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
261 					   MLX5_TXQ_MOD_RST2RDY,
262 					   (uint8_t)priv->dev_port);
263 	if (ret)
264 		return ret;
265 	txq_ctrl->txq.wqe_ci = 0;
266 	txq_ctrl->txq.wqe_pi = 0;
267 	txq_ctrl->txq.elts_comp = 0;
268 	/* Set the actual queue state. */
269 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
270 	return 0;
271 }
272 
273 /**
274  * Tx queue start. Device queue goes to the ready state
275  * and transmission on the queue can be resumed.
276  *
277  * @param dev
278  *   Pointer to Ethernet device structure.
279  * @param idx
280  *   Tx queue index.
281  *
282  * @return
283  *   0 on success, a negative errno value otherwise and rte_errno is set.
284  */
285 int
286 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
287 {
288 	int ret;
289 
290 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
291 		DRV_LOG(ERR, "Hairpin queue can't be started");
292 		rte_errno = EINVAL;
293 		return -EINVAL;
294 	}
295 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
296 		return 0;
297 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
298 		ret = mlx5_mp_os_req_queue_control(dev, idx,
299 						   MLX5_MP_REQ_QUEUE_TX_START);
300 	} else {
301 		ret = mlx5_tx_queue_start_primary(dev, idx);
302 	}
303 	return ret;
304 }
305 
306 /**
307  * Tx queue presetup checks.
308  *
309  * @param dev
310  *   Pointer to Ethernet device structure.
311  * @param idx
312  *   Tx queue index.
313  * @param desc
314  *   Number of descriptors to configure in queue.
315  *
316  * @return
317  *   0 on success, a negative errno value otherwise and rte_errno is set.
318  */
319 static int
320 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
321 {
322 	struct mlx5_priv *priv = dev->data->dev_private;
323 
324 	if (*desc <= MLX5_TX_COMP_THRESH) {
325 		DRV_LOG(WARNING,
326 			"port %u number of descriptors requested for Tx queue"
327 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
328 			" instead of %u", dev->data->port_id, idx,
329 			MLX5_TX_COMP_THRESH + 1, *desc);
330 		*desc = MLX5_TX_COMP_THRESH + 1;
331 	}
332 	if (!rte_is_power_of_2(*desc)) {
333 		*desc = 1 << log2above(*desc);
334 		DRV_LOG(WARNING,
335 			"port %u increased number of descriptors in Tx queue"
336 			" %u to the next power of two (%d)",
337 			dev->data->port_id, idx, *desc);
338 	}
339 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
340 		dev->data->port_id, idx, *desc);
341 	if (idx >= priv->txqs_n) {
342 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
343 			dev->data->port_id, idx, priv->txqs_n);
344 		rte_errno = EOVERFLOW;
345 		return -rte_errno;
346 	}
347 	if (!mlx5_txq_releasable(dev, idx)) {
348 		rte_errno = EBUSY;
349 		DRV_LOG(ERR, "port %u unable to release queue index %u",
350 			dev->data->port_id, idx);
351 		return -rte_errno;
352 	}
353 	mlx5_txq_release(dev, idx);
354 	return 0;
355 }
356 
357 /**
358  * DPDK callback to configure a TX queue.
359  *
360  * @param dev
361  *   Pointer to Ethernet device structure.
362  * @param idx
363  *   TX queue index.
364  * @param desc
365  *   Number of descriptors to configure in queue.
366  * @param socket
367  *   NUMA socket on which memory must be allocated.
368  * @param[in] conf
369  *   Thresholds parameters.
370  *
371  * @return
372  *   0 on success, a negative errno value otherwise and rte_errno is set.
373  */
374 int
375 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
376 		    unsigned int socket, const struct rte_eth_txconf *conf)
377 {
378 	struct mlx5_priv *priv = dev->data->dev_private;
379 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
380 	struct mlx5_txq_ctrl *txq_ctrl =
381 		container_of(txq, struct mlx5_txq_ctrl, txq);
382 	int res;
383 
384 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
385 	if (res)
386 		return res;
387 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
388 	if (!txq_ctrl) {
389 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
390 			dev->data->port_id, idx);
391 		return -rte_errno;
392 	}
393 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
394 		dev->data->port_id, idx);
395 	(*priv->txqs)[idx] = &txq_ctrl->txq;
396 	return 0;
397 }
398 
399 /**
400  * DPDK callback to configure a TX hairpin queue.
401  *
402  * @param dev
403  *   Pointer to Ethernet device structure.
404  * @param idx
405  *   TX queue index.
406  * @param desc
407  *   Number of descriptors to configure in queue.
408  * @param[in] hairpin_conf
409  *   The hairpin binding configuration.
410  *
411  * @return
412  *   0 on success, a negative errno value otherwise and rte_errno is set.
413  */
414 int
415 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
416 			    uint16_t desc,
417 			    const struct rte_eth_hairpin_conf *hairpin_conf)
418 {
419 	struct mlx5_priv *priv = dev->data->dev_private;
420 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
421 	struct mlx5_txq_ctrl *txq_ctrl =
422 		container_of(txq, struct mlx5_txq_ctrl, txq);
423 	int res;
424 
425 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
426 	if (res)
427 		return res;
428 	if (hairpin_conf->peer_count != 1) {
429 		rte_errno = EINVAL;
430 		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
431 			" peer count is %u", dev->data->port_id,
432 			idx, hairpin_conf->peer_count);
433 		return -rte_errno;
434 	}
435 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
436 		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
437 			rte_errno = EINVAL;
438 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
439 				" index %u, peer Rx queue %u is out of range"
440 				" (>= %u)", dev->data->port_id, idx,
441 				hairpin_conf->peers[0].queue, priv->rxqs_n);
442 			return -rte_errno;
443 		}
444 	} else {
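		/*
		 * Cross-port hairpin: only the manual-bind plus explicit
		 * Tx flow combination is supported.
		 */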
445 		if (hairpin_conf->manual_bind == 0 ||
446 		    hairpin_conf->tx_explicit == 0) {
447 			rte_errno = EINVAL;
448 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
449 				" index %u peer port %u with attributes %u %u",
450 				dev->data->port_id, idx,
451 				hairpin_conf->peers[0].port,
452 				hairpin_conf->manual_bind,
453 				hairpin_conf->tx_explicit);
454 			return -rte_errno;
455 		}
456 	}
457 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
458 	if (!txq_ctrl) {
459 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
460 			dev->data->port_id, idx);
461 		return -rte_errno;
462 	}
463 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
464 		dev->data->port_id, idx);
465 	(*priv->txqs)[idx] = &txq_ctrl->txq;
466 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
467 	return 0;
468 }
469 
470 /**
471  * DPDK callback to release a TX queue.
472  *
473  * @param dev
474  *   Pointer to Ethernet device structure.
475  * @param qid
476  *   Transmit queue index.
477  */
478 void
479 mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
480 {
481 	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
482 
483 	if (txq == NULL)
484 		return;
485 	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
486 		dev->data->port_id, qid);
487 	mlx5_txq_release(dev, qid);
488 }
489 
490 /**
491  * Configure the doorbell register non-cached attribute.
492  *
493  * @param txq_ctrl
494  *   Pointer to Tx queue control structure.
495  * @param page_size
496  *   System page size.
497  */
498 static void
499 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
500 {
501 	struct mlx5_priv *priv = txq_ctrl->priv;
502 	off_t cmd;
503 
504 	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
505 	txq_ctrl->txq.db_nc = 0;
506 	/* Check the doorbell register mapping type. */
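	/*
	 * The UAR mmap offset encodes the allocation command in its
	 * upper bits; extracting it tells whether the doorbell page was
	 * mapped as non-cached (MLX5_MMAP_GET_NC_PAGES_CMD).
	 */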
507 	cmd = txq_ctrl->uar_mmap_offset / page_size;
508 	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
509 	cmd &= MLX5_UAR_MMAP_CMD_MASK;
510 	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
511 		txq_ctrl->txq.db_nc = 1;
512 }
513 
514 /**
515  * Initialize Tx UAR registers for primary process.
516  *
517  * @param txq_ctrl
518  *   Pointer to Tx queue control structure.
519  */
520 void
521 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
522 {
523 	struct mlx5_priv *priv = txq_ctrl->priv;
524 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
525 #ifndef RTE_ARCH_64
526 	unsigned int lock_idx;
527 #endif
528 	const size_t page_size = rte_mem_page_size();
529 	if (page_size == (size_t)-1) {
530 		DRV_LOG(ERR, "Failed to get mem page size");
531 		rte_errno = ENOMEM;
532 	}
533 
534 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
535 		return;
536 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
537 	MLX5_ASSERT(ppriv);
538 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
539 	txq_uar_ncattr_init(txq_ctrl, page_size);
540 #ifndef RTE_ARCH_64
541 	/* Assign a UAR lock according to the UAR page number. */
542 	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
543 		   MLX5_UAR_PAGE_NUM_MASK;
544 	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
545 #endif
546 }
547 
548 /**
549  * Remap UAR register of a Tx queue for secondary process.
550  *
551  * Remapped address is stored at the table in the process private structure of
552  * the device, indexed by queue index.
553  *
554  * @param txq_ctrl
555  *   Pointer to Tx queue control structure.
556  * @param fd
557  *   Verbs file descriptor to map UAR pages.
558  *
559  * @return
560  *   0 on success, a negative errno value otherwise and rte_errno is set.
561  */
562 static int
563 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
564 {
565 	struct mlx5_priv *priv = txq_ctrl->priv;
566 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
567 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
568 	void *addr;
569 	uintptr_t uar_va;
570 	uintptr_t offset;
571 	const size_t page_size = rte_mem_page_size();
572 	if (page_size == (size_t)-1) {
573 		DRV_LOG(ERR, "Failed to get mem page size");
574 		rte_errno = ENOMEM;
575 		return -rte_errno;
576 	}
577 
578 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
579 		return 0;
580 	MLX5_ASSERT(ppriv);
581 	/*
582 	 * As in rdma-core, UARs are mapped with OS page size
583 	 * granularity. Refer to libmlx5 function: mlx5_init_context().
584 	 */
585 	uar_va = (uintptr_t)txq_ctrl->bf_reg;
586 	offset = uar_va & (page_size - 1); /* Offset in page. */
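	/*
	 * Map the whole UAR page through the Verbs file descriptor and
	 * add the in-page offset of the doorbell register; the resulting
	 * address is stored in the per-process UAR table.
	 */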
587 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
588 			    fd, txq_ctrl->uar_mmap_offset);
589 	if (!addr) {
590 		DRV_LOG(ERR,
591 			"port %u mmap failed for BF reg of txq %u",
592 			txq->port_id, txq->idx);
593 		rte_errno = ENXIO;
594 		return -rte_errno;
595 	}
596 	addr = RTE_PTR_ADD(addr, offset);
597 	ppriv->uar_table[txq->idx] = addr;
598 	txq_uar_ncattr_init(txq_ctrl, page_size);
599 	return 0;
600 }
601 
602 /**
603  * Unmap UAR register of a Tx queue for secondary process.
604  *
605  * @param txq_ctrl
606  *   Pointer to Tx queue control structure.
607  */
608 static void
609 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
610 {
611 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
612 	void *addr;
613 	const size_t page_size = rte_mem_page_size();
614 	if (page_size == (size_t)-1) {
615 		DRV_LOG(ERR, "Failed to get mem page size");
616 		rte_errno = ENOMEM;
617 	}
618 
619 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
620 		return;
621 	addr = ppriv->uar_table[txq_ctrl->txq.idx];
622 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
623 }
624 
625 /**
626  * Deinitialize Tx UAR registers for secondary process.
627  *
628  * @param dev
629  *   Pointer to Ethernet device.
630  */
631 void
632 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
633 {
634 	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
635 					dev->process_private;
636 	const size_t page_size = rte_mem_page_size();
637 	void *addr;
638 	unsigned int i;
639 
640 	if (page_size == (size_t)-1) {
641 		DRV_LOG(ERR, "Failed to get mem page size");
642 		return;
643 	}
644 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
645 	for (i = 0; i != ppriv->uar_table_sz; ++i) {
646 		if (!ppriv->uar_table[i])
647 			continue;
648 		addr = ppriv->uar_table[i];
649 		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
650 
651 	}
652 }
653 
654 /**
655  * Initialize Tx UAR registers for secondary process.
656  *
657  * @param dev
658  *   Pointer to Ethernet device.
659  * @param fd
660  *   Verbs file descriptor to map UAR pages.
661  *
662  * @return
663  *   0 on success, a negative errno value otherwise and rte_errno is set.
664  */
665 int
666 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
667 {
668 	struct mlx5_priv *priv = dev->data->dev_private;
669 	struct mlx5_txq_data *txq;
670 	struct mlx5_txq_ctrl *txq_ctrl;
671 	unsigned int i;
672 	int ret;
673 
674 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
675 	for (i = 0; i != priv->txqs_n; ++i) {
676 		if (!(*priv->txqs)[i])
677 			continue;
678 		txq = (*priv->txqs)[i];
679 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
680 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
681 			continue;
682 		MLX5_ASSERT(txq->idx == (uint16_t)i);
683 		ret = txq_uar_init_secondary(txq_ctrl, fd);
684 		if (ret)
685 			goto error;
686 	}
687 	return 0;
688 error:
689 	/* Rollback. */
690 	do {
691 		if (!(*priv->txqs)[i])
692 			continue;
693 		txq = (*priv->txqs)[i];
694 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
695 		txq_uar_uninit_secondary(txq_ctrl);
696 	} while (i--);
697 	return -rte_errno;
698 }
699 
700 /**
701  * Verify that the Verbs Tx queue list is empty.
702  *
703  * @param dev
704  *   Pointer to Ethernet device.
705  *
706  * @return
707  *   The number of objects not released.
708  */
709 int
710 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
711 {
712 	struct mlx5_priv *priv = dev->data->dev_private;
713 	int ret = 0;
714 	struct mlx5_txq_obj *txq_obj;
715 
716 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
717 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
718 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
719 		++ret;
720 	}
721 	return ret;
722 }
723 
724 /**
725  * Calculate the total number of WQEBB for Tx queue.
726  *
727  * Simplified version of calc_sq_size() in rdma-core.
728  *
729  * @param txq_ctrl
730  *   Pointer to Tx queue control structure.
731  *
732  * @return
733  *   The number of WQEBB.
734  */
735 static int
736 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
737 {
738 	unsigned int wqe_size;
739 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
740 
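	/*
	 * Worst-case WQE size per descriptor: control segment, Ethernet
	 * segment and one extra segment, plus the maximal inline data,
	 * minus the minimal inline part already counted in the Ethernet
	 * segment. The total queue size (wqe_size * desc) is rounded up
	 * to a power of two and converted to WQEBB (MLX5_WQE_SIZE) units.
	 */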
741 	wqe_size = MLX5_WQE_CSEG_SIZE +
742 		   MLX5_WQE_ESEG_SIZE +
743 		   MLX5_WSEG_SIZE -
744 		   MLX5_ESEG_MIN_INLINE_SIZE +
745 		   txq_ctrl->max_inline_data;
746 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
747 }
748 
749 /**
750  * Calculate the maximal inline data size for Tx queue.
751  *
752  * @param txq_ctrl
753  *   Pointer to Tx queue control structure.
754  *
755  * @return
756  *   The maximal inline data size.
757  */
758 static unsigned int
759 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
760 {
761 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
762 	struct mlx5_priv *priv = txq_ctrl->priv;
763 	unsigned int wqe_size;
764 
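	/*
	 * Evenly distribute the device limit on work requests
	 * (max_qp_wr) among the requested descriptors to get the
	 * per-descriptor WQE budget; a zero budget disables inlining.
	 */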
765 	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
766 	if (!wqe_size)
767 		return 0;
768 	/*
769 	 * This calculation is derived from the source of
770 	 * mlx5_calc_send_wqe() in the rdma-core library.
771 	 */
772 	wqe_size = wqe_size * MLX5_WQE_SIZE -
773 		   MLX5_WQE_CSEG_SIZE -
774 		   MLX5_WQE_ESEG_SIZE -
775 		   MLX5_WSEG_SIZE -
776 		   MLX5_WSEG_SIZE +
777 		   MLX5_DSEG_MIN_INLINE_SIZE;
778 	return wqe_size;
779 }
780 
781 /**
782  * Set Tx queue parameters from device configuration.
783  *
784  * @param txq_ctrl
785  *   Pointer to Tx queue control structure.
786  */
787 static void
788 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
789 {
790 	struct mlx5_priv *priv = txq_ctrl->priv;
791 	struct mlx5_dev_config *config = &priv->config;
792 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
793 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
794 	unsigned int inlen_mode; /* Minimal required Inline data. */
795 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
796 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
797 	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
798 					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
799 					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
800 					    DEV_TX_OFFLOAD_IP_TNL_TSO |
801 					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
802 	bool vlan_inline;
803 	unsigned int temp;
804 
805 	txq_ctrl->txq.fast_free =
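	/*
	 * Fast mbuf free is enabled only when the application requested
	 * the offload, multi-segment Tx is off and MPRQ is not enabled,
	 * since fast free assumes single-pool, non-chained mbufs.
	 */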
806 		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
807 		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
808 		   !config->mprq.enabled);
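	/*
	 * Select the threshold on the number of Tx queues above which
	 * data inlining is enabled; BlueField (ARM64) has its own
	 * default when the devarg is not set.
	 */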
809 	if (config->txqs_inline == MLX5_ARG_UNSET)
810 		txqs_inline =
811 #if defined(RTE_ARCH_ARM64)
812 		(priv->pci_dev && priv->pci_dev->id.device_id ==
813 			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
814 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
815 #endif
816 			MLX5_INLINE_MAX_TXQS;
817 	else
818 		txqs_inline = (unsigned int)config->txqs_inline;
819 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
820 		     MLX5_SEND_DEF_INLINE_LEN :
821 		     (unsigned int)config->txq_inline_max;
822 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
823 		     MLX5_EMPW_DEF_INLINE_LEN :
824 		     (unsigned int)config->txq_inline_mpw;
825 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
826 		     0 : (unsigned int)config->txq_inline_min;
827 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
828 		inlen_empw = 0;
829 	/*
830 	 * If a minimal amount of data to inline is requested,
831 	 * we MUST enable inlining. This is the case for ConnectX-4,
832 	 * which usually requires L2 headers to be inlined for correct
833 	 * operation, and for ConnectX-4 Lx, which requires L2-L4
834 	 * headers to be inlined to support E-Switch Flows.
835 	 */
836 	if (inlen_mode) {
837 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
838 			/*
839 			 * Optimize minimal inlining for single
840 			 * segment packets to fill one WQEBB
841 			 * without gaps.
842 			 */
843 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
844 		} else {
845 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
846 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
847 			       MLX5_ESEG_MIN_INLINE_SIZE;
848 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
849 		}
850 		if (temp != inlen_mode) {
851 			DRV_LOG(INFO,
852 				"port %u minimal required inline setting"
853 				" aligned from %u to %u",
854 				PORT_ID(priv), inlen_mode, temp);
855 			inlen_mode = temp;
856 		}
857 	}
858 	/*
859 	 * If the port is configured to support VLAN insertion and the
860 	 * device does not support this feature in HW (NICs before
861 	 * ConnectX-5, or when the wqe_vlan_insert flag is not set),
862 	 * we must enable data inlining on all queues because a single
863 	 * tx_burst routine serves them all.
864 	 */
865 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
866 	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
867 		      !config->hw_vlan_insert;
868 	/*
869 	 * If there are only a few Tx queues, saving CPU cycles is
870 	 * prioritized and data inlining is disabled entirely.
871 	 */
872 	if (inlen_send && priv->txqs_n >= txqs_inline) {
873 		/*
874 		 * The data sent with an ordinary MLX5_OPCODE_SEND
875 		 * may be inlined in the Ethernet Segment; align the
876 		 * length accordingly to fit entire WQEBBs.
877 		 */
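		/*
		 * Worked example (constant values are assumptions for
		 * illustration only): with MLX5_ESEG_MIN_INLINE_SIZE = 18,
		 * MLX5_WQE_DSEG_SIZE = 16 and MLX5_WQE_SIZE = 64, a
		 * requested inlen_send of 200 bytes is adjusted as
		 * 200 - 34 -> RTE_ALIGN(166, 64) = 192 -> 192 + 34 = 226.
		 */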
878 		temp = RTE_MAX(inlen_send,
879 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
880 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
881 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
882 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
883 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
884 				     MLX5_ESEG_MIN_INLINE_SIZE -
885 				     MLX5_WQE_CSEG_SIZE -
886 				     MLX5_WQE_ESEG_SIZE -
887 				     MLX5_WQE_DSEG_SIZE * 2);
888 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
889 		temp = RTE_MAX(temp, inlen_mode);
890 		if (temp != inlen_send) {
891 			DRV_LOG(INFO,
892 				"port %u ordinary send inline setting"
893 				" aligned from %u to %u",
894 				PORT_ID(priv), inlen_send, temp);
895 			inlen_send = temp;
896 		}
897 		/*
898 		 * Not aligned to cache lines, but to WQEs.
899 		 * The first bytes of data (initial alignment)
900 		 * are going to be copied explicitly at the
901 		 * beginning of the inlining buffer in the
902 		 * Ethernet Segment.
903 		 */
904 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
905 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
906 					  MLX5_ESEG_MIN_INLINE_SIZE -
907 					  MLX5_WQE_CSEG_SIZE -
908 					  MLX5_WQE_ESEG_SIZE -
909 					  MLX5_WQE_DSEG_SIZE * 2);
910 	} else if (inlen_mode) {
911 		/*
912 		 * If minimal inlining is requested we must
913 		 * enable inlining in general, despite the
914 		 * number of configured queues. Ignore the
915 		 * txq_inline_max devarg, this is not
916 		 * full-featured inline.
917 		 */
918 		inlen_send = inlen_mode;
919 		inlen_empw = 0;
920 	} else if (vlan_inline) {
921 		/*
922 		 * Hardware does not report offload for
923 		 * VLAN insertion, we must enable data inline
924 		 * to implement feature by software.
925 		 */
926 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
927 		inlen_empw = 0;
928 	} else {
929 		inlen_send = 0;
930 		inlen_empw = 0;
931 	}
932 	txq_ctrl->txq.inlen_send = inlen_send;
933 	txq_ctrl->txq.inlen_mode = inlen_mode;
934 	txq_ctrl->txq.inlen_empw = 0;
935 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
936 		/*
937 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
938 		 * may be inlined in the Data Segment; align the
939 		 * length accordingly to fit entire WQEBBs.
940 		 */
941 		temp = RTE_MAX(inlen_empw,
942 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
943 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
944 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
945 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
946 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
947 				     MLX5_DSEG_MIN_INLINE_SIZE -
948 				     MLX5_WQE_CSEG_SIZE -
949 				     MLX5_WQE_ESEG_SIZE -
950 				     MLX5_WQE_DSEG_SIZE);
951 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
952 		if (temp != inlen_empw) {
953 			DRV_LOG(INFO,
954 				"port %u enhanced empw inline setting"
955 				" aligned from %u to %u",
956 				PORT_ID(priv), inlen_empw, temp);
957 			inlen_empw = temp;
958 		}
959 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
960 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
961 					  MLX5_DSEG_MIN_INLINE_SIZE -
962 					  MLX5_WQE_CSEG_SIZE -
963 					  MLX5_WQE_ESEG_SIZE -
964 					  MLX5_WQE_DSEG_SIZE);
965 		txq_ctrl->txq.inlen_empw = inlen_empw;
966 	}
967 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
968 	if (tso) {
969 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
970 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
971 						    MLX5_MAX_TSO_HEADER);
972 		txq_ctrl->txq.tso_en = 1;
973 	}
974 	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
975 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
976 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
977 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
978 				txq_ctrl->txq.offloads) && config->swp;
979 }
980 
981 /**
982  * Adjust Tx queue data inline parameters for large queue sizes.
983  * The data inline feature requires multiple WQEs to fit the packets,
984  * and if a large number of Tx descriptors is requested by the application
985  * the total WQE amount may exceed the hardware capabilities. If the
986  * default inline settings are used, we can try to adjust them to
987  * meet the hardware requirements without exceeding the queue size.
988  *
989  * @param txq_ctrl
990  *   Pointer to Tx queue control structure.
991  *
992  * @return
993  *   Zero on success, otherwise the parameters cannot be adjusted.
994  */
995 static int
996 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
997 {
998 	struct mlx5_priv *priv = txq_ctrl->priv;
999 	struct mlx5_dev_config *config = &priv->config;
1000 	unsigned int max_inline;
1001 
1002 	max_inline = txq_calc_inline_max(txq_ctrl);
1003 	if (!txq_ctrl->txq.inlen_send) {
1004 		/*
1005 		 * Inline data feature is not engaged at all.
1006 		 * There is nothing to adjust.
1007 		 */
1008 		return 0;
1009 	}
1010 	if (txq_ctrl->max_inline_data <= max_inline) {
1011 		/*
1012 		 * The requested inline data length does not
1013 		 * exceed queue capabilities.
1014 		 */
1015 		return 0;
1016 	}
1017 	if (txq_ctrl->txq.inlen_mode > max_inline) {
1018 		DRV_LOG(ERR,
1019 			"minimal data inline requirements (%u) are not"
1020 			" satisfied (%u) on port %u, try a smaller"
1021 			" Tx queue size (%d)",
1022 			txq_ctrl->txq.inlen_mode, max_inline,
1023 			priv->dev_data->port_id,
1024 			priv->sh->device_attr.max_qp_wr);
1025 		goto error;
1026 	}
1027 	if (txq_ctrl->txq.inlen_send > max_inline &&
1028 	    config->txq_inline_max != MLX5_ARG_UNSET &&
1029 	    config->txq_inline_max > (int)max_inline) {
1030 		DRV_LOG(ERR,
1031 			"txq_inline_max requirements (%u) are not"
1032 			" satisfied (%u) on port %u, try a smaller"
1033 			" Tx queue size (%d)",
1034 			txq_ctrl->txq.inlen_send, max_inline,
1035 			priv->dev_data->port_id,
1036 			priv->sh->device_attr.max_qp_wr);
1037 		goto error;
1038 	}
1039 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1040 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1041 	    config->txq_inline_mpw > (int)max_inline) {
1042 		DRV_LOG(ERR,
1043 			"txq_inline_mpw requirements (%u) are not"
1044 			" satisfied (%u) on port %u, try a smaller"
1045 			" Tx queue size (%d)",
1046 			txq_ctrl->txq.inlen_empw, max_inline,
1047 			priv->dev_data->port_id,
1048 			priv->sh->device_attr.max_qp_wr);
1049 		goto error;
1050 	}
1051 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1052 		DRV_LOG(ERR,
1053 			"tso header inline requirements (%u) are not"
1054 			" satisfied (%u) on port %u, try a smaller"
1055 			" Tx queue size (%d)",
1056 			MLX5_MAX_TSO_HEADER, max_inline,
1057 			priv->dev_data->port_id,
1058 			priv->sh->device_attr.max_qp_wr);
1059 		goto error;
1060 	}
1061 	if (txq_ctrl->txq.inlen_send > max_inline) {
1062 		DRV_LOG(WARNING,
1063 			"adjust txq_inline_max (%u->%u)"
1064 			" due to large Tx queue on port %u",
1065 			txq_ctrl->txq.inlen_send, max_inline,
1066 			priv->dev_data->port_id);
1067 		txq_ctrl->txq.inlen_send = max_inline;
1068 	}
1069 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1070 		DRV_LOG(WARNING,
1071 			"adjust txq_inline_mpw (%u->%u)"
1072 			" due to large Tx queue on port %u",
1073 			txq_ctrl->txq.inlen_empw, max_inline,
1074 			priv->dev_data->port_id);
1075 		txq_ctrl->txq.inlen_empw = max_inline;
1076 	}
1077 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1078 					    txq_ctrl->txq.inlen_empw);
1079 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1080 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1081 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1082 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1083 		    !txq_ctrl->txq.inlen_empw);
1084 	return 0;
1085 error:
1086 	rte_errno = ENOMEM;
1087 	return -ENOMEM;
1088 }
1089 
1090 /**
1091  * Create a DPDK Tx queue.
1092  *
1093  * @param dev
1094  *   Pointer to Ethernet device.
1095  * @param idx
1096  *   TX queue index.
1097  * @param desc
1098  *   Number of descriptors to configure in queue.
1099  * @param socket
1100  *   NUMA socket on which memory must be allocated.
1101  * @param[in] conf
1102  *   Thresholds parameters.
1103  *
1104  * @return
1105  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1106  */
1107 struct mlx5_txq_ctrl *
1108 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1109 	     unsigned int socket, const struct rte_eth_txconf *conf)
1110 {
1111 	struct mlx5_priv *priv = dev->data->dev_private;
1112 	struct mlx5_txq_ctrl *tmpl;
1113 
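	/*
	 * The elts[] array of mbuf pointers resides at the tail of the
	 * control structure, so one allocation covers both the structure
	 * and desc pointer slots.
	 */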
1114 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1115 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1116 	if (!tmpl) {
1117 		rte_errno = ENOMEM;
1118 		return NULL;
1119 	}
1120 	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1121 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1122 		/* rte_errno is already set. */
1123 		goto error;
1124 	}
1125 	/* Save pointer of global generation number to check memory event. */
1126 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1127 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1128 	tmpl->txq.offloads = conf->offloads |
1129 			     dev->data->dev_conf.txmode.offloads;
1130 	tmpl->priv = priv;
1131 	tmpl->socket = socket;
1132 	tmpl->txq.elts_n = log2above(desc);
1133 	tmpl->txq.elts_s = desc;
1134 	tmpl->txq.elts_m = desc - 1;
1135 	tmpl->txq.port_id = dev->data->port_id;
1136 	tmpl->txq.idx = idx;
1137 	txq_set_params(tmpl);
1138 	if (txq_adjust_params(tmpl))
1139 		goto error;
1140 	if (txq_calc_wqebb_cnt(tmpl) >
1141 	    priv->sh->device_attr.max_qp_wr) {
1142 		DRV_LOG(ERR,
1143 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1144 			" try a smaller queue size",
1145 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1146 			priv->sh->device_attr.max_qp_wr);
1147 		rte_errno = ENOMEM;
1148 		goto error;
1149 	}
1150 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1151 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1152 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1153 	return tmpl;
1154 error:
1155 	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1156 	mlx5_free(tmpl);
1157 	return NULL;
1158 }
1159 
1160 /**
1161  * Create a DPDK Tx hairpin queue.
1162  *
1163  * @param dev
1164  *   Pointer to Ethernet device.
1165  * @param idx
1166  *   TX queue index.
1167  * @param desc
1168  *   Number of descriptors to configure in queue.
1169  * @param hairpin_conf
1170  *   The hairpin configuration.
1171  *
1172  * @return
1173  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1174  */
1175 struct mlx5_txq_ctrl *
1176 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1177 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1178 {
1179 	struct mlx5_priv *priv = dev->data->dev_private;
1180 	struct mlx5_txq_ctrl *tmpl;
1181 
1182 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1183 			   SOCKET_ID_ANY);
1184 	if (!tmpl) {
1185 		rte_errno = ENOMEM;
1186 		return NULL;
1187 	}
1188 	tmpl->priv = priv;
1189 	tmpl->socket = SOCKET_ID_ANY;
1190 	tmpl->txq.elts_n = log2above(desc);
1191 	tmpl->txq.port_id = dev->data->port_id;
1192 	tmpl->txq.idx = idx;
1193 	tmpl->hairpin_conf = *hairpin_conf;
1194 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1195 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1196 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1197 	return tmpl;
1198 }
1199 
1200 /**
1201  * Get a Tx queue.
1202  *
1203  * @param dev
1204  *   Pointer to Ethernet device.
1205  * @param idx
1206  *   TX queue index.
1207  *
1208  * @return
1209  *   A pointer to the queue if it exists.
1210  */
1211 struct mlx5_txq_ctrl *
1212 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1213 {
1214 	struct mlx5_priv *priv = dev->data->dev_private;
1215 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1216 	struct mlx5_txq_ctrl *ctrl = NULL;
1217 
1218 	if (txq_data) {
1219 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1220 		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1221 	}
1222 	return ctrl;
1223 }
1224 
1225 /**
1226  * Release a Tx queue.
1227  *
1228  * @param dev
1229  *   Pointer to Ethernet device.
1230  * @param idx
1231  *   TX queue index.
1232  *
1233  * @return
1234  *   1 while a reference on it exists, 0 when freed.
1235  */
1236 int
1237 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1238 {
1239 	struct mlx5_priv *priv = dev->data->dev_private;
1240 	struct mlx5_txq_ctrl *txq_ctrl;
1241 
1242 	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
1243 		return 0;
1244 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1245 	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1246 		return 1;
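	/*
	 * At most one reference remains: destroy the HW queue object and
	 * release the Tx elements; the control structure itself is freed
	 * only once the reference count drops to zero.
	 */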
1247 	if (txq_ctrl->obj) {
1248 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1249 		LIST_REMOVE(txq_ctrl->obj, next);
1250 		mlx5_free(txq_ctrl->obj);
1251 		txq_ctrl->obj = NULL;
1252 	}
1253 	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1254 		if (txq_ctrl->txq.fcqs) {
1255 			mlx5_free(txq_ctrl->txq.fcqs);
1256 			txq_ctrl->txq.fcqs = NULL;
1257 		}
1258 		txq_free_elts(txq_ctrl);
1259 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1260 	}
1261 	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1262 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1263 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1264 		LIST_REMOVE(txq_ctrl, next);
1265 		mlx5_free(txq_ctrl);
1266 		(*priv->txqs)[idx] = NULL;
1267 	}
1268 	return 0;
1269 }
1270 
1271 /**
1272  * Verify if the queue can be released.
1273  *
1274  * @param dev
1275  *   Pointer to Ethernet device.
1276  * @param idx
1277  *   TX queue index.
1278  *
1279  * @return
1280  *   1 if the queue can be released.
1281  */
1282 int
1283 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1284 {
1285 	struct mlx5_priv *priv = dev->data->dev_private;
1286 	struct mlx5_txq_ctrl *txq;
1287 
1288 	if (!(*priv->txqs)[idx])
1289 		return -1;
1290 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1291 	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1292 }
1293 
1294 /**
1295  * Verify that the Tx queue list is empty.
1296  *
1297  * @param dev
1298  *   Pointer to Ethernet device.
1299  *
1300  * @return
1301  *   The number of objects not released.
1302  */
1303 int
1304 mlx5_txq_verify(struct rte_eth_dev *dev)
1305 {
1306 	struct mlx5_priv *priv = dev->data->dev_private;
1307 	struct mlx5_txq_ctrl *txq_ctrl;
1308 	int ret = 0;
1309 
1310 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1311 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1312 			dev->data->port_id, txq_ctrl->txq.idx);
1313 		++ret;
1314 	}
1315 	return ret;
1316 }
1317 
1318 /**
1319  * Set the Tx queue dynamic timestamp (mask and offset).
1320  *
1321  * @param[in] dev
1322  *   Pointer to the Ethernet device structure.
1323  */
1324 void
1325 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1326 {
1327 	struct mlx5_priv *priv = dev->data->dev_private;
1328 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1329 	struct mlx5_txq_data *data;
1330 	int off, nbit;
1331 	unsigned int i;
1332 	uint64_t mask = 0;
1333 
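	/*
	 * Look up the dynamic mbuf flag and field registered for Tx
	 * scheduling; a non-zero mask is propagated to the queues only
	 * when Tx packet pacing is active (sh->txpp.refcnt != 0).
	 */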
1334 	nbit = rte_mbuf_dynflag_lookup
1335 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1336 	off = rte_mbuf_dynfield_lookup
1337 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1338 	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1339 		mask = 1ULL << nbit;
1340 	for (i = 0; i != priv->txqs_n; ++i) {
1341 		data = (*priv->txqs)[i];
1342 		if (!data)
1343 			continue;
1344 		data->sh = sh;
1345 		data->ts_mask = mask;
1346 		data->ts_offset = off;
1347 	}
1348 }
1349