xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_eal_paging.h>
18 
19 #include <mlx5_common.h>
20 #include <mlx5_common_mr.h>
21 #include <mlx5_malloc.h>
22 
23 #include "mlx5_defs.h"
24 #include "mlx5_utils.h"
25 #include "mlx5.h"
26 #include "mlx5_rxtx.h"
27 #include "mlx5_autoconf.h"
28 
29 /**
30  * Allocate TX queue elements.
31  *
32  * @param txq_ctrl
33  *   Pointer to TX queue structure.
34  */
35 void
36 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
37 {
38 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
39 	unsigned int i;
40 
41 	for (i = 0; (i != elts_n); ++i)
42 		txq_ctrl->txq.elts[i] = NULL;
43 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
44 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
45 	txq_ctrl->txq.elts_head = 0;
46 	txq_ctrl->txq.elts_tail = 0;
47 	txq_ctrl->txq.elts_comp = 0;
48 }
49 
50 /**
51  * Free TX queue elements.
52  *
53  * @param txq_ctrl
54  *   Pointer to TX queue structure.
55  */
56 void
57 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
58 {
59 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
60 	const uint16_t elts_m = elts_n - 1;
61 	uint16_t elts_head = txq_ctrl->txq.elts_head;
62 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
63 	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
64 
65 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
66 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
67 	txq_ctrl->txq.elts_head = 0;
68 	txq_ctrl->txq.elts_tail = 0;
69 	txq_ctrl->txq.elts_comp = 0;
70 
71 	while (elts_tail != elts_head) {
72 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
73 
74 		MLX5_ASSERT(elt != NULL);
75 		rte_pktmbuf_free_seg(elt);
76 #ifdef RTE_LIBRTE_MLX5_DEBUG
77 		/* Poisoning. */
78 		memset(&(*elts)[elts_tail & elts_m],
79 		       0x77,
80 		       sizeof((*elts)[elts_tail & elts_m]));
81 #endif
82 		++elts_tail;
83 	}
84 }
85 
86 /**
87  * Returns the per-port supported offloads.
88  *
89  * @param dev
90  *   Pointer to Ethernet device.
91  *
92  * @return
93  *   Supported Tx offloads.
94  */
95 uint64_t
96 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
97 {
98 	struct mlx5_priv *priv = dev->data->dev_private;
99 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
100 			     DEV_TX_OFFLOAD_VLAN_INSERT);
101 	struct mlx5_dev_config *config = &priv->config;
102 
103 	if (config->hw_csum)
104 		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
105 			     DEV_TX_OFFLOAD_UDP_CKSUM |
106 			     DEV_TX_OFFLOAD_TCP_CKSUM);
107 	if (config->tso)
108 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
109 	if (config->tx_pp)
110 		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
111 	if (config->swp) {
112 		if (config->hw_csum)
113 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
114 		if (config->tso)
115 			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
116 				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
117 	}
118 	if (config->tunnel_en) {
119 		if (config->hw_csum)
120 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
121 		if (config->tso)
122 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
123 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
124 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
125 	}
126 	if (!config->mprq.enabled)
127 		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
128 	return offloads;
129 }
130 
131 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
132 static void
133 txq_sync_cq(struct mlx5_txq_data *txq)
134 {
135 	volatile struct mlx5_cqe *cqe;
136 	int ret, i;
137 
138 	i = txq->cqe_s;
139 	do {
140 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
141 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
142 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
143 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
144 				/* No new CQEs in completion queue. */
145 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
146 				break;
147 			}
148 		}
149 		++txq->cq_ci;
150 	} while (--i);
151 	/* Move all CQEs to HW ownership. */
152 	for (i = 0; i < txq->cqe_s; i++) {
153 		cqe = &txq->cqes[i];
154 		cqe->op_own = MLX5_CQE_INVALIDATE;
155 	}
156 	/* Resync CQE and WQE (WQ in reset state). */
157 	rte_io_wmb();
158 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
159 	txq->cq_pi = txq->cq_ci;
160 	rte_io_wmb();
161 }
162 
163 /**
164  * Tx queue stop. Device queue goes to the idle state,
165  * all involved mbufs are freed from elts/WQ.
166  *
167  * @param dev
168  *   Pointer to Ethernet device structure.
169  * @param idx
170  *   Tx queue index.
171  *
172  * @return
173  *   0 on success, a negative errno value otherwise and rte_errno is set.
174  */
175 int
176 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
177 {
178 	struct mlx5_priv *priv = dev->data->dev_private;
179 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
180 	struct mlx5_txq_ctrl *txq_ctrl =
181 			container_of(txq, struct mlx5_txq_ctrl, txq);
182 	int ret;
183 
184 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
185 	/* Move QP to RESET state. */
186 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
187 					   (uint8_t)priv->dev_port);
188 	if (ret)
189 		return ret;
190 	/* Handle all send completions. */
191 	txq_sync_cq(txq);
192 	/* Free elts stored in the SQ. */
193 	txq_free_elts(txq_ctrl);
194 	/* Prevent writing new pkts to SQ by setting no free WQE.*/
195 	txq->wqe_ci = txq->wqe_s;
196 	txq->wqe_pi = 0;
197 	txq->elts_comp = 0;
198 	/* Set the actual queue state. */
199 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
200 	return 0;
201 }
202 
203 /**
204  * Tx queue stop. Device queue goes to the idle state,
205  * all involved mbufs are freed from elts/WQ.
206  *
207  * @param dev
208  *   Pointer to Ethernet device structure.
209  * @param idx
210  *   Tx queue index.
211  *
212  * @return
213  *   0 on success, a negative errno value otherwise and rte_errno is set.
214  */
215 int
216 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
217 {
218 	int ret;
219 
220 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
221 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
222 		rte_errno = EINVAL;
223 		return -EINVAL;
224 	}
225 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
226 		return 0;
227 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
228 		ret = mlx5_mp_os_req_queue_control(dev, idx,
229 						   MLX5_MP_REQ_QUEUE_TX_STOP);
230 	} else {
231 		ret = mlx5_tx_queue_stop_primary(dev, idx);
232 	}
233 	return ret;
234 }
235 
236 /**
237  * Rx queue start. Device queue goes to the ready state,
238  * all required mbufs are allocated and WQ is replenished.
239  *
240  * @param dev
241  *   Pointer to Ethernet device structure.
242  * @param idx
243  *   RX queue index.
244  *
245  * @return
246  *   0 on success, a negative errno value otherwise and rte_errno is set.
247  */
248 int
249 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
250 {
251 	struct mlx5_priv *priv = dev->data->dev_private;
252 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
253 	struct mlx5_txq_ctrl *txq_ctrl =
254 			container_of(txq, struct mlx5_txq_ctrl, txq);
255 	int ret;
256 
257 	MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
258 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
259 					   MLX5_TXQ_MOD_RST2RDY,
260 					   (uint8_t)priv->dev_port);
261 	if (ret)
262 		return ret;
263 	txq_ctrl->txq.wqe_ci = 0;
264 	txq_ctrl->txq.wqe_pi = 0;
265 	txq_ctrl->txq.elts_comp = 0;
266 	/* Set the actual queue state. */
267 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
268 	return 0;
269 }
270 
271 /**
272  * Rx queue start. Device queue goes to the ready state,
273  * all required mbufs are allocated and WQ is replenished.
274  *
275  * @param dev
276  *   Pointer to Ethernet device structure.
277  * @param idx
278  *   RX queue index.
279  *
280  * @return
281  *   0 on success, a negative errno value otherwise and rte_errno is set.
282  */
283 int
284 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
285 {
286 	int ret;
287 
288 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
289 		DRV_LOG(ERR, "Hairpin queue can't be started");
290 		rte_errno = EINVAL;
291 		return -EINVAL;
292 	}
293 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
294 		return 0;
295 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
296 		ret = mlx5_mp_os_req_queue_control(dev, idx,
297 						   MLX5_MP_REQ_QUEUE_TX_START);
298 	} else {
299 		ret = mlx5_tx_queue_start_primary(dev, idx);
300 	}
301 	return ret;
302 }
303 
304 /**
305  * Tx queue presetup checks.
306  *
307  * @param dev
308  *   Pointer to Ethernet device structure.
309  * @param idx
310  *   Tx queue index.
311  * @param desc
312  *   Number of descriptors to configure in queue.
313  *
314  * @return
315  *   0 on success, a negative errno value otherwise and rte_errno is set.
316  */
317 static int
318 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
319 {
320 	struct mlx5_priv *priv = dev->data->dev_private;
321 
322 	if (*desc <= MLX5_TX_COMP_THRESH) {
323 		DRV_LOG(WARNING,
324 			"port %u number of descriptors requested for Tx queue"
325 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
326 			" instead of %u", dev->data->port_id, idx,
327 			MLX5_TX_COMP_THRESH + 1, *desc);
328 		*desc = MLX5_TX_COMP_THRESH + 1;
329 	}
330 	if (!rte_is_power_of_2(*desc)) {
331 		*desc = 1 << log2above(*desc);
332 		DRV_LOG(WARNING,
333 			"port %u increased number of descriptors in Tx queue"
334 			" %u to the next power of two (%d)",
335 			dev->data->port_id, idx, *desc);
336 	}
337 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
338 		dev->data->port_id, idx, *desc);
339 	if (idx >= priv->txqs_n) {
340 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
341 			dev->data->port_id, idx, priv->txqs_n);
342 		rte_errno = EOVERFLOW;
343 		return -rte_errno;
344 	}
345 	if (!mlx5_txq_releasable(dev, idx)) {
346 		rte_errno = EBUSY;
347 		DRV_LOG(ERR, "port %u unable to release queue index %u",
348 			dev->data->port_id, idx);
349 		return -rte_errno;
350 	}
351 	mlx5_txq_release(dev, idx);
352 	return 0;
353 }
354 
355 /**
356  * DPDK callback to configure a TX queue.
357  *
358  * @param dev
359  *   Pointer to Ethernet device structure.
360  * @param idx
361  *   TX queue index.
362  * @param desc
363  *   Number of descriptors to configure in queue.
364  * @param socket
365  *   NUMA socket on which memory must be allocated.
366  * @param[in] conf
367  *   Thresholds parameters.
368  *
369  * @return
370  *   0 on success, a negative errno value otherwise and rte_errno is set.
371  */
372 int
373 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
374 		    unsigned int socket, const struct rte_eth_txconf *conf)
375 {
376 	struct mlx5_priv *priv = dev->data->dev_private;
377 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
378 	struct mlx5_txq_ctrl *txq_ctrl =
379 		container_of(txq, struct mlx5_txq_ctrl, txq);
380 	int res;
381 
382 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
383 	if (res)
384 		return res;
385 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
386 	if (!txq_ctrl) {
387 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
388 			dev->data->port_id, idx);
389 		return -rte_errno;
390 	}
391 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
392 		dev->data->port_id, idx);
393 	(*priv->txqs)[idx] = &txq_ctrl->txq;
394 	return 0;
395 }
396 
397 /**
398  * DPDK callback to configure a TX hairpin queue.
399  *
400  * @param dev
401  *   Pointer to Ethernet device structure.
402  * @param idx
403  *   TX queue index.
404  * @param desc
405  *   Number of descriptors to configure in queue.
406  * @param[in] hairpin_conf
407  *   The hairpin binding configuration.
408  *
409  * @return
410  *   0 on success, a negative errno value otherwise and rte_errno is set.
411  */
412 int
413 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
414 			    uint16_t desc,
415 			    const struct rte_eth_hairpin_conf *hairpin_conf)
416 {
417 	struct mlx5_priv *priv = dev->data->dev_private;
418 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
419 	struct mlx5_txq_ctrl *txq_ctrl =
420 		container_of(txq, struct mlx5_txq_ctrl, txq);
421 	int res;
422 
423 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
424 	if (res)
425 		return res;
426 	if (hairpin_conf->peer_count != 1) {
427 		rte_errno = EINVAL;
428 		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
429 			" peer count is %u", dev->data->port_id,
430 			idx, hairpin_conf->peer_count);
431 		return -rte_errno;
432 	}
433 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
434 		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
435 			rte_errno = EINVAL;
436 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
437 				" index %u, Rx %u is larger than %u",
438 				dev->data->port_id, idx,
439 				hairpin_conf->peers[0].queue, priv->txqs_n);
440 			return -rte_errno;
441 		}
442 	} else {
443 		if (hairpin_conf->manual_bind == 0 ||
444 		    hairpin_conf->tx_explicit == 0) {
445 			rte_errno = EINVAL;
446 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
447 				" index %u peer port %u with attributes %u %u",
448 				dev->data->port_id, idx,
449 				hairpin_conf->peers[0].port,
450 				hairpin_conf->manual_bind,
451 				hairpin_conf->tx_explicit);
452 			return -rte_errno;
453 		}
454 	}
455 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc,	hairpin_conf);
456 	if (!txq_ctrl) {
457 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
458 			dev->data->port_id, idx);
459 		return -rte_errno;
460 	}
461 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
462 		dev->data->port_id, idx);
463 	(*priv->txqs)[idx] = &txq_ctrl->txq;
464 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
465 	return 0;
466 }
467 
468 /**
469  * DPDK callback to release a TX queue.
470  *
471  * @param dpdk_txq
472  *   Generic TX queue pointer.
473  */
474 void
475 mlx5_tx_queue_release(void *dpdk_txq)
476 {
477 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
478 	struct mlx5_txq_ctrl *txq_ctrl;
479 	struct mlx5_priv *priv;
480 	unsigned int i;
481 
482 	if (txq == NULL)
483 		return;
484 	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
485 	priv = txq_ctrl->priv;
486 	for (i = 0; (i != priv->txqs_n); ++i)
487 		if ((*priv->txqs)[i] == txq) {
488 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
489 				PORT_ID(priv), txq->idx);
490 			mlx5_txq_release(ETH_DEV(priv), i);
491 			break;
492 		}
493 }
494 
495 /**
496  * Configure the doorbell register non-cached attribute.
497  *
498  * @param txq_ctrl
499  *   Pointer to Tx queue control structure.
500  * @param page_size
501  *   Systme page size
502  */
503 static void
504 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
505 {
506 	struct mlx5_priv *priv = txq_ctrl->priv;
507 	off_t cmd;
508 
509 	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
510 	txq_ctrl->txq.db_nc = 0;
511 	/* Check the doorbell register mapping type. */
512 	cmd = txq_ctrl->uar_mmap_offset / page_size;
513 	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
514 	cmd &= MLX5_UAR_MMAP_CMD_MASK;
515 	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
516 		txq_ctrl->txq.db_nc = 1;
517 }
518 
519 /**
520  * Initialize Tx UAR registers for primary process.
521  *
522  * @param txq_ctrl
523  *   Pointer to Tx queue control structure.
524  */
525 void
526 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
527 {
528 	struct mlx5_priv *priv = txq_ctrl->priv;
529 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
530 #ifndef RTE_ARCH_64
531 	unsigned int lock_idx;
532 #endif
533 	const size_t page_size = rte_mem_page_size();
534 	if (page_size == (size_t)-1) {
535 		DRV_LOG(ERR, "Failed to get mem page size");
536 		rte_errno = ENOMEM;
537 	}
538 
539 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
540 		return;
541 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
542 	MLX5_ASSERT(ppriv);
543 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
544 	txq_uar_ncattr_init(txq_ctrl, page_size);
545 #ifndef RTE_ARCH_64
546 	/* Assign an UAR lock according to UAR page number */
547 	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
548 		   MLX5_UAR_PAGE_NUM_MASK;
549 	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
550 #endif
551 }
552 
553 /**
554  * Remap UAR register of a Tx queue for secondary process.
555  *
556  * Remapped address is stored at the table in the process private structure of
557  * the device, indexed by queue index.
558  *
559  * @param txq_ctrl
560  *   Pointer to Tx queue control structure.
561  * @param fd
562  *   Verbs file descriptor to map UAR pages.
563  *
564  * @return
565  *   0 on success, a negative errno value otherwise and rte_errno is set.
566  */
567 static int
568 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
569 {
570 	struct mlx5_priv *priv = txq_ctrl->priv;
571 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
572 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
573 	void *addr;
574 	uintptr_t uar_va;
575 	uintptr_t offset;
576 	const size_t page_size = rte_mem_page_size();
577 	if (page_size == (size_t)-1) {
578 		DRV_LOG(ERR, "Failed to get mem page size");
579 		rte_errno = ENOMEM;
580 		return -rte_errno;
581 	}
582 
583 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
584 		return 0;
585 	MLX5_ASSERT(ppriv);
586 	/*
587 	 * As rdma-core, UARs are mapped in size of OS page
588 	 * size. Ref to libmlx5 function: mlx5_init_context()
589 	 */
590 	uar_va = (uintptr_t)txq_ctrl->bf_reg;
591 	offset = uar_va & (page_size - 1); /* Offset in page. */
592 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
593 			    fd, txq_ctrl->uar_mmap_offset);
594 	if (!addr) {
595 		DRV_LOG(ERR,
596 			"port %u mmap failed for BF reg of txq %u",
597 			txq->port_id, txq->idx);
598 		rte_errno = ENXIO;
599 		return -rte_errno;
600 	}
601 	addr = RTE_PTR_ADD(addr, offset);
602 	ppriv->uar_table[txq->idx] = addr;
603 	txq_uar_ncattr_init(txq_ctrl, page_size);
604 	return 0;
605 }
606 
607 /**
608  * Unmap UAR register of a Tx queue for secondary process.
609  *
610  * @param txq_ctrl
611  *   Pointer to Tx queue control structure.
612  */
613 static void
614 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
615 {
616 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
617 	void *addr;
618 	const size_t page_size = rte_mem_page_size();
619 	if (page_size == (size_t)-1) {
620 		DRV_LOG(ERR, "Failed to get mem page size");
621 		rte_errno = ENOMEM;
622 	}
623 
624 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
625 		return;
626 	addr = ppriv->uar_table[txq_ctrl->txq.idx];
627 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
628 }
629 
630 /**
631  * Deinitialize Tx UAR registers for secondary process.
632  *
633  * @param dev
634  *   Pointer to Ethernet device.
635  */
636 void
637 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
638 {
639 	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
640 					dev->process_private;
641 	const size_t page_size = rte_mem_page_size();
642 	void *addr;
643 	unsigned int i;
644 
645 	if (page_size == (size_t)-1) {
646 		DRV_LOG(ERR, "Failed to get mem page size");
647 		return;
648 	}
649 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
650 	for (i = 0; i != ppriv->uar_table_sz; ++i) {
651 		if (!ppriv->uar_table[i])
652 			continue;
653 		addr = ppriv->uar_table[i];
654 		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
655 
656 	}
657 }
658 
659 /**
660  * Initialize Tx UAR registers for secondary process.
661  *
662  * @param dev
663  *   Pointer to Ethernet device.
664  * @param fd
665  *   Verbs file descriptor to map UAR pages.
666  *
667  * @return
668  *   0 on success, a negative errno value otherwise and rte_errno is set.
669  */
670 int
671 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
672 {
673 	struct mlx5_priv *priv = dev->data->dev_private;
674 	struct mlx5_txq_data *txq;
675 	struct mlx5_txq_ctrl *txq_ctrl;
676 	unsigned int i;
677 	int ret;
678 
679 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
680 	for (i = 0; i != priv->txqs_n; ++i) {
681 		if (!(*priv->txqs)[i])
682 			continue;
683 		txq = (*priv->txqs)[i];
684 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
685 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
686 			continue;
687 		MLX5_ASSERT(txq->idx == (uint16_t)i);
688 		ret = txq_uar_init_secondary(txq_ctrl, fd);
689 		if (ret)
690 			goto error;
691 	}
692 	return 0;
693 error:
694 	/* Rollback. */
695 	do {
696 		if (!(*priv->txqs)[i])
697 			continue;
698 		txq = (*priv->txqs)[i];
699 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
700 		txq_uar_uninit_secondary(txq_ctrl);
701 	} while (i--);
702 	return -rte_errno;
703 }
704 
705 /**
706  * Verify the Verbs Tx queue list is empty
707  *
708  * @param dev
709  *   Pointer to Ethernet device.
710  *
711  * @return
712  *   The number of object not released.
713  */
714 int
715 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
716 {
717 	struct mlx5_priv *priv = dev->data->dev_private;
718 	int ret = 0;
719 	struct mlx5_txq_obj *txq_obj;
720 
721 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
722 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
723 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
724 		++ret;
725 	}
726 	return ret;
727 }
728 
729 /**
730  * Calculate the total number of WQEBB for Tx queue.
731  *
732  * Simplified version of calc_sq_size() in rdma-core.
733  *
734  * @param txq_ctrl
735  *   Pointer to Tx queue control structure.
736  *
737  * @return
738  *   The number of WQEBB.
739  */
740 static int
741 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
742 {
743 	unsigned int wqe_size;
744 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
745 
746 	wqe_size = MLX5_WQE_CSEG_SIZE +
747 		   MLX5_WQE_ESEG_SIZE +
748 		   MLX5_WSEG_SIZE -
749 		   MLX5_ESEG_MIN_INLINE_SIZE +
750 		   txq_ctrl->max_inline_data;
751 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
752 }
753 
754 /**
755  * Calculate the maximal inline data size for Tx queue.
756  *
757  * @param txq_ctrl
758  *   Pointer to Tx queue control structure.
759  *
760  * @return
761  *   The maximal inline data size.
762  */
763 static unsigned int
764 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
765 {
766 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
767 	struct mlx5_priv *priv = txq_ctrl->priv;
768 	unsigned int wqe_size;
769 
770 	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
771 	if (!wqe_size)
772 		return 0;
773 	/*
774 	 * This calculation is derived from tthe source of
775 	 * mlx5_calc_send_wqe() in rdma_core library.
776 	 */
777 	wqe_size = wqe_size * MLX5_WQE_SIZE -
778 		   MLX5_WQE_CSEG_SIZE -
779 		   MLX5_WQE_ESEG_SIZE -
780 		   MLX5_WSEG_SIZE -
781 		   MLX5_WSEG_SIZE +
782 		   MLX5_DSEG_MIN_INLINE_SIZE;
783 	return wqe_size;
784 }
785 
786 /**
787  * Set Tx queue parameters from device configuration.
788  *
789  * @param txq_ctrl
790  *   Pointer to Tx queue control structure.
791  */
792 static void
793 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
794 {
795 	struct mlx5_priv *priv = txq_ctrl->priv;
796 	struct mlx5_dev_config *config = &priv->config;
797 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
798 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
799 	unsigned int inlen_mode; /* Minimal required Inline data. */
800 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
801 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
802 	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
803 					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
804 					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
805 					    DEV_TX_OFFLOAD_IP_TNL_TSO |
806 					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
807 	bool vlan_inline;
808 	unsigned int temp;
809 
810 	txq_ctrl->txq.fast_free =
811 		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
812 		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
813 		   !config->mprq.enabled);
814 	if (config->txqs_inline == MLX5_ARG_UNSET)
815 		txqs_inline =
816 #if defined(RTE_ARCH_ARM64)
817 		(priv->pci_dev->id.device_id ==
818 			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
819 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
820 #endif
821 			MLX5_INLINE_MAX_TXQS;
822 	else
823 		txqs_inline = (unsigned int)config->txqs_inline;
824 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
825 		     MLX5_SEND_DEF_INLINE_LEN :
826 		     (unsigned int)config->txq_inline_max;
827 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
828 		     MLX5_EMPW_DEF_INLINE_LEN :
829 		     (unsigned int)config->txq_inline_mpw;
830 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
831 		     0 : (unsigned int)config->txq_inline_min;
832 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
833 		inlen_empw = 0;
834 	/*
835 	 * If there is requested minimal amount of data to inline
836 	 * we MUST enable inlining. This is a case for ConnectX-4
837 	 * which usually requires L2 inlined for correct operating
838 	 * and ConnectX-4 Lx which requires L2-L4 inlined to
839 	 * support E-Switch Flows.
840 	 */
841 	if (inlen_mode) {
842 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
843 			/*
844 			 * Optimize minimal inlining for single
845 			 * segment packets to fill one WQEBB
846 			 * without gaps.
847 			 */
848 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
849 		} else {
850 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
851 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
852 			       MLX5_ESEG_MIN_INLINE_SIZE;
853 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
854 		}
855 		if (temp != inlen_mode) {
856 			DRV_LOG(INFO,
857 				"port %u minimal required inline setting"
858 				" aligned from %u to %u",
859 				PORT_ID(priv), inlen_mode, temp);
860 			inlen_mode = temp;
861 		}
862 	}
863 	/*
864 	 * If port is configured to support VLAN insertion and device
865 	 * does not support this feature by HW (for NICs before ConnectX-5
866 	 * or in case of wqe_vlan_insert flag is not set) we must enable
867 	 * data inline on all queues because it is supported by single
868 	 * tx_burst routine.
869 	 */
870 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
871 	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
872 		      !config->hw_vlan_insert;
873 	/*
874 	 * If there are few Tx queues it is prioritized
875 	 * to save CPU cycles and disable data inlining at all.
876 	 */
877 	if (inlen_send && priv->txqs_n >= txqs_inline) {
878 		/*
879 		 * The data sent with ordinal MLX5_OPCODE_SEND
880 		 * may be inlined in Ethernet Segment, align the
881 		 * length accordingly to fit entire WQEBBs.
882 		 */
883 		temp = RTE_MAX(inlen_send,
884 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
885 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
886 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
887 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
888 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
889 				     MLX5_ESEG_MIN_INLINE_SIZE -
890 				     MLX5_WQE_CSEG_SIZE -
891 				     MLX5_WQE_ESEG_SIZE -
892 				     MLX5_WQE_DSEG_SIZE * 2);
893 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
894 		temp = RTE_MAX(temp, inlen_mode);
895 		if (temp != inlen_send) {
896 			DRV_LOG(INFO,
897 				"port %u ordinary send inline setting"
898 				" aligned from %u to %u",
899 				PORT_ID(priv), inlen_send, temp);
900 			inlen_send = temp;
901 		}
902 		/*
903 		 * Not aligned to cache lines, but to WQEs.
904 		 * First bytes of data (initial alignment)
905 		 * is going to be copied explicitly at the
906 		 * beginning of inlining buffer in Ethernet
907 		 * Segment.
908 		 */
909 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
910 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
911 					  MLX5_ESEG_MIN_INLINE_SIZE -
912 					  MLX5_WQE_CSEG_SIZE -
913 					  MLX5_WQE_ESEG_SIZE -
914 					  MLX5_WQE_DSEG_SIZE * 2);
915 	} else if (inlen_mode) {
916 		/*
917 		 * If minimal inlining is requested we must
918 		 * enable inlining in general, despite the
919 		 * number of configured queues. Ignore the
920 		 * txq_inline_max devarg, this is not
921 		 * full-featured inline.
922 		 */
923 		inlen_send = inlen_mode;
924 		inlen_empw = 0;
925 	} else if (vlan_inline) {
926 		/*
927 		 * Hardware does not report offload for
928 		 * VLAN insertion, we must enable data inline
929 		 * to implement feature by software.
930 		 */
931 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
932 		inlen_empw = 0;
933 	} else {
934 		inlen_send = 0;
935 		inlen_empw = 0;
936 	}
937 	txq_ctrl->txq.inlen_send = inlen_send;
938 	txq_ctrl->txq.inlen_mode = inlen_mode;
939 	txq_ctrl->txq.inlen_empw = 0;
940 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
941 		/*
942 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
943 		 * may be inlined in Data Segment, align the
944 		 * length accordingly to fit entire WQEBBs.
945 		 */
946 		temp = RTE_MAX(inlen_empw,
947 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
948 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
949 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
950 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
951 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
952 				     MLX5_DSEG_MIN_INLINE_SIZE -
953 				     MLX5_WQE_CSEG_SIZE -
954 				     MLX5_WQE_ESEG_SIZE -
955 				     MLX5_WQE_DSEG_SIZE);
956 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
957 		if (temp != inlen_empw) {
958 			DRV_LOG(INFO,
959 				"port %u enhanced empw inline setting"
960 				" aligned from %u to %u",
961 				PORT_ID(priv), inlen_empw, temp);
962 			inlen_empw = temp;
963 		}
964 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
965 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
966 					  MLX5_DSEG_MIN_INLINE_SIZE -
967 					  MLX5_WQE_CSEG_SIZE -
968 					  MLX5_WQE_ESEG_SIZE -
969 					  MLX5_WQE_DSEG_SIZE);
970 		txq_ctrl->txq.inlen_empw = inlen_empw;
971 	}
972 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
973 	if (tso) {
974 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
975 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
976 						    MLX5_MAX_TSO_HEADER);
977 		txq_ctrl->txq.tso_en = 1;
978 	}
979 	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
980 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
981 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
982 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
983 				txq_ctrl->txq.offloads) && config->swp;
984 }
985 
986 /**
987  * Adjust Tx queue data inline parameters for large queue sizes.
988  * The data inline feature requires multiple WQEs to fit the packets,
989  * and if the large amount of Tx descriptors is requested by application
990  * the total WQE amount may exceed the hardware capabilities. If the
991  * default inline setting are used we can try to adjust these ones and
992  * meet the hardware requirements and not exceed the queue size.
993  *
994  * @param txq_ctrl
995  *   Pointer to Tx queue control structure.
996  *
997  * @return
998  *   Zero on success, otherwise the parameters can not be adjusted.
999  */
1000 static int
1001 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1002 {
1003 	struct mlx5_priv *priv = txq_ctrl->priv;
1004 	struct mlx5_dev_config *config = &priv->config;
1005 	unsigned int max_inline;
1006 
1007 	max_inline = txq_calc_inline_max(txq_ctrl);
1008 	if (!txq_ctrl->txq.inlen_send) {
1009 		/*
1010 		 * Inline data feature is not engaged at all.
1011 		 * There is nothing to adjust.
1012 		 */
1013 		return 0;
1014 	}
1015 	if (txq_ctrl->max_inline_data <= max_inline) {
1016 		/*
1017 		 * The requested inline data length does not
1018 		 * exceed queue capabilities.
1019 		 */
1020 		return 0;
1021 	}
1022 	if (txq_ctrl->txq.inlen_mode > max_inline) {
1023 		DRV_LOG(ERR,
1024 			"minimal data inline requirements (%u) are not"
1025 			" satisfied (%u) on port %u, try the smaller"
1026 			" Tx queue size (%d)",
1027 			txq_ctrl->txq.inlen_mode, max_inline,
1028 			priv->dev_data->port_id,
1029 			priv->sh->device_attr.max_qp_wr);
1030 		goto error;
1031 	}
1032 	if (txq_ctrl->txq.inlen_send > max_inline &&
1033 	    config->txq_inline_max != MLX5_ARG_UNSET &&
1034 	    config->txq_inline_max > (int)max_inline) {
1035 		DRV_LOG(ERR,
1036 			"txq_inline_max requirements (%u) are not"
1037 			" satisfied (%u) on port %u, try the smaller"
1038 			" Tx queue size (%d)",
1039 			txq_ctrl->txq.inlen_send, max_inline,
1040 			priv->dev_data->port_id,
1041 			priv->sh->device_attr.max_qp_wr);
1042 		goto error;
1043 	}
1044 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1045 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1046 	    config->txq_inline_mpw > (int)max_inline) {
1047 		DRV_LOG(ERR,
1048 			"txq_inline_mpw requirements (%u) are not"
1049 			" satisfied (%u) on port %u, try the smaller"
1050 			" Tx queue size (%d)",
1051 			txq_ctrl->txq.inlen_empw, max_inline,
1052 			priv->dev_data->port_id,
1053 			priv->sh->device_attr.max_qp_wr);
1054 		goto error;
1055 	}
1056 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1057 		DRV_LOG(ERR,
1058 			"tso header inline requirements (%u) are not"
1059 			" satisfied (%u) on port %u, try the smaller"
1060 			" Tx queue size (%d)",
1061 			MLX5_MAX_TSO_HEADER, max_inline,
1062 			priv->dev_data->port_id,
1063 			priv->sh->device_attr.max_qp_wr);
1064 		goto error;
1065 	}
1066 	if (txq_ctrl->txq.inlen_send > max_inline) {
1067 		DRV_LOG(WARNING,
1068 			"adjust txq_inline_max (%u->%u)"
1069 			" due to large Tx queue on port %u",
1070 			txq_ctrl->txq.inlen_send, max_inline,
1071 			priv->dev_data->port_id);
1072 		txq_ctrl->txq.inlen_send = max_inline;
1073 	}
1074 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1075 		DRV_LOG(WARNING,
1076 			"adjust txq_inline_mpw (%u->%u)"
1077 			"due to large Tx queue on port %u",
1078 			txq_ctrl->txq.inlen_empw, max_inline,
1079 			priv->dev_data->port_id);
1080 		txq_ctrl->txq.inlen_empw = max_inline;
1081 	}
1082 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1083 					    txq_ctrl->txq.inlen_empw);
1084 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1085 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1086 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1087 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1088 		    !txq_ctrl->txq.inlen_empw);
1089 	return 0;
1090 error:
1091 	rte_errno = ENOMEM;
1092 	return -ENOMEM;
1093 }
1094 
1095 /**
1096  * Create a DPDK Tx queue.
1097  *
1098  * @param dev
1099  *   Pointer to Ethernet device.
1100  * @param idx
1101  *   TX queue index.
1102  * @param desc
1103  *   Number of descriptors to configure in queue.
1104  * @param socket
1105  *   NUMA socket on which memory must be allocated.
1106  * @param[in] conf
1107  *  Thresholds parameters.
1108  *
1109  * @return
1110  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1111  */
1112 struct mlx5_txq_ctrl *
1113 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1114 	     unsigned int socket, const struct rte_eth_txconf *conf)
1115 {
1116 	struct mlx5_priv *priv = dev->data->dev_private;
1117 	struct mlx5_txq_ctrl *tmpl;
1118 
1119 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1120 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1121 	if (!tmpl) {
1122 		rte_errno = ENOMEM;
1123 		return NULL;
1124 	}
1125 	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1126 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1127 		/* rte_errno is already set. */
1128 		goto error;
1129 	}
1130 	/* Save pointer of global generation number to check memory event. */
1131 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1132 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1133 	tmpl->txq.offloads = conf->offloads |
1134 			     dev->data->dev_conf.txmode.offloads;
1135 	tmpl->priv = priv;
1136 	tmpl->socket = socket;
1137 	tmpl->txq.elts_n = log2above(desc);
1138 	tmpl->txq.elts_s = desc;
1139 	tmpl->txq.elts_m = desc - 1;
1140 	tmpl->txq.port_id = dev->data->port_id;
1141 	tmpl->txq.idx = idx;
1142 	txq_set_params(tmpl);
1143 	if (txq_adjust_params(tmpl))
1144 		goto error;
1145 	if (txq_calc_wqebb_cnt(tmpl) >
1146 	    priv->sh->device_attr.max_qp_wr) {
1147 		DRV_LOG(ERR,
1148 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1149 			" try smaller queue size",
1150 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1151 			priv->sh->device_attr.max_qp_wr);
1152 		rte_errno = ENOMEM;
1153 		goto error;
1154 	}
1155 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1156 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1157 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1158 	return tmpl;
1159 error:
1160 	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1161 	mlx5_free(tmpl);
1162 	return NULL;
1163 }
1164 
1165 /**
1166  * Create a DPDK Tx hairpin queue.
1167  *
1168  * @param dev
1169  *   Pointer to Ethernet device.
1170  * @param idx
1171  *   TX queue index.
1172  * @param desc
1173  *   Number of descriptors to configure in queue.
1174  * @param hairpin_conf
1175  *  The hairpin configuration.
1176  *
1177  * @return
1178  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1179  */
1180 struct mlx5_txq_ctrl *
1181 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1182 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1183 {
1184 	struct mlx5_priv *priv = dev->data->dev_private;
1185 	struct mlx5_txq_ctrl *tmpl;
1186 
1187 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1188 			   SOCKET_ID_ANY);
1189 	if (!tmpl) {
1190 		rte_errno = ENOMEM;
1191 		return NULL;
1192 	}
1193 	tmpl->priv = priv;
1194 	tmpl->socket = SOCKET_ID_ANY;
1195 	tmpl->txq.elts_n = log2above(desc);
1196 	tmpl->txq.port_id = dev->data->port_id;
1197 	tmpl->txq.idx = idx;
1198 	tmpl->hairpin_conf = *hairpin_conf;
1199 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1200 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1201 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1202 	return tmpl;
1203 }
1204 
1205 /**
1206  * Get a Tx queue.
1207  *
1208  * @param dev
1209  *   Pointer to Ethernet device.
1210  * @param idx
1211  *   TX queue index.
1212  *
1213  * @return
1214  *   A pointer to the queue if it exists.
1215  */
1216 struct mlx5_txq_ctrl *
1217 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1218 {
1219 	struct mlx5_priv *priv = dev->data->dev_private;
1220 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1221 	struct mlx5_txq_ctrl *ctrl = NULL;
1222 
1223 	if (txq_data) {
1224 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1225 		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1226 	}
1227 	return ctrl;
1228 }
1229 
1230 /**
1231  * Release a Tx queue.
1232  *
1233  * @param dev
1234  *   Pointer to Ethernet device.
1235  * @param idx
1236  *   TX queue index.
1237  *
1238  * @return
1239  *   1 while a reference on it exists, 0 when freed.
1240  */
1241 int
1242 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1243 {
1244 	struct mlx5_priv *priv = dev->data->dev_private;
1245 	struct mlx5_txq_ctrl *txq_ctrl;
1246 
1247 	if (!(*priv->txqs)[idx])
1248 		return 0;
1249 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1250 	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1251 		return 1;
1252 	if (txq_ctrl->obj) {
1253 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1254 		LIST_REMOVE(txq_ctrl->obj, next);
1255 		mlx5_free(txq_ctrl->obj);
1256 		txq_ctrl->obj = NULL;
1257 	}
1258 	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1259 		if (txq_ctrl->txq.fcqs) {
1260 			mlx5_free(txq_ctrl->txq.fcqs);
1261 			txq_ctrl->txq.fcqs = NULL;
1262 		}
1263 		txq_free_elts(txq_ctrl);
1264 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1265 	}
1266 	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1267 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1268 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1269 		LIST_REMOVE(txq_ctrl, next);
1270 		mlx5_free(txq_ctrl);
1271 		(*priv->txqs)[idx] = NULL;
1272 	}
1273 	return 0;
1274 }
1275 
1276 /**
1277  * Verify if the queue can be released.
1278  *
1279  * @param dev
1280  *   Pointer to Ethernet device.
1281  * @param idx
1282  *   TX queue index.
1283  *
1284  * @return
1285  *   1 if the queue can be released.
1286  */
1287 int
1288 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1289 {
1290 	struct mlx5_priv *priv = dev->data->dev_private;
1291 	struct mlx5_txq_ctrl *txq;
1292 
1293 	if (!(*priv->txqs)[idx])
1294 		return -1;
1295 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1296 	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1297 }
1298 
1299 /**
1300  * Verify the Tx Queue list is empty
1301  *
1302  * @param dev
1303  *   Pointer to Ethernet device.
1304  *
1305  * @return
1306  *   The number of object not released.
1307  */
1308 int
1309 mlx5_txq_verify(struct rte_eth_dev *dev)
1310 {
1311 	struct mlx5_priv *priv = dev->data->dev_private;
1312 	struct mlx5_txq_ctrl *txq_ctrl;
1313 	int ret = 0;
1314 
1315 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1316 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1317 			dev->data->port_id, txq_ctrl->txq.idx);
1318 		++ret;
1319 	}
1320 	return ret;
1321 }
1322 
1323 /**
1324  * Set the Tx queue dynamic timestamp (mask and offset)
1325  *
1326  * @param[in] dev
1327  *   Pointer to the Ethernet device structure.
1328  */
1329 void
1330 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1331 {
1332 	struct mlx5_priv *priv = dev->data->dev_private;
1333 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1334 	struct mlx5_txq_data *data;
1335 	int off, nbit;
1336 	unsigned int i;
1337 	uint64_t mask = 0;
1338 
1339 	nbit = rte_mbuf_dynflag_lookup
1340 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1341 	off = rte_mbuf_dynfield_lookup
1342 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1343 	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1344 		mask = 1ULL << nbit;
1345 	for (i = 0; i != priv->txqs_n; ++i) {
1346 		data = (*priv->txqs)[i];
1347 		if (!data)
1348 			continue;
1349 		data->sh = sh;
1350 		data->ts_mask = mask;
1351 		data->ts_offset = off;
1352 	}
1353 }
1354