xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 081e42dab11d1add2d038fdf2bd4c86b20043d08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_bus_pci.h>
17 #include <rte_common.h>
18 #include <rte_eal_paging.h>
19 
20 #include <mlx5_common.h>
21 #include <mlx5_common_mr.h>
22 #include <mlx5_malloc.h>
23 
24 #include "mlx5_defs.h"
25 #include "mlx5_utils.h"
26 #include "mlx5.h"
27 #include "mlx5_tx.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_autoconf.h"
30 
31 /**
32  * Allocate TX queue elements.
33  *
34  * @param txq_ctrl
35  *   Pointer to TX queue structure.
36  */
37 void
38 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
39 {
40 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
41 	unsigned int i;
42 
43 	for (i = 0; (i != elts_n); ++i)
44 		txq_ctrl->txq.elts[i] = NULL;
45 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
46 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
47 	txq_ctrl->txq.elts_head = 0;
48 	txq_ctrl->txq.elts_tail = 0;
49 	txq_ctrl->txq.elts_comp = 0;
50 }
51 
52 /**
53  * Free TX queue elements.
54  *
55  * @param txq_ctrl
56  *   Pointer to TX queue structure.
57  */
58 void
59 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
60 {
61 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
62 	const uint16_t elts_m = elts_n - 1;
63 	uint16_t elts_head = txq_ctrl->txq.elts_head;
64 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
65 	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
66 
67 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
68 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
69 	txq_ctrl->txq.elts_head = 0;
70 	txq_ctrl->txq.elts_tail = 0;
71 	txq_ctrl->txq.elts_comp = 0;
72 
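	/* Only entries between elts_tail and elts_head still hold mbufs. */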
73 	while (elts_tail != elts_head) {
74 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
75 
76 		MLX5_ASSERT(elt != NULL);
77 		rte_pktmbuf_free_seg(elt);
78 #ifdef RTE_LIBRTE_MLX5_DEBUG
79 		/* Poisoning. */
80 		memset(&(*elts)[elts_tail & elts_m],
81 		       0x77,
82 		       sizeof((*elts)[elts_tail & elts_m]));
83 #endif
84 		++elts_tail;
85 	}
86 }
87 
88 /**
89  * Returns the per-port supported offloads.
90  *
91  * @param dev
92  *   Pointer to Ethernet device.
93  *
94  * @return
95  *   Supported Tx offloads.
96  */
97 uint64_t
98 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
99 {
100 	struct mlx5_priv *priv = dev->data->dev_private;
101 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
102 			     DEV_TX_OFFLOAD_VLAN_INSERT);
103 	struct mlx5_dev_config *config = &priv->config;
104 
105 	if (config->hw_csum)
106 		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
107 			     DEV_TX_OFFLOAD_UDP_CKSUM |
108 			     DEV_TX_OFFLOAD_TCP_CKSUM);
109 	if (config->tso)
110 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
111 	if (config->tx_pp)
112 		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
113 	if (config->swp) {
114 		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
115 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
116 		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
117 			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
118 				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
119 	}
120 	if (config->tunnel_en) {
121 		if (config->hw_csum)
122 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
123 		if (config->tso) {
124 			if (config->tunnel_en &
125 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
126 				offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
127 			if (config->tunnel_en &
128 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
129 				offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
130 			if (config->tunnel_en &
131 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
132 				offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
133 		}
134 	}
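	/*
	 * Fast free requires mbufs with refcnt 1 from a single mempool;
	 * MPRQ Rx mbufs may carry attached external buffers, so the
	 * offload is not advertised when MPRQ is enabled.
	 */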
135 	if (!config->mprq.enabled)
136 		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
137 	return offloads;
138 }
139 
140 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
141 static void
142 txq_sync_cq(struct mlx5_txq_data *txq)
143 {
144 	volatile struct mlx5_cqe *cqe;
145 	int ret, i;
146 
147 	i = txq->cqe_s;
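	/* Scan at most cqe_s entries; stop early on the first HW-owned CQE. */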
148 	do {
149 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
150 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
151 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
152 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
153 				/* No new CQEs in completion queue. */
154 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
155 				break;
156 			}
157 		}
158 		++txq->cq_ci;
159 	} while (--i);
160 	/* Move all CQEs to HW ownership. */
161 	for (i = 0; i < txq->cqe_s; i++) {
162 		cqe = &txq->cqes[i];
163 		cqe->op_own = MLX5_CQE_INVALIDATE;
164 	}
165 	/* Resync CQE and WQE (WQ in reset state). */
166 	rte_io_wmb();
167 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
168 	txq->cq_pi = txq->cq_ci;
169 	rte_io_wmb();
170 }
171 
172 /**
173  * Tx queue stop. Device queue goes to the idle state,
174  * all involved mbufs are freed from elts/WQ.
175  *
176  * @param dev
177  *   Pointer to Ethernet device structure.
178  * @param idx
179  *   Tx queue index.
180  *
181  * @return
182  *   0 on success, a negative errno value otherwise and rte_errno is set.
183  */
184 int
185 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
186 {
187 	struct mlx5_priv *priv = dev->data->dev_private;
188 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
189 	struct mlx5_txq_ctrl *txq_ctrl =
190 			container_of(txq, struct mlx5_txq_ctrl, txq);
191 	int ret;
192 
193 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
194 	/* Move QP to RESET state. */
195 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
196 					   (uint8_t)priv->dev_port);
197 	if (ret)
198 		return ret;
199 	/* Handle all send completions. */
200 	txq_sync_cq(txq);
201 	/* Free elts stored in the SQ. */
202 	txq_free_elts(txq_ctrl);
203 	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
204 	txq->wqe_ci = txq->wqe_s;
205 	txq->wqe_pi = 0;
206 	txq->elts_comp = 0;
207 	/* Set the actual queue state. */
208 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
209 	return 0;
210 }
211 
212 /**
213  * Tx queue stop. Device queue goes to the idle state,
214  * all involved mbufs are freed from elts/WQ.
215  *
216  * @param dev
217  *   Pointer to Ethernet device structure.
218  * @param idx
219  *   Tx queue index.
220  *
221  * @return
222  *   0 on success, a negative errno value otherwise and rte_errno is set.
223  */
224 int
225 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
226 {
227 	int ret;
228 
229 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
230 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
231 		rte_errno = EINVAL;
232 		return -EINVAL;
233 	}
234 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
235 		return 0;
236 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
237 		ret = mlx5_mp_os_req_queue_control(dev, idx,
238 						   MLX5_MP_REQ_QUEUE_TX_STOP);
239 	} else {
240 		ret = mlx5_tx_queue_stop_primary(dev, idx);
241 	}
242 	return ret;
243 }
244 
245 /**
246  * Tx queue start. Device queue goes to the ready state,
247  * transmission on the queue can be resumed.
248  *
249  * @param dev
250  *   Pointer to Ethernet device structure.
251  * @param idx
252  *   Tx queue index.
253  *
254  * @return
255  *   0 on success, a negative errno value otherwise and rte_errno is set.
256  */
257 int
258 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
259 {
260 	struct mlx5_priv *priv = dev->data->dev_private;
261 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
262 	struct mlx5_txq_ctrl *txq_ctrl =
263 			container_of(txq, struct mlx5_txq_ctrl, txq);
264 	int ret;
265 
266 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
267 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
268 					   MLX5_TXQ_MOD_RST2RDY,
269 					   (uint8_t)priv->dev_port);
270 	if (ret)
271 		return ret;
272 	txq_ctrl->txq.wqe_ci = 0;
273 	txq_ctrl->txq.wqe_pi = 0;
274 	txq_ctrl->txq.elts_comp = 0;
275 	/* Set the actual queue state. */
276 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
277 	return 0;
278 }
279 
280 /**
281  * Tx queue start. Device queue goes to the ready state,
282  * transmission on the queue can be resumed.
283  *
284  * @param dev
285  *   Pointer to Ethernet device structure.
286  * @param idx
287  *   Tx queue index.
288  *
289  * @return
290  *   0 on success, a negative errno value otherwise and rte_errno is set.
291  */
292 int
293 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
294 {
295 	int ret;
296 
297 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
298 		DRV_LOG(ERR, "Hairpin queue can't be started");
299 		rte_errno = EINVAL;
300 		return -EINVAL;
301 	}
302 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
303 		return 0;
304 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
305 		ret = mlx5_mp_os_req_queue_control(dev, idx,
306 						   MLX5_MP_REQ_QUEUE_TX_START);
307 	} else {
308 		ret = mlx5_tx_queue_start_primary(dev, idx);
309 	}
310 	return ret;
311 }
312 
313 /**
314  * Tx queue presetup checks.
315  *
316  * @param dev
317  *   Pointer to Ethernet device structure.
318  * @param idx
319  *   Tx queue index.
320  * @param desc
321  *   Number of descriptors to configure in queue.
322  *
323  * @return
324  *   0 on success, a negative errno value otherwise and rte_errno is set.
325  */
326 static int
327 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
328 {
329 	struct mlx5_priv *priv = dev->data->dev_private;
330 
331 	if (*desc <= MLX5_TX_COMP_THRESH) {
332 		DRV_LOG(WARNING,
333 			"port %u number of descriptors requested for Tx queue"
334 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
335 			" instead of %u", dev->data->port_id, idx,
336 			MLX5_TX_COMP_THRESH + 1, *desc);
337 		*desc = MLX5_TX_COMP_THRESH + 1;
338 	}
339 	if (!rte_is_power_of_2(*desc)) {
340 		*desc = 1 << log2above(*desc);
341 		DRV_LOG(WARNING,
342 			"port %u increased number of descriptors in Tx queue"
343 			" %u to the next power of two (%d)",
344 			dev->data->port_id, idx, *desc);
345 	}
346 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
347 		dev->data->port_id, idx, *desc);
348 	if (idx >= priv->txqs_n) {
349 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
350 			dev->data->port_id, idx, priv->txqs_n);
351 		rte_errno = EOVERFLOW;
352 		return -rte_errno;
353 	}
354 	if (!mlx5_txq_releasable(dev, idx)) {
355 		rte_errno = EBUSY;
356 		DRV_LOG(ERR, "port %u unable to release queue index %u",
357 			dev->data->port_id, idx);
358 		return -rte_errno;
359 	}
360 	mlx5_txq_release(dev, idx);
361 	return 0;
362 }
363 
364 /**
365  * DPDK callback to configure a TX queue.
366  *
367  * @param dev
368  *   Pointer to Ethernet device structure.
369  * @param idx
370  *   TX queue index.
371  * @param desc
372  *   Number of descriptors to configure in queue.
373  * @param socket
374  *   NUMA socket on which memory must be allocated.
375  * @param[in] conf
376  *   Thresholds parameters.
377  *
378  * @return
379  *   0 on success, a negative errno value otherwise and rte_errno is set.
380  */
381 int
382 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
383 		    unsigned int socket, const struct rte_eth_txconf *conf)
384 {
385 	struct mlx5_priv *priv = dev->data->dev_private;
386 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
387 	struct mlx5_txq_ctrl *txq_ctrl =
388 		container_of(txq, struct mlx5_txq_ctrl, txq);
389 	int res;
390 
391 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
392 	if (res)
393 		return res;
394 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
395 	if (!txq_ctrl) {
396 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
397 			dev->data->port_id, idx);
398 		return -rte_errno;
399 	}
400 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
401 		dev->data->port_id, idx);
402 	(*priv->txqs)[idx] = &txq_ctrl->txq;
403 	return 0;
404 }
405 
406 /**
407  * DPDK callback to configure a TX hairpin queue.
408  *
409  * @param dev
410  *   Pointer to Ethernet device structure.
411  * @param idx
412  *   TX queue index.
413  * @param desc
414  *   Number of descriptors to configure in queue.
415  * @param[in] hairpin_conf
416  *   The hairpin binding configuration.
417  *
418  * @return
419  *   0 on success, a negative errno value otherwise and rte_errno is set.
420  */
421 int
422 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
423 			    uint16_t desc,
424 			    const struct rte_eth_hairpin_conf *hairpin_conf)
425 {
426 	struct mlx5_priv *priv = dev->data->dev_private;
427 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
428 	struct mlx5_txq_ctrl *txq_ctrl =
429 		container_of(txq, struct mlx5_txq_ctrl, txq);
430 	int res;
431 
432 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
433 	if (res)
434 		return res;
435 	if (hairpin_conf->peer_count != 1) {
436 		rte_errno = EINVAL;
437 		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
438 			" peer count is %u", dev->data->port_id,
439 			idx, hairpin_conf->peer_count);
440 		return -rte_errno;
441 	}
442 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
443 		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
444 			rte_errno = EINVAL;
445 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
446 				" index %u, Rx %u is larger than %u",
447 				dev->data->port_id, idx,
448 				hairpin_conf->peers[0].queue, priv->rxqs_n);
449 			return -rte_errno;
450 		}
451 	} else {
452 		if (hairpin_conf->manual_bind == 0 ||
453 		    hairpin_conf->tx_explicit == 0) {
454 			rte_errno = EINVAL;
455 			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
456 				" index %u peer port %u with attributes %u %u",
457 				dev->data->port_id, idx,
458 				hairpin_conf->peers[0].port,
459 				hairpin_conf->manual_bind,
460 				hairpin_conf->tx_explicit);
461 			return -rte_errno;
462 		}
463 	}
464 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
465 	if (!txq_ctrl) {
466 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
467 			dev->data->port_id, idx);
468 		return -rte_errno;
469 	}
470 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
471 		dev->data->port_id, idx);
472 	(*priv->txqs)[idx] = &txq_ctrl->txq;
473 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
474 	return 0;
475 }
476 
477 /**
478  * DPDK callback to release a TX queue.
479  *
480  * @param dev
481  *   Pointer to Ethernet device structure.
482  * @param qid
483  *   Transmit queue index.
484  */
485 void
486 mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
487 {
488 	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
489 
490 	if (txq == NULL)
491 		return;
492 	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
493 		dev->data->port_id, qid);
494 	mlx5_txq_release(dev, qid);
495 }
496 
497 /**
498  * Configure the doorbell register non-cached attribute.
499  *
500  * @param txq_ctrl
501  *   Pointer to Tx queue control structure.
502  * @param page_size
503  *   System page size.
504  */
505 static void
506 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
507 {
508 	struct mlx5_priv *priv = txq_ctrl->priv;
509 	off_t cmd;
510 
511 	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
512 	txq_ctrl->txq.db_nc = 0;
513 	/* Check the doorbell register mapping type. */
514 	cmd = txq_ctrl->uar_mmap_offset / page_size;
515 	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
516 	cmd &= MLX5_UAR_MMAP_CMD_MASK;
517 	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
518 		txq_ctrl->txq.db_nc = 1;
519 }
520 
521 /**
522  * Initialize Tx UAR registers for primary process.
523  *
524  * @param txq_ctrl
525  *   Pointer to Tx queue control structure.
526  */
527 void
528 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
529 {
530 	struct mlx5_priv *priv = txq_ctrl->priv;
531 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
532 #ifndef RTE_ARCH_64
533 	unsigned int lock_idx;
534 #endif
535 	const size_t page_size = rte_mem_page_size();
536 	if (page_size == (size_t)-1) {
537 		DRV_LOG(ERR, "Failed to get mem page size");
538 		rte_errno = ENOMEM;
539 	}
540 
541 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
542 		return;
543 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
544 	MLX5_ASSERT(ppriv);
545 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
546 	txq_uar_ncattr_init(txq_ctrl, page_size);
547 #ifndef RTE_ARCH_64
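	/*
	 * On 32-bit targets the 64-bit doorbell write is not atomic
	 * and is serialized with a per-UAR-page lock.
	 */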
548 	/* Assign a UAR lock according to the UAR page number. */
549 	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
550 		   MLX5_UAR_PAGE_NUM_MASK;
551 	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
552 #endif
553 }
554 
555 /**
556  * Remap UAR register of a Tx queue for secondary process.
557  *
558  * Remapped address is stored at the table in the process private structure of
559  * the device, indexed by queue index.
560  *
561  * @param txq_ctrl
562  *   Pointer to Tx queue control structure.
563  * @param fd
564  *   Verbs file descriptor to map UAR pages.
565  *
566  * @return
567  *   0 on success, a negative errno value otherwise and rte_errno is set.
568  */
569 static int
570 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
571 {
572 	struct mlx5_priv *priv = txq_ctrl->priv;
573 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
574 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
575 	void *addr;
576 	uintptr_t uar_va;
577 	uintptr_t offset;
578 	const size_t page_size = rte_mem_page_size();
579 	if (page_size == (size_t)-1) {
580 		DRV_LOG(ERR, "Failed to get mem page size");
581 		rte_errno = ENOMEM;
582 		return -rte_errno;
583 	}
584 
585 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
586 		return 0;
587 	MLX5_ASSERT(ppriv);
588 	/*
589 	 * As in rdma-core, UARs are mapped at OS page size granularity.
590 	 * Refer to the libmlx5 function mlx5_init_context().
591 	 */
592 	uar_va = (uintptr_t)txq_ctrl->bf_reg;
593 	offset = uar_va & (page_size - 1); /* Offset in page. */
594 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
595 			    fd, txq_ctrl->uar_mmap_offset);
596 	if (!addr) {
597 		DRV_LOG(ERR,
598 			"port %u mmap failed for BF reg of txq %u",
599 			txq->port_id, txq->idx);
600 		rte_errno = ENXIO;
601 		return -rte_errno;
602 	}
603 	addr = RTE_PTR_ADD(addr, offset);
604 	ppriv->uar_table[txq->idx] = addr;
605 	txq_uar_ncattr_init(txq_ctrl, page_size);
606 	return 0;
607 }
608 
609 /**
610  * Unmap UAR register of a Tx queue for secondary process.
611  *
612  * @param txq_ctrl
613  *   Pointer to Tx queue control structure.
614  */
615 static void
616 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
617 {
618 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
619 	void *addr;
620 	const size_t page_size = rte_mem_page_size();
621 	if (page_size == (size_t)-1) {
622 		DRV_LOG(ERR, "Failed to get mem page size");
623 		rte_errno = ENOMEM;
624 	}
625 
626 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
627 		return;
628 	addr = ppriv->uar_table[txq_ctrl->txq.idx];
629 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
630 }
631 
632 /**
633  * Deinitialize Tx UAR registers for secondary process.
634  *
635  * @param dev
636  *   Pointer to Ethernet device.
637  */
638 void
639 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
640 {
641 	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
642 					dev->process_private;
643 	const size_t page_size = rte_mem_page_size();
644 	void *addr;
645 	unsigned int i;
646 
647 	if (page_size == (size_t)-1) {
648 		DRV_LOG(ERR, "Failed to get mem page size");
649 		return;
650 	}
651 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
652 	for (i = 0; i != ppriv->uar_table_sz; ++i) {
653 		if (!ppriv->uar_table[i])
654 			continue;
655 		addr = ppriv->uar_table[i];
656 		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
657 
658 	}
659 }
660 
661 /**
662  * Initialize Tx UAR registers for secondary process.
663  *
664  * @param dev
665  *   Pointer to Ethernet device.
666  * @param fd
667  *   Verbs file descriptor to map UAR pages.
668  *
669  * @return
670  *   0 on success, a negative errno value otherwise and rte_errno is set.
671  */
672 int
673 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
674 {
675 	struct mlx5_priv *priv = dev->data->dev_private;
676 	struct mlx5_txq_data *txq;
677 	struct mlx5_txq_ctrl *txq_ctrl;
678 	unsigned int i;
679 	int ret;
680 
681 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
682 	for (i = 0; i != priv->txqs_n; ++i) {
683 		if (!(*priv->txqs)[i])
684 			continue;
685 		txq = (*priv->txqs)[i];
686 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
687 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
688 			continue;
689 		MLX5_ASSERT(txq->idx == (uint16_t)i);
690 		ret = txq_uar_init_secondary(txq_ctrl, fd);
691 		if (ret)
692 			goto error;
693 	}
694 	return 0;
695 error:
696 	/* Rollback. */
697 	do {
698 		if (!(*priv->txqs)[i])
699 			continue;
700 		txq = (*priv->txqs)[i];
701 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
702 		txq_uar_uninit_secondary(txq_ctrl);
703 	} while (i--);
704 	return -rte_errno;
705 }
706 
707 /**
708  * Verify the Verbs Tx queue list is empty.
709  *
710  * @param dev
711  *   Pointer to Ethernet device.
712  *
713  * @return
714  *   The number of objects not released.
715  */
716 int
717 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
718 {
719 	struct mlx5_priv *priv = dev->data->dev_private;
720 	int ret = 0;
721 	struct mlx5_txq_obj *txq_obj;
722 
723 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
724 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
725 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
726 		++ret;
727 	}
728 	return ret;
729 }
730 
731 /**
732  * Calculate the total number of WQEBBs for a Tx queue.
733  *
734  * Simplified version of calc_sq_size() in rdma-core.
735  *
736  * @param txq_ctrl
737  *   Pointer to Tx queue control structure.
738  *
739  * @return
740  *   The number of WQEBBs.
741  */
742 static int
743 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
744 {
745 	unsigned int wqe_size;
746 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
747 
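	/*
	 * Worst-case WQE size per descriptor: control and Ethernet
	 * segments plus one data segment, with the minimal inline part
	 * already counted in the Ethernet segment, plus the maximal
	 * inline data length.
	 */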
748 	wqe_size = MLX5_WQE_CSEG_SIZE +
749 		   MLX5_WQE_ESEG_SIZE +
750 		   MLX5_WSEG_SIZE -
751 		   MLX5_ESEG_MIN_INLINE_SIZE +
752 		   txq_ctrl->max_inline_data;
753 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
754 }
755 
756 /**
757  * Calculate the maximal inline data size for Tx queue.
758  *
759  * @param txq_ctrl
760  *   Pointer to Tx queue control structure.
761  *
762  * @return
763  *   The maximal inline data size.
764  */
765 static unsigned int
766 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
767 {
768 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
769 	struct mlx5_priv *priv = txq_ctrl->priv;
770 	unsigned int wqe_size;
771 
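	/* WQEBBs that can be spent on a single descriptor within the device limit. */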
772 	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
773 	if (!wqe_size)
774 		return 0;
775 	/*
776 	 * This calculation is derived from the source of
777 	 * mlx5_calc_send_wqe() in the rdma-core library.
778 	 */
779 	wqe_size = wqe_size * MLX5_WQE_SIZE -
780 		   MLX5_WQE_CSEG_SIZE -
781 		   MLX5_WQE_ESEG_SIZE -
782 		   MLX5_WSEG_SIZE -
783 		   MLX5_WSEG_SIZE +
784 		   MLX5_DSEG_MIN_INLINE_SIZE;
785 	return wqe_size;
786 }
787 
788 /**
789  * Set Tx queue parameters from device configuration.
790  *
791  * @param txq_ctrl
792  *   Pointer to Tx queue control structure.
793  */
794 static void
795 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
796 {
797 	struct mlx5_priv *priv = txq_ctrl->priv;
798 	struct mlx5_dev_config *config = &priv->config;
799 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
800 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
801 	unsigned int inlen_mode; /* Minimal required Inline data. */
802 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
803 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
804 	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
805 					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
806 					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
807 					    DEV_TX_OFFLOAD_IP_TNL_TSO |
808 					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
809 	bool vlan_inline;
810 	unsigned int temp;
811 
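
	/*
	 * Fast free is enabled only when requested and neither
	 * multi-segment Tx nor MPRQ is in use.
	 */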
812 	txq_ctrl->txq.fast_free =
813 		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
814 		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
815 		   !config->mprq.enabled);
816 	if (config->txqs_inline == MLX5_ARG_UNSET)
817 		txqs_inline =
818 #if defined(RTE_ARCH_ARM64)
819 		(priv->pci_dev && priv->pci_dev->id.device_id ==
820 			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
821 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
822 #endif
823 			MLX5_INLINE_MAX_TXQS;
824 	else
825 		txqs_inline = (unsigned int)config->txqs_inline;
826 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
827 		     MLX5_SEND_DEF_INLINE_LEN :
828 		     (unsigned int)config->txq_inline_max;
829 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
830 		     MLX5_EMPW_DEF_INLINE_LEN :
831 		     (unsigned int)config->txq_inline_mpw;
832 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
833 		     0 : (unsigned int)config->txq_inline_min;
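	/* The eMPW inline length is meaningful only when (e)MPW is engaged. */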
834 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
835 		inlen_empw = 0;
836 	/*
837 	 * If a minimal amount of data to inline is requested
838 	 * we MUST enable inlining. This is the case for ConnectX-4,
839 	 * which usually requires L2 headers inlined for correct
840 	 * operation, and ConnectX-4 Lx, which requires L2-L4 headers
841 	 * inlined to support E-Switch Flows.
842 	 */
843 	if (inlen_mode) {
844 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
845 			/*
846 			 * Optimize minimal inlining for single
847 			 * segment packets to fill one WQEBB
848 			 * without gaps.
849 			 */
850 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
851 		} else {
852 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
853 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
854 			       MLX5_ESEG_MIN_INLINE_SIZE;
855 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
856 		}
857 		if (temp != inlen_mode) {
858 			DRV_LOG(INFO,
859 				"port %u minimal required inline setting"
860 				" aligned from %u to %u",
861 				PORT_ID(priv), inlen_mode, temp);
862 			inlen_mode = temp;
863 		}
864 	}
865 	/*
866 	 * If the port is configured to support VLAN insertion and the
867 	 * device does not support this feature in HW (NICs before
868 	 * ConnectX-5, or the wqe_vlan_insert flag is not set), we must
869 	 * enable data inlining on all queues because all of them are
870 	 * served by a single tx_burst routine.
871 	 */
872 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
873 	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
874 		      !config->hw_vlan_insert;
875 	/*
876 	 * If there are only a few Tx queues, saving CPU cycles is
877 	 * prioritized and data inlining is disabled altogether.
878 	 */
879 	if (inlen_send && priv->txqs_n >= txqs_inline) {
880 		/*
881 		 * The data sent with an ordinary MLX5_OPCODE_SEND
882 		 * may be inlined in the Ethernet Segment, align the
883 		 * length accordingly to fit entire WQEBBs.
884 		 */
885 		temp = RTE_MAX(inlen_send,
886 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
887 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
888 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
889 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
890 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
891 				     MLX5_ESEG_MIN_INLINE_SIZE -
892 				     MLX5_WQE_CSEG_SIZE -
893 				     MLX5_WQE_ESEG_SIZE -
894 				     MLX5_WQE_DSEG_SIZE * 2);
895 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
896 		temp = RTE_MAX(temp, inlen_mode);
897 		if (temp != inlen_send) {
898 			DRV_LOG(INFO,
899 				"port %u ordinary send inline setting"
900 				" aligned from %u to %u",
901 				PORT_ID(priv), inlen_send, temp);
902 			inlen_send = temp;
903 		}
904 		/*
905 		 * Not aligned to cache lines, but to WQEs.
906 		 * The first bytes of data (initial alignment)
907 		 * are copied explicitly to the beginning of
908 		 * the inlining buffer in the Ethernet
909 		 * Segment.
910 		 */
911 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
912 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
913 					  MLX5_ESEG_MIN_INLINE_SIZE -
914 					  MLX5_WQE_CSEG_SIZE -
915 					  MLX5_WQE_ESEG_SIZE -
916 					  MLX5_WQE_DSEG_SIZE * 2);
917 	} else if (inlen_mode) {
918 		/*
919 		 * If minimal inlining is requested we must
920 		 * enable inlining in general, regardless of the
921 		 * number of configured queues. Ignore the
922 		 * txq_inline_max devarg, this is not a
923 		 * full-featured inline.
924 		 */
925 		inlen_send = inlen_mode;
926 		inlen_empw = 0;
927 	} else if (vlan_inline) {
928 		/*
929 		 * Hardware does not report the VLAN insertion
930 		 * offload, we must enable data inlining to
931 		 * implement the feature in software.
932 		 */
933 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
934 		inlen_empw = 0;
935 	} else {
936 		inlen_send = 0;
937 		inlen_empw = 0;
938 	}
939 	txq_ctrl->txq.inlen_send = inlen_send;
940 	txq_ctrl->txq.inlen_mode = inlen_mode;
941 	txq_ctrl->txq.inlen_empw = 0;
942 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
943 		/*
944 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
945 		 * may be inlined in the Data Segment, align the
946 		 * length accordingly to fit entire WQEBBs.
947 		 */
948 		temp = RTE_MAX(inlen_empw,
949 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
950 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
951 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
952 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
953 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
954 				     MLX5_DSEG_MIN_INLINE_SIZE -
955 				     MLX5_WQE_CSEG_SIZE -
956 				     MLX5_WQE_ESEG_SIZE -
957 				     MLX5_WQE_DSEG_SIZE);
958 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
959 		if (temp != inlen_empw) {
960 			DRV_LOG(INFO,
961 				"port %u enhanced empw inline setting"
962 				" aligned from %u to %u",
963 				PORT_ID(priv), inlen_empw, temp);
964 			inlen_empw = temp;
965 		}
966 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
967 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
968 					  MLX5_DSEG_MIN_INLINE_SIZE -
969 					  MLX5_WQE_CSEG_SIZE -
970 					  MLX5_WQE_ESEG_SIZE -
971 					  MLX5_WQE_DSEG_SIZE);
972 		txq_ctrl->txq.inlen_empw = inlen_empw;
973 	}
974 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
975 	if (tso) {
976 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
977 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
978 						    MLX5_MAX_TSO_HEADER);
979 		txq_ctrl->txq.tso_en = 1;
980 	}
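	/*
	 * Tunneled TSO is enabled when the requested tunnel TSO offload
	 * is backed by the matching HW capability or by SW parsing TSO.
	 */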
981 	if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
982 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
983 	   ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
984 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
985 	   ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
986 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
987 	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
988 		txq_ctrl->txq.tunnel_en = 1;
989 	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
990 				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
991 				  txq_ctrl->txq.offloads) && (config->swp &
992 				  MLX5_SW_PARSING_TSO_CAP)) |
993 				((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
994 				 txq_ctrl->txq.offloads) && (config->swp &
995 				 MLX5_SW_PARSING_CSUM_CAP));
996 }
997 
998 /**
999  * Adjust Tx queue data inline parameters for large queue sizes.
1000  * The data inline feature requires multiple WQEs to fit the packets,
1001  * and if a large number of Tx descriptors is requested by the application
1002  * the total WQE amount may exceed the hardware capabilities. If the
1003  * default inline settings are used we can try to adjust them to meet
1004  * the hardware requirements without exceeding the queue size.
1005  *
1006  * @param txq_ctrl
1007  *   Pointer to Tx queue control structure.
1008  *
1009  * @return
1010  *   Zero on success, otherwise the parameters cannot be adjusted.
1011  */
1012 static int
1013 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1014 {
1015 	struct mlx5_priv *priv = txq_ctrl->priv;
1016 	struct mlx5_dev_config *config = &priv->config;
1017 	unsigned int max_inline;
1018 
1019 	max_inline = txq_calc_inline_max(txq_ctrl);
1020 	if (!txq_ctrl->txq.inlen_send) {
1021 		/*
1022 		 * Inline data feature is not engaged at all.
1023 		 * There is nothing to adjust.
1024 		 */
1025 		return 0;
1026 	}
1027 	if (txq_ctrl->max_inline_data <= max_inline) {
1028 		/*
1029 		 * The requested inline data length does not
1030 		 * exceed queue capabilities.
1031 		 */
1032 		return 0;
1033 	}
1034 	if (txq_ctrl->txq.inlen_mode > max_inline) {
1035 		DRV_LOG(ERR,
1036 			"minimal data inline requirements (%u) are not"
1037 			" satisfied (%u) on port %u, try the smaller"
1038 			" Tx queue size (%d)",
1039 			txq_ctrl->txq.inlen_mode, max_inline,
1040 			priv->dev_data->port_id,
1041 			priv->sh->device_attr.max_qp_wr);
1042 		goto error;
1043 	}
1044 	if (txq_ctrl->txq.inlen_send > max_inline &&
1045 	    config->txq_inline_max != MLX5_ARG_UNSET &&
1046 	    config->txq_inline_max > (int)max_inline) {
1047 		DRV_LOG(ERR,
1048 			"txq_inline_max requirements (%u) are not"
1049 			" satisfied (%u) on port %u, try the smaller"
1050 			" Tx queue size (%d)",
1051 			txq_ctrl->txq.inlen_send, max_inline,
1052 			priv->dev_data->port_id,
1053 			priv->sh->device_attr.max_qp_wr);
1054 		goto error;
1055 	}
1056 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1057 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1058 	    config->txq_inline_mpw > (int)max_inline) {
1059 		DRV_LOG(ERR,
1060 			"txq_inline_mpw requirements (%u) are not"
1061 			" satisfied (%u) on port %u, try the smaller"
1062 			" Tx queue size (%d)",
1063 			txq_ctrl->txq.inlen_empw, max_inline,
1064 			priv->dev_data->port_id,
1065 			priv->sh->device_attr.max_qp_wr);
1066 		goto error;
1067 	}
1068 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1069 		DRV_LOG(ERR,
1070 			"tso header inline requirements (%u) are not"
1071 			" satisfied (%u) on port %u, try the smaller"
1072 			" Tx queue size (%d)",
1073 			MLX5_MAX_TSO_HEADER, max_inline,
1074 			priv->dev_data->port_id,
1075 			priv->sh->device_attr.max_qp_wr);
1076 		goto error;
1077 	}
1078 	if (txq_ctrl->txq.inlen_send > max_inline) {
1079 		DRV_LOG(WARNING,
1080 			"adjust txq_inline_max (%u->%u)"
1081 			" due to large Tx queue on port %u",
1082 			txq_ctrl->txq.inlen_send, max_inline,
1083 			priv->dev_data->port_id);
1084 		txq_ctrl->txq.inlen_send = max_inline;
1085 	}
1086 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1087 		DRV_LOG(WARNING,
1088 			"adjust txq_inline_mpw (%u->%u)"
1089 			" due to large Tx queue on port %u",
1090 			txq_ctrl->txq.inlen_empw, max_inline,
1091 			priv->dev_data->port_id);
1092 		txq_ctrl->txq.inlen_empw = max_inline;
1093 	}
1094 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1095 					    txq_ctrl->txq.inlen_empw);
1096 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1097 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1098 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1099 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1100 		    !txq_ctrl->txq.inlen_empw);
1101 	return 0;
1102 error:
1103 	rte_errno = ENOMEM;
1104 	return -ENOMEM;
1105 }
1106 
1107 /**
1108  * Create a DPDK Tx queue.
1109  *
1110  * @param dev
1111  *   Pointer to Ethernet device.
1112  * @param idx
1113  *   TX queue index.
1114  * @param desc
1115  *   Number of descriptors to configure in queue.
1116  * @param socket
1117  *   NUMA socket on which memory must be allocated.
1118  * @param[in] conf
1119  *  Thresholds parameters.
1120  *
1121  * @return
1122  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1123  */
1124 struct mlx5_txq_ctrl *
1125 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1126 	     unsigned int socket, const struct rte_eth_txconf *conf)
1127 {
1128 	struct mlx5_priv *priv = dev->data->dev_private;
1129 	struct mlx5_txq_ctrl *tmpl;
1130 
1131 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1132 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1133 	if (!tmpl) {
1134 		rte_errno = ENOMEM;
1135 		return NULL;
1136 	}
1137 	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1138 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1139 		/* rte_errno is already set. */
1140 		goto error;
1141 	}
1142 	/* Save pointer of global generation number to check memory event. */
1143 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1144 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1145 	tmpl->txq.offloads = conf->offloads |
1146 			     dev->data->dev_conf.txmode.offloads;
1147 	tmpl->priv = priv;
1148 	tmpl->socket = socket;
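	/* Store the element ring size as log2, actual size and index mask. */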
1149 	tmpl->txq.elts_n = log2above(desc);
1150 	tmpl->txq.elts_s = desc;
1151 	tmpl->txq.elts_m = desc - 1;
1152 	tmpl->txq.port_id = dev->data->port_id;
1153 	tmpl->txq.idx = idx;
1154 	txq_set_params(tmpl);
1155 	if (txq_adjust_params(tmpl))
1156 		goto error;
1157 	if (txq_calc_wqebb_cnt(tmpl) >
1158 	    priv->sh->device_attr.max_qp_wr) {
1159 		DRV_LOG(ERR,
1160 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1161 			" try smaller queue size",
1162 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1163 			priv->sh->device_attr.max_qp_wr);
1164 		rte_errno = ENOMEM;
1165 		goto error;
1166 	}
1167 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1168 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1169 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1170 	return tmpl;
1171 error:
1172 	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1173 	mlx5_free(tmpl);
1174 	return NULL;
1175 }
1176 
1177 /**
1178  * Create a DPDK Tx hairpin queue.
1179  *
1180  * @param dev
1181  *   Pointer to Ethernet device.
1182  * @param idx
1183  *   TX queue index.
1184  * @param desc
1185  *   Number of descriptors to configure in queue.
1186  * @param hairpin_conf
1187  *  The hairpin configuration.
1188  *
1189  * @return
1190  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1191  */
1192 struct mlx5_txq_ctrl *
1193 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1194 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1195 {
1196 	struct mlx5_priv *priv = dev->data->dev_private;
1197 	struct mlx5_txq_ctrl *tmpl;
1198 
1199 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1200 			   SOCKET_ID_ANY);
1201 	if (!tmpl) {
1202 		rte_errno = ENOMEM;
1203 		return NULL;
1204 	}
1205 	tmpl->priv = priv;
1206 	tmpl->socket = SOCKET_ID_ANY;
1207 	tmpl->txq.elts_n = log2above(desc);
1208 	tmpl->txq.port_id = dev->data->port_id;
1209 	tmpl->txq.idx = idx;
1210 	tmpl->hairpin_conf = *hairpin_conf;
1211 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1212 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1213 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1214 	return tmpl;
1215 }
1216 
1217 /**
1218  * Get a Tx queue.
1219  *
1220  * @param dev
1221  *   Pointer to Ethernet device.
1222  * @param idx
1223  *   TX queue index.
1224  *
1225  * @return
1226  *   A pointer to the queue if it exists.
1227  */
1228 struct mlx5_txq_ctrl *
1229 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1230 {
1231 	struct mlx5_priv *priv = dev->data->dev_private;
1232 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1233 	struct mlx5_txq_ctrl *ctrl = NULL;
1234 
1235 	if (txq_data) {
1236 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1237 		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1238 	}
1239 	return ctrl;
1240 }
1241 
1242 /**
1243  * Release a Tx queue.
1244  *
1245  * @param dev
1246  *   Pointer to Ethernet device.
1247  * @param idx
1248  *   TX queue index.
1249  *
1250  * @return
1251  *   1 while a reference on it exists, 0 when freed.
1252  */
1253 int
1254 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1255 {
1256 	struct mlx5_priv *priv = dev->data->dev_private;
1257 	struct mlx5_txq_ctrl *txq_ctrl;
1258 
1259 	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
1260 		return 0;
1261 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
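	/*
	 * A reference count above one after the decrement means
	 * other holders still use the queue.
	 */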
1262 	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1263 		return 1;
1264 	if (txq_ctrl->obj) {
1265 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1266 		LIST_REMOVE(txq_ctrl->obj, next);
1267 		mlx5_free(txq_ctrl->obj);
1268 		txq_ctrl->obj = NULL;
1269 	}
1270 	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1271 		if (txq_ctrl->txq.fcqs) {
1272 			mlx5_free(txq_ctrl->txq.fcqs);
1273 			txq_ctrl->txq.fcqs = NULL;
1274 		}
1275 		txq_free_elts(txq_ctrl);
1276 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1277 	}
1278 	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1279 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1280 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1281 		LIST_REMOVE(txq_ctrl, next);
1282 		mlx5_free(txq_ctrl);
1283 		(*priv->txqs)[idx] = NULL;
1284 	}
1285 	return 0;
1286 }
1287 
1288 /**
1289  * Verify if the queue can be released.
1290  *
1291  * @param dev
1292  *   Pointer to Ethernet device.
1293  * @param idx
1294  *   TX queue index.
1295  *
1296  * @return
1297  *   1 if the queue can be released.
1298  */
1299 int
1300 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1301 {
1302 	struct mlx5_priv *priv = dev->data->dev_private;
1303 	struct mlx5_txq_ctrl *txq;
1304 
1305 	if (!(*priv->txqs)[idx])
1306 		return -1;
1307 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1308 	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1309 }
1310 
1311 /**
1312  * Verify the Tx queue list is empty.
1313  *
1314  * @param dev
1315  *   Pointer to Ethernet device.
1316  *
1317  * @return
1318  *   The number of objects not released.
1319  */
1320 int
1321 mlx5_txq_verify(struct rte_eth_dev *dev)
1322 {
1323 	struct mlx5_priv *priv = dev->data->dev_private;
1324 	struct mlx5_txq_ctrl *txq_ctrl;
1325 	int ret = 0;
1326 
1327 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1328 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1329 			dev->data->port_id, txq_ctrl->txq.idx);
1330 		++ret;
1331 	}
1332 	return ret;
1333 }
1334 
1335 /**
1336  * Set the Tx queue dynamic timestamp (mask and offset)
1337  *
1338  * @param[in] dev
1339  *   Pointer to the Ethernet device structure.
1340  */
1341 void
1342 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1343 {
1344 	struct mlx5_priv *priv = dev->data->dev_private;
1345 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1346 	struct mlx5_txq_data *data;
1347 	int off, nbit;
1348 	unsigned int i;
1349 	uint64_t mask = 0;
1350 
1351 	nbit = rte_mbuf_dynflag_lookup
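	/*
	 * The non-zero timestamp mask is set only when Tx packet pacing
	 * is active and both the dynamic timestamp field and flag are
	 * registered.
	 */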
1352 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1353 	off = rte_mbuf_dynfield_lookup
1354 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1355 	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1356 		mask = 1ULL << nbit;
1357 	for (i = 0; i != priv->txqs_n; ++i) {
1358 		data = (*priv->txqs)[i];
1359 		if (!data)
1360 			continue;
1361 		data->sh = sh;
1362 		data->ts_mask = mask;
1363 		data->ts_offset = off;
1364 	}
1365 }
1366