xref: /dpdk/drivers/net/mlx5/mlx5_txq.c (revision 8809f78c7dd9f33a44a4f89c58fc91ded34296ed)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_eal_paging.h>
18 
19 #include <mlx5_common.h>
20 #include <mlx5_common_mr.h>
21 #include <mlx5_malloc.h>
22 
23 #include "mlx5_defs.h"
24 #include "mlx5_utils.h"
25 #include "mlx5.h"
26 #include "mlx5_rxtx.h"
27 #include "mlx5_autoconf.h"
28 
29 /**
30  * Allocate TX queue elements.
31  *
32  * @param txq_ctrl
33  *   Pointer to TX queue structure.
34  */
35 void
36 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
37 {
38 	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
39 	unsigned int i;
40 
41 	for (i = 0; (i != elts_n); ++i)
42 		txq_ctrl->txq.elts[i] = NULL;
43 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
44 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
45 	txq_ctrl->txq.elts_head = 0;
46 	txq_ctrl->txq.elts_tail = 0;
47 	txq_ctrl->txq.elts_comp = 0;
48 }
49 
50 /**
51  * Free TX queue elements.
52  *
53  * @param txq_ctrl
54  *   Pointer to TX queue structure.
55  */
56 void
57 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
58 {
59 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
60 	const uint16_t elts_m = elts_n - 1;
61 	uint16_t elts_head = txq_ctrl->txq.elts_head;
62 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
63 	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
64 
65 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
66 		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
67 	txq_ctrl->txq.elts_head = 0;
68 	txq_ctrl->txq.elts_tail = 0;
69 	txq_ctrl->txq.elts_comp = 0;
70 
71 	while (elts_tail != elts_head) {
72 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
73 
74 		MLX5_ASSERT(elt != NULL);
75 		rte_pktmbuf_free_seg(elt);
76 #ifdef RTE_LIBRTE_MLX5_DEBUG
77 		/* Poisoning. */
78 		memset(&(*elts)[elts_tail & elts_m],
79 		       0x77,
80 		       sizeof((*elts)[elts_tail & elts_m]));
81 #endif
82 		++elts_tail;
83 	}
84 }
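/*
 * A worked example of the element ring indexing above (illustrative,
 * the numbers are arbitrary): with elts_n = 8 the ring holds
 * 1 << 8 = 256 entries and elts_m = 0xFF. The head/tail counters run
 * freely over 16 bits and are masked on access, so with
 * elts_head = 300 and elts_tail = 297 the loop frees the mbufs stored
 * in slots 297 & 0xFF = 41, 42 and 43:
 *
 *     while (elts_tail != elts_head) {
 *         rte_pktmbuf_free_seg((*elts)[elts_tail & elts_m]);
 *         ++elts_tail;
 *     }
 */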
85 
86 /**
87  * Returns the per-port supported offloads.
88  *
89  * @param dev
90  *   Pointer to Ethernet device.
91  *
92  * @return
93  *   Supported Tx offloads.
94  */
95 uint64_t
96 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
97 {
98 	struct mlx5_priv *priv = dev->data->dev_private;
99 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
100 			     DEV_TX_OFFLOAD_VLAN_INSERT);
101 	struct mlx5_dev_config *config = &priv->config;
102 
103 	if (config->hw_csum)
104 		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
105 			     DEV_TX_OFFLOAD_UDP_CKSUM |
106 			     DEV_TX_OFFLOAD_TCP_CKSUM);
107 	if (config->tso)
108 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
109 	if (config->tx_pp)
110 		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
111 	if (config->swp) {
112 		if (config->hw_csum)
113 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
114 		if (config->tso)
115 			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
116 				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
117 	}
118 	if (config->tunnel_en) {
119 		if (config->hw_csum)
120 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
121 		if (config->tso)
122 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
123 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
124 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
125 	}
126 	return offloads;
127 }
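/*
 * Example of how an application typically consumes the capabilities
 * reported above (illustrative sketch; the port number 0 and the
 * tx_offloads variable are assumptions, not part of this driver):
 *
 *     struct rte_eth_dev_info dev_info;
 *     uint64_t tx_offloads = 0;
 *
 *     if (rte_eth_dev_info_get(0, &dev_info) == 0 &&
 *         (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO))
 *             tx_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 *
 * The selected offloads are then passed in
 * rte_eth_conf.txmode.offloads at device configuration time.
 */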
128 
129 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
130 static void
131 txq_sync_cq(struct mlx5_txq_data *txq)
132 {
133 	volatile struct mlx5_cqe *cqe;
134 	int ret, i;
135 
136 	i = txq->cqe_s;
137 	do {
138 		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
139 		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
140 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
141 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
142 				/* No new CQEs in completion queue. */
143 				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
144 				break;
145 			}
146 		}
147 		++txq->cq_ci;
148 	} while (--i);
149 	/* Move all CQEs to HW ownership. */
150 	for (i = 0; i < txq->cqe_s; i++) {
151 		cqe = &txq->cqes[i];
152 		cqe->op_own = MLX5_CQE_INVALIDATE;
153 	}
154 	/* Resync CQE and WQE (WQ in reset state). */
155 	rte_io_wmb();
156 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
157 	rte_io_wmb();
158 }
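/*
 * Illustrative note on the CQ indexing used above (numbers are
 * arbitrary): the CQ holds cqe_s entries (a power of two) and
 * cqe_m = cqe_s - 1, while cq_ci is a free-running counter. With
 * cqe_s = 64 and cq_ci = 130 the CQE examined is slot 130 & 63 = 2.
 * Writing MLX5_CQE_INVALIDATE into op_own hands every CQE back to
 * hardware ownership before the doorbell record is updated with the
 * final cq_ci.
 */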
159 
160 /**
161  * Tx queue stop. Device queue goes to the idle state,
162  * all involved mbufs are freed from elts/WQ.
163  *
164  * @param dev
165  *   Pointer to Ethernet device structure.
166  * @param idx
167  *   Tx queue index.
168  *
169  * @return
170  *   0 on success, a negative errno value otherwise and rte_errno is set.
171  */
172 int
173 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
174 {
175 	struct mlx5_priv *priv = dev->data->dev_private;
176 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
177 	struct mlx5_txq_ctrl *txq_ctrl =
178 			container_of(txq, struct mlx5_txq_ctrl, txq);
179 	int ret;
180 
181 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
182 	/* Move QP to RESET state. */
183 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
184 					   (uint8_t)priv->dev_port);
185 	if (ret)
186 		return ret;
187 	/* Handle all send completions. */
188 	txq_sync_cq(txq);
189 	/* Free elts stored in the SQ. */
190 	txq_free_elts(txq_ctrl);
191 	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
192 	txq->wqe_ci = txq->wqe_s;
193 	txq->wqe_pi = 0;
194 	txq->elts_comp = 0;
195 	/* Set the actual queue state. */
196 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
197 	return 0;
198 }
199 
200 /**
201  * Tx queue stop. Device queue goes to the idle state,
202  * all involved mbufs are freed from elts/WQ.
203  *
204  * @param dev
205  *   Pointer to Ethernet device structure.
206  * @param idx
207  *   Tx queue index.
208  *
209  * @return
210  *   0 on success, a negative errno value otherwise and rte_errno is set.
211  */
212 int
213 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
214 {
215 	int ret;
216 
217 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
218 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
219 		rte_errno = EINVAL;
220 		return -EINVAL;
221 	}
222 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
223 		return 0;
224 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
225 		ret = mlx5_mp_os_req_queue_control(dev, idx,
226 						   MLX5_MP_REQ_QUEUE_TX_STOP);
227 	} else {
228 		ret = mlx5_tx_queue_stop_primary(dev, idx);
229 	}
230 	return ret;
231 }
232 
233 /**
234  * Tx queue start. Device queue goes to the ready state,
235  * the queue indexes are reset and it can accept packets again.
236  *
237  * @param dev
238  *   Pointer to Ethernet device structure.
239  * @param idx
240  *   Tx queue index.
241  *
242  * @return
243  *   0 on success, a negative errno value otherwise and rte_errno is set.
244  */
245 int
246 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
247 {
248 	struct mlx5_priv *priv = dev->data->dev_private;
249 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
250 	struct mlx5_txq_ctrl *txq_ctrl =
251 			container_of(txq, struct mlx5_txq_ctrl, txq);
252 	int ret;
253 
254 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
255 	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
256 					   MLX5_TXQ_MOD_RST2RDY,
257 					   (uint8_t)priv->dev_port);
258 	if (ret)
259 		return ret;
260 	txq_ctrl->txq.wqe_ci = 0;
261 	txq_ctrl->txq.wqe_pi = 0;
262 	txq_ctrl->txq.elts_comp = 0;
263 	/* Set the actual queue state. */
264 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
265 	return 0;
266 }
267 
268 /**
269  * Tx queue start. Device queue goes to the ready state,
270  * the queue indexes are reset and it can accept packets again.
271  *
272  * @param dev
273  *   Pointer to Ethernet device structure.
274  * @param idx
275  *   Tx queue index.
276  *
277  * @return
278  *   0 on success, a negative errno value otherwise and rte_errno is set.
279  */
280 int
281 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
282 {
283 	int ret;
284 
285 	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
286 		DRV_LOG(ERR, "Hairpin queue can't be started");
287 		rte_errno = EINVAL;
288 		return -EINVAL;
289 	}
290 	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
291 		return 0;
292 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
293 		ret = mlx5_mp_os_req_queue_control(dev, idx,
294 						   MLX5_MP_REQ_QUEUE_TX_START);
295 	} else {
296 		ret = mlx5_tx_queue_start_primary(dev, idx);
297 	}
298 	return ret;
299 }
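/*
 * Application-side usage sketch for the two callbacks above
 * (illustrative; port 0 and queue 3 are arbitrary values):
 *
 *     rte_eth_dev_tx_queue_stop(0, 3);   // drain the SQ, free its mbufs
 *     ...
 *     rte_eth_dev_tx_queue_start(0, 3);  // move the SQ back to ready
 *
 * Both ethdev calls dispatch to mlx5_tx_queue_stop()/mlx5_tx_queue_start()
 * and work from either the primary or a secondary process (the secondary
 * forwards the request over the multi-process channel).
 */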
300 
301 /**
302  * Tx queue presetup checks.
303  *
304  * @param dev
305  *   Pointer to Ethernet device structure.
306  * @param idx
307  *   Tx queue index.
308  * @param desc
309  *   Number of descriptors to configure in queue.
310  *
311  * @return
312  *   0 on success, a negative errno value otherwise and rte_errno is set.
313  */
314 static int
315 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
316 {
317 	struct mlx5_priv *priv = dev->data->dev_private;
318 
319 	if (*desc <= MLX5_TX_COMP_THRESH) {
320 		DRV_LOG(WARNING,
321 			"port %u number of descriptors requested for Tx queue"
322 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
323 			" instead of %u", dev->data->port_id, idx,
324 			MLX5_TX_COMP_THRESH + 1, *desc);
325 		*desc = MLX5_TX_COMP_THRESH + 1;
326 	}
327 	if (!rte_is_power_of_2(*desc)) {
328 		*desc = 1 << log2above(*desc);
329 		DRV_LOG(WARNING,
330 			"port %u increased number of descriptors in Tx queue"
331 			" %u to the next power of two (%d)",
332 			dev->data->port_id, idx, *desc);
333 	}
334 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
335 		dev->data->port_id, idx, *desc);
336 	if (idx >= priv->txqs_n) {
337 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
338 			dev->data->port_id, idx, priv->txqs_n);
339 		rte_errno = EOVERFLOW;
340 		return -rte_errno;
341 	}
342 	if (!mlx5_txq_releasable(dev, idx)) {
343 		rte_errno = EBUSY;
344 		DRV_LOG(ERR, "port %u unable to release queue index %u",
345 			dev->data->port_id, idx);
346 		return -rte_errno;
347 	}
348 	mlx5_txq_release(dev, idx);
349 	return 0;
350 }
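/*
 * Worked example of the descriptor adjustment above, assuming
 * MLX5_TX_COMP_THRESH is 32 (see mlx5_defs.h):
 *
 *     desc = 100  ->  above the threshold, not a power of two,
 *                     rounded up to 1 << log2above(100) = 128
 *     desc = 20   ->  bumped to 33, then rounded up to 64
 *
 * The caller therefore always gets a power-of-two ring size large
 * enough to host the completion threshold.
 */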
351 
352 /**
353  * DPDK callback to configure a TX queue.
354  *
355  * @param dev
356  *   Pointer to Ethernet device structure.
357  * @param idx
358  *   TX queue index.
359  * @param desc
360  *   Number of descriptors to configure in queue.
361  * @param socket
362  *   NUMA socket on which memory must be allocated.
363  * @param[in] conf
364  *   Thresholds parameters.
365  *
366  * @return
367  *   0 on success, a negative errno value otherwise and rte_errno is set.
368  */
369 int
370 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
371 		    unsigned int socket, const struct rte_eth_txconf *conf)
372 {
373 	struct mlx5_priv *priv = dev->data->dev_private;
374 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
375 	struct mlx5_txq_ctrl *txq_ctrl =
376 		container_of(txq, struct mlx5_txq_ctrl, txq);
377 	int res;
378 
379 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
380 	if (res)
381 		return res;
382 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
383 	if (!txq_ctrl) {
384 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
385 			dev->data->port_id, idx);
386 		return -rte_errno;
387 	}
388 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
389 		dev->data->port_id, idx);
390 	(*priv->txqs)[idx] = &txq_ctrl->txq;
391 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
392 	return 0;
393 }
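/*
 * Typical application-side call reaching this callback (illustrative
 * sketch; port 0, queue 0 and 1024 descriptors are arbitrary values):
 *
 *     struct rte_eth_dev_info dev_info;
 *     struct rte_eth_txconf txconf;
 *
 *     rte_eth_dev_info_get(0, &dev_info);
 *     txconf = dev_info.default_txconf;
 *     txconf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
 *     if (rte_eth_tx_queue_setup(0, 0, 1024,
 *                                rte_eth_dev_socket_id(0), &txconf) < 0)
 *             rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */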
394 
395 /**
396  * DPDK callback to configure a TX hairpin queue.
397  *
398  * @param dev
399  *   Pointer to Ethernet device structure.
400  * @param idx
401  *   TX queue index.
402  * @param desc
403  *   Number of descriptors to configure in queue.
404  * @param[in] hairpin_conf
405  *   The hairpin binding configuration.
406  *
407  * @return
408  *   0 on success, a negative errno value otherwise and rte_errno is set.
409  */
410 int
411 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
412 			    uint16_t desc,
413 			    const struct rte_eth_hairpin_conf *hairpin_conf)
414 {
415 	struct mlx5_priv *priv = dev->data->dev_private;
416 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
417 	struct mlx5_txq_ctrl *txq_ctrl =
418 		container_of(txq, struct mlx5_txq_ctrl, txq);
419 	int res;
420 
421 	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
422 	if (res)
423 		return res;
424 	if (hairpin_conf->peer_count != 1 ||
425 	    hairpin_conf->peers[0].port != dev->data->port_id ||
426 	    hairpin_conf->peers[0].queue >= priv->rxqs_n) {
427 		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
428 			" invalid hairpin configuration", dev->data->port_id,
429 			idx);
430 		rte_errno = EINVAL;
431 		return -rte_errno;
432 	}
433 	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc,	hairpin_conf);
434 	if (!txq_ctrl) {
435 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
436 			dev->data->port_id, idx);
437 		return -rte_errno;
438 	}
439 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
440 		dev->data->port_id, idx);
441 	(*priv->txqs)[idx] = &txq_ctrl->txq;
442 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
443 	return 0;
444 }
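/*
 * Hairpin usage sketch matching the checks above (illustrative; port 0,
 * Tx queue 1 bound to Rx queue 1 of the same port are arbitrary values):
 *
 *     struct rte_eth_hairpin_conf hairpin_conf = {
 *             .peer_count = 1,
 *     };
 *
 *     hairpin_conf.peers[0].port = 0;   // must be the same port here
 *     hairpin_conf.peers[0].queue = 1;  // must be < number of Rx queues
 *     rte_eth_tx_hairpin_queue_setup(0, 1, 256, &hairpin_conf);
 */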
445 
446 /**
447  * DPDK callback to release a TX queue.
448  *
449  * @param dpdk_txq
450  *   Generic TX queue pointer.
451  */
452 void
453 mlx5_tx_queue_release(void *dpdk_txq)
454 {
455 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
456 	struct mlx5_txq_ctrl *txq_ctrl;
457 	struct mlx5_priv *priv;
458 	unsigned int i;
459 
460 	if (txq == NULL)
461 		return;
462 	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
463 	priv = txq_ctrl->priv;
464 	for (i = 0; (i != priv->txqs_n); ++i)
465 		if ((*priv->txqs)[i] == txq) {
466 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
467 				PORT_ID(priv), txq->idx);
468 			mlx5_txq_release(ETH_DEV(priv), i);
469 			break;
470 		}
471 }
472 
473 /**
474  * Configure the doorbell register non-cached attribute.
475  *
476  * @param txq_ctrl
477  *   Pointer to Tx queue control structure.
478  * @param page_size
479  *   System page size.
480  */
481 static void
482 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
483 {
484 	struct mlx5_priv *priv = txq_ctrl->priv;
485 	off_t cmd;
486 
487 	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
488 	txq_ctrl->txq.db_nc = 0;
489 	/* Check the doorbell register mapping type. */
490 	cmd = txq_ctrl->uar_mmap_offset / page_size;
491 	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
492 	cmd &= MLX5_UAR_MMAP_CMD_MASK;
493 	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
494 		txq_ctrl->txq.db_nc = 1;
495 }
496 
497 /**
498  * Initialize Tx UAR registers for primary process.
499  *
500  * @param txq_ctrl
501  *   Pointer to Tx queue control structure.
502  */
503 void
504 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
505 {
506 	struct mlx5_priv *priv = txq_ctrl->priv;
507 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
508 #ifndef RTE_ARCH_64
509 	unsigned int lock_idx;
510 #endif
511 	const size_t page_size = rte_mem_page_size();
512 	if (page_size == (size_t)-1) {
513 		DRV_LOG(ERR, "Failed to get mem page size");
514 		rte_errno = ENOMEM;
515 	}
516 
517 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
518 		return;
519 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
520 	MLX5_ASSERT(ppriv);
521 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
522 	txq_uar_ncattr_init(txq_ctrl, page_size);
523 #ifndef RTE_ARCH_64
524 	/* Assign a UAR lock according to the UAR page number. */
525 	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
526 		   MLX5_UAR_PAGE_NUM_MASK;
527 	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
528 #endif
529 }
530 
531 /**
532  * Remap UAR register of a Tx queue for secondary process.
533  *
534  * The remapped address is stored in the table in the process private
535  * structure of the device, indexed by queue index.
536  *
537  * @param txq_ctrl
538  *   Pointer to Tx queue control structure.
539  * @param fd
540  *   Verbs file descriptor to map UAR pages.
541  *
542  * @return
543  *   0 on success, a negative errno value otherwise and rte_errno is set.
544  */
545 static int
546 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
547 {
548 	struct mlx5_priv *priv = txq_ctrl->priv;
549 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
550 	struct mlx5_txq_data *txq = &txq_ctrl->txq;
551 	void *addr;
552 	uintptr_t uar_va;
553 	uintptr_t offset;
554 	const size_t page_size = rte_mem_page_size();
555 	if (page_size == (size_t)-1) {
556 		DRV_LOG(ERR, "Failed to get mem page size");
557 		rte_errno = ENOMEM;
558 		return -rte_errno;
559 	}
560 
561 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
562 		return 0;
563 	MLX5_ASSERT(ppriv);
564 	/*
565 	 * As in rdma-core, UARs are mapped at OS page size granularity.
566 	 * See the libmlx5 function mlx5_init_context().
567 	 */
568 	uar_va = (uintptr_t)txq_ctrl->bf_reg;
569 	offset = uar_va & (page_size - 1); /* Offset in page. */
570 	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
571 			    fd, txq_ctrl->uar_mmap_offset);
572 	if (!addr) {
573 		DRV_LOG(ERR,
574 			"port %u mmap failed for BF reg of txq %u",
575 			txq->port_id, txq->idx);
576 		rte_errno = ENXIO;
577 		return -rte_errno;
578 	}
579 	addr = RTE_PTR_ADD(addr, offset);
580 	ppriv->uar_table[txq->idx] = addr;
581 	txq_uar_ncattr_init(txq_ctrl, page_size);
582 	return 0;
583 }
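/*
 * Illustrative arithmetic for the remapping above (addresses are made
 * up): with page_size = 4096 and bf_reg = 0x7f24b0321940, the in-page
 * offset is 0x7f24b0321940 & 0xFFF = 0x940. The UAR page is mapped at
 * a new address in the secondary process and the doorbell register is
 * reached at that mapping plus 0x940.
 */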
584 
585 /**
586  * Unmap UAR register of a Tx queue for secondary process.
587  *
588  * @param txq_ctrl
589  *   Pointer to Tx queue control structure.
590  */
591 static void
592 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
593 {
594 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
595 	void *addr;
596 	const size_t page_size = rte_mem_page_size();
597 	if (page_size == (size_t)-1) {
598 		DRV_LOG(ERR, "Failed to get mem page size");
599 		rte_errno = ENOMEM;
600 	}
601 
602 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
603 		return;
604 	addr = ppriv->uar_table[txq_ctrl->txq.idx];
605 	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
606 }
607 
608 /**
609  * Deinitialize Tx UAR registers for secondary process.
610  *
611  * @param dev
612  *   Pointer to Ethernet device.
613  */
614 void
615 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
616 {
617 	struct mlx5_priv *priv = dev->data->dev_private;
618 	struct mlx5_txq_data *txq;
619 	struct mlx5_txq_ctrl *txq_ctrl;
620 	unsigned int i;
621 
622 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
623 	for (i = 0; i != priv->txqs_n; ++i) {
624 		if (!(*priv->txqs)[i])
625 			continue;
626 		txq = (*priv->txqs)[i];
627 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
628 		txq_uar_uninit_secondary(txq_ctrl);
629 	}
630 }
631 
632 /**
633  * Initialize Tx UAR registers for secondary process.
634  *
635  * @param dev
636  *   Pointer to Ethernet device.
637  * @param fd
638  *   Verbs file descriptor to map UAR pages.
639  *
640  * @return
641  *   0 on success, a negative errno value otherwise and rte_errno is set.
642  */
643 int
644 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
645 {
646 	struct mlx5_priv *priv = dev->data->dev_private;
647 	struct mlx5_txq_data *txq;
648 	struct mlx5_txq_ctrl *txq_ctrl;
649 	unsigned int i;
650 	int ret;
651 
652 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
653 	for (i = 0; i != priv->txqs_n; ++i) {
654 		if (!(*priv->txqs)[i])
655 			continue;
656 		txq = (*priv->txqs)[i];
657 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
658 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
659 			continue;
660 		MLX5_ASSERT(txq->idx == (uint16_t)i);
661 		ret = txq_uar_init_secondary(txq_ctrl, fd);
662 		if (ret)
663 			goto error;
664 	}
665 	return 0;
666 error:
667 	/* Rollback. */
668 	do {
669 		if (!(*priv->txqs)[i])
670 			continue;
671 		txq = (*priv->txqs)[i];
672 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
673 		txq_uar_uninit_secondary(txq_ctrl);
674 	} while (i--);
675 	return -rte_errno;
676 }
677 
678 /**
679  * Verify the Verbs Tx queue list is empty.
680  *
681  * @param dev
682  *   Pointer to Ethernet device.
683  *
684  * @return
685  *   The number of objects not released.
686  */
687 int
688 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
689 {
690 	struct mlx5_priv *priv = dev->data->dev_private;
691 	int ret = 0;
692 	struct mlx5_txq_obj *txq_obj;
693 
694 	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
695 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
696 			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
697 		++ret;
698 	}
699 	return ret;
700 }
701 
702 /**
703  * Calculate the total number of WQEBB for Tx queue.
704  *
705  * Simplified version of calc_sq_size() in rdma-core.
706  *
707  * @param txq_ctrl
708  *   Pointer to Tx queue control structure.
709  *
710  * @return
711  *   The number of WQEBB.
712  */
713 static int
714 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
715 {
716 	unsigned int wqe_size;
717 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
718 
719 	wqe_size = MLX5_WQE_CSEG_SIZE +
720 		   MLX5_WQE_ESEG_SIZE +
721 		   MLX5_WSEG_SIZE -
722 		   MLX5_ESEG_MIN_INLINE_SIZE +
723 		   txq_ctrl->max_inline_data;
724 	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
725 }
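/*
 * Worked example for the WQEBB estimation above, assuming the usual
 * 16-byte WQE segments, an 18-byte minimal Ethernet Segment inline
 * part and 64-byte WQEBBs (see the MLX5_WQE_* definitions in mlx5_prm.h):
 *
 *     max_inline_data = 290, desc = 512
 *     wqe_size = 16 + 16 + 16 - 18 + 290 = 320 bytes
 *     rte_align32pow2(320 * 512) / 64 = 262144 / 64 = 4096 WQEBBs
 *
 * This value is later checked against device_attr.max_qp_wr.
 */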
726 
727 /**
728  * Calculate the maximal inline data size for Tx queue.
729  *
730  * @param txq_ctrl
731  *   Pointer to Tx queue control structure.
732  *
733  * @return
734  *   The maximal inline data size.
735  */
736 static unsigned int
737 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
738 {
739 	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
740 	struct mlx5_priv *priv = txq_ctrl->priv;
741 	unsigned int wqe_size;
742 
743 	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
744 	if (!wqe_size)
745 		return 0;
746 	/*
747 	 * This calculation is derived from the source of
748 	 * mlx5_calc_send_wqe() in the rdma-core library.
749 	 */
750 	wqe_size = wqe_size * MLX5_WQE_SIZE -
751 		   MLX5_WQE_CSEG_SIZE -
752 		   MLX5_WQE_ESEG_SIZE -
753 		   MLX5_WSEG_SIZE -
754 		   MLX5_WSEG_SIZE +
755 		   MLX5_DSEG_MIN_INLINE_SIZE;
756 	return wqe_size;
757 }
758 
759 /**
760  * Set Tx queue parameters from device configuration.
761  *
762  * @param txq_ctrl
763  *   Pointer to Tx queue control structure.
764  */
765 static void
766 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
767 {
768 	struct mlx5_priv *priv = txq_ctrl->priv;
769 	struct mlx5_dev_config *config = &priv->config;
770 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
771 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
772 	unsigned int inlen_mode; /* Minimal required Inline data. */
773 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
774 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
775 	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
776 					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
777 					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
778 					    DEV_TX_OFFLOAD_IP_TNL_TSO |
779 					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
780 	bool vlan_inline;
781 	unsigned int temp;
782 
783 	if (config->txqs_inline == MLX5_ARG_UNSET)
784 		txqs_inline =
785 #if defined(RTE_ARCH_ARM64)
786 		(priv->pci_dev->id.device_id ==
787 			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
788 			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
789 #endif
790 			MLX5_INLINE_MAX_TXQS;
791 	else
792 		txqs_inline = (unsigned int)config->txqs_inline;
793 	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
794 		     MLX5_SEND_DEF_INLINE_LEN :
795 		     (unsigned int)config->txq_inline_max;
796 	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
797 		     MLX5_EMPW_DEF_INLINE_LEN :
798 		     (unsigned int)config->txq_inline_mpw;
799 	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
800 		     0 : (unsigned int)config->txq_inline_min;
801 	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
802 		inlen_empw = 0;
803 	/*
804 	 * If a minimal amount of data to inline is requested,
805 	 * we MUST enable inlining. This is the case for ConnectX-4,
806 	 * which usually requires the L2 header inlined for correct
807 	 * operation, and for ConnectX-4 Lx, which requires the L2-L4
808 	 * headers inlined to support E-Switch Flows.
809 	 */
810 	if (inlen_mode) {
811 		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
812 			/*
813 			 * Optimize minimal inlining for single
814 			 * segment packets to fill one WQEBB
815 			 * without gaps.
816 			 */
817 			temp = MLX5_ESEG_MIN_INLINE_SIZE;
818 		} else {
819 			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
820 			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
821 			       MLX5_ESEG_MIN_INLINE_SIZE;
822 			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
823 		}
824 		if (temp != inlen_mode) {
825 			DRV_LOG(INFO,
826 				"port %u minimal required inline setting"
827 				" aligned from %u to %u",
828 				PORT_ID(priv), inlen_mode, temp);
829 			inlen_mode = temp;
830 		}
831 	}
832 	/*
833 	 * If the port is configured to support VLAN insertion and the
834 	 * device does not support this feature in HW (NICs before ConnectX-5,
835 	 * or the wqe_vlan_insert flag is not set), we must enable data
836 	 * inlining on all queues because the feature is implemented by the
837 	 * single tx_burst routine in software.
838 	 */
839 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
840 	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
841 		      !config->hw_vlan_insert;
842 	/*
843 	 * If there are few Tx queues, saving CPU cycles is prioritized
844 	 * and data inlining is disabled entirely.
845 	 */
846 	if (inlen_send && priv->txqs_n >= txqs_inline) {
847 		/*
848 		 * The data sent with the ordinary MLX5_OPCODE_SEND
849 		 * may be inlined in the Ethernet Segment, align the
850 		 * length accordingly to fit entire WQEBBs.
851 		 */
852 		temp = RTE_MAX(inlen_send,
853 			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
854 		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
855 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
856 		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
857 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
858 				     MLX5_ESEG_MIN_INLINE_SIZE -
859 				     MLX5_WQE_CSEG_SIZE -
860 				     MLX5_WQE_ESEG_SIZE -
861 				     MLX5_WQE_DSEG_SIZE * 2);
862 		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
863 		temp = RTE_MAX(temp, inlen_mode);
864 		if (temp != inlen_send) {
865 			DRV_LOG(INFO,
866 				"port %u ordinary send inline setting"
867 				" aligned from %u to %u",
868 				PORT_ID(priv), inlen_send, temp);
869 			inlen_send = temp;
870 		}
871 		/*
872 		 * Not aligned to cache lines, but to WQEs.
873 		 * The first bytes of data (the initial
874 		 * alignment) are copied explicitly at the
875 		 * beginning of the inlining buffer in the
876 		 * Ethernet Segment.
877 		 */
878 		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
879 		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
880 					  MLX5_ESEG_MIN_INLINE_SIZE -
881 					  MLX5_WQE_CSEG_SIZE -
882 					  MLX5_WQE_ESEG_SIZE -
883 					  MLX5_WQE_DSEG_SIZE * 2);
884 	} else if (inlen_mode) {
885 		/*
886 		 * If minimal inlining is requested we must
887 		 * enable inlining in general, regardless of the
888 		 * number of configured queues. Ignore the
889 		 * txq_inline_max devarg, this is not
890 		 * full-featured inlining.
891 		 */
892 		inlen_send = inlen_mode;
893 		inlen_empw = 0;
894 	} else if (vlan_inline) {
895 		/*
896 		 * Hardware does not report the VLAN insertion
897 		 * offload, we must enable data inlining to
898 		 * implement the feature in software.
899 		 */
900 		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
901 		inlen_empw = 0;
902 	} else {
903 		inlen_send = 0;
904 		inlen_empw = 0;
905 	}
906 	txq_ctrl->txq.inlen_send = inlen_send;
907 	txq_ctrl->txq.inlen_mode = inlen_mode;
908 	txq_ctrl->txq.inlen_empw = 0;
909 	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
910 		/*
911 		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
912 		 * may be inlined in the Data Segment, align the
913 		 * length accordingly to fit entire WQEBBs.
914 		 */
915 		temp = RTE_MAX(inlen_empw,
916 			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
917 		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
918 		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
919 		temp += MLX5_DSEG_MIN_INLINE_SIZE;
920 		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
921 				     MLX5_DSEG_MIN_INLINE_SIZE -
922 				     MLX5_WQE_CSEG_SIZE -
923 				     MLX5_WQE_ESEG_SIZE -
924 				     MLX5_WQE_DSEG_SIZE);
925 		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
926 		if (temp != inlen_empw) {
927 			DRV_LOG(INFO,
928 				"port %u enhanced empw inline setting"
929 				" aligned from %u to %u",
930 				PORT_ID(priv), inlen_empw, temp);
931 			inlen_empw = temp;
932 		}
933 		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
934 		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
935 					  MLX5_DSEG_MIN_INLINE_SIZE -
936 					  MLX5_WQE_CSEG_SIZE -
937 					  MLX5_WQE_ESEG_SIZE -
938 					  MLX5_WQE_DSEG_SIZE);
939 		txq_ctrl->txq.inlen_empw = inlen_empw;
940 	}
941 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
942 	if (tso) {
943 		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
944 		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
945 						    MLX5_MAX_TSO_HEADER);
946 		txq_ctrl->txq.tso_en = 1;
947 	}
948 	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
949 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
950 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
951 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
952 				txq_ctrl->txq.offloads) && config->swp;
953 }
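/*
 * The parameters above are driven by the mlx5 devargs documented in
 * doc/guides/nics/mlx5.rst. An illustrative invocation (PCI address
 * and values are arbitrary):
 *
 *     dpdk-testpmd -a 0000:03:00.0,txq_inline_max=256,txqs_min_inline=8 ...
 *
 * would request up to 256 bytes of inline data for ordinary SENDs and
 * enable inlining only when at least 8 Tx queues are configured.
 */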
954 
955 /**
956  * Adjust Tx queue data inline parameters for large queue sizes.
957  * The data inline feature requires multiple WQEs to fit the packets,
958  * and if a large number of Tx descriptors is requested by the application
959  * the total WQE amount may exceed the hardware capabilities. If the
960  * default inline settings are used we can try to adjust them to
961  * meet the hardware requirements without exceeding the queue size.
962  *
963  * @param txq_ctrl
964  *   Pointer to Tx queue control structure.
965  *
966  * @return
967  *   Zero on success, otherwise the parameters can not be adjusted.
968  */
969 static int
970 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
971 {
972 	struct mlx5_priv *priv = txq_ctrl->priv;
973 	struct mlx5_dev_config *config = &priv->config;
974 	unsigned int max_inline;
975 
976 	max_inline = txq_calc_inline_max(txq_ctrl);
977 	if (!txq_ctrl->txq.inlen_send) {
978 		/*
979 		 * Inline data feature is not engaged at all.
980 		 * There is nothing to adjust.
981 		 */
982 		return 0;
983 	}
984 	if (txq_ctrl->max_inline_data <= max_inline) {
985 		/*
986 		 * The requested inline data length does not
987 		 * exceed queue capabilities.
988 		 */
989 		return 0;
990 	}
991 	if (txq_ctrl->txq.inlen_mode > max_inline) {
992 		DRV_LOG(ERR,
993 			"minimal data inline requirements (%u) are not"
994 			" satisfied (%u) on port %u, try the smaller"
995 			" Tx queue size (%d)",
996 			txq_ctrl->txq.inlen_mode, max_inline,
997 			priv->dev_data->port_id,
998 			priv->sh->device_attr.max_qp_wr);
999 		goto error;
1000 	}
1001 	if (txq_ctrl->txq.inlen_send > max_inline &&
1002 	    config->txq_inline_max != MLX5_ARG_UNSET &&
1003 	    config->txq_inline_max > (int)max_inline) {
1004 		DRV_LOG(ERR,
1005 			"txq_inline_max requirements (%u) are not"
1006 			" satisfied (%u) on port %u, try the smaller"
1007 			" Tx queue size (%d)",
1008 			txq_ctrl->txq.inlen_send, max_inline,
1009 			priv->dev_data->port_id,
1010 			priv->sh->device_attr.max_qp_wr);
1011 		goto error;
1012 	}
1013 	if (txq_ctrl->txq.inlen_empw > max_inline &&
1014 	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
1015 	    config->txq_inline_mpw > (int)max_inline) {
1016 		DRV_LOG(ERR,
1017 			"txq_inline_mpw requirements (%u) are not"
1018 			" satisfied (%u) on port %u, try the smaller"
1019 			" Tx queue size (%d)",
1020 			txq_ctrl->txq.inlen_empw, max_inline,
1021 			priv->dev_data->port_id,
1022 			priv->sh->device_attr.max_qp_wr);
1023 		goto error;
1024 	}
1025 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1026 		DRV_LOG(ERR,
1027 			"tso header inline requirements (%u) are not"
1028 			" satisfied (%u) on port %u, try the smaller"
1029 			" Tx queue size (%d)",
1030 			MLX5_MAX_TSO_HEADER, max_inline,
1031 			priv->dev_data->port_id,
1032 			priv->sh->device_attr.max_qp_wr);
1033 		goto error;
1034 	}
1035 	if (txq_ctrl->txq.inlen_send > max_inline) {
1036 		DRV_LOG(WARNING,
1037 			"adjust txq_inline_max (%u->%u)"
1038 			" due to large Tx queue on port %u",
1039 			txq_ctrl->txq.inlen_send, max_inline,
1040 			priv->dev_data->port_id);
1041 		txq_ctrl->txq.inlen_send = max_inline;
1042 	}
1043 	if (txq_ctrl->txq.inlen_empw > max_inline) {
1044 		DRV_LOG(WARNING,
1045 			"adjust txq_inline_mpw (%u->%u)"
1046 			"due to large Tx queue on port %u",
1047 			txq_ctrl->txq.inlen_empw, max_inline,
1048 			priv->dev_data->port_id);
1049 		txq_ctrl->txq.inlen_empw = max_inline;
1050 	}
1051 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1052 					    txq_ctrl->txq.inlen_empw);
1053 	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1054 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1055 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1056 	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1057 		    !txq_ctrl->txq.inlen_empw);
1058 	return 0;
1059 error:
1060 	rte_errno = ENOMEM;
1061 	return -ENOMEM;
1062 }
1063 
1064 /**
1065  * Create a DPDK Tx queue.
1066  *
1067  * @param dev
1068  *   Pointer to Ethernet device.
1069  * @param idx
1070  *   TX queue index.
1071  * @param desc
1072  *   Number of descriptors to configure in queue.
1073  * @param socket
1074  *   NUMA socket on which memory must be allocated.
1075  * @param[in] conf
1076  *  Thresholds parameters.
1077  *
1078  * @return
1079  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1080  */
1081 struct mlx5_txq_ctrl *
1082 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1083 	     unsigned int socket, const struct rte_eth_txconf *conf)
1084 {
1085 	struct mlx5_priv *priv = dev->data->dev_private;
1086 	struct mlx5_txq_ctrl *tmpl;
1087 
1088 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1089 			   desc * sizeof(struct rte_mbuf *), 0, socket);
1090 	if (!tmpl) {
1091 		rte_errno = ENOMEM;
1092 		return NULL;
1093 	}
1094 	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1095 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1096 		/* rte_errno is already set. */
1097 		goto error;
1098 	}
1099 	/* Save pointer of global generation number to check memory event. */
1100 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1101 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1102 	tmpl->txq.offloads = conf->offloads |
1103 			     dev->data->dev_conf.txmode.offloads;
1104 	tmpl->priv = priv;
1105 	tmpl->socket = socket;
1106 	tmpl->txq.elts_n = log2above(desc);
1107 	tmpl->txq.elts_s = desc;
1108 	tmpl->txq.elts_m = desc - 1;
1109 	tmpl->txq.port_id = dev->data->port_id;
1110 	tmpl->txq.idx = idx;
1111 	txq_set_params(tmpl);
1112 	if (txq_adjust_params(tmpl))
1113 		goto error;
1114 	if (txq_calc_wqebb_cnt(tmpl) >
1115 	    priv->sh->device_attr.max_qp_wr) {
1116 		DRV_LOG(ERR,
1117 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1118 			" try smaller queue size",
1119 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1120 			priv->sh->device_attr.max_qp_wr);
1121 		rte_errno = ENOMEM;
1122 		goto error;
1123 	}
1124 	rte_atomic32_inc(&tmpl->refcnt);
1125 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1126 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1127 	return tmpl;
1128 error:
1129 	mlx5_free(tmpl);
1130 	return NULL;
1131 }
1132 
1133 /**
1134  * Create a DPDK Tx hairpin queue.
1135  *
1136  * @param dev
1137  *   Pointer to Ethernet device.
1138  * @param idx
1139  *   TX queue index.
1140  * @param desc
1141  *   Number of descriptors to configure in queue.
1142  * @param hairpin_conf
1143  *  The hairpin configuration.
1144  *
1145  * @return
1146  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1147  */
1148 struct mlx5_txq_ctrl *
1149 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1150 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1151 {
1152 	struct mlx5_priv *priv = dev->data->dev_private;
1153 	struct mlx5_txq_ctrl *tmpl;
1154 
1155 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1156 			   SOCKET_ID_ANY);
1157 	if (!tmpl) {
1158 		rte_errno = ENOMEM;
1159 		return NULL;
1160 	}
1161 	tmpl->priv = priv;
1162 	tmpl->socket = SOCKET_ID_ANY;
1163 	tmpl->txq.elts_n = log2above(desc);
1164 	tmpl->txq.port_id = dev->data->port_id;
1165 	tmpl->txq.idx = idx;
1166 	tmpl->hairpin_conf = *hairpin_conf;
1167 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1168 	rte_atomic32_inc(&tmpl->refcnt);
1169 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1170 	return tmpl;
1171 }
1172 
1173 /**
1174  * Get a Tx queue.
1175  *
1176  * @param dev
1177  *   Pointer to Ethernet device.
1178  * @param idx
1179  *   TX queue index.
1180  *
1181  * @return
1182  *   A pointer to the queue if it exists.
1183  */
1184 struct mlx5_txq_ctrl *
1185 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1186 {
1187 	struct mlx5_priv *priv = dev->data->dev_private;
1188 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1189 	struct mlx5_txq_ctrl *ctrl = NULL;
1190 
1191 	if (txq_data) {
1192 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1193 		rte_atomic32_inc(&ctrl->refcnt);
1194 	}
1195 	return ctrl;
1196 }
1197 
1198 /**
1199  * Release a Tx queue.
1200  *
1201  * @param dev
1202  *   Pointer to Ethernet device.
1203  * @param idx
1204  *   TX queue index.
1205  *
1206  * @return
1207  *   1 while a reference on it exists, 0 when freed.
1208  */
1209 int
1210 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1211 {
1212 	struct mlx5_priv *priv = dev->data->dev_private;
1213 	struct mlx5_txq_ctrl *txq_ctrl;
1214 
1215 	if (!(*priv->txqs)[idx])
1216 		return 0;
1217 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1218 	if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
1219 		return 1;
1220 	if (txq_ctrl->obj) {
1221 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1222 		LIST_REMOVE(txq_ctrl->obj, next);
1223 		mlx5_free(txq_ctrl->obj);
1224 		txq_ctrl->obj = NULL;
1225 	}
1226 	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1227 		if (txq_ctrl->txq.fcqs) {
1228 			mlx5_free(txq_ctrl->txq.fcqs);
1229 			txq_ctrl->txq.fcqs = NULL;
1230 		}
1231 		txq_free_elts(txq_ctrl);
1232 		mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1233 	}
1234 	LIST_REMOVE(txq_ctrl, next);
1235 	mlx5_free(txq_ctrl);
1236 	(*priv->txqs)[idx] = NULL;
1237 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1238 	return 0;
1239 }
1240 
1241 /**
1242  * Verify if the queue can be released.
1243  *
1244  * @param dev
1245  *   Pointer to Ethernet device.
1246  * @param idx
1247  *   TX queue index.
1248  *
1249  * @return
1250  *   1 if the queue can be released.
1251  */
1252 int
1253 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1254 {
1255 	struct mlx5_priv *priv = dev->data->dev_private;
1256 	struct mlx5_txq_ctrl *txq;
1257 
1258 	if (!(*priv->txqs)[idx])
1259 		return -1;
1260 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1261 	return (rte_atomic32_read(&txq->refcnt) == 1);
1262 }
1263 
1264 /**
1265  * Verify the Tx queue list is empty.
1266  *
1267  * @param dev
1268  *   Pointer to Ethernet device.
1269  *
1270  * @return
1271  *   The number of objects not released.
1272  */
1273 int
1274 mlx5_txq_verify(struct rte_eth_dev *dev)
1275 {
1276 	struct mlx5_priv *priv = dev->data->dev_private;
1277 	struct mlx5_txq_ctrl *txq_ctrl;
1278 	int ret = 0;
1279 
1280 	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1281 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1282 			dev->data->port_id, txq_ctrl->txq.idx);
1283 		++ret;
1284 	}
1285 	return ret;
1286 }
1287 
1288 /**
1289  * Set the Tx queue dynamic timestamp (mask and offset).
1290  *
1291  * @param[in] dev
1292  *   Pointer to the Ethernet device structure.
1293  */
1294 void
1295 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1296 {
1297 	struct mlx5_priv *priv = dev->data->dev_private;
1298 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1299 	struct mlx5_txq_data *data;
1300 	int off, nbit;
1301 	unsigned int i;
1302 	uint64_t mask = 0;
1303 
1304 	nbit = rte_mbuf_dynflag_lookup
1305 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1306 	off = rte_mbuf_dynfield_lookup
1307 				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1308 	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1309 		mask = 1ULL << nbit;
1310 	for (i = 0; i != priv->txqs_n; ++i) {
1311 		data = (*priv->txqs)[i];
1312 		if (!data)
1313 			continue;
1314 		data->sh = sh;
1315 		data->ts_mask = mask;
1316 		data->ts_offset = off;
1317 	}
1318 }
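/*
 * Application-side sketch of using the dynamic Tx timestamp set up
 * above (illustrative; requires the tx_pp devarg so that
 * sh->txpp.refcnt is non-zero; the mbuf and "when" variables are
 * assumptions, not part of this driver):
 *
 *     int ts_off = rte_mbuf_dynfield_lookup
 *                     (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
 *     int ts_flag = rte_mbuf_dynflag_lookup
 *                     (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 *
 *     if (ts_off >= 0 && ts_flag >= 0) {
 *             *RTE_MBUF_DYNFIELD(mbuf, ts_off, uint64_t *) = when;
 *             mbuf->ol_flags |= 1ULL << ts_flag;
 *     }
 *
 * Packets carrying the flag are then scheduled by the PMD to be sent
 * at the requested time.
 */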
1319