xref: /dpdk/drivers/net/mlx5/mlx5_trigger.c (revision 23233fd63a15e5cde5bae4f26f78f4d679033a7b)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
2e60fbd5bSAdrien Mazarguil  * Copyright 2015 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2015 Mellanox Technologies, Ltd
4e60fbd5bSAdrien Mazarguil  */
58fd92a66SOlivier Matz 
63f2fe392SNélio Laranjeiro #include <unistd.h>
7e60fbd5bSAdrien Mazarguil 
8e60fbd5bSAdrien Mazarguil #include <rte_ether.h>
9df96fd0dSBruce Richardson #include <ethdev_driver.h>
10198a3c33SNelio Laranjeiro #include <rte_interrupts.h>
11198a3c33SNelio Laranjeiro #include <rte_alarm.h>
1220698c9fSOphir Munk #include <rte_cycles.h>
13e60fbd5bSAdrien Mazarguil 
141260a87bSMichael Baum #include <mlx5_malloc.h>
151260a87bSMichael Baum 
16e60fbd5bSAdrien Mazarguil #include "mlx5.h"
17b8dc6b0eSVu Pham #include "mlx5_mr.h"
18151cbe3aSMichael Baum #include "mlx5_rx.h"
19377b69fbSMichael Baum #include "mlx5_tx.h"
20e60fbd5bSAdrien Mazarguil #include "mlx5_utils.h"
21efa79e68SOri Kam #include "rte_pmd_mlx5.h"
22e60fbd5bSAdrien Mazarguil 
23fb732b0aSNélio Laranjeiro /**
24fb732b0aSNélio Laranjeiro  * Stop traffic on Tx queues.
25fb732b0aSNélio Laranjeiro  *
26fb732b0aSNélio Laranjeiro  * @param dev
27fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
28fb732b0aSNélio Laranjeiro  */
296e78005aSNélio Laranjeiro static void
30af4f09f2SNélio Laranjeiro mlx5_txq_stop(struct rte_eth_dev *dev)
316e78005aSNélio Laranjeiro {
32dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
336e78005aSNélio Laranjeiro 	unsigned int i;
346e78005aSNélio Laranjeiro 
356e78005aSNélio Laranjeiro 	for (i = 0; i != priv->txqs_n; ++i)
36af4f09f2SNélio Laranjeiro 		mlx5_txq_release(dev, i);
376e78005aSNélio Laranjeiro }
386e78005aSNélio Laranjeiro 
39fb732b0aSNélio Laranjeiro /**
40fb732b0aSNélio Laranjeiro  * Start traffic on Tx queues.
41fb732b0aSNélio Laranjeiro  *
42fb732b0aSNélio Laranjeiro  * @param dev
43fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
44fb732b0aSNélio Laranjeiro  *
45fb732b0aSNélio Laranjeiro  * @return
46a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
47fb732b0aSNélio Laranjeiro  */
486e78005aSNélio Laranjeiro static int
49af4f09f2SNélio Laranjeiro mlx5_txq_start(struct rte_eth_dev *dev)
506e78005aSNélio Laranjeiro {
51dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
526e78005aSNélio Laranjeiro 	unsigned int i;
53a6d83b6aSNélio Laranjeiro 	int ret;
546e78005aSNélio Laranjeiro 
556e78005aSNélio Laranjeiro 	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		struct mlx5_txq_data *txq_data;
		uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;

		if (!txq_ctrl)
			continue;
		txq_data = &txq_ctrl->txq;
6286d259ceSMichael Baum 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
636e78005aSNélio Laranjeiro 			txq_alloc_elts(txq_ctrl);
64f49f4483SMichael Baum 		MLX5_ASSERT(!txq_ctrl->obj);
65f49f4483SMichael Baum 		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
66f49f4483SMichael Baum 					    0, txq_ctrl->socket);
67894c4a8eSOri Kam 		if (!txq_ctrl->obj) {
68f49f4483SMichael Baum 			DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
69f49f4483SMichael Baum 				"memory resources.", dev->data->port_id,
70f49f4483SMichael Baum 				txq_data->idx);
71a6d83b6aSNélio Laranjeiro 			rte_errno = ENOMEM;
726e78005aSNélio Laranjeiro 			goto error;
736e78005aSNélio Laranjeiro 		}
74f49f4483SMichael Baum 		ret = priv->obj_ops.txq_obj_new(dev, i);
75f49f4483SMichael Baum 		if (ret < 0) {
76f49f4483SMichael Baum 			mlx5_free(txq_ctrl->obj);
77f49f4483SMichael Baum 			txq_ctrl->obj = NULL;
78f49f4483SMichael Baum 			goto error;
79f49f4483SMichael Baum 		}
80f49f4483SMichael Baum 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
81f49f4483SMichael Baum 			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
82876b5d52SMatan Azrad 
83f49f4483SMichael Baum 			txq_data->fcqs = mlx5_malloc(flags, size,
84f49f4483SMichael Baum 						     RTE_CACHE_LINE_SIZE,
85f49f4483SMichael Baum 						     txq_ctrl->socket);
86f49f4483SMichael Baum 			if (!txq_data->fcqs) {
87f49f4483SMichael Baum 				DRV_LOG(ERR, "Port %u Tx queue %u cannot "
88f49f4483SMichael Baum 					"allocate memory (FCQ).",
89f49f4483SMichael Baum 					dev->data->port_id, i);
90f49f4483SMichael Baum 				rte_errno = ENOMEM;
91f49f4483SMichael Baum 				goto error;
92f49f4483SMichael Baum 			}
93f49f4483SMichael Baum 		}
94f49f4483SMichael Baum 		DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
95f49f4483SMichael Baum 			dev->data->port_id, i, (void *)&txq_ctrl->obj);
96f49f4483SMichael Baum 		LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
976e78005aSNélio Laranjeiro 	}
98a6d83b6aSNélio Laranjeiro 	return 0;
996e78005aSNélio Laranjeiro error:
100a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
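	/*
	 * Release the queue at the failing index first, then walk back over
	 * all the queues started before it (i is post-decremented, so index
	 * 0 is released on the last iteration).
	 */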
10124f653a7SYongseok Koh 	do {
10224f653a7SYongseok Koh 		mlx5_txq_release(dev, i);
10324f653a7SYongseok Koh 	} while (i-- != 0);
104a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
105a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1066e78005aSNélio Laranjeiro }
1076e78005aSNélio Laranjeiro 
108fb732b0aSNélio Laranjeiro /**
109fb732b0aSNélio Laranjeiro  * Stop traffic on Rx queues.
110fb732b0aSNélio Laranjeiro  *
111fb732b0aSNélio Laranjeiro  * @param dev
112fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
113fb732b0aSNélio Laranjeiro  */
114a1366b1aSNélio Laranjeiro static void
115af4f09f2SNélio Laranjeiro mlx5_rxq_stop(struct rte_eth_dev *dev)
116a1366b1aSNélio Laranjeiro {
117dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
118a1366b1aSNélio Laranjeiro 	unsigned int i;
119a1366b1aSNélio Laranjeiro 
120a1366b1aSNélio Laranjeiro 	for (i = 0; i != priv->rxqs_n; ++i)
121af4f09f2SNélio Laranjeiro 		mlx5_rxq_release(dev, i);
122a1366b1aSNélio Laranjeiro }
123a1366b1aSNélio Laranjeiro 
124fb732b0aSNélio Laranjeiro /**
125fb732b0aSNélio Laranjeiro  * Start traffic on Rx queues.
126fb732b0aSNélio Laranjeiro  *
127fb732b0aSNélio Laranjeiro  * @param dev
128fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
129fb732b0aSNélio Laranjeiro  *
130fb732b0aSNélio Laranjeiro  * @return
131a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
132fb732b0aSNélio Laranjeiro  */
133a1366b1aSNélio Laranjeiro static int
134af4f09f2SNélio Laranjeiro mlx5_rxq_start(struct rte_eth_dev *dev)
135a1366b1aSNélio Laranjeiro {
136dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
137a1366b1aSNélio Laranjeiro 	unsigned int i;
138a1366b1aSNélio Laranjeiro 	int ret = 0;
139a1366b1aSNélio Laranjeiro 
1407d6bf6b8SYongseok Koh 	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
14124f653a7SYongseok Koh 	if (mlx5_mprq_alloc_mp(dev)) {
14224f653a7SYongseok Koh 		/* Should not release Rx queues but return immediately. */
14324f653a7SYongseok Koh 		return -rte_errno;
14424f653a7SYongseok Koh 	}
1451260a87bSMichael Baum 	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
1461260a87bSMichael Baum 		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
1471260a87bSMichael Baum 	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
1481260a87bSMichael Baum 		dev->data->port_id, priv->sh->device_attr.max_sge);
149a1366b1aSNélio Laranjeiro 	for (i = 0; i != priv->rxqs_n; ++i) {
150af4f09f2SNélio Laranjeiro 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
151a1366b1aSNélio Laranjeiro 
152a1366b1aSNélio Laranjeiro 		if (!rxq_ctrl)
153a1366b1aSNélio Laranjeiro 			continue;
1546deb19e1SMichael Baum 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
155213e2727SViacheslav Ovsiienko 			/* Pre-register Rx mempools. */
156213e2727SViacheslav Ovsiienko 			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
157213e2727SViacheslav Ovsiienko 				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
158213e2727SViacheslav Ovsiienko 						  rxq_ctrl->rxq.mprq_mp);
159213e2727SViacheslav Ovsiienko 			} else {
160213e2727SViacheslav Ovsiienko 				uint32_t s;
161213e2727SViacheslav Ovsiienko 
162213e2727SViacheslav Ovsiienko 				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
163213e2727SViacheslav Ovsiienko 					mlx5_mr_update_mp
164213e2727SViacheslav Ovsiienko 						(dev, &rxq_ctrl->rxq.mr_ctrl,
165213e2727SViacheslav Ovsiienko 						rxq_ctrl->rxq.rxseg[s].mp);
166213e2727SViacheslav Ovsiienko 			}
167a1366b1aSNélio Laranjeiro 			ret = rxq_alloc_elts(rxq_ctrl);
168a1366b1aSNélio Laranjeiro 			if (ret)
169a1366b1aSNélio Laranjeiro 				goto error;
1706deb19e1SMichael Baum 		}
1711260a87bSMichael Baum 		MLX5_ASSERT(!rxq_ctrl->obj);
1721260a87bSMichael Baum 		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1731260a87bSMichael Baum 					    sizeof(*rxq_ctrl->obj), 0,
1741260a87bSMichael Baum 					    rxq_ctrl->socket);
1751260a87bSMichael Baum 		if (!rxq_ctrl->obj) {
1761260a87bSMichael Baum 			DRV_LOG(ERR,
1771260a87bSMichael Baum 				"Port %u Rx queue %u can't allocate resources.",
1781260a87bSMichael Baum 				dev->data->port_id, (*priv->rxqs)[i]->idx);
1791260a87bSMichael Baum 			rte_errno = ENOMEM;
180a1366b1aSNélio Laranjeiro 			goto error;
181a1366b1aSNélio Laranjeiro 		}
1825eaf882eSMichael Baum 		ret = priv->obj_ops.rxq_obj_new(dev, i);
1831260a87bSMichael Baum 		if (ret) {
1841260a87bSMichael Baum 			mlx5_free(rxq_ctrl->obj);
1851260a87bSMichael Baum 			goto error;
1861260a87bSMichael Baum 		}
1871260a87bSMichael Baum 		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
1881260a87bSMichael Baum 			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
1891260a87bSMichael Baum 		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
1901260a87bSMichael Baum 	}
191a6d83b6aSNélio Laranjeiro 	return 0;
192a1366b1aSNélio Laranjeiro error:
193a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
19424f653a7SYongseok Koh 	do {
19524f653a7SYongseok Koh 		mlx5_rxq_release(dev, i);
19624f653a7SYongseok Koh 	} while (i-- != 0);
197a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
198a6d83b6aSNélio Laranjeiro 	return -rte_errno;
199a1366b1aSNélio Laranjeiro }
200a1366b1aSNélio Laranjeiro 
201e60fbd5bSAdrien Mazarguil /**
 * Bind Tx queues to their peer Rx queues for hairpin.
 *
 * Only pairs whose Tx and Rx queues belong to the same device and are not
 * marked for manual binding are bound here (auto binding).
2056a338ad4SOri Kam  *
2066a338ad4SOri Kam  * @param dev
2076a338ad4SOri Kam  *   Pointer to Ethernet device structure.
2086a338ad4SOri Kam  *
2096a338ad4SOri Kam  * @return
2106a338ad4SOri Kam  *   0 on success, a negative errno value otherwise and rte_errno is set.
2116a338ad4SOri Kam  */
2126a338ad4SOri Kam static int
21337cd4501SBing Zhao mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
2146a338ad4SOri Kam {
2156a338ad4SOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
2166a338ad4SOri Kam 	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
2176a338ad4SOri Kam 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
2186a338ad4SOri Kam 	struct mlx5_txq_ctrl *txq_ctrl;
2196a338ad4SOri Kam 	struct mlx5_rxq_ctrl *rxq_ctrl;
2206a338ad4SOri Kam 	struct mlx5_devx_obj *sq;
2216a338ad4SOri Kam 	struct mlx5_devx_obj *rq;
2226a338ad4SOri Kam 	unsigned int i;
2236a338ad4SOri Kam 	int ret = 0;
224aa8bea0eSBing Zhao 	bool need_auto = false;
225aa8bea0eSBing Zhao 	uint16_t self_port = dev->data->port_id;
2266a338ad4SOri Kam 
2276a338ad4SOri Kam 	for (i = 0; i != priv->txqs_n; ++i) {
2286a338ad4SOri Kam 		txq_ctrl = mlx5_txq_get(dev, i);
2296a338ad4SOri Kam 		if (!txq_ctrl)
2306a338ad4SOri Kam 			continue;
2316a338ad4SOri Kam 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
2326a338ad4SOri Kam 			mlx5_txq_release(dev, i);
2336a338ad4SOri Kam 			continue;
2346a338ad4SOri Kam 		}
		if (txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
237aa8bea0eSBing Zhao 		if (txq_ctrl->hairpin_conf.manual_bind) {
238aa8bea0eSBing Zhao 			mlx5_txq_release(dev, i);
239aa8bea0eSBing Zhao 			return 0;
240aa8bea0eSBing Zhao 		}
241aa8bea0eSBing Zhao 		need_auto = true;
242aa8bea0eSBing Zhao 		mlx5_txq_release(dev, i);
243aa8bea0eSBing Zhao 	}
244aa8bea0eSBing Zhao 	if (!need_auto)
245aa8bea0eSBing Zhao 		return 0;
246aa8bea0eSBing Zhao 	for (i = 0; i != priv->txqs_n; ++i) {
247aa8bea0eSBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
248aa8bea0eSBing Zhao 		if (!txq_ctrl)
249aa8bea0eSBing Zhao 			continue;
250aa8bea0eSBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
251aa8bea0eSBing Zhao 			mlx5_txq_release(dev, i);
252aa8bea0eSBing Zhao 			continue;
253aa8bea0eSBing Zhao 		}
254aa8bea0eSBing Zhao 		/* Skip hairpin queues with other peer ports. */
		if (txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
2576a338ad4SOri Kam 		if (!txq_ctrl->obj) {
2586a338ad4SOri Kam 			rte_errno = ENOMEM;
2596a338ad4SOri Kam 			DRV_LOG(ERR, "port %u no txq object found: %d",
2606a338ad4SOri Kam 				dev->data->port_id, i);
2616a338ad4SOri Kam 			mlx5_txq_release(dev, i);
2626a338ad4SOri Kam 			return -rte_errno;
2636a338ad4SOri Kam 		}
2646a338ad4SOri Kam 		sq = txq_ctrl->obj->sq;
2656a338ad4SOri Kam 		rxq_ctrl = mlx5_rxq_get(dev,
2666a338ad4SOri Kam 					txq_ctrl->hairpin_conf.peers[0].queue);
2676a338ad4SOri Kam 		if (!rxq_ctrl) {
2686a338ad4SOri Kam 			mlx5_txq_release(dev, i);
2696a338ad4SOri Kam 			rte_errno = EINVAL;
2706a338ad4SOri Kam 			DRV_LOG(ERR, "port %u no rxq object found: %d",
2716a338ad4SOri Kam 				dev->data->port_id,
2726a338ad4SOri Kam 				txq_ctrl->hairpin_conf.peers[0].queue);
2736a338ad4SOri Kam 			return -rte_errno;
2746a338ad4SOri Kam 		}
2756a338ad4SOri Kam 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
2766a338ad4SOri Kam 		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
2776a338ad4SOri Kam 			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
2816a338ad4SOri Kam 			goto error;
2826a338ad4SOri Kam 		}
2836a338ad4SOri Kam 		rq = rxq_ctrl->obj->rq;
2846a338ad4SOri Kam 		if (!rq) {
2856a338ad4SOri Kam 			rte_errno = ENOMEM;
2866a338ad4SOri Kam 			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
2876a338ad4SOri Kam 				dev->data->port_id,
2886a338ad4SOri Kam 				txq_ctrl->hairpin_conf.peers[0].queue);
2896a338ad4SOri Kam 			goto error;
2906a338ad4SOri Kam 		}
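		/*
		 * Move the SQ from RST to RDY pointing at the peer RQ, then
		 * move the RQ from RST to RDY pointing back at the SQ.
		 */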
2916a338ad4SOri Kam 		sq_attr.state = MLX5_SQC_STATE_RDY;
2926a338ad4SOri Kam 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
2936a338ad4SOri Kam 		sq_attr.hairpin_peer_rq = rq->id;
2946a338ad4SOri Kam 		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
2956a338ad4SOri Kam 		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
2966a338ad4SOri Kam 		if (ret)
2976a338ad4SOri Kam 			goto error;
2986a338ad4SOri Kam 		rq_attr.state = MLX5_SQC_STATE_RDY;
2996a338ad4SOri Kam 		rq_attr.rq_state = MLX5_SQC_STATE_RST;
3006a338ad4SOri Kam 		rq_attr.hairpin_peer_sq = sq->id;
3016a338ad4SOri Kam 		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
3026a338ad4SOri Kam 		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
3036a338ad4SOri Kam 		if (ret)
3046a338ad4SOri Kam 			goto error;
		/* Auto-bound queues will be destroyed directly (no manual unbind). */
306aa8bea0eSBing Zhao 		rxq_ctrl->hairpin_status = 1;
307aa8bea0eSBing Zhao 		txq_ctrl->hairpin_status = 1;
3086a338ad4SOri Kam 		mlx5_txq_release(dev, i);
3096a338ad4SOri Kam 		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
3106a338ad4SOri Kam 	}
3116a338ad4SOri Kam 	return 0;
3126a338ad4SOri Kam error:
3136a338ad4SOri Kam 	mlx5_txq_release(dev, i);
3146a338ad4SOri Kam 	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
3156a338ad4SOri Kam 	return -rte_errno;
3166a338ad4SOri Kam }
3176a338ad4SOri Kam 
31837cd4501SBing Zhao /*
31937cd4501SBing Zhao  * Fetch the peer queue's SW & HW information.
32037cd4501SBing Zhao  *
32137cd4501SBing Zhao  * @param dev
32237cd4501SBing Zhao  *   Pointer to Ethernet device structure.
32337cd4501SBing Zhao  * @param peer_queue
32437cd4501SBing Zhao  *   Index of the queue to fetch the information.
32537cd4501SBing Zhao  * @param current_info
32637cd4501SBing Zhao  *   Pointer to the input peer information, not used currently.
32737cd4501SBing Zhao  * @param peer_info
32837cd4501SBing Zhao  *   Pointer to the structure to store the information, output.
32937cd4501SBing Zhao  * @param direction
33037cd4501SBing Zhao  *   Positive to get the RxQ information, zero to get the TxQ information.
33137cd4501SBing Zhao  *
33237cd4501SBing Zhao  * @return
33337cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
33437cd4501SBing Zhao  */
33537cd4501SBing Zhao int
33637cd4501SBing Zhao mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
33737cd4501SBing Zhao 			       struct rte_hairpin_peer_info *current_info,
33837cd4501SBing Zhao 			       struct rte_hairpin_peer_info *peer_info,
33937cd4501SBing Zhao 			       uint32_t direction)
34037cd4501SBing Zhao {
34137cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
34237cd4501SBing Zhao 	RTE_SET_USED(current_info);
34337cd4501SBing Zhao 
34437cd4501SBing Zhao 	if (dev->data->dev_started == 0) {
34537cd4501SBing Zhao 		rte_errno = EBUSY;
34637cd4501SBing Zhao 		DRV_LOG(ERR, "peer port %u is not started",
34737cd4501SBing Zhao 			dev->data->port_id);
34837cd4501SBing Zhao 		return -rte_errno;
34937cd4501SBing Zhao 	}
35037cd4501SBing Zhao 	/*
	 * Peer port used as egress. In the current design, the hairpin Tx
	 * queue will be bound to the peer Rx queue, so the Tx queue
	 * information of this (egress) port is fetched and returned here.
35437cd4501SBing Zhao 	 */
35537cd4501SBing Zhao 	if (direction == 0) {
35637cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
35737cd4501SBing Zhao 
35837cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, peer_queue);
35937cd4501SBing Zhao 		if (txq_ctrl == NULL) {
36037cd4501SBing Zhao 			rte_errno = EINVAL;
36137cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
36237cd4501SBing Zhao 				dev->data->port_id, peer_queue);
36337cd4501SBing Zhao 			return -rte_errno;
36437cd4501SBing Zhao 		}
36537cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
36637cd4501SBing Zhao 			rte_errno = EINVAL;
36737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
36837cd4501SBing Zhao 				dev->data->port_id, peer_queue);
36937cd4501SBing Zhao 			mlx5_txq_release(dev, peer_queue);
37037cd4501SBing Zhao 			return -rte_errno;
37137cd4501SBing Zhao 		}
37237cd4501SBing Zhao 		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
37337cd4501SBing Zhao 			rte_errno = ENOMEM;
37437cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
37537cd4501SBing Zhao 				dev->data->port_id, peer_queue);
37637cd4501SBing Zhao 			mlx5_txq_release(dev, peer_queue);
37737cd4501SBing Zhao 			return -rte_errno;
37837cd4501SBing Zhao 		}
37937cd4501SBing Zhao 		peer_info->qp_id = txq_ctrl->obj->sq->id;
38037cd4501SBing Zhao 		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
38137cd4501SBing Zhao 		/* 1-to-1 mapping, only the first one is used. */
38237cd4501SBing Zhao 		peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
38337cd4501SBing Zhao 		peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
38437cd4501SBing Zhao 		peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
38537cd4501SBing Zhao 		mlx5_txq_release(dev, peer_queue);
38637cd4501SBing Zhao 	} else { /* Peer port used as ingress. */
38737cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
38837cd4501SBing Zhao 
38937cd4501SBing Zhao 		rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
39037cd4501SBing Zhao 		if (rxq_ctrl == NULL) {
39137cd4501SBing Zhao 			rte_errno = EINVAL;
39237cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
39337cd4501SBing Zhao 				dev->data->port_id, peer_queue);
39437cd4501SBing Zhao 			return -rte_errno;
39537cd4501SBing Zhao 		}
39637cd4501SBing Zhao 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
39737cd4501SBing Zhao 			rte_errno = EINVAL;
39837cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
39937cd4501SBing Zhao 				dev->data->port_id, peer_queue);
40037cd4501SBing Zhao 			mlx5_rxq_release(dev, peer_queue);
40137cd4501SBing Zhao 			return -rte_errno;
40237cd4501SBing Zhao 		}
40337cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
40437cd4501SBing Zhao 			rte_errno = ENOMEM;
40537cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
40637cd4501SBing Zhao 				dev->data->port_id, peer_queue);
40737cd4501SBing Zhao 			mlx5_rxq_release(dev, peer_queue);
40837cd4501SBing Zhao 			return -rte_errno;
40937cd4501SBing Zhao 		}
41037cd4501SBing Zhao 		peer_info->qp_id = rxq_ctrl->obj->rq->id;
41137cd4501SBing Zhao 		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
41237cd4501SBing Zhao 		peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
41337cd4501SBing Zhao 		peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
41437cd4501SBing Zhao 		peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
41537cd4501SBing Zhao 		mlx5_rxq_release(dev, peer_queue);
41637cd4501SBing Zhao 	}
41737cd4501SBing Zhao 	return 0;
41837cd4501SBing Zhao }
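/*
 * Note: this function is reached through the driver-only ethdev helper
 * rte_eth_hairpin_queue_peer_update() (see mlx5_hairpin_bind_single_port()
 * below), so "peer" in the naming is from the caller's point of view.
 */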
41937cd4501SBing Zhao 
42037cd4501SBing Zhao /*
42137cd4501SBing Zhao  * Bind the hairpin queue with the peer HW information.
 * This needs to be called twice, once for the Tx queue and once for the
 * Rx queue of a pair.
42337cd4501SBing Zhao  * If the queue is already bound, it is considered successful.
42437cd4501SBing Zhao  *
42537cd4501SBing Zhao  * @param dev
42637cd4501SBing Zhao  *   Pointer to Ethernet device structure.
42737cd4501SBing Zhao  * @param cur_queue
42837cd4501SBing Zhao  *   Index of the queue to change the HW configuration to bind.
42937cd4501SBing Zhao  * @param peer_info
43037cd4501SBing Zhao  *   Pointer to information of the peer queue.
43137cd4501SBing Zhao  * @param direction
43237cd4501SBing Zhao  *   Positive to configure the TxQ, zero to configure the RxQ.
43337cd4501SBing Zhao  *
43437cd4501SBing Zhao  * @return
43537cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
43637cd4501SBing Zhao  */
43737cd4501SBing Zhao int
43837cd4501SBing Zhao mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
43937cd4501SBing Zhao 			     struct rte_hairpin_peer_info *peer_info,
44037cd4501SBing Zhao 			     uint32_t direction)
44137cd4501SBing Zhao {
44237cd4501SBing Zhao 	int ret = 0;
44337cd4501SBing Zhao 
44437cd4501SBing Zhao 	/*
	 * Consistency checking of the peer queue: the opposite direction is
	 * used to get the peer queue info with the ethdev port ID, so no
	 * extra check is needed here.
44737cd4501SBing Zhao 	 */
44837cd4501SBing Zhao 	if (peer_info->peer_q != cur_queue) {
44937cd4501SBing Zhao 		rte_errno = EINVAL;
45037cd4501SBing Zhao 		DRV_LOG(ERR, "port %u queue %d and peer queue %d mismatch",
45137cd4501SBing Zhao 			dev->data->port_id, cur_queue, peer_info->peer_q);
45237cd4501SBing Zhao 		return -rte_errno;
45337cd4501SBing Zhao 	}
45437cd4501SBing Zhao 	if (direction != 0) {
45537cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
45637cd4501SBing Zhao 		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
45737cd4501SBing Zhao 
45837cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, cur_queue);
45937cd4501SBing Zhao 		if (txq_ctrl == NULL) {
46037cd4501SBing Zhao 			rte_errno = EINVAL;
46137cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
46237cd4501SBing Zhao 				dev->data->port_id, cur_queue);
46337cd4501SBing Zhao 			return -rte_errno;
46437cd4501SBing Zhao 		}
46537cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
46637cd4501SBing Zhao 			rte_errno = EINVAL;
46737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
46837cd4501SBing Zhao 				dev->data->port_id, cur_queue);
46937cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
47037cd4501SBing Zhao 			return -rte_errno;
47137cd4501SBing Zhao 		}
47237cd4501SBing Zhao 		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
47337cd4501SBing Zhao 			rte_errno = ENOMEM;
47437cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
47537cd4501SBing Zhao 				dev->data->port_id, cur_queue);
47637cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
47737cd4501SBing Zhao 			return -rte_errno;
47837cd4501SBing Zhao 		}
47937cd4501SBing Zhao 		if (txq_ctrl->hairpin_status != 0) {
48037cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Tx queue %d is already bound",
48137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
48237cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
48337cd4501SBing Zhao 			return 0;
48437cd4501SBing Zhao 		}
48537cd4501SBing Zhao 		/*
		 * Consistency checking across all queues of one port is done
		 * in the bind() function, and it is optional.
48837cd4501SBing Zhao 		 */
48937cd4501SBing Zhao 		if (peer_info->tx_explicit !=
49037cd4501SBing Zhao 		    txq_ctrl->hairpin_conf.tx_explicit) {
49137cd4501SBing Zhao 			rte_errno = EINVAL;
49237cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d and peer Tx rule mode"
49337cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
49437cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
49537cd4501SBing Zhao 			return -rte_errno;
49637cd4501SBing Zhao 		}
49737cd4501SBing Zhao 		if (peer_info->manual_bind !=
49837cd4501SBing Zhao 		    txq_ctrl->hairpin_conf.manual_bind) {
49937cd4501SBing Zhao 			rte_errno = EINVAL;
50037cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d and peer binding mode"
50137cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
50237cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
50337cd4501SBing Zhao 			return -rte_errno;
50437cd4501SBing Zhao 		}
50537cd4501SBing Zhao 		sq_attr.state = MLX5_SQC_STATE_RDY;
50637cd4501SBing Zhao 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
50737cd4501SBing Zhao 		sq_attr.hairpin_peer_rq = peer_info->qp_id;
50837cd4501SBing Zhao 		sq_attr.hairpin_peer_vhca = peer_info->vhca_id;
50937cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
51037cd4501SBing Zhao 		if (ret == 0)
51137cd4501SBing Zhao 			txq_ctrl->hairpin_status = 1;
51237cd4501SBing Zhao 		mlx5_txq_release(dev, cur_queue);
51337cd4501SBing Zhao 	} else {
51437cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
51537cd4501SBing Zhao 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
51637cd4501SBing Zhao 
51737cd4501SBing Zhao 		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
51837cd4501SBing Zhao 		if (rxq_ctrl == NULL) {
51937cd4501SBing Zhao 			rte_errno = EINVAL;
52037cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
52137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
52237cd4501SBing Zhao 			return -rte_errno;
52337cd4501SBing Zhao 		}
52437cd4501SBing Zhao 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
52537cd4501SBing Zhao 			rte_errno = EINVAL;
52637cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
52737cd4501SBing Zhao 				dev->data->port_id, cur_queue);
52837cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
52937cd4501SBing Zhao 			return -rte_errno;
53037cd4501SBing Zhao 		}
53137cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
53237cd4501SBing Zhao 			rte_errno = ENOMEM;
53337cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
53437cd4501SBing Zhao 				dev->data->port_id, cur_queue);
53537cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
53637cd4501SBing Zhao 			return -rte_errno;
53737cd4501SBing Zhao 		}
53837cd4501SBing Zhao 		if (rxq_ctrl->hairpin_status != 0) {
53937cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
54037cd4501SBing Zhao 				dev->data->port_id, cur_queue);
54137cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
54237cd4501SBing Zhao 			return 0;
54337cd4501SBing Zhao 		}
54437cd4501SBing Zhao 		if (peer_info->tx_explicit !=
54537cd4501SBing Zhao 		    rxq_ctrl->hairpin_conf.tx_explicit) {
54637cd4501SBing Zhao 			rte_errno = EINVAL;
54737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
54837cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
54937cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
55037cd4501SBing Zhao 			return -rte_errno;
55137cd4501SBing Zhao 		}
55237cd4501SBing Zhao 		if (peer_info->manual_bind !=
55337cd4501SBing Zhao 		    rxq_ctrl->hairpin_conf.manual_bind) {
55437cd4501SBing Zhao 			rte_errno = EINVAL;
55537cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
55637cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
55737cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
55837cd4501SBing Zhao 			return -rte_errno;
55937cd4501SBing Zhao 		}
56037cd4501SBing Zhao 		rq_attr.state = MLX5_SQC_STATE_RDY;
56137cd4501SBing Zhao 		rq_attr.rq_state = MLX5_SQC_STATE_RST;
56237cd4501SBing Zhao 		rq_attr.hairpin_peer_sq = peer_info->qp_id;
56337cd4501SBing Zhao 		rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
56437cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
56537cd4501SBing Zhao 		if (ret == 0)
56637cd4501SBing Zhao 			rxq_ctrl->hairpin_status = 1;
56737cd4501SBing Zhao 		mlx5_rxq_release(dev, cur_queue);
56837cd4501SBing Zhao 	}
56937cd4501SBing Zhao 	return ret;
57037cd4501SBing Zhao }
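/*
 * A minimal sketch of how one Tx/Rx pair gets bound (hypothetical queue
 * indexes tx_q/rx_q, Tx on this device, Rx on the peer port rx_port):
 *
 *	rte_eth_hairpin_queue_peer_update(rx_port, rx_q, NULL, &rx_info, 1);
 *	mlx5_hairpin_queue_peer_bind(dev, tx_q, &rx_info, 1);
 *	(fill tx_info from the local Tx queue control structure)
 *	rte_eth_hairpin_queue_peer_bind(rx_port, rx_q, &tx_info, 0);
 *
 * This mirrors the flow of mlx5_hairpin_bind_single_port() below.
 */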
57137cd4501SBing Zhao 
57237cd4501SBing Zhao /*
57337cd4501SBing Zhao  * Unbind the hairpin queue and reset its HW configuration.
 * This needs to be called twice, once for the Tx queue and once for the
 * Rx queue of a pair.
57537cd4501SBing Zhao  * If the queue is already unbound, it is considered successful.
57637cd4501SBing Zhao  *
57737cd4501SBing Zhao  * @param dev
57837cd4501SBing Zhao  *   Pointer to Ethernet device structure.
57937cd4501SBing Zhao  * @param cur_queue
58037cd4501SBing Zhao  *   Index of the queue to change the HW configuration to unbind.
58137cd4501SBing Zhao  * @param direction
58237cd4501SBing Zhao  *   Positive to reset the TxQ, zero to reset the RxQ.
58337cd4501SBing Zhao  *
58437cd4501SBing Zhao  * @return
58537cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
58637cd4501SBing Zhao  */
58737cd4501SBing Zhao int
58837cd4501SBing Zhao mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
58937cd4501SBing Zhao 			       uint32_t direction)
59037cd4501SBing Zhao {
59137cd4501SBing Zhao 	int ret = 0;
59237cd4501SBing Zhao 
59337cd4501SBing Zhao 	if (direction != 0) {
59437cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
59537cd4501SBing Zhao 		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
59637cd4501SBing Zhao 
59737cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, cur_queue);
59837cd4501SBing Zhao 		if (txq_ctrl == NULL) {
59937cd4501SBing Zhao 			rte_errno = EINVAL;
60037cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
60137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
60237cd4501SBing Zhao 			return -rte_errno;
60337cd4501SBing Zhao 		}
60437cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
60537cd4501SBing Zhao 			rte_errno = EINVAL;
60637cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
60737cd4501SBing Zhao 				dev->data->port_id, cur_queue);
60837cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
60937cd4501SBing Zhao 			return -rte_errno;
61037cd4501SBing Zhao 		}
61137cd4501SBing Zhao 		/* Already unbound, return success before obj checking. */
61237cd4501SBing Zhao 		if (txq_ctrl->hairpin_status == 0) {
61337cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Tx queue %d is already unbound",
61437cd4501SBing Zhao 				dev->data->port_id, cur_queue);
61537cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
61637cd4501SBing Zhao 			return 0;
61737cd4501SBing Zhao 		}
61837cd4501SBing Zhao 		if (!txq_ctrl->obj || !txq_ctrl->obj->sq) {
61937cd4501SBing Zhao 			rte_errno = ENOMEM;
62037cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
62137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
62237cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
62337cd4501SBing Zhao 			return -rte_errno;
62437cd4501SBing Zhao 		}
62537cd4501SBing Zhao 		sq_attr.state = MLX5_SQC_STATE_RST;
62637cd4501SBing Zhao 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
62737cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
62837cd4501SBing Zhao 		if (ret == 0)
62937cd4501SBing Zhao 			txq_ctrl->hairpin_status = 0;
63037cd4501SBing Zhao 		mlx5_txq_release(dev, cur_queue);
63137cd4501SBing Zhao 	} else {
63237cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
63337cd4501SBing Zhao 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
63437cd4501SBing Zhao 
63537cd4501SBing Zhao 		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
63637cd4501SBing Zhao 		if (rxq_ctrl == NULL) {
63737cd4501SBing Zhao 			rte_errno = EINVAL;
63837cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
63937cd4501SBing Zhao 				dev->data->port_id, cur_queue);
64037cd4501SBing Zhao 			return -rte_errno;
64137cd4501SBing Zhao 		}
64237cd4501SBing Zhao 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
64337cd4501SBing Zhao 			rte_errno = EINVAL;
64437cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
64537cd4501SBing Zhao 				dev->data->port_id, cur_queue);
64637cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
64737cd4501SBing Zhao 			return -rte_errno;
64837cd4501SBing Zhao 		}
64937cd4501SBing Zhao 		if (rxq_ctrl->hairpin_status == 0) {
65037cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
65137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
65237cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
65337cd4501SBing Zhao 			return 0;
65437cd4501SBing Zhao 		}
65537cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
65637cd4501SBing Zhao 			rte_errno = ENOMEM;
65737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
65837cd4501SBing Zhao 				dev->data->port_id, cur_queue);
65937cd4501SBing Zhao 			mlx5_rxq_release(dev, cur_queue);
66037cd4501SBing Zhao 			return -rte_errno;
66137cd4501SBing Zhao 		}
66237cd4501SBing Zhao 		rq_attr.state = MLX5_SQC_STATE_RST;
66337cd4501SBing Zhao 		rq_attr.rq_state = MLX5_SQC_STATE_RST;
66437cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
66537cd4501SBing Zhao 		if (ret == 0)
66637cd4501SBing Zhao 			rxq_ctrl->hairpin_status = 0;
66737cd4501SBing Zhao 		mlx5_rxq_release(dev, cur_queue);
66837cd4501SBing Zhao 	}
66937cd4501SBing Zhao 	return ret;
67037cd4501SBing Zhao }
67137cd4501SBing Zhao 
67237cd4501SBing Zhao /*
67337cd4501SBing Zhao  * Bind the hairpin port pairs, from the Tx to the peer Rx.
 * This function only supports binding the Tx side to one Rx port.
67537cd4501SBing Zhao  *
67637cd4501SBing Zhao  * @param dev
67737cd4501SBing Zhao  *   Pointer to Ethernet device structure.
67837cd4501SBing Zhao  * @param rx_port
67937cd4501SBing Zhao  *   Port identifier of the Rx port.
68037cd4501SBing Zhao  *
68137cd4501SBing Zhao  * @return
68237cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
68337cd4501SBing Zhao  */
68437cd4501SBing Zhao static int
68537cd4501SBing Zhao mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
68637cd4501SBing Zhao {
68737cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
68837cd4501SBing Zhao 	int ret = 0;
68937cd4501SBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
69037cd4501SBing Zhao 	uint32_t i;
69137cd4501SBing Zhao 	struct rte_hairpin_peer_info peer = {0xffffff};
69237cd4501SBing Zhao 	struct rte_hairpin_peer_info cur;
69337cd4501SBing Zhao 	const struct rte_eth_hairpin_conf *conf;
69437cd4501SBing Zhao 	uint16_t num_q = 0;
69537cd4501SBing Zhao 	uint16_t local_port = priv->dev_data->port_id;
69637cd4501SBing Zhao 	uint32_t manual;
69737cd4501SBing Zhao 	uint32_t explicit;
69837cd4501SBing Zhao 	uint16_t rx_queue;
69937cd4501SBing Zhao 
70037cd4501SBing Zhao 	if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
70137cd4501SBing Zhao 		rte_errno = ENODEV;
70237cd4501SBing Zhao 		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
70337cd4501SBing Zhao 		return -rte_errno;
70437cd4501SBing Zhao 	}
70537cd4501SBing Zhao 	/*
	 * Before binding TxQ to peer RxQ, a first pass over the queues checks
	 * that their configuration is consistent. This costs a little time,
	 * but it is better than having to roll back afterwards.
70937cd4501SBing Zhao 	 */
71037cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
71137cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
71237cd4501SBing Zhao 		if (txq_ctrl == NULL)
71337cd4501SBing Zhao 			continue;
71437cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
71537cd4501SBing Zhao 			mlx5_txq_release(dev, i);
71637cd4501SBing Zhao 			continue;
71737cd4501SBing Zhao 		}
71837cd4501SBing Zhao 		/*
		 * All hairpin Tx queues of a single port that are connected to
		 * the same peer Rx port should have the same "auto binding"
		 * and "implicit Tx flow" modes.
		 * Peer consistency checking is done in the per-queue binding.
72337cd4501SBing Zhao 		 */
72437cd4501SBing Zhao 		conf = &txq_ctrl->hairpin_conf;
72537cd4501SBing Zhao 		if (conf->peers[0].port == rx_port) {
72637cd4501SBing Zhao 			if (num_q == 0) {
72737cd4501SBing Zhao 				manual = conf->manual_bind;
72837cd4501SBing Zhao 				explicit = conf->tx_explicit;
72937cd4501SBing Zhao 			} else {
73037cd4501SBing Zhao 				if (manual != conf->manual_bind ||
73137cd4501SBing Zhao 				    explicit != conf->tx_explicit) {
73237cd4501SBing Zhao 					rte_errno = EINVAL;
73337cd4501SBing Zhao 					DRV_LOG(ERR, "port %u queue %d mode"
73437cd4501SBing Zhao 						" mismatch: %u %u, %u %u",
73537cd4501SBing Zhao 						local_port, i, manual,
73637cd4501SBing Zhao 						conf->manual_bind, explicit,
73737cd4501SBing Zhao 						conf->tx_explicit);
73837cd4501SBing Zhao 					mlx5_txq_release(dev, i);
73937cd4501SBing Zhao 					return -rte_errno;
74037cd4501SBing Zhao 				}
74137cd4501SBing Zhao 			}
74237cd4501SBing Zhao 			num_q++;
74337cd4501SBing Zhao 		}
74437cd4501SBing Zhao 		mlx5_txq_release(dev, i);
74537cd4501SBing Zhao 	}
	/* If no queue is configured, return success directly. */
74737cd4501SBing Zhao 	if (num_q == 0)
74837cd4501SBing Zhao 		return ret;
	/* Traverse all the hairpin Tx queues again for the actual binding. */
75037cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
75137cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
75237cd4501SBing Zhao 		if (txq_ctrl == NULL)
75337cd4501SBing Zhao 			continue;
75437cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
75537cd4501SBing Zhao 			mlx5_txq_release(dev, i);
75637cd4501SBing Zhao 			continue;
75737cd4501SBing Zhao 		}
75837cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
75937cd4501SBing Zhao 			mlx5_txq_release(dev, i);
76037cd4501SBing Zhao 			continue;
76137cd4501SBing Zhao 		}
76237cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
76337cd4501SBing Zhao 		/*
76437cd4501SBing Zhao 		 * Fetch peer RxQ's information.
76537cd4501SBing Zhao 		 * No need to pass the information of the current queue.
76637cd4501SBing Zhao 		 */
76737cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_update(rx_port, rx_queue,
76837cd4501SBing Zhao 							NULL, &peer, 1);
76937cd4501SBing Zhao 		if (ret != 0) {
77037cd4501SBing Zhao 			mlx5_txq_release(dev, i);
77137cd4501SBing Zhao 			goto error;
77237cd4501SBing Zhao 		}
77337cd4501SBing Zhao 		/* Accessing its own device, inside mlx5 PMD. */
77437cd4501SBing Zhao 		ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1);
77537cd4501SBing Zhao 		if (ret != 0) {
77637cd4501SBing Zhao 			mlx5_txq_release(dev, i);
77737cd4501SBing Zhao 			goto error;
77837cd4501SBing Zhao 		}
77937cd4501SBing Zhao 		/* Pass TxQ's information to peer RxQ and try binding. */
78037cd4501SBing Zhao 		cur.peer_q = rx_queue;
78137cd4501SBing Zhao 		cur.qp_id = txq_ctrl->obj->sq->id;
78237cd4501SBing Zhao 		cur.vhca_id = priv->config.hca_attr.vhca_id;
78337cd4501SBing Zhao 		cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
78437cd4501SBing Zhao 		cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
78537cd4501SBing Zhao 		/*
		 * In order to access another device in a proper way, an RTE
		 * level private (driver-only) function is needed.
78837cd4501SBing Zhao 		 */
78937cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_bind(rx_port, rx_queue,
79037cd4501SBing Zhao 						      &cur, 0);
79137cd4501SBing Zhao 		if (ret != 0) {
79237cd4501SBing Zhao 			mlx5_txq_release(dev, i);
79337cd4501SBing Zhao 			goto error;
79437cd4501SBing Zhao 		}
79537cd4501SBing Zhao 		mlx5_txq_release(dev, i);
79637cd4501SBing Zhao 	}
79737cd4501SBing Zhao 	return 0;
79837cd4501SBing Zhao error:
79937cd4501SBing Zhao 	/*
	 * Roll back the queues already bound.
80137cd4501SBing Zhao 	 * No need to check the return value of the queue unbind function.
80237cd4501SBing Zhao 	 */
80337cd4501SBing Zhao 	do {
80437cd4501SBing Zhao 		/* No validation is needed here. */
80537cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
80637cd4501SBing Zhao 		if (txq_ctrl == NULL)
80737cd4501SBing Zhao 			continue;
80837cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
80937cd4501SBing Zhao 		rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
81037cd4501SBing Zhao 		mlx5_hairpin_queue_peer_unbind(dev, i, 1);
81137cd4501SBing Zhao 		mlx5_txq_release(dev, i);
81237cd4501SBing Zhao 	} while (i--);
81337cd4501SBing Zhao 	return ret;
81437cd4501SBing Zhao }
81537cd4501SBing Zhao 
81637cd4501SBing Zhao /*
 * Unbind the hairpin port pair. The HW configuration of both devices will be
 * cleared and the status will be reset for all the queues used between them.
 * This function only supports unbinding the Tx side from one Rx port.
82037cd4501SBing Zhao  *
82137cd4501SBing Zhao  * @param dev
82237cd4501SBing Zhao  *   Pointer to Ethernet device structure.
82337cd4501SBing Zhao  * @param rx_port
82437cd4501SBing Zhao  *   Port identifier of the Rx port.
82537cd4501SBing Zhao  *
82637cd4501SBing Zhao  * @return
82737cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
82837cd4501SBing Zhao  */
82937cd4501SBing Zhao static int
83037cd4501SBing Zhao mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
83137cd4501SBing Zhao {
83237cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
83337cd4501SBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
83437cd4501SBing Zhao 	uint32_t i;
83537cd4501SBing Zhao 	int ret;
83637cd4501SBing Zhao 	uint16_t cur_port = priv->dev_data->port_id;
83737cd4501SBing Zhao 
83837cd4501SBing Zhao 	if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
83937cd4501SBing Zhao 		rte_errno = ENODEV;
84037cd4501SBing Zhao 		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
84137cd4501SBing Zhao 		return -rte_errno;
84237cd4501SBing Zhao 	}
84337cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
84437cd4501SBing Zhao 		uint16_t rx_queue;
84537cd4501SBing Zhao 
84637cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
84737cd4501SBing Zhao 		if (txq_ctrl == NULL)
84837cd4501SBing Zhao 			continue;
84937cd4501SBing Zhao 		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
85037cd4501SBing Zhao 			mlx5_txq_release(dev, i);
85137cd4501SBing Zhao 			continue;
85237cd4501SBing Zhao 		}
85337cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
85437cd4501SBing Zhao 			mlx5_txq_release(dev, i);
85537cd4501SBing Zhao 			continue;
85637cd4501SBing Zhao 		}
		/* Only the first used queue needs to be checked. */
85837cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.manual_bind == 0) {
85937cd4501SBing Zhao 			if (cur_port != rx_port) {
86037cd4501SBing Zhao 				rte_errno = EINVAL;
86137cd4501SBing Zhao 				DRV_LOG(ERR, "port %u and port %u are in"
86237cd4501SBing Zhao 					" auto-bind mode", cur_port, rx_port);
86337cd4501SBing Zhao 				mlx5_txq_release(dev, i);
86437cd4501SBing Zhao 				return -rte_errno;
86537cd4501SBing Zhao 			} else {
86637cd4501SBing Zhao 				return 0;
86737cd4501SBing Zhao 			}
86837cd4501SBing Zhao 		}
86937cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
87037cd4501SBing Zhao 		mlx5_txq_release(dev, i);
87137cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
87237cd4501SBing Zhao 		if (ret) {
87337cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d unbind - failure",
87437cd4501SBing Zhao 				rx_port, rx_queue);
87537cd4501SBing Zhao 			return ret;
87637cd4501SBing Zhao 		}
87737cd4501SBing Zhao 		ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1);
87837cd4501SBing Zhao 		if (ret) {
87937cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d unbind - failure",
88037cd4501SBing Zhao 				cur_port, i);
88137cd4501SBing Zhao 			return ret;
88237cd4501SBing Zhao 		}
88337cd4501SBing Zhao 	}
88437cd4501SBing Zhao 	return 0;
88537cd4501SBing Zhao }
88637cd4501SBing Zhao 
88737cd4501SBing Zhao /*
 * Bind hairpin ports; Rx can be all ports when rx_port is RTE_MAX_ETHPORTS.
88937cd4501SBing Zhao  * @see mlx5_hairpin_bind_single_port()
89037cd4501SBing Zhao  */
89137cd4501SBing Zhao int
89237cd4501SBing Zhao mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
89337cd4501SBing Zhao {
89437cd4501SBing Zhao 	int ret = 0;
89537cd4501SBing Zhao 	uint16_t p, pp;
89637cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
89737cd4501SBing Zhao 
89837cd4501SBing Zhao 	/*
89937cd4501SBing Zhao 	 * If the Rx port has no hairpin configuration with the current port,
	 * the binding is simply skipped inside the single-port bind function.
	 * The device started status is checked only right before the queue
	 * information is updated.
90337cd4501SBing Zhao 	 */
90437cd4501SBing Zhao 	if (rx_port == RTE_MAX_ETHPORTS) {
90537cd4501SBing Zhao 		MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
90637cd4501SBing Zhao 			ret = mlx5_hairpin_bind_single_port(dev, p);
90737cd4501SBing Zhao 			if (ret != 0)
90837cd4501SBing Zhao 				goto unbind;
90937cd4501SBing Zhao 		}
91037cd4501SBing Zhao 		return ret;
91137cd4501SBing Zhao 	} else {
91237cd4501SBing Zhao 		return mlx5_hairpin_bind_single_port(dev, rx_port);
91337cd4501SBing Zhao 	}
91437cd4501SBing Zhao unbind:
91537cd4501SBing Zhao 	MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
91637cd4501SBing Zhao 		if (pp < p)
91737cd4501SBing Zhao 			mlx5_hairpin_unbind_single_port(dev, pp);
91837cd4501SBing Zhao 	return ret;
91937cd4501SBing Zhao }
92037cd4501SBing Zhao 
92137cd4501SBing Zhao /*
 * Unbind hairpin ports; Rx can be all ports when rx_port is RTE_MAX_ETHPORTS.
92337cd4501SBing Zhao  * @see mlx5_hairpin_unbind_single_port()
92437cd4501SBing Zhao  */
92537cd4501SBing Zhao int
92637cd4501SBing Zhao mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
92737cd4501SBing Zhao {
92837cd4501SBing Zhao 	int ret = 0;
92937cd4501SBing Zhao 	uint16_t p;
93037cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
93137cd4501SBing Zhao 
93237cd4501SBing Zhao 	if (rx_port == RTE_MAX_ETHPORTS)
93337cd4501SBing Zhao 		MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
93437cd4501SBing Zhao 			ret = mlx5_hairpin_unbind_single_port(dev, p);
93537cd4501SBing Zhao 			if (ret != 0)
93637cd4501SBing Zhao 				return ret;
93737cd4501SBing Zhao 		}
93837cd4501SBing Zhao 	else
9390746dcabSBing Zhao 		ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
94037cd4501SBing Zhao 	return ret;
94137cd4501SBing Zhao }
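/*
 * Usage sketch from the application side (hypothetical port numbers): the
 * two functions above back the generic ethdev hairpin binding API:
 *
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *	...
 *	ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 *
 * Passing RTE_MAX_ETHPORTS as rx_port applies the operation to all possible
 * peer ports, as handled by the loops above.
 */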
94237cd4501SBing Zhao 
94302109eaeSBing Zhao /*
94402109eaeSBing Zhao  * DPDK callback to get the hairpin peer ports list.
 * This returns the actual number of peer ports and saves the identifiers
 * into the array (sorted; the order may differ from the one used when
 * setting up the hairpin peer queues).
94802109eaeSBing Zhao  * The peer port ID could be the same as the port ID of the current device.
94902109eaeSBing Zhao  *
95002109eaeSBing Zhao  * @param dev
95102109eaeSBing Zhao  *   Pointer to Ethernet device structure.
95202109eaeSBing Zhao  * @param peer_ports
95302109eaeSBing Zhao  *   Pointer to array to save the port identifiers.
95402109eaeSBing Zhao  * @param len
95502109eaeSBing Zhao  *   The length of the array.
95602109eaeSBing Zhao  * @param direction
95702109eaeSBing Zhao  *   Current port to peer port direction.
95802109eaeSBing Zhao  *   positive - current used as Tx to get all peer Rx ports.
95902109eaeSBing Zhao  *   zero - current used as Rx to get all peer Tx ports.
96002109eaeSBing Zhao  *
96102109eaeSBing Zhao  * @return
 *   0 or a positive value on success (the actual number of peer ports),
 *   a negative errno value otherwise and rte_errno is set.
96402109eaeSBing Zhao  */
96502109eaeSBing Zhao int
96602109eaeSBing Zhao mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
96702109eaeSBing Zhao 			    size_t len, uint32_t direction)
96802109eaeSBing Zhao {
96902109eaeSBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
97002109eaeSBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
97102109eaeSBing Zhao 	struct mlx5_rxq_ctrl *rxq_ctrl;
97202109eaeSBing Zhao 	uint32_t i;
97302109eaeSBing Zhao 	uint16_t pp;
97402109eaeSBing Zhao 	uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
97502109eaeSBing Zhao 	int ret = 0;
97602109eaeSBing Zhao 
97702109eaeSBing Zhao 	if (direction) {
97802109eaeSBing Zhao 		for (i = 0; i < priv->txqs_n; i++) {
97902109eaeSBing Zhao 			txq_ctrl = mlx5_txq_get(dev, i);
98002109eaeSBing Zhao 			if (!txq_ctrl)
98102109eaeSBing Zhao 				continue;
98202109eaeSBing Zhao 			if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
98302109eaeSBing Zhao 				mlx5_txq_release(dev, i);
98402109eaeSBing Zhao 				continue;
98502109eaeSBing Zhao 			}
98602109eaeSBing Zhao 			pp = txq_ctrl->hairpin_conf.peers[0].port;
98702109eaeSBing Zhao 			if (pp >= RTE_MAX_ETHPORTS) {
98802109eaeSBing Zhao 				rte_errno = ERANGE;
98902109eaeSBing Zhao 				mlx5_txq_release(dev, i);
99002109eaeSBing Zhao 				DRV_LOG(ERR, "port %hu queue %u peer port "
99102109eaeSBing Zhao 					"out of range %hu",
99202109eaeSBing Zhao 					priv->dev_data->port_id, i, pp);
99302109eaeSBing Zhao 				return -rte_errno;
99402109eaeSBing Zhao 			}
99502109eaeSBing Zhao 			bits[pp / 32] |= 1 << (pp % 32);
99602109eaeSBing Zhao 			mlx5_txq_release(dev, i);
99702109eaeSBing Zhao 		}
99802109eaeSBing Zhao 	} else {
99902109eaeSBing Zhao 		for (i = 0; i < priv->rxqs_n; i++) {
100002109eaeSBing Zhao 			rxq_ctrl = mlx5_rxq_get(dev, i);
100102109eaeSBing Zhao 			if (!rxq_ctrl)
100202109eaeSBing Zhao 				continue;
100302109eaeSBing Zhao 			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
100402109eaeSBing Zhao 				mlx5_rxq_release(dev, i);
100502109eaeSBing Zhao 				continue;
100602109eaeSBing Zhao 			}
100702109eaeSBing Zhao 			pp = rxq_ctrl->hairpin_conf.peers[0].port;
100802109eaeSBing Zhao 			if (pp >= RTE_MAX_ETHPORTS) {
100902109eaeSBing Zhao 				rte_errno = ERANGE;
101002109eaeSBing Zhao 				mlx5_rxq_release(dev, i);
101102109eaeSBing Zhao 				DRV_LOG(ERR, "port %hu queue %u peer port "
101202109eaeSBing Zhao 					"out of range %hu",
101302109eaeSBing Zhao 					priv->dev_data->port_id, i, pp);
101402109eaeSBing Zhao 				return -rte_errno;
101502109eaeSBing Zhao 			}
101602109eaeSBing Zhao 			bits[pp / 32] |= 1 << (pp % 32);
101702109eaeSBing Zhao 			mlx5_rxq_release(dev, i);
101802109eaeSBing Zhao 		}
101902109eaeSBing Zhao 	}
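	/*
	 * Collect the ports marked in the bitmap into the output array in
	 * ascending port ID order.
	 */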
102002109eaeSBing Zhao 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
102102109eaeSBing Zhao 		if (bits[i / 32] & (1 << (i % 32))) {
102202109eaeSBing Zhao 			if ((size_t)ret >= len) {
102302109eaeSBing Zhao 				rte_errno = E2BIG;
102402109eaeSBing Zhao 				return -rte_errno;
102502109eaeSBing Zhao 			}
102602109eaeSBing Zhao 			peer_ports[ret++] = i;
102702109eaeSBing Zhao 		}
102802109eaeSBing Zhao 	}
102902109eaeSBing Zhao 	return ret;
103002109eaeSBing Zhao }
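/*
 * Usage sketch from the application side (hypothetical variables): listing
 * all peer Rx ports of a Tx port goes through the generic ethdev wrapper:
 *
 *	uint16_t peers[RTE_MAX_ETHPORTS];
 *	int n = rte_eth_hairpin_get_peer_ports(tx_port, peers,
 *					       RTE_MAX_ETHPORTS, 1);
 *
 * A negative return value means failure with rte_errno set, as documented
 * above; otherwise n entries of peers[] are valid.
 */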
103102109eaeSBing Zhao 
10326a338ad4SOri Kam /**
1033e60fbd5bSAdrien Mazarguil  * DPDK callback to start the device.
1034e60fbd5bSAdrien Mazarguil  *
1035e60fbd5bSAdrien Mazarguil  * Simulate device start by attaching all configured flows.
1036e60fbd5bSAdrien Mazarguil  *
1037e60fbd5bSAdrien Mazarguil  * @param dev
1038e60fbd5bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
1039e60fbd5bSAdrien Mazarguil  *
1040e60fbd5bSAdrien Mazarguil  * @return
1041a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1042e60fbd5bSAdrien Mazarguil  */
1043e60fbd5bSAdrien Mazarguil int
1044e60fbd5bSAdrien Mazarguil mlx5_dev_start(struct rte_eth_dev *dev)
1045e60fbd5bSAdrien Mazarguil {
104633860cfaSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
1047a6d83b6aSNélio Laranjeiro 	int ret;
1048efa79e68SOri Kam 	int fine_inline;
1049e60fbd5bSAdrien Mazarguil 
105024f653a7SYongseok Koh 	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
1051efa79e68SOri Kam 	fine_inline = rte_mbuf_dynflag_lookup
1052efa79e68SOri Kam 		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
1053042540e4SThomas Monjalon 	if (fine_inline >= 0)
1054efa79e68SOri Kam 		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
1055efa79e68SOri Kam 	else
1056efa79e68SOri Kam 		rte_net_mlx5_dynf_inline_mask = 0;
1057606d6905SShiri Kuzin 	if (dev->data->nb_rx_queues > 0) {
105863bd1629SOri Kam 		ret = mlx5_dev_configure_rss_reta(dev);
105963bd1629SOri Kam 		if (ret) {
106063bd1629SOri Kam 			DRV_LOG(ERR, "port %u reta config failed: %s",
106163bd1629SOri Kam 				dev->data->port_id, strerror(rte_errno));
106263bd1629SOri Kam 			return -rte_errno;
106363bd1629SOri Kam 		}
1064606d6905SShiri Kuzin 	}
1065d133f4cdSViacheslav Ovsiienko 	ret = mlx5_txpp_start(dev);
1066d133f4cdSViacheslav Ovsiienko 	if (ret) {
1067d133f4cdSViacheslav Ovsiienko 		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
1068d133f4cdSViacheslav Ovsiienko 			dev->data->port_id, strerror(rte_errno));
1069d133f4cdSViacheslav Ovsiienko 		goto error;
1070d133f4cdSViacheslav Ovsiienko 	}
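	/*
	 * A dummy loopback queue is created, when the object operations
	 * support it, for the DevX flow engine (devx + DV flow + TIR
	 * destination).
	 */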
1071*23233fd6SBing Zhao 	if ((priv->config.devx && priv->config.dv_flow_en &&
1072*23233fd6SBing Zhao 	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
1073*23233fd6SBing Zhao 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
1074*23233fd6SBing Zhao 		if (ret)
1075*23233fd6SBing Zhao 			goto error;
1076*23233fd6SBing Zhao 	}
1077a6d83b6aSNélio Laranjeiro 	ret = mlx5_txq_start(dev);
1078a6d83b6aSNélio Laranjeiro 	if (ret) {
1079a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
10800f99970bSNélio Laranjeiro 			dev->data->port_id, strerror(rte_errno));
1081d133f4cdSViacheslav Ovsiienko 		goto error;
10826e78005aSNélio Laranjeiro 	}
1083a6d83b6aSNélio Laranjeiro 	ret = mlx5_rxq_start(dev);
1084a6d83b6aSNélio Laranjeiro 	if (ret) {
1085a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
10860f99970bSNélio Laranjeiro 			dev->data->port_id, strerror(rte_errno));
1087d133f4cdSViacheslav Ovsiienko 		goto error;
1088a1366b1aSNélio Laranjeiro 	}
1089aa8bea0eSBing Zhao 	/*
	 * This step is skipped if no hairpin Tx queue is configured with an
	 * Rx peer queue from the same device.
1092aa8bea0eSBing Zhao 	 */
109337cd4501SBing Zhao 	ret = mlx5_hairpin_auto_bind(dev);
10946a338ad4SOri Kam 	if (ret) {
1095aa8bea0eSBing Zhao 		DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
10966a338ad4SOri Kam 			dev->data->port_id, strerror(rte_errno));
1097d133f4cdSViacheslav Ovsiienko 		goto error;
10986a338ad4SOri Kam 	}
	/* Set the started flag here for the following steps, e.g. control flows. */
110024f653a7SYongseok Koh 	dev->data->dev_started = 1;
1101a6d83b6aSNélio Laranjeiro 	ret = mlx5_rx_intr_vec_enable(dev);
1102a6d83b6aSNélio Laranjeiro 	if (ret) {
1103a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
11040f99970bSNélio Laranjeiro 			dev->data->port_id);
1105e1016cb7SAdrien Mazarguil 		goto error;
11063c7d44afSShahaf Shuler 	}
110773bf9235SOphir Munk 	mlx5_os_stats_init(dev);
11087ba5320bSNélio Laranjeiro 	ret = mlx5_traffic_enable(dev);
1109a6d83b6aSNélio Laranjeiro 	if (ret) {
11108db7e3b6SBing Zhao 		DRV_LOG(ERR, "port %u failed to set default flows",
1111e313ef4cSShahaf Shuler 			dev->data->port_id);
1112e313ef4cSShahaf Shuler 		goto error;
1113e313ef4cSShahaf Shuler 	}
1114a2854c4dSViacheslav Ovsiienko 	/* Set the mask and offset of dynamic flow metadata in Rx queues. */
11156c55b622SAlexander Kozyrev 	mlx5_flow_rxq_dynf_metadata_set(dev);
1116a2854c4dSViacheslav Ovsiienko 	/* Set flags and context to convert Rx timestamps. */
1117a2854c4dSViacheslav Ovsiienko 	mlx5_rxq_timestamp_set(dev);
1118a2854c4dSViacheslav Ovsiienko 	/* Set the mask and offset of the Tx scheduling timestamp in Tx queues. */
11193172c471SViacheslav Ovsiienko 	mlx5_txq_dynf_timestamp_set(dev);
11208db7e3b6SBing Zhao 	/*
11218db7e3b6SBing Zhao 	 * In non-cached mode, only the default mreg copy action needs to be
11228db7e3b6SBing Zhao 	 * started, since no application-created flow can exist yet.
11238db7e3b6SBing Zhao 	 * Nevertheless, wrapping it in an interface keeps it reusable later.
11248db7e3b6SBing Zhao 	 */
11258db7e3b6SBing Zhao 	ret = mlx5_flow_start_default(dev);
11267ba5320bSNélio Laranjeiro 	if (ret) {
11278db7e3b6SBing Zhao 		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
11288db7e3b6SBing Zhao 			dev->data->port_id, strerror(rte_errno));
11297ba5320bSNélio Laranjeiro 		goto error;
11307ba5320bSNélio Laranjeiro 	}
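	/*
	 * Ensure all device configuration stores above are globally visible
	 * before the datapath burst function pointers are published below.
	 */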
11312aac5b5dSYongseok Koh 	rte_wmb();
11327ba5320bSNélio Laranjeiro 	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
11337ba5320bSNélio Laranjeiro 	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
11342aac5b5dSYongseok Koh 	/* Enable datapath on secondary process. */
11352e86c4e5SOphir Munk 	mlx5_mp_os_req_start_rxtx(dev);
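	/*
	 * Record this port id in the shared device context so that the
	 * common interrupt handler can dispatch LSC/RMV and DevX events
	 * to the right port.
	 */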
113633860cfaSSuanming Mou 	if (priv->sh->intr_handle.fd >= 0) {
113791389890SOphir Munk 		priv->sh->port[priv->dev_port - 1].ih_port_id =
113833860cfaSSuanming Mou 					(uint32_t)dev->data->port_id;
113933860cfaSSuanming Mou 	} else {
114033860cfaSSuanming Mou 		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
114133860cfaSSuanming Mou 			dev->data->port_id);
114233860cfaSSuanming Mou 		dev->data->dev_conf.intr_conf.lsc = 0;
114333860cfaSSuanming Mou 		dev->data->dev_conf.intr_conf.rmv = 0;
114433860cfaSSuanming Mou 	}
114533860cfaSSuanming Mou 	if (priv->sh->intr_handle_devx.fd >= 0)
114691389890SOphir Munk 		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
114733860cfaSSuanming Mou 					(uint32_t)dev->data->port_id;
1148c8d4ee50SNélio Laranjeiro 	return 0;
1149c8d4ee50SNélio Laranjeiro error:
1150a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
1151e60fbd5bSAdrien Mazarguil 	/* Rollback. */
1152272733b5SNélio Laranjeiro 	dev->data->dev_started = 0;
11538db7e3b6SBing Zhao 	mlx5_flow_stop_default(dev);
1154af4f09f2SNélio Laranjeiro 	mlx5_traffic_disable(dev);
1155af4f09f2SNélio Laranjeiro 	mlx5_txq_stop(dev);
1156af4f09f2SNélio Laranjeiro 	mlx5_rxq_stop(dev);
1157*23233fd6SBing Zhao 	if (priv->obj_ops.lb_dummy_queue_release)
1158*23233fd6SBing Zhao 		priv->obj_ops.lb_dummy_queue_release(dev);
1159d133f4cdSViacheslav Ovsiienko 	mlx5_txpp_stop(dev); /* Stop last. */
1160a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
1161a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1162e60fbd5bSAdrien Mazarguil }
1163e60fbd5bSAdrien Mazarguil 
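/*
 * Illustrative sketch (not part of the driver): the application-side
 * sequence that reaches mlx5_dev_start() through the ethdev layer. The
 * helper name and port id handling are assumptions for illustration.
 */
static __rte_unused int
example_port_start(uint16_t port_id)
{
	int ret;

	/* rte_eth_dev_start() invokes dev_ops->dev_start, i.e. this PMD's
	 * mlx5_dev_start(), after the queues have been set up.
	 */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		DRV_LOG(ERR, "port %u start failed: %s",
			port_id, strerror(-ret));
	return ret;
}
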
1164e60fbd5bSAdrien Mazarguil /**
1165e60fbd5bSAdrien Mazarguil  * DPDK callback to stop the device.
1166e60fbd5bSAdrien Mazarguil  *
1167e60fbd5bSAdrien Mazarguil  * Simulate device stop by detaching all configured flows.
1168e60fbd5bSAdrien Mazarguil  *
1169e60fbd5bSAdrien Mazarguil  * @param dev
1170e60fbd5bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success (this implementation always returns 0).
1171e60fbd5bSAdrien Mazarguil  */
117262024eb8SIvan Ilchenko int
1173e60fbd5bSAdrien Mazarguil mlx5_dev_stop(struct rte_eth_dev *dev)
1174e60fbd5bSAdrien Mazarguil {
1175dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1176e60fbd5bSAdrien Mazarguil 
11773f2fe392SNélio Laranjeiro 	dev->data->dev_started = 0;
11783f2fe392SNélio Laranjeiro 	/* Prevent crashes when queues are still in use. */
11793f2fe392SNélio Laranjeiro 	dev->rx_pkt_burst = removed_rx_burst;
11803f2fe392SNélio Laranjeiro 	dev->tx_pkt_burst = removed_tx_burst;
11813f2fe392SNélio Laranjeiro 	rte_wmb();
11822aac5b5dSYongseok Koh 	/* Disable datapath on secondary process. */
11832e86c4e5SOphir Munk 	mlx5_mp_os_req_stop_rxtx(dev);
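	/*
	 * Give lcores that are still inside an Rx/Tx burst enough time to
	 * observe the removed burst functions before queues are destroyed.
	 */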
118420698c9fSOphir Munk 	rte_delay_us_sleep(1000 * priv->rxqs_n);
118524f653a7SYongseok Koh 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
11868db7e3b6SBing Zhao 	mlx5_flow_stop_default(dev);
11878db7e3b6SBing Zhao 	/* Control flows for default traffic can be removed first. */
1188af4f09f2SNélio Laranjeiro 	mlx5_traffic_disable(dev);
11898db7e3b6SBing Zhao 	/* All RX queue flags will be cleared in the flush interface. */
11908db7e3b6SBing Zhao 	mlx5_flow_list_flush(dev, &priv->flows, true);
1191ec962badSLi Zhang 	mlx5_flow_meter_rxq_flush(dev);
1192af4f09f2SNélio Laranjeiro 	mlx5_rx_intr_vec_disable(dev);
119391389890SOphir Munk 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
119491389890SOphir Munk 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
1195af4f09f2SNélio Laranjeiro 	mlx5_txq_stop(dev);
1196af4f09f2SNélio Laranjeiro 	mlx5_rxq_stop(dev);
1197*23233fd6SBing Zhao 	if (priv->obj_ops.lb_dummy_queue_release)
1198*23233fd6SBing Zhao 		priv->obj_ops.lb_dummy_queue_release(dev);
1199d133f4cdSViacheslav Ovsiienko 	mlx5_txpp_stop(dev);
120062024eb8SIvan Ilchenko 
120162024eb8SIvan Ilchenko 	return 0;
1202e60fbd5bSAdrien Mazarguil }
1203272733b5SNélio Laranjeiro 
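/*
 * Illustrative sketch (assumption, not upstream code): the
 * application-side shutdown path that lands in mlx5_dev_stop() above.
 */
static __rte_unused void
example_port_stop(uint16_t port_id)
{
	/* rte_eth_dev_stop() invokes dev_ops->dev_stop, i.e. mlx5_dev_stop. */
	if (rte_eth_dev_stop(port_id) != 0)
		DRV_LOG(WARNING, "port %u stop failed", port_id);
}
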
1204272733b5SNélio Laranjeiro /**
1205272733b5SNélio Laranjeiro  * Enable traffic flows configured by the control plane.
1206272733b5SNélio Laranjeiro  *
1207af4f09f2SNélio Laranjeiro  * @param dev
1210272733b5SNélio Laranjeiro  *   Pointer to Ethernet device structure.
1211272733b5SNélio Laranjeiro  *
1212272733b5SNélio Laranjeiro  * @return
1213a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1214272733b5SNélio Laranjeiro  */
1215272733b5SNélio Laranjeiro int
1216af4f09f2SNélio Laranjeiro mlx5_traffic_enable(struct rte_eth_dev *dev)
1217272733b5SNélio Laranjeiro {
1218dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1219272733b5SNélio Laranjeiro 	struct rte_flow_item_eth bcast = {
1220272733b5SNélio Laranjeiro 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1221272733b5SNélio Laranjeiro 	};
1222272733b5SNélio Laranjeiro 	struct rte_flow_item_eth ipv6_multi_spec = {
1223272733b5SNélio Laranjeiro 		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
1224272733b5SNélio Laranjeiro 	};
1225272733b5SNélio Laranjeiro 	struct rte_flow_item_eth ipv6_multi_mask = {
1226272733b5SNélio Laranjeiro 		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
1227272733b5SNélio Laranjeiro 	};
1228272733b5SNélio Laranjeiro 	struct rte_flow_item_eth unicast = {
1229272733b5SNélio Laranjeiro 		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1230272733b5SNélio Laranjeiro 	};
1231272733b5SNélio Laranjeiro 	struct rte_flow_item_eth unicast_mask = {
1232272733b5SNélio Laranjeiro 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1233272733b5SNélio Laranjeiro 	};
1234272733b5SNélio Laranjeiro 	const unsigned int vlan_filter_n = priv->vlan_filter_n;
12356d13ea8eSOlivier Matz 	const struct rte_ether_addr cmp = {
1236272733b5SNélio Laranjeiro 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1237272733b5SNélio Laranjeiro 	};
1238272733b5SNélio Laranjeiro 	unsigned int i;
1239272733b5SNélio Laranjeiro 	unsigned int j;
1240272733b5SNélio Laranjeiro 	int ret;
1241272733b5SNélio Laranjeiro 
12423c84f34eSOri Kam 	/*
12433c84f34eSOri Kam 	 * The hairpin Txq default flow must be created regardless of the
12443c84f34eSOri Kam 	 * isolation mode; otherwise all packets would be sent out directly
12453c84f34eSOri Kam 	 * without the Tx flow actions, e.g. encapsulation.
12463c84f34eSOri Kam 	 */
12473c84f34eSOri Kam 	for (i = 0; i != priv->txqs_n; ++i) {
12483c84f34eSOri Kam 		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
12493c84f34eSOri Kam 		if (!txq_ctrl)
12503c84f34eSOri Kam 			continue;
1251aa8bea0eSBing Zhao 		/* Only Tx implicit mode requires the default Tx flow. */
1252aa8bea0eSBing Zhao 		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
1253aa8bea0eSBing Zhao 		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
1254aa8bea0eSBing Zhao 		    txq_ctrl->hairpin_conf.peers[0].port ==
1255aa8bea0eSBing Zhao 		    priv->dev_data->port_id) {
12563c84f34eSOri Kam 			ret = mlx5_ctrl_flow_source_queue(dev, i);
12573c84f34eSOri Kam 			if (ret) {
12583c84f34eSOri Kam 				mlx5_txq_release(dev, i);
12593c84f34eSOri Kam 				goto error;
12603c84f34eSOri Kam 			}
12613c84f34eSOri Kam 		}
12623c84f34eSOri Kam 		mlx5_txq_release(dev, i);
12633c84f34eSOri Kam 	}
1264fbde4331SMatan Azrad 	if (priv->config.dv_esw_en && !priv->config.vf) {
1265fbde4331SMatan Azrad 		if (mlx5_flow_create_esw_table_zero_flow(dev))
1266fbde4331SMatan Azrad 			priv->fdb_def_rule = 1;
1267fbde4331SMatan Azrad 		else
1268fbde4331SMatan Azrad 			DRV_LOG(INFO, "port %u FDB default rule cannot be"
1269fbde4331SMatan Azrad 				" configured - only Eswitch group 0 flows are"
1270fbde4331SMatan Azrad 				" supported.", dev->data->port_id);
1271fbde4331SMatan Azrad 	}
12720f0ae73aSShiri Kuzin 	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
12730f0ae73aSShiri Kuzin 		ret = mlx5_flow_lacp_miss(dev);
12740f0ae73aSShiri Kuzin 		if (ret)
12750f0ae73aSShiri Kuzin 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
12760f0ae73aSShiri Kuzin 				"forward LACP to kernel.", dev->data->port_id);
12770f0ae73aSShiri Kuzin 		else
12780f0ae73aSShiri Kuzin 			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
12790f0ae73aSShiri Kuzin 				dev->data->port_id);
12800f0ae73aSShiri Kuzin 	}
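	/*
	 * In isolated mode (rte_flow_isolate()) the application manages all
	 * flows itself, so no implicit control flows are created below.
	 */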
1281f8cb4b57SNélio Laranjeiro 	if (priv->isolated)
1282f8cb4b57SNélio Laranjeiro 		return 0;
1283f8cb4b57SNélio Laranjeiro 	if (dev->data->promiscuous) {
1284f8cb4b57SNélio Laranjeiro 		struct rte_flow_item_eth promisc = {
1285f8cb4b57SNélio Laranjeiro 			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1286f8cb4b57SNélio Laranjeiro 			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1287f8cb4b57SNélio Laranjeiro 			.type = 0,
1288f8cb4b57SNélio Laranjeiro 		};
1289f8cb4b57SNélio Laranjeiro 
1290a6d83b6aSNélio Laranjeiro 		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
1291a6d83b6aSNélio Laranjeiro 		if (ret)
1292a6d83b6aSNélio Laranjeiro 			goto error;
1293f8cb4b57SNélio Laranjeiro 	}
1294f8cb4b57SNélio Laranjeiro 	if (dev->data->all_multicast) {
1295f8cb4b57SNélio Laranjeiro 		struct rte_flow_item_eth multicast = {
1296f8cb4b57SNélio Laranjeiro 			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
1297f8cb4b57SNélio Laranjeiro 			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1298f8cb4b57SNélio Laranjeiro 			.type = 0,
1299f8cb4b57SNélio Laranjeiro 		};
1300f8cb4b57SNélio Laranjeiro 
1301a6d83b6aSNélio Laranjeiro 		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
1302a6d83b6aSNélio Laranjeiro 		if (ret)
1303a6d83b6aSNélio Laranjeiro 			goto error;
1304f8cb4b57SNélio Laranjeiro 	} else {
1305f8cb4b57SNélio Laranjeiro 		/* Add broadcast/multicast flows. */
1306f8cb4b57SNélio Laranjeiro 		for (i = 0; i != vlan_filter_n; ++i) {
1307f8cb4b57SNélio Laranjeiro 			uint16_t vlan = priv->vlan_filter[i];
1308f8cb4b57SNélio Laranjeiro 
1309f8cb4b57SNélio Laranjeiro 			struct rte_flow_item_vlan vlan_spec = {
1310f8cb4b57SNélio Laranjeiro 				.tci = rte_cpu_to_be_16(vlan),
1311f8cb4b57SNélio Laranjeiro 			};
13122bc98393SNelio Laranjeiro 			struct rte_flow_item_vlan vlan_mask =
13132bc98393SNelio Laranjeiro 				rte_flow_item_vlan_mask;
1314f8cb4b57SNélio Laranjeiro 
1315f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
1316f8cb4b57SNélio Laranjeiro 						  &vlan_spec, &vlan_mask);
1317f8cb4b57SNélio Laranjeiro 			if (ret)
1318f8cb4b57SNélio Laranjeiro 				goto error;
1319f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
1320f8cb4b57SNélio Laranjeiro 						  &ipv6_multi_mask,
1321f8cb4b57SNélio Laranjeiro 						  &vlan_spec, &vlan_mask);
1322f8cb4b57SNélio Laranjeiro 			if (ret)
1323f8cb4b57SNélio Laranjeiro 				goto error;
1324f8cb4b57SNélio Laranjeiro 		}
1325f8cb4b57SNélio Laranjeiro 		if (!vlan_filter_n) {
1326f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
1327f8cb4b57SNélio Laranjeiro 			if (ret)
1328f8cb4b57SNélio Laranjeiro 				goto error;
1329f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
1330f8cb4b57SNélio Laranjeiro 					     &ipv6_multi_mask);
1331084de7a1STal Shnaiderman 			if (ret) {
1332084de7a1STal Shnaiderman 				/* Do not fail on IPv6 multicast flow creation failure. */
1333084de7a1STal Shnaiderman 				DRV_LOG(WARNING,
1334084de7a1STal Shnaiderman 					"IPv6 multicast is not supported");
1335084de7a1STal Shnaiderman 				ret = 0;
1336084de7a1STal Shnaiderman 			}
1337f8cb4b57SNélio Laranjeiro 		}
1338f8cb4b57SNélio Laranjeiro 	}
1339f8cb4b57SNélio Laranjeiro 	/* Add MAC address flows. */
1340272733b5SNélio Laranjeiro 	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
13416d13ea8eSOlivier Matz 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
1342272733b5SNélio Laranjeiro 
1343272733b5SNélio Laranjeiro 		if (!memcmp(mac, &cmp, sizeof(*mac)))
1344272733b5SNélio Laranjeiro 			continue;
1345272733b5SNélio Laranjeiro 		memcpy(&unicast.dst.addr_bytes,
1346272733b5SNélio Laranjeiro 		       mac->addr_bytes,
134735b2d13fSOlivier Matz 		       RTE_ETHER_ADDR_LEN);
1348272733b5SNélio Laranjeiro 		for (j = 0; j != vlan_filter_n; ++j) {
1349272733b5SNélio Laranjeiro 			uint16_t vlan = priv->vlan_filter[j];
1350272733b5SNélio Laranjeiro 
1351272733b5SNélio Laranjeiro 			struct rte_flow_item_vlan vlan_spec = {
1352272733b5SNélio Laranjeiro 				.tci = rte_cpu_to_be_16(vlan),
1353272733b5SNélio Laranjeiro 			};
13542bc98393SNelio Laranjeiro 			struct rte_flow_item_vlan vlan_mask =
13552bc98393SNelio Laranjeiro 				rte_flow_item_vlan_mask;
1356272733b5SNélio Laranjeiro 
1357272733b5SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
1358272733b5SNélio Laranjeiro 						  &unicast_mask,
1359272733b5SNélio Laranjeiro 						  &vlan_spec,
1360272733b5SNélio Laranjeiro 						  &vlan_mask);
1361272733b5SNélio Laranjeiro 			if (ret)
1362272733b5SNélio Laranjeiro 				goto error;
1363272733b5SNélio Laranjeiro 		}
1364272733b5SNélio Laranjeiro 		if (!vlan_filter_n) {
1365a6d83b6aSNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
1366272733b5SNélio Laranjeiro 			if (ret)
1367272733b5SNélio Laranjeiro 				goto error;
1368272733b5SNélio Laranjeiro 		}
1369272733b5SNélio Laranjeiro 	}
1370272733b5SNélio Laranjeiro 	return 0;
1371272733b5SNélio Laranjeiro error:
1372a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
13738db7e3b6SBing Zhao 	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
1374a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
1375a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1376272733b5SNélio Laranjeiro }
1377272733b5SNélio Laranjeiro 
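/*
 * Illustrative sketch (assumption, not upstream code): composing one
 * unicast control flow with the same spec/mask pattern used by
 * mlx5_traffic_enable() above. The helper name is hypothetical.
 */
static __rte_unused int
example_mac_ctrl_flow(struct rte_eth_dev *dev,
		      const struct rte_ether_addr *mac)
{
	struct rte_flow_item_eth spec = {
		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	/* Match on the destination MAC only, any VLAN/EtherType. */
	memcpy(&spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
	return mlx5_ctrl_flow(dev, &spec, &mask);
}
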
1378272733b5SNélio Laranjeiro 
1379272733b5SNélio Laranjeiro /**
1380272733b5SNélio Laranjeiro  * Disable traffic flows configured by the control plane.
1381272733b5SNélio Laranjeiro  *
1382272733b5SNélio Laranjeiro  * @param dev
1383af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device structure.
1384272733b5SNélio Laranjeiro  */
1385925061b5SNélio Laranjeiro void
1386af4f09f2SNélio Laranjeiro mlx5_traffic_disable(struct rte_eth_dev *dev)
1387272733b5SNélio Laranjeiro {
1388dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1389272733b5SNélio Laranjeiro 
13908db7e3b6SBing Zhao 	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
1391272733b5SNélio Laranjeiro }
1392272733b5SNélio Laranjeiro 
1393272733b5SNélio Laranjeiro /**
1394272733b5SNélio Laranjeiro  * Restart traffic flows configured by the control plane.
1395272733b5SNélio Laranjeiro  *
1396272733b5SNélio Laranjeiro  * @param dev
1397af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device structure.
1398272733b5SNélio Laranjeiro  *
1399272733b5SNélio Laranjeiro  * @return
1400a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1401272733b5SNélio Laranjeiro  */
1402272733b5SNélio Laranjeiro int
1403272733b5SNélio Laranjeiro mlx5_traffic_restart(struct rte_eth_dev *dev)
1404272733b5SNélio Laranjeiro {
1405af4f09f2SNélio Laranjeiro 	if (dev->data->dev_started) {
1406af4f09f2SNélio Laranjeiro 		mlx5_traffic_disable(dev);
1407a6d83b6aSNélio Laranjeiro 		return mlx5_traffic_enable(dev);
1408af4f09f2SNélio Laranjeiro 	}
1409272733b5SNélio Laranjeiro 	return 0;
1410272733b5SNélio Laranjeiro }
1411
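/*
 * Illustrative sketch (assumption, not upstream code): configuration
 * paths such as a VLAN filter update re-create the control flows so the
 * new setting takes effect, mirroring what mlx5_vlan_filter_set() does.
 * Bounds and duplicate checks are omitted; the helper name is
 * hypothetical.
 */
static __rte_unused int
example_vlan_filter_and_restart(struct rte_eth_dev *dev, uint16_t vlan_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Record the VLAN id in the private filter table. */
	priv->vlan_filter[priv->vlan_filter_n++] = vlan_id;
	/* Re-create the control flows so the new VLAN is matched. */
	return mlx5_traffic_restart(dev);
}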