xref: /dpdk/drivers/net/mlx5/mlx5_trigger.c (revision d46f3b525aafbb4c6c88d9c61b445eb0d93d2149)
18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
2e60fbd5bSAdrien Mazarguil  * Copyright 2015 6WIND S.A.
35feecc57SShahaf Shuler  * Copyright 2015 Mellanox Technologies, Ltd
4e60fbd5bSAdrien Mazarguil  */
58fd92a66SOlivier Matz 
63f2fe392SNélio Laranjeiro #include <unistd.h>
7e60fbd5bSAdrien Mazarguil 
8e60fbd5bSAdrien Mazarguil #include <rte_ether.h>
9df96fd0dSBruce Richardson #include <ethdev_driver.h>
10198a3c33SNelio Laranjeiro #include <rte_interrupts.h>
11198a3c33SNelio Laranjeiro #include <rte_alarm.h>
1220698c9fSOphir Munk #include <rte_cycles.h>
13e60fbd5bSAdrien Mazarguil 
141260a87bSMichael Baum #include <mlx5_malloc.h>
151260a87bSMichael Baum 
16e60fbd5bSAdrien Mazarguil #include "mlx5.h"
17ec4e11d4SDmitry Kozlyuk #include "mlx5_flow.h"
18151cbe3aSMichael Baum #include "mlx5_rx.h"
19377b69fbSMichael Baum #include "mlx5_tx.h"
20e60fbd5bSAdrien Mazarguil #include "mlx5_utils.h"
21efa79e68SOri Kam #include "rte_pmd_mlx5.h"
22e60fbd5bSAdrien Mazarguil 
2386d09686SDariusz Sosnowski static void mlx5_traffic_disable_legacy(struct rte_eth_dev *dev);
2486d09686SDariusz Sosnowski 
25fb732b0aSNélio Laranjeiro /**
26fb732b0aSNélio Laranjeiro  * Stop traffic on Tx queues.
27fb732b0aSNélio Laranjeiro  *
28fb732b0aSNélio Laranjeiro  * @param dev
29fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
30fb732b0aSNélio Laranjeiro  */
316e78005aSNélio Laranjeiro static void
32af4f09f2SNélio Laranjeiro mlx5_txq_stop(struct rte_eth_dev *dev)
336e78005aSNélio Laranjeiro {
34dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
356e78005aSNélio Laranjeiro 	unsigned int i;
366e78005aSNélio Laranjeiro 
376e78005aSNélio Laranjeiro 	for (i = 0; i != priv->txqs_n; ++i)
38af4f09f2SNélio Laranjeiro 		mlx5_txq_release(dev, i);
396e78005aSNélio Laranjeiro }
406e78005aSNélio Laranjeiro 
41fb732b0aSNélio Laranjeiro /**
42fb732b0aSNélio Laranjeiro  * Start traffic on Tx queues.
43fb732b0aSNélio Laranjeiro  *
44fb732b0aSNélio Laranjeiro  * @param dev
45fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
46fb732b0aSNélio Laranjeiro  *
47fb732b0aSNélio Laranjeiro  * @return
48a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
49fb732b0aSNélio Laranjeiro  */
506e78005aSNélio Laranjeiro static int
51af4f09f2SNélio Laranjeiro mlx5_txq_start(struct rte_eth_dev *dev)
526e78005aSNélio Laranjeiro {
53dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
546e78005aSNélio Laranjeiro 	unsigned int i;
55a6d83b6aSNélio Laranjeiro 	int ret;
566e78005aSNélio Laranjeiro 
576e78005aSNélio Laranjeiro 	for (i = 0; i != priv->txqs_n; ++i) {
58af4f09f2SNélio Laranjeiro 		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
59f49f4483SMichael Baum 		struct mlx5_txq_data *txq_data = &txq_ctrl->txq;
60f49f4483SMichael Baum 		uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;
616e78005aSNélio Laranjeiro 
626e78005aSNélio Laranjeiro 		if (!txq_ctrl)
636e78005aSNélio Laranjeiro 			continue;
64c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin)
656e78005aSNélio Laranjeiro 			txq_alloc_elts(txq_ctrl);
66f49f4483SMichael Baum 		MLX5_ASSERT(!txq_ctrl->obj);
67f49f4483SMichael Baum 		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
68f49f4483SMichael Baum 					    0, txq_ctrl->socket);
69894c4a8eSOri Kam 		if (!txq_ctrl->obj) {
70f49f4483SMichael Baum 			DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
71f49f4483SMichael Baum 				"memory resources.", dev->data->port_id,
72f49f4483SMichael Baum 				txq_data->idx);
73a6d83b6aSNélio Laranjeiro 			rte_errno = ENOMEM;
746e78005aSNélio Laranjeiro 			goto error;
756e78005aSNélio Laranjeiro 		}
76f49f4483SMichael Baum 		ret = priv->obj_ops.txq_obj_new(dev, i);
77f49f4483SMichael Baum 		if (ret < 0) {
78f49f4483SMichael Baum 			mlx5_free(txq_ctrl->obj);
79f49f4483SMichael Baum 			txq_ctrl->obj = NULL;
80f49f4483SMichael Baum 			goto error;
81f49f4483SMichael Baum 		}
82c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
83f49f4483SMichael Baum 			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
84876b5d52SMatan Azrad 
85f49f4483SMichael Baum 			txq_data->fcqs = mlx5_malloc(flags, size,
86f49f4483SMichael Baum 						     RTE_CACHE_LINE_SIZE,
87f49f4483SMichael Baum 						     txq_ctrl->socket);
88f49f4483SMichael Baum 			if (!txq_data->fcqs) {
89f49f4483SMichael Baum 				DRV_LOG(ERR, "Port %u Tx queue %u cannot "
90f49f4483SMichael Baum 					"allocate memory (FCQ).",
91f49f4483SMichael Baum 					dev->data->port_id, i);
92f49f4483SMichael Baum 				rte_errno = ENOMEM;
93f49f4483SMichael Baum 				goto error;
94f49f4483SMichael Baum 			}
95f49f4483SMichael Baum 		}
96f49f4483SMichael Baum 		DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
97f49f4483SMichael Baum 			dev->data->port_id, i, (void *)&txq_ctrl->obj);
98f49f4483SMichael Baum 		LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
996e78005aSNélio Laranjeiro 	}
100a6d83b6aSNélio Laranjeiro 	return 0;
1016e78005aSNélio Laranjeiro error:
102a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
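	/* Roll back: release the failed Tx queue and all queues set up before it. */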
10324f653a7SYongseok Koh 	do {
10424f653a7SYongseok Koh 		mlx5_txq_release(dev, i);
10524f653a7SYongseok Koh 	} while (i-- != 0);
106a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
107a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1086e78005aSNélio Laranjeiro }
1096e78005aSNélio Laranjeiro 
110fb732b0aSNélio Laranjeiro /**
111fec28ca0SDmitry Kozlyuk  * Register Rx queue mempools and fill the Rx queue cache.
112fec28ca0SDmitry Kozlyuk  * This function tolerates repeated mempool registration.
113fec28ca0SDmitry Kozlyuk  *
114fec28ca0SDmitry Kozlyuk  * @param[in] rxq_ctrl
115fec28ca0SDmitry Kozlyuk  *   Rx queue control data.
116fec28ca0SDmitry Kozlyuk  *
117fec28ca0SDmitry Kozlyuk  * @return
118fec28ca0SDmitry Kozlyuk  *   0 on success, (-1) on failure and rte_errno is set.
119fec28ca0SDmitry Kozlyuk  */
120fec28ca0SDmitry Kozlyuk static int
12120489176SMichael Baum mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
122fec28ca0SDmitry Kozlyuk {
123fec28ca0SDmitry Kozlyuk 	struct rte_mempool *mp;
124fec28ca0SDmitry Kozlyuk 	uint32_t s;
125fec28ca0SDmitry Kozlyuk 	int ret = 0;
126fec28ca0SDmitry Kozlyuk 
127fec28ca0SDmitry Kozlyuk 	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
128fec28ca0SDmitry Kozlyuk 	/* MPRQ mempool is registered on creation, just fill the cache. */
12908ac0358SDmitry Kozlyuk 	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
13008ac0358SDmitry Kozlyuk 		return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
13108ac0358SDmitry Kozlyuk 						      rxq_ctrl->rxq.mprq_mp);
132fec28ca0SDmitry Kozlyuk 	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
13308ac0358SDmitry Kozlyuk 		bool is_extmem;
1347297d2cdSDmitry Kozlyuk 
135fec28ca0SDmitry Kozlyuk 		mp = rxq_ctrl->rxq.rxseg[s].mp;
13608ac0358SDmitry Kozlyuk 		is_extmem = (rte_pktmbuf_priv_flags(mp) &
13708ac0358SDmitry Kozlyuk 			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
13808ac0358SDmitry Kozlyuk 		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
13908ac0358SDmitry Kozlyuk 					       is_extmem);
140fec28ca0SDmitry Kozlyuk 		if (ret < 0 && rte_errno != EEXIST)
141fec28ca0SDmitry Kozlyuk 			return ret;
14208ac0358SDmitry Kozlyuk 		ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
14308ac0358SDmitry Kozlyuk 						     mp);
14408ac0358SDmitry Kozlyuk 		if (ret < 0)
14508ac0358SDmitry Kozlyuk 			return ret;
146fec28ca0SDmitry Kozlyuk 	}
147fec28ca0SDmitry Kozlyuk 	return 0;
148fec28ca0SDmitry Kozlyuk }
149fec28ca0SDmitry Kozlyuk 
150fec28ca0SDmitry Kozlyuk /**
151fb732b0aSNélio Laranjeiro  * Stop traffic on Rx queues.
152fb732b0aSNélio Laranjeiro  *
153fb732b0aSNélio Laranjeiro  * @param dev
154fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
155fb732b0aSNélio Laranjeiro  */
156a1366b1aSNélio Laranjeiro static void
157af4f09f2SNélio Laranjeiro mlx5_rxq_stop(struct rte_eth_dev *dev)
158a1366b1aSNélio Laranjeiro {
159dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
160a1366b1aSNélio Laranjeiro 	unsigned int i;
161a1366b1aSNélio Laranjeiro 
162a1366b1aSNélio Laranjeiro 	for (i = 0; i != priv->rxqs_n; ++i)
163af4f09f2SNélio Laranjeiro 		mlx5_rxq_release(dev, i);
164a1366b1aSNélio Laranjeiro }
165a1366b1aSNélio Laranjeiro 
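/**
 * Prepare a single Rx queue to be started: register its mempools, allocate
 * the queue elements and the queue object placeholder.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq_ctrl
 *   Rx queue control data.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */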
16609c25553SXueming Li static int
16709c25553SXueming Li mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
16809c25553SXueming Li 		      unsigned int idx)
16909c25553SXueming Li {
17009c25553SXueming Li 	int ret = 0;
17109c25553SXueming Li 
172c06f77aeSMichael Baum 	if (!rxq_ctrl->is_hairpin) {
17309c25553SXueming Li 		/*
17409c25553SXueming Li 		 * Pre-register the mempools. Regardless of whether
17509c25553SXueming Li 		 * the implicit registration is enabled or not,
17609c25553SXueming Li 		 * Rx mempool destruction is tracked to free MRs.
17709c25553SXueming Li 		 */
17820489176SMichael Baum 		if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
17909c25553SXueming Li 			return -rte_errno;
18009c25553SXueming Li 		ret = rxq_alloc_elts(rxq_ctrl);
18109c25553SXueming Li 		if (ret)
18209c25553SXueming Li 			return ret;
18309c25553SXueming Li 	}
18409c25553SXueming Li 	MLX5_ASSERT(!rxq_ctrl->obj);
18509c25553SXueming Li 	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
18609c25553SXueming Li 				    sizeof(*rxq_ctrl->obj), 0,
18709c25553SXueming Li 				    rxq_ctrl->socket);
18809c25553SXueming Li 	if (!rxq_ctrl->obj) {
18909c25553SXueming Li 		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
19009c25553SXueming Li 			dev->data->port_id, idx);
19109c25553SXueming Li 		rte_errno = ENOMEM;
19209c25553SXueming Li 		return -rte_errno;
19309c25553SXueming Li 	}
19409c25553SXueming Li 	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
19509c25553SXueming Li 		idx, (void *)&rxq_ctrl->obj);
19609c25553SXueming Li 	return 0;
19709c25553SXueming Li }
19809c25553SXueming Li 
199fb732b0aSNélio Laranjeiro /**
200fb732b0aSNélio Laranjeiro  * Start traffic on Rx queues.
201fb732b0aSNélio Laranjeiro  *
202fb732b0aSNélio Laranjeiro  * @param dev
203fb732b0aSNélio Laranjeiro  *   Pointer to Ethernet device structure.
204fb732b0aSNélio Laranjeiro  *
205fb732b0aSNélio Laranjeiro  * @return
206a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
207fb732b0aSNélio Laranjeiro  */
208a1366b1aSNélio Laranjeiro static int
209af4f09f2SNélio Laranjeiro mlx5_rxq_start(struct rte_eth_dev *dev)
210a1366b1aSNélio Laranjeiro {
211dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
212a1366b1aSNélio Laranjeiro 	unsigned int i;
213a1366b1aSNélio Laranjeiro 	int ret = 0;
214a1366b1aSNélio Laranjeiro 
2157d6bf6b8SYongseok Koh 	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
21624f653a7SYongseok Koh 	if (mlx5_mprq_alloc_mp(dev)) {
21724f653a7SYongseok Koh 		/* Should not release Rx queues but return immediately. */
21824f653a7SYongseok Koh 		return -rte_errno;
21924f653a7SYongseok Koh 	}
22091d1cfafSMichael Baum 	DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
22191d1cfafSMichael Baum 		dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
22291d1cfafSMichael Baum 	DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
22391d1cfafSMichael Baum 		dev->data->port_id, priv->sh->dev_cap.max_sge);
224a1366b1aSNélio Laranjeiro 	for (i = 0; i != priv->rxqs_n; ++i) {
2250cedf34dSXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
2260cedf34dSXueming Li 		struct mlx5_rxq_ctrl *rxq_ctrl;
227a1366b1aSNélio Laranjeiro 
2280cedf34dSXueming Li 		if (rxq == NULL)
229a1366b1aSNélio Laranjeiro 			continue;
2300cedf34dSXueming Li 		rxq_ctrl = rxq->ctrl;
231c93943c5SDariusz Sosnowski 		if (!rxq_ctrl->started)
23209c25553SXueming Li 			if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
233fec28ca0SDmitry Kozlyuk 				goto error;
2345ceb3a02SXueming Li 		ret = priv->obj_ops.rxq_obj_new(rxq);
2351260a87bSMichael Baum 		if (ret) {
2361260a87bSMichael Baum 			mlx5_free(rxq_ctrl->obj);
2379ec1ceabSDmitry Kozlyuk 			rxq_ctrl->obj = NULL;
2381260a87bSMichael Baum 			goto error;
2391260a87bSMichael Baum 		}
240c93943c5SDariusz Sosnowski 		if (!rxq_ctrl->started)
241c93943c5SDariusz Sosnowski 			LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
24209c25553SXueming Li 		rxq_ctrl->started = true;
2431260a87bSMichael Baum 	}
244a6d83b6aSNélio Laranjeiro 	return 0;
245a1366b1aSNélio Laranjeiro error:
246a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
24724f653a7SYongseok Koh 	do {
24824f653a7SYongseok Koh 		mlx5_rxq_release(dev, i);
24924f653a7SYongseok Koh 	} while (i-- != 0);
250a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
251a6d83b6aSNélio Laranjeiro 	return -rte_errno;
252a1366b1aSNélio Laranjeiro }
253a1366b1aSNélio Laranjeiro 
254e60fbd5bSAdrien Mazarguil /**
2556a338ad4SOri Kam  * Binds Tx queues to Rx queues for hairpin.
2566a338ad4SOri Kam  *
2576a338ad4SOri Kam  * Binds Tx queues to the target Rx queues.
2586a338ad4SOri Kam  *
2596a338ad4SOri Kam  * @param dev
2606a338ad4SOri Kam  *   Pointer to Ethernet device structure.
2616a338ad4SOri Kam  *
2626a338ad4SOri Kam  * @return
2636a338ad4SOri Kam  *   0 on success, a negative errno value otherwise and rte_errno is set.
2646a338ad4SOri Kam  */
2656a338ad4SOri Kam static int
26637cd4501SBing Zhao mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
2676a338ad4SOri Kam {
2686a338ad4SOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
2696a338ad4SOri Kam 	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
2706a338ad4SOri Kam 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
2716a338ad4SOri Kam 	struct mlx5_txq_ctrl *txq_ctrl;
2720cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq;
2736a338ad4SOri Kam 	struct mlx5_rxq_ctrl *rxq_ctrl;
2746a338ad4SOri Kam 	struct mlx5_devx_obj *sq;
2756a338ad4SOri Kam 	struct mlx5_devx_obj *rq;
2766a338ad4SOri Kam 	unsigned int i;
2776a338ad4SOri Kam 	int ret = 0;
278aa8bea0eSBing Zhao 	bool need_auto = false;
279aa8bea0eSBing Zhao 	uint16_t self_port = dev->data->port_id;
2806a338ad4SOri Kam 
2816a338ad4SOri Kam 	for (i = 0; i != priv->txqs_n; ++i) {
2826a338ad4SOri Kam 		txq_ctrl = mlx5_txq_get(dev, i);
2836a338ad4SOri Kam 		if (!txq_ctrl)
2846a338ad4SOri Kam 			continue;
285c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin ||
28675f166c2SBing Zhao 		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
2876a338ad4SOri Kam 			mlx5_txq_release(dev, i);
2886a338ad4SOri Kam 			continue;
2896a338ad4SOri Kam 		}
290aa8bea0eSBing Zhao 		if (txq_ctrl->hairpin_conf.manual_bind) {
291aa8bea0eSBing Zhao 			mlx5_txq_release(dev, i);
292aa8bea0eSBing Zhao 			return 0;
293aa8bea0eSBing Zhao 		}
294aa8bea0eSBing Zhao 		need_auto = true;
295aa8bea0eSBing Zhao 		mlx5_txq_release(dev, i);
296aa8bea0eSBing Zhao 	}
297aa8bea0eSBing Zhao 	if (!need_auto)
298aa8bea0eSBing Zhao 		return 0;
299aa8bea0eSBing Zhao 	for (i = 0; i != priv->txqs_n; ++i) {
300aa8bea0eSBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
301aa8bea0eSBing Zhao 		if (!txq_ctrl)
302aa8bea0eSBing Zhao 			continue;
30375f166c2SBing Zhao 		/* Skip hairpin queues with other peer ports. */
304c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin ||
30575f166c2SBing Zhao 		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
306aa8bea0eSBing Zhao 			mlx5_txq_release(dev, i);
307aa8bea0eSBing Zhao 			continue;
308aa8bea0eSBing Zhao 		}
3096a338ad4SOri Kam 		if (!txq_ctrl->obj) {
3106a338ad4SOri Kam 			rte_errno = ENOMEM;
3116a338ad4SOri Kam 			DRV_LOG(ERR, "port %u no txq object found: %d",
3126a338ad4SOri Kam 				dev->data->port_id, i);
3136a338ad4SOri Kam 			mlx5_txq_release(dev, i);
3146a338ad4SOri Kam 			return -rte_errno;
3156a338ad4SOri Kam 		}
3166a338ad4SOri Kam 		sq = txq_ctrl->obj->sq;
3170cedf34dSXueming Li 		rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
3180cedf34dSXueming Li 		if (rxq == NULL) {
3196a338ad4SOri Kam 			mlx5_txq_release(dev, i);
3206a338ad4SOri Kam 			rte_errno = EINVAL;
3216a338ad4SOri Kam 			DRV_LOG(ERR, "port %u no rxq object found: %d",
3226a338ad4SOri Kam 				dev->data->port_id,
3236a338ad4SOri Kam 				txq_ctrl->hairpin_conf.peers[0].queue);
3246a338ad4SOri Kam 			return -rte_errno;
3256a338ad4SOri Kam 		}
3260cedf34dSXueming Li 		rxq_ctrl = rxq->ctrl;
327c06f77aeSMichael Baum 		if (!rxq_ctrl->is_hairpin ||
32844126bd9SXueming Li 		    rxq->hairpin_conf.peers[0].queue != i) {
3296a338ad4SOri Kam 			rte_errno = ENOMEM;
3306a338ad4SOri Kam 			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
3316a338ad4SOri Kam 				"Rx queue %d", dev->data->port_id,
3326a338ad4SOri Kam 				i, txq_ctrl->hairpin_conf.peers[0].queue);
3336a338ad4SOri Kam 			goto error;
3346a338ad4SOri Kam 		}
3356a338ad4SOri Kam 		rq = rxq_ctrl->obj->rq;
3366a338ad4SOri Kam 		if (!rq) {
3376a338ad4SOri Kam 			rte_errno = ENOMEM;
3386a338ad4SOri Kam 			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
3396a338ad4SOri Kam 				dev->data->port_id,
3406a338ad4SOri Kam 				txq_ctrl->hairpin_conf.peers[0].queue);
3416a338ad4SOri Kam 			goto error;
3426a338ad4SOri Kam 		}
3436a338ad4SOri Kam 		sq_attr.state = MLX5_SQC_STATE_RDY;
3446a338ad4SOri Kam 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
3456a338ad4SOri Kam 		sq_attr.hairpin_peer_rq = rq->id;
34653820561SMichael Baum 		sq_attr.hairpin_peer_vhca =
34753820561SMichael Baum 				priv->sh->cdev->config.hca_attr.vhca_id;
3486a338ad4SOri Kam 		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
3496a338ad4SOri Kam 		if (ret)
3506a338ad4SOri Kam 			goto error;
351ca638c49SDariusz Sosnowski 		rq_attr.state = MLX5_RQC_STATE_RDY;
352ca638c49SDariusz Sosnowski 		rq_attr.rq_state = MLX5_RQC_STATE_RST;
3536a338ad4SOri Kam 		rq_attr.hairpin_peer_sq = sq->id;
35453820561SMichael Baum 		rq_attr.hairpin_peer_vhca =
35553820561SMichael Baum 				priv->sh->cdev->config.hca_attr.vhca_id;
3566a338ad4SOri Kam 		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
3576a338ad4SOri Kam 		if (ret)
3586a338ad4SOri Kam 			goto error;
359aa8bea0eSBing Zhao 		/* Qs with auto-bind will be destroyed directly. */
36044126bd9SXueming Li 		rxq->hairpin_status = 1;
361aa8bea0eSBing Zhao 		txq_ctrl->hairpin_status = 1;
3626a338ad4SOri Kam 		mlx5_txq_release(dev, i);
3636a338ad4SOri Kam 	}
3646a338ad4SOri Kam 	return 0;
3656a338ad4SOri Kam error:
3666a338ad4SOri Kam 	mlx5_txq_release(dev, i);
3676a338ad4SOri Kam 	return -rte_errno;
3686a338ad4SOri Kam }
3696a338ad4SOri Kam 
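/*
 * Usage sketch (illustrative only, not part of the driver): auto binding in
 * mlx5_hairpin_auto_bind() above is triggered from device start when the
 * hairpin queues were configured with manual_bind == 0 and the peer port set
 * to the port itself. The queue indexes and descriptor count below are
 * hypothetical; the matching rte_eth_rx_hairpin_queue_setup() call is
 * omitted for brevity.
 *
 *     struct rte_eth_hairpin_conf conf = {
 *         .peer_count = 1,
 *         .peers[0] = { .port = port_id, .queue = hairpin_rxq_idx },
 *     };
 *     ret = rte_eth_tx_hairpin_queue_setup(port_id, hairpin_txq_idx,
 *                                          nb_hairpin_desc, &conf);
 *     if (ret == 0)
 *         ret = rte_eth_dev_start(port_id); // Tx/Rx hairpin queues bound here.
 */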
37037cd4501SBing Zhao /*
37137cd4501SBing Zhao  * Fetch the peer queue's SW & HW information.
37237cd4501SBing Zhao  *
37337cd4501SBing Zhao  * @param dev
37437cd4501SBing Zhao  *   Pointer to Ethernet device structure.
37537cd4501SBing Zhao  * @param peer_queue
37637cd4501SBing Zhao  *   Index of the queue to fetch the information.
37737cd4501SBing Zhao  * @param current_info
37837cd4501SBing Zhao  *   Pointer to the input peer information, not used currently.
37937cd4501SBing Zhao  * @param peer_info
38037cd4501SBing Zhao  *   Pointer to the structure to store the information, output.
38137cd4501SBing Zhao  * @param direction
38237cd4501SBing Zhao  *   Positive to get the RxQ information, zero to get the TxQ information.
38337cd4501SBing Zhao  *
38437cd4501SBing Zhao  * @return
38537cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
38637cd4501SBing Zhao  */
38737cd4501SBing Zhao int
38837cd4501SBing Zhao mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
38937cd4501SBing Zhao 			       struct rte_hairpin_peer_info *current_info,
39037cd4501SBing Zhao 			       struct rte_hairpin_peer_info *peer_info,
39137cd4501SBing Zhao 			       uint32_t direction)
39237cd4501SBing Zhao {
39337cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
39437cd4501SBing Zhao 	RTE_SET_USED(current_info);
39537cd4501SBing Zhao 
39637cd4501SBing Zhao 	if (dev->data->dev_started == 0) {
39737cd4501SBing Zhao 		rte_errno = EBUSY;
39837cd4501SBing Zhao 		DRV_LOG(ERR, "peer port %u is not started",
39937cd4501SBing Zhao 			dev->data->port_id);
40037cd4501SBing Zhao 		return -rte_errno;
40137cd4501SBing Zhao 	}
40237cd4501SBing Zhao 	/*
40337cd4501SBing Zhao 	 * Peer port used as egress. In the current design, hairpin Tx queue
40437cd4501SBing Zhao 	 * will be bound to the peer Rx queue. Indeed, only the information of
40537cd4501SBing Zhao 	 * peer Rx queue needs to be fetched.
40637cd4501SBing Zhao 	 */
40737cd4501SBing Zhao 	if (direction == 0) {
40837cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
40937cd4501SBing Zhao 
41037cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, peer_queue);
41137cd4501SBing Zhao 		if (txq_ctrl == NULL) {
41237cd4501SBing Zhao 			rte_errno = EINVAL;
41337cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
41437cd4501SBing Zhao 				dev->data->port_id, peer_queue);
41537cd4501SBing Zhao 			return -rte_errno;
41637cd4501SBing Zhao 		}
417c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
41837cd4501SBing Zhao 			rte_errno = EINVAL;
41937cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
42037cd4501SBing Zhao 				dev->data->port_id, peer_queue);
42137cd4501SBing Zhao 			mlx5_txq_release(dev, peer_queue);
42237cd4501SBing Zhao 			return -rte_errno;
42337cd4501SBing Zhao 		}
42437cd4501SBing Zhao 		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
42537cd4501SBing Zhao 			rte_errno = ENOMEM;
42637cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
42737cd4501SBing Zhao 				dev->data->port_id, peer_queue);
42837cd4501SBing Zhao 			mlx5_txq_release(dev, peer_queue);
42937cd4501SBing Zhao 			return -rte_errno;
43037cd4501SBing Zhao 		}
43126e1eaf2SDariusz Sosnowski 		peer_info->qp_id = mlx5_txq_get_sqn(txq_ctrl);
43253820561SMichael Baum 		peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
43337cd4501SBing Zhao 		/* 1-to-1 mapping, only the first one is used. */
43437cd4501SBing Zhao 		peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
43537cd4501SBing Zhao 		peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
43637cd4501SBing Zhao 		peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
43737cd4501SBing Zhao 		mlx5_txq_release(dev, peer_queue);
43837cd4501SBing Zhao 	} else { /* Peer port used as ingress. */
4390cedf34dSXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
44037cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
44137cd4501SBing Zhao 
4420cedf34dSXueming Li 		if (rxq == NULL) {
44337cd4501SBing Zhao 			rte_errno = EINVAL;
44437cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
44537cd4501SBing Zhao 				dev->data->port_id, peer_queue);
44637cd4501SBing Zhao 			return -rte_errno;
44737cd4501SBing Zhao 		}
4480cedf34dSXueming Li 		rxq_ctrl = rxq->ctrl;
449c06f77aeSMichael Baum 		if (!rxq_ctrl->is_hairpin) {
45037cd4501SBing Zhao 			rte_errno = EINVAL;
45137cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
45237cd4501SBing Zhao 				dev->data->port_id, peer_queue);
45337cd4501SBing Zhao 			return -rte_errno;
45437cd4501SBing Zhao 		}
45537cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
45637cd4501SBing Zhao 			rte_errno = ENOMEM;
45737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
45837cd4501SBing Zhao 				dev->data->port_id, peer_queue);
45937cd4501SBing Zhao 			return -rte_errno;
46037cd4501SBing Zhao 		}
46137cd4501SBing Zhao 		peer_info->qp_id = rxq_ctrl->obj->rq->id;
46253820561SMichael Baum 		peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
46344126bd9SXueming Li 		peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
46444126bd9SXueming Li 		peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
46544126bd9SXueming Li 		peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
46637cd4501SBing Zhao 	}
46737cd4501SBing Zhao 	return 0;
46837cd4501SBing Zhao }
46937cd4501SBing Zhao 
47037cd4501SBing Zhao /*
47137cd4501SBing Zhao  * Bind the hairpin queue with the peer HW information.
47237cd4501SBing Zhao  * This needs to be called twice both for Tx and Rx queues of a pair.
47337cd4501SBing Zhao  * If the queue is already bound, it is considered successful.
47437cd4501SBing Zhao  *
47537cd4501SBing Zhao  * @param dev
47637cd4501SBing Zhao  *   Pointer to Ethernet device structure.
47737cd4501SBing Zhao  * @param cur_queue
47837cd4501SBing Zhao  *   Index of the queue to change the HW configuration to bind.
47937cd4501SBing Zhao  * @param peer_info
48037cd4501SBing Zhao  *   Pointer to information of the peer queue.
48137cd4501SBing Zhao  * @param direction
48237cd4501SBing Zhao  *   Positive to configure the TxQ, zero to configure the RxQ.
48337cd4501SBing Zhao  *
48437cd4501SBing Zhao  * @return
48537cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
48637cd4501SBing Zhao  */
48737cd4501SBing Zhao int
48837cd4501SBing Zhao mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
48937cd4501SBing Zhao 			     struct rte_hairpin_peer_info *peer_info,
49037cd4501SBing Zhao 			     uint32_t direction)
49137cd4501SBing Zhao {
49237cd4501SBing Zhao 	int ret = 0;
49337cd4501SBing Zhao 
49437cd4501SBing Zhao 	/*
49537cd4501SBing Zhao 	 * Consistency checking of the peer queue: opposite direction is used
49637cd4501SBing Zhao 	 * to get the peer queue info with ethdev port ID, no need to check.
49737cd4501SBing Zhao 	 */
49837cd4501SBing Zhao 	if (peer_info->peer_q != cur_queue) {
49937cd4501SBing Zhao 		rte_errno = EINVAL;
50037cd4501SBing Zhao 		DRV_LOG(ERR, "port %u queue %d and peer queue %d mismatch",
50137cd4501SBing Zhao 			dev->data->port_id, cur_queue, peer_info->peer_q);
50237cd4501SBing Zhao 		return -rte_errno;
50337cd4501SBing Zhao 	}
50437cd4501SBing Zhao 	if (direction != 0) {
50537cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
50637cd4501SBing Zhao 		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
50737cd4501SBing Zhao 
50837cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, cur_queue);
50937cd4501SBing Zhao 		if (txq_ctrl == NULL) {
51037cd4501SBing Zhao 			rte_errno = EINVAL;
51137cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
51237cd4501SBing Zhao 				dev->data->port_id, cur_queue);
51337cd4501SBing Zhao 			return -rte_errno;
51437cd4501SBing Zhao 		}
515c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
51637cd4501SBing Zhao 			rte_errno = EINVAL;
51737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
51837cd4501SBing Zhao 				dev->data->port_id, cur_queue);
51937cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
52037cd4501SBing Zhao 			return -rte_errno;
52137cd4501SBing Zhao 		}
52237cd4501SBing Zhao 		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
52337cd4501SBing Zhao 			rte_errno = ENOMEM;
52437cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
52537cd4501SBing Zhao 				dev->data->port_id, cur_queue);
52637cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
52737cd4501SBing Zhao 			return -rte_errno;
52837cd4501SBing Zhao 		}
52937cd4501SBing Zhao 		if (txq_ctrl->hairpin_status != 0) {
53037cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Tx queue %d is already bound",
53137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
53237cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
53337cd4501SBing Zhao 			return 0;
53437cd4501SBing Zhao 		}
53537cd4501SBing Zhao 		/*
53637cd4501SBing Zhao 		 * Consistency checking of all the queues of one port is done
53737cd4501SBing Zhao 		 * in the bind() function, and that is optional.
53837cd4501SBing Zhao 		 */
53937cd4501SBing Zhao 		if (peer_info->tx_explicit !=
54037cd4501SBing Zhao 		    txq_ctrl->hairpin_conf.tx_explicit) {
54137cd4501SBing Zhao 			rte_errno = EINVAL;
54237cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d and peer Tx rule mode"
54337cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
54437cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
54537cd4501SBing Zhao 			return -rte_errno;
54637cd4501SBing Zhao 		}
54737cd4501SBing Zhao 		if (peer_info->manual_bind !=
54837cd4501SBing Zhao 		    txq_ctrl->hairpin_conf.manual_bind) {
54937cd4501SBing Zhao 			rte_errno = EINVAL;
55037cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d and peer binding mode"
55137cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
55237cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
55337cd4501SBing Zhao 			return -rte_errno;
55437cd4501SBing Zhao 		}
55537cd4501SBing Zhao 		sq_attr.state = MLX5_SQC_STATE_RDY;
55637cd4501SBing Zhao 		sq_attr.sq_state = MLX5_SQC_STATE_RST;
55737cd4501SBing Zhao 		sq_attr.hairpin_peer_rq = peer_info->qp_id;
55837cd4501SBing Zhao 		sq_attr.hairpin_peer_vhca = peer_info->vhca_id;
55937cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
56037cd4501SBing Zhao 		if (ret == 0)
56137cd4501SBing Zhao 			txq_ctrl->hairpin_status = 1;
56237cd4501SBing Zhao 		mlx5_txq_release(dev, cur_queue);
56337cd4501SBing Zhao 	} else {
5640cedf34dSXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
56537cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
56637cd4501SBing Zhao 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
56737cd4501SBing Zhao 
5680cedf34dSXueming Li 		if (rxq == NULL) {
56937cd4501SBing Zhao 			rte_errno = EINVAL;
57037cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
57137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
57237cd4501SBing Zhao 			return -rte_errno;
57337cd4501SBing Zhao 		}
5740cedf34dSXueming Li 		rxq_ctrl = rxq->ctrl;
575c06f77aeSMichael Baum 		if (!rxq_ctrl->is_hairpin) {
57637cd4501SBing Zhao 			rte_errno = EINVAL;
57737cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
57837cd4501SBing Zhao 				dev->data->port_id, cur_queue);
57937cd4501SBing Zhao 			return -rte_errno;
58037cd4501SBing Zhao 		}
58137cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
58237cd4501SBing Zhao 			rte_errno = ENOMEM;
58337cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
58437cd4501SBing Zhao 				dev->data->port_id, cur_queue);
58537cd4501SBing Zhao 			return -rte_errno;
58637cd4501SBing Zhao 		}
58744126bd9SXueming Li 		if (rxq->hairpin_status != 0) {
58837cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
58937cd4501SBing Zhao 				dev->data->port_id, cur_queue);
59037cd4501SBing Zhao 			return 0;
59137cd4501SBing Zhao 		}
59237cd4501SBing Zhao 		if (peer_info->tx_explicit !=
59344126bd9SXueming Li 		    rxq->hairpin_conf.tx_explicit) {
59437cd4501SBing Zhao 			rte_errno = EINVAL;
59537cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
59637cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
59737cd4501SBing Zhao 			return -rte_errno;
59837cd4501SBing Zhao 		}
59937cd4501SBing Zhao 		if (peer_info->manual_bind !=
60044126bd9SXueming Li 		    rxq->hairpin_conf.manual_bind) {
60137cd4501SBing Zhao 			rte_errno = EINVAL;
60237cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
60337cd4501SBing Zhao 				" mismatch", dev->data->port_id, cur_queue);
60437cd4501SBing Zhao 			return -rte_errno;
60537cd4501SBing Zhao 		}
606ca638c49SDariusz Sosnowski 		rq_attr.state = MLX5_RQC_STATE_RDY;
607ca638c49SDariusz Sosnowski 		rq_attr.rq_state = MLX5_RQC_STATE_RST;
60837cd4501SBing Zhao 		rq_attr.hairpin_peer_sq = peer_info->qp_id;
60937cd4501SBing Zhao 		rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
61037cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
61137cd4501SBing Zhao 		if (ret == 0)
61244126bd9SXueming Li 			rxq->hairpin_status = 1;
61337cd4501SBing Zhao 	}
61437cd4501SBing Zhao 	return ret;
61537cd4501SBing Zhao }
61637cd4501SBing Zhao 
61737cd4501SBing Zhao /*
61837cd4501SBing Zhao  * Unbind the hairpin queue and reset its HW configuration.
61937cd4501SBing Zhao  * This needs to be called twice both for Tx and Rx queues of a pair.
62037cd4501SBing Zhao  * If the queue is already unbound, it is considered successful.
62137cd4501SBing Zhao  *
62237cd4501SBing Zhao  * @param dev
62337cd4501SBing Zhao  *   Pointer to Ethernet device structure.
62437cd4501SBing Zhao  * @param cur_queue
62537cd4501SBing Zhao  *   Index of the queue to change the HW configuration to unbind.
62637cd4501SBing Zhao  * @param direction
62737cd4501SBing Zhao  *   Positive to reset the TxQ, zero to reset the RxQ.
62837cd4501SBing Zhao  *
62937cd4501SBing Zhao  * @return
63037cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
63137cd4501SBing Zhao  */
63237cd4501SBing Zhao int
63337cd4501SBing Zhao mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
63437cd4501SBing Zhao 			       uint32_t direction)
63537cd4501SBing Zhao {
63637cd4501SBing Zhao 	int ret = 0;
63737cd4501SBing Zhao 
63837cd4501SBing Zhao 	if (direction != 0) {
63937cd4501SBing Zhao 		struct mlx5_txq_ctrl *txq_ctrl;
64037cd4501SBing Zhao 		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
64137cd4501SBing Zhao 
64237cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, cur_queue);
64337cd4501SBing Zhao 		if (txq_ctrl == NULL) {
64437cd4501SBing Zhao 			rte_errno = EINVAL;
64537cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
64637cd4501SBing Zhao 				dev->data->port_id, cur_queue);
64737cd4501SBing Zhao 			return -rte_errno;
64837cd4501SBing Zhao 		}
649c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
65037cd4501SBing Zhao 			rte_errno = EINVAL;
65137cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
65237cd4501SBing Zhao 				dev->data->port_id, cur_queue);
65337cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
65437cd4501SBing Zhao 			return -rte_errno;
65537cd4501SBing Zhao 		}
65637cd4501SBing Zhao 		/* Already unbound, return success before obj checking. */
65737cd4501SBing Zhao 		if (txq_ctrl->hairpin_status == 0) {
65837cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Tx queue %d is already unbound",
65937cd4501SBing Zhao 				dev->data->port_id, cur_queue);
66037cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
66137cd4501SBing Zhao 			return 0;
66237cd4501SBing Zhao 		}
66337cd4501SBing Zhao 		if (!txq_ctrl->obj || !txq_ctrl->obj->sq) {
66437cd4501SBing Zhao 			rte_errno = ENOMEM;
66537cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Txq object found: %d",
66637cd4501SBing Zhao 				dev->data->port_id, cur_queue);
66737cd4501SBing Zhao 			mlx5_txq_release(dev, cur_queue);
66837cd4501SBing Zhao 			return -rte_errno;
66937cd4501SBing Zhao 		}
67037cd4501SBing Zhao 		sq_attr.state = MLX5_SQC_STATE_RST;
671ca638c49SDariusz Sosnowski 		sq_attr.sq_state = MLX5_SQC_STATE_RDY;
67237cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
67337cd4501SBing Zhao 		if (ret == 0)
67437cd4501SBing Zhao 			txq_ctrl->hairpin_status = 0;
67537cd4501SBing Zhao 		mlx5_txq_release(dev, cur_queue);
67637cd4501SBing Zhao 	} else {
6770cedf34dSXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
67837cd4501SBing Zhao 		struct mlx5_rxq_ctrl *rxq_ctrl;
67937cd4501SBing Zhao 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
68037cd4501SBing Zhao 
6810cedf34dSXueming Li 		if (rxq == NULL) {
68237cd4501SBing Zhao 			rte_errno = EINVAL;
68337cd4501SBing Zhao 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
68437cd4501SBing Zhao 				dev->data->port_id, cur_queue);
68537cd4501SBing Zhao 			return -rte_errno;
68637cd4501SBing Zhao 		}
6870cedf34dSXueming Li 		rxq_ctrl = rxq->ctrl;
688c06f77aeSMichael Baum 		if (!rxq_ctrl->is_hairpin) {
68937cd4501SBing Zhao 			rte_errno = EINVAL;
69037cd4501SBing Zhao 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
69137cd4501SBing Zhao 				dev->data->port_id, cur_queue);
69237cd4501SBing Zhao 			return -rte_errno;
69337cd4501SBing Zhao 		}
69444126bd9SXueming Li 		if (rxq->hairpin_status == 0) {
69537cd4501SBing Zhao 			DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
69637cd4501SBing Zhao 				dev->data->port_id, cur_queue);
69737cd4501SBing Zhao 			return 0;
69837cd4501SBing Zhao 		}
69937cd4501SBing Zhao 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
70037cd4501SBing Zhao 			rte_errno = ENOMEM;
70137cd4501SBing Zhao 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
70237cd4501SBing Zhao 				dev->data->port_id, cur_queue);
70337cd4501SBing Zhao 			return -rte_errno;
70437cd4501SBing Zhao 		}
705ca638c49SDariusz Sosnowski 		rq_attr.state = MLX5_RQC_STATE_RST;
706ca638c49SDariusz Sosnowski 		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
70737cd4501SBing Zhao 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
70837cd4501SBing Zhao 		if (ret == 0)
70944126bd9SXueming Li 			rxq->hairpin_status = 0;
71037cd4501SBing Zhao 	}
71137cd4501SBing Zhao 	return ret;
71237cd4501SBing Zhao }
71337cd4501SBing Zhao 
71437cd4501SBing Zhao /*
71537cd4501SBing Zhao  * Bind the hairpin port pairs, from the Tx to the peer Rx.
71637cd4501SBing Zhao  * This function only supports to bind the Tx to one Rx.
71737cd4501SBing Zhao  *
71837cd4501SBing Zhao  * @param dev
71937cd4501SBing Zhao  *   Pointer to Ethernet device structure.
72037cd4501SBing Zhao  * @param rx_port
72137cd4501SBing Zhao  *   Port identifier of the Rx port.
72237cd4501SBing Zhao  *
72337cd4501SBing Zhao  * @return
72437cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
72537cd4501SBing Zhao  */
72637cd4501SBing Zhao static int
72737cd4501SBing Zhao mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
72837cd4501SBing Zhao {
72937cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
73037cd4501SBing Zhao 	int ret = 0;
73137cd4501SBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
73237cd4501SBing Zhao 	uint32_t i;
73337cd4501SBing Zhao 	struct rte_hairpin_peer_info peer = {0xffffff};
73437cd4501SBing Zhao 	struct rte_hairpin_peer_info cur;
73537cd4501SBing Zhao 	const struct rte_eth_hairpin_conf *conf;
73637cd4501SBing Zhao 	uint16_t num_q = 0;
73737cd4501SBing Zhao 	uint16_t local_port = priv->dev_data->port_id;
73837cd4501SBing Zhao 	uint32_t manual;
73937cd4501SBing Zhao 	uint32_t explicit;
74037cd4501SBing Zhao 	uint16_t rx_queue;
74137cd4501SBing Zhao 
74256bb3c84SXueming Li 	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
74337cd4501SBing Zhao 		rte_errno = ENODEV;
74437cd4501SBing Zhao 		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
74537cd4501SBing Zhao 		return -rte_errno;
74637cd4501SBing Zhao 	}
74737cd4501SBing Zhao 	/*
74837cd4501SBing Zhao 	 * Before binding TxQ to peer RxQ, a first-round loop checks the
74937cd4501SBing Zhao 	 * queues' configuration consistency. This is a little time-consuming
75037cd4501SBing Zhao 	 * but better than having to roll back afterwards.
75137cd4501SBing Zhao 	 */
75237cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
75337cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
75437cd4501SBing Zhao 		if (txq_ctrl == NULL)
75537cd4501SBing Zhao 			continue;
756c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
75737cd4501SBing Zhao 			mlx5_txq_release(dev, i);
75837cd4501SBing Zhao 			continue;
75937cd4501SBing Zhao 		}
76037cd4501SBing Zhao 		/*
76137cd4501SBing Zhao 		 * All hairpin Tx queues of a single port that connected to the
76237cd4501SBing Zhao 		 * same peer Rx port should have the same "auto binding" and
76337cd4501SBing Zhao 		 * "implicit Tx flow" modes.
76437cd4501SBing Zhao 		 * Peer consistency checking will be done in per queue binding.
76537cd4501SBing Zhao 		 */
76637cd4501SBing Zhao 		conf = &txq_ctrl->hairpin_conf;
76737cd4501SBing Zhao 		if (conf->peers[0].port == rx_port) {
76837cd4501SBing Zhao 			if (num_q == 0) {
76937cd4501SBing Zhao 				manual = conf->manual_bind;
77037cd4501SBing Zhao 				explicit = conf->tx_explicit;
77137cd4501SBing Zhao 			} else {
77237cd4501SBing Zhao 				if (manual != conf->manual_bind ||
77337cd4501SBing Zhao 				    explicit != conf->tx_explicit) {
77437cd4501SBing Zhao 					rte_errno = EINVAL;
77537cd4501SBing Zhao 					DRV_LOG(ERR, "port %u queue %d mode"
77637cd4501SBing Zhao 						" mismatch: %u %u, %u %u",
77737cd4501SBing Zhao 						local_port, i, manual,
77837cd4501SBing Zhao 						conf->manual_bind, explicit,
77937cd4501SBing Zhao 						conf->tx_explicit);
78037cd4501SBing Zhao 					mlx5_txq_release(dev, i);
78137cd4501SBing Zhao 					return -rte_errno;
78237cd4501SBing Zhao 				}
78337cd4501SBing Zhao 			}
78437cd4501SBing Zhao 			num_q++;
78537cd4501SBing Zhao 		}
78637cd4501SBing Zhao 		mlx5_txq_release(dev, i);
78737cd4501SBing Zhao 	}
78837cd4501SBing Zhao 	/* Once no queue is configured, success is returned directly. */
78937cd4501SBing Zhao 	if (num_q == 0)
79037cd4501SBing Zhao 		return ret;
79137cd4501SBing Zhao 	/* All the hairpin TX queues need to be traversed again. */
79237cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
79337cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
79437cd4501SBing Zhao 		if (txq_ctrl == NULL)
79537cd4501SBing Zhao 			continue;
796c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
79737cd4501SBing Zhao 			mlx5_txq_release(dev, i);
79837cd4501SBing Zhao 			continue;
79937cd4501SBing Zhao 		}
80037cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
80137cd4501SBing Zhao 			mlx5_txq_release(dev, i);
80237cd4501SBing Zhao 			continue;
80337cd4501SBing Zhao 		}
80437cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
80537cd4501SBing Zhao 		/*
80637cd4501SBing Zhao 		 * Fetch peer RxQ's information.
80737cd4501SBing Zhao 		 * No need to pass the information of the current queue.
80837cd4501SBing Zhao 		 */
80937cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_update(rx_port, rx_queue,
81037cd4501SBing Zhao 							NULL, &peer, 1);
81137cd4501SBing Zhao 		if (ret != 0) {
81237cd4501SBing Zhao 			mlx5_txq_release(dev, i);
81337cd4501SBing Zhao 			goto error;
81437cd4501SBing Zhao 		}
81537cd4501SBing Zhao 		/* Accessing its own device, inside mlx5 PMD. */
81637cd4501SBing Zhao 		ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1);
81737cd4501SBing Zhao 		if (ret != 0) {
81837cd4501SBing Zhao 			mlx5_txq_release(dev, i);
81937cd4501SBing Zhao 			goto error;
82037cd4501SBing Zhao 		}
82137cd4501SBing Zhao 		/* Pass TxQ's information to peer RxQ and try binding. */
82237cd4501SBing Zhao 		cur.peer_q = rx_queue;
82326e1eaf2SDariusz Sosnowski 		cur.qp_id = mlx5_txq_get_sqn(txq_ctrl);
82453820561SMichael Baum 		cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
82537cd4501SBing Zhao 		cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
82637cd4501SBing Zhao 		cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
82737cd4501SBing Zhao 		/*
82837cd4501SBing Zhao 		 * In order to access another device in a proper way, RTE level
82937cd4501SBing Zhao 		 * private function is needed.
83037cd4501SBing Zhao 		 */
83137cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_bind(rx_port, rx_queue,
83237cd4501SBing Zhao 						      &cur, 0);
83337cd4501SBing Zhao 		if (ret != 0) {
83437cd4501SBing Zhao 			mlx5_txq_release(dev, i);
83537cd4501SBing Zhao 			goto error;
83637cd4501SBing Zhao 		}
83737cd4501SBing Zhao 		mlx5_txq_release(dev, i);
83837cd4501SBing Zhao 	}
83937cd4501SBing Zhao 	return 0;
84037cd4501SBing Zhao error:
84137cd4501SBing Zhao 	/*
84237cd4501SBing Zhao 	 * Do roll-back process for the queues already bound.
84337cd4501SBing Zhao 	 * No need to check the return value of the queue unbind function.
84437cd4501SBing Zhao 	 */
84537cd4501SBing Zhao 	do {
84637cd4501SBing Zhao 		/* No validation is needed here. */
84737cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
84837cd4501SBing Zhao 		if (txq_ctrl == NULL)
84937cd4501SBing Zhao 			continue;
850ab2439f8SDariusz Sosnowski 		if (!txq_ctrl->is_hairpin ||
851ab2439f8SDariusz Sosnowski 		    txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
852ab2439f8SDariusz Sosnowski 			mlx5_txq_release(dev, i);
853ab2439f8SDariusz Sosnowski 			continue;
854ab2439f8SDariusz Sosnowski 		}
85537cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
85637cd4501SBing Zhao 		rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
85737cd4501SBing Zhao 		mlx5_hairpin_queue_peer_unbind(dev, i, 1);
85837cd4501SBing Zhao 		mlx5_txq_release(dev, i);
85937cd4501SBing Zhao 	} while (i--);
86037cd4501SBing Zhao 	return ret;
86137cd4501SBing Zhao }
86237cd4501SBing Zhao 
86337cd4501SBing Zhao /*
86437cd4501SBing Zhao  * Unbind the hairpin port pair, HW configuration of both devices will be cleared
865b53d106dSSean Morrissey  * and status will be reset for all the queues used between them.
86637cd4501SBing Zhao  * This function only supports to unbind the Tx from one Rx.
86737cd4501SBing Zhao  *
86837cd4501SBing Zhao  * @param dev
86937cd4501SBing Zhao  *   Pointer to Ethernet device structure.
87037cd4501SBing Zhao  * @param rx_port
87137cd4501SBing Zhao  *   Port identifier of the Rx port.
87237cd4501SBing Zhao  *
87337cd4501SBing Zhao  * @return
87437cd4501SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
87537cd4501SBing Zhao  */
87637cd4501SBing Zhao static int
87737cd4501SBing Zhao mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
87837cd4501SBing Zhao {
87937cd4501SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
88037cd4501SBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
88137cd4501SBing Zhao 	uint32_t i;
88237cd4501SBing Zhao 	int ret;
88337cd4501SBing Zhao 	uint16_t cur_port = priv->dev_data->port_id;
88437cd4501SBing Zhao 
88556bb3c84SXueming Li 	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
88637cd4501SBing Zhao 		rte_errno = ENODEV;
88737cd4501SBing Zhao 		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
88837cd4501SBing Zhao 		return -rte_errno;
88937cd4501SBing Zhao 	}
89037cd4501SBing Zhao 	for (i = 0; i != priv->txqs_n; i++) {
89137cd4501SBing Zhao 		uint16_t rx_queue;
89237cd4501SBing Zhao 
89337cd4501SBing Zhao 		txq_ctrl = mlx5_txq_get(dev, i);
89437cd4501SBing Zhao 		if (txq_ctrl == NULL)
89537cd4501SBing Zhao 			continue;
896c06f77aeSMichael Baum 		if (!txq_ctrl->is_hairpin) {
89737cd4501SBing Zhao 			mlx5_txq_release(dev, i);
89837cd4501SBing Zhao 			continue;
89937cd4501SBing Zhao 		}
90037cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
90137cd4501SBing Zhao 			mlx5_txq_release(dev, i);
90237cd4501SBing Zhao 			continue;
90337cd4501SBing Zhao 		}
90437cd4501SBing Zhao 		/* Indeed, only the first used queue needs to be checked. */
90537cd4501SBing Zhao 		if (txq_ctrl->hairpin_conf.manual_bind == 0) {
9069284987aSBing Zhao 			mlx5_txq_release(dev, i);
90737cd4501SBing Zhao 			if (cur_port != rx_port) {
90837cd4501SBing Zhao 				rte_errno = EINVAL;
90937cd4501SBing Zhao 				DRV_LOG(ERR, "port %u and port %u are in"
91037cd4501SBing Zhao 					" auto-bind mode", cur_port, rx_port);
91137cd4501SBing Zhao 				return -rte_errno;
91237cd4501SBing Zhao 			} else {
91337cd4501SBing Zhao 				return 0;
91437cd4501SBing Zhao 			}
91537cd4501SBing Zhao 		}
91637cd4501SBing Zhao 		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
91737cd4501SBing Zhao 		mlx5_txq_release(dev, i);
91837cd4501SBing Zhao 		ret = rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
91937cd4501SBing Zhao 		if (ret) {
92037cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Rx queue %d unbind - failure",
92137cd4501SBing Zhao 				rx_port, rx_queue);
92237cd4501SBing Zhao 			return ret;
92337cd4501SBing Zhao 		}
92437cd4501SBing Zhao 		ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1);
92537cd4501SBing Zhao 		if (ret) {
92637cd4501SBing Zhao 			DRV_LOG(ERR, "port %u Tx queue %d unbind - failure",
92737cd4501SBing Zhao 				cur_port, i);
92837cd4501SBing Zhao 			return ret;
92937cd4501SBing Zhao 		}
93037cd4501SBing Zhao 	}
93137cd4501SBing Zhao 	return 0;
93237cd4501SBing Zhao }
93337cd4501SBing Zhao 
93437cd4501SBing Zhao /*
93537cd4501SBing Zhao  * Bind hairpin ports, Rx could be all ports when using RTE_MAX_ETHPORTS.
93637cd4501SBing Zhao  * @see mlx5_hairpin_bind_single_port()
93737cd4501SBing Zhao  */
93837cd4501SBing Zhao int
93937cd4501SBing Zhao mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
94037cd4501SBing Zhao {
94137cd4501SBing Zhao 	int ret = 0;
94237cd4501SBing Zhao 	uint16_t p, pp;
94337cd4501SBing Zhao 
94437cd4501SBing Zhao 	/*
94537cd4501SBing Zhao 	 * If the Rx port has no hairpin configuration with the current port,
94637cd4501SBing Zhao 	 * the binding will be skipped in the called function of single port.
94737cd4501SBing Zhao 	 * Device started status will be checked only before the queue
94837cd4501SBing Zhao 	 * information updating.
94937cd4501SBing Zhao 	 */
95037cd4501SBing Zhao 	if (rx_port == RTE_MAX_ETHPORTS) {
95156bb3c84SXueming Li 		MLX5_ETH_FOREACH_DEV(p, dev->device) {
95237cd4501SBing Zhao 			ret = mlx5_hairpin_bind_single_port(dev, p);
95337cd4501SBing Zhao 			if (ret != 0)
95437cd4501SBing Zhao 				goto unbind;
95537cd4501SBing Zhao 		}
95637cd4501SBing Zhao 		return ret;
95737cd4501SBing Zhao 	} else {
95837cd4501SBing Zhao 		return mlx5_hairpin_bind_single_port(dev, rx_port);
95937cd4501SBing Zhao 	}
96037cd4501SBing Zhao unbind:
96156bb3c84SXueming Li 	MLX5_ETH_FOREACH_DEV(pp, dev->device)
96237cd4501SBing Zhao 		if (pp < p)
96337cd4501SBing Zhao 			mlx5_hairpin_unbind_single_port(dev, pp);
96437cd4501SBing Zhao 	return ret;
96537cd4501SBing Zhao }
96637cd4501SBing Zhao 
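/*
 * Usage sketch (illustrative only, not part of the driver): with hairpin
 * queues configured in manual-bind mode, the application triggers the binding
 * explicitly after starting both ports; the call ends up in
 * mlx5_hairpin_bind() above. Port variables are hypothetical.
 *
 *     if (rte_eth_dev_start(tx_port) == 0 &&
 *         rte_eth_dev_start(rx_port) == 0) {
 *         ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *         if (ret < 0)
 *             printf("hairpin bind failed: %d\n", ret);
 *     }
 */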
96737cd4501SBing Zhao /*
96837cd4501SBing Zhao  * Unbind hairpin ports, Rx could be all ports when using RTE_MAX_ETHPORTS.
96937cd4501SBing Zhao  * @see mlx5_hairpin_unbind_single_port()
97037cd4501SBing Zhao  */
97137cd4501SBing Zhao int
97237cd4501SBing Zhao mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
97337cd4501SBing Zhao {
97437cd4501SBing Zhao 	int ret = 0;
97537cd4501SBing Zhao 	uint16_t p;
97637cd4501SBing Zhao 
97737cd4501SBing Zhao 	if (rx_port == RTE_MAX_ETHPORTS)
97856bb3c84SXueming Li 		MLX5_ETH_FOREACH_DEV(p, dev->device) {
97937cd4501SBing Zhao 			ret = mlx5_hairpin_unbind_single_port(dev, p);
98037cd4501SBing Zhao 			if (ret != 0)
98137cd4501SBing Zhao 				return ret;
98237cd4501SBing Zhao 		}
98337cd4501SBing Zhao 	else
9840746dcabSBing Zhao 		ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
98537cd4501SBing Zhao 	return ret;
98637cd4501SBing Zhao }
98737cd4501SBing Zhao 
98802109eaeSBing Zhao /*
98902109eaeSBing Zhao  * This will return the actual number of peer ports and save the identifiers
99002109eaeSBing Zhao  * into the array (sorted, which may differ from the order used when setting
99102109eaeSBing Zhao  * up the hairpin peer queues).
99202109eaeSBing Zhao  * hairpin peer queues).
99302109eaeSBing Zhao  * The peer port ID could be the same as the port ID of the current device.
99402109eaeSBing Zhao  *
99502109eaeSBing Zhao  * @param dev
99602109eaeSBing Zhao  *   Pointer to Ethernet device structure.
99702109eaeSBing Zhao  * @param peer_ports
99802109eaeSBing Zhao  *   Pointer to array to save the port identifiers.
99902109eaeSBing Zhao  * @param len
100002109eaeSBing Zhao  *   The length of the array.
100102109eaeSBing Zhao  * @param direction
100202109eaeSBing Zhao  *   Current port to peer port direction.
100302109eaeSBing Zhao  *   positive - current used as Tx to get all peer Rx ports.
100402109eaeSBing Zhao  *   zero - current used as Rx to get all peer Tx ports.
100502109eaeSBing Zhao  *
100602109eaeSBing Zhao  * @return
100702109eaeSBing Zhao  *   0 or positive value on success, actual number of peer ports.
100802109eaeSBing Zhao  *   a negative errno value otherwise and rte_errno is set.
100902109eaeSBing Zhao  */
101002109eaeSBing Zhao int
101102109eaeSBing Zhao mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
101202109eaeSBing Zhao 			    size_t len, uint32_t direction)
101302109eaeSBing Zhao {
101402109eaeSBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
101502109eaeSBing Zhao 	struct mlx5_txq_ctrl *txq_ctrl;
101602109eaeSBing Zhao 	uint32_t i;
101702109eaeSBing Zhao 	uint16_t pp;
101802109eaeSBing Zhao 	uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
101902109eaeSBing Zhao 	int ret = 0;
102002109eaeSBing Zhao 
102102109eaeSBing Zhao 	if (direction) {
102202109eaeSBing Zhao 		for (i = 0; i < priv->txqs_n; i++) {
102302109eaeSBing Zhao 			txq_ctrl = mlx5_txq_get(dev, i);
102402109eaeSBing Zhao 			if (!txq_ctrl)
102502109eaeSBing Zhao 				continue;
1026c06f77aeSMichael Baum 			if (!txq_ctrl->is_hairpin) {
102702109eaeSBing Zhao 				mlx5_txq_release(dev, i);
102802109eaeSBing Zhao 				continue;
102902109eaeSBing Zhao 			}
103002109eaeSBing Zhao 			pp = txq_ctrl->hairpin_conf.peers[0].port;
103102109eaeSBing Zhao 			if (pp >= RTE_MAX_ETHPORTS) {
103202109eaeSBing Zhao 				rte_errno = ERANGE;
103302109eaeSBing Zhao 				mlx5_txq_release(dev, i);
103402109eaeSBing Zhao 				DRV_LOG(ERR, "port %hu queue %u peer port "
103502109eaeSBing Zhao 					"out of range %hu",
103602109eaeSBing Zhao 					priv->dev_data->port_id, i, pp);
103702109eaeSBing Zhao 				return -rte_errno;
103802109eaeSBing Zhao 			}
103902109eaeSBing Zhao 			bits[pp / 32] |= 1 << (pp % 32);
104002109eaeSBing Zhao 			mlx5_txq_release(dev, i);
104102109eaeSBing Zhao 		}
104202109eaeSBing Zhao 	} else {
104302109eaeSBing Zhao 		for (i = 0; i < priv->rxqs_n; i++) {
10440cedf34dSXueming Li 			struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
10450cedf34dSXueming Li 			struct mlx5_rxq_ctrl *rxq_ctrl;
10460cedf34dSXueming Li 
10470cedf34dSXueming Li 			if (rxq == NULL)
104802109eaeSBing Zhao 				continue;
10490cedf34dSXueming Li 			rxq_ctrl = rxq->ctrl;
1050c06f77aeSMichael Baum 			if (!rxq_ctrl->is_hairpin)
105102109eaeSBing Zhao 				continue;
105244126bd9SXueming Li 			pp = rxq->hairpin_conf.peers[0].port;
105302109eaeSBing Zhao 			if (pp >= RTE_MAX_ETHPORTS) {
105402109eaeSBing Zhao 				rte_errno = ERANGE;
105502109eaeSBing Zhao 				DRV_LOG(ERR, "port %hu queue %u peer port "
105602109eaeSBing Zhao 					"out of range %hu",
105702109eaeSBing Zhao 					priv->dev_data->port_id, i, pp);
105802109eaeSBing Zhao 				return -rte_errno;
105902109eaeSBing Zhao 			}
106002109eaeSBing Zhao 			bits[pp / 32] |= 1 << (pp % 32);
106102109eaeSBing Zhao 		}
106202109eaeSBing Zhao 	}
106302109eaeSBing Zhao 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
106402109eaeSBing Zhao 		if (bits[i / 32] & (1 << (i % 32))) {
106502109eaeSBing Zhao 			if ((size_t)ret >= len) {
106602109eaeSBing Zhao 				rte_errno = E2BIG;
106702109eaeSBing Zhao 				return -rte_errno;
106802109eaeSBing Zhao 			}
106902109eaeSBing Zhao 			peer_ports[ret++] = i;
107002109eaeSBing Zhao 		}
107102109eaeSBing Zhao 	}
107202109eaeSBing Zhao 	return ret;
107302109eaeSBing Zhao }
107402109eaeSBing Zhao 
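/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * may query the peer ports of its hairpin Tx queues and unbind them one by
 * one, e.g. before stopping the port. Variable names are hypothetical.
 *
 *     uint16_t peers[RTE_MAX_ETHPORTS];
 *     int n = rte_eth_hairpin_get_peer_ports(port_id, peers,
 *                                            RTE_DIM(peers), 1);
 *     for (int i = 0; i < n; i++)
 *         rte_eth_hairpin_unbind(port_id, peers[i]);
 */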
1075483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
1076483181f7SDariusz Sosnowski 
1077483181f7SDariusz Sosnowski /**
1078483181f7SDariusz Sosnowski  * Check if starting representor port is allowed.
1079483181f7SDariusz Sosnowski  *
1080483181f7SDariusz Sosnowski  * If transfer proxy port is configured for HWS, then starting representor port
1081483181f7SDariusz Sosnowski  * is allowed if and only if transfer proxy port is started as well.
1082483181f7SDariusz Sosnowski  *
1083483181f7SDariusz Sosnowski  * @param dev
1084483181f7SDariusz Sosnowski  *   Pointer to Ethernet device structure.
1085483181f7SDariusz Sosnowski  *
1086483181f7SDariusz Sosnowski  * @return
1087483181f7SDariusz Sosnowski  *   If starting representor port is allowed, then 0 is returned.
1088483181f7SDariusz Sosnowski  *   Otherwise rte_errno is set, and negative errno value is returned.
1089483181f7SDariusz Sosnowski  */
1090483181f7SDariusz Sosnowski static int
1091483181f7SDariusz Sosnowski mlx5_hw_representor_port_allowed_start(struct rte_eth_dev *dev)
1092483181f7SDariusz Sosnowski {
1093483181f7SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1094483181f7SDariusz Sosnowski 	struct rte_eth_dev *proxy_dev;
1095483181f7SDariusz Sosnowski 	struct mlx5_priv *proxy_priv;
1096483181f7SDariusz Sosnowski 	uint16_t proxy_port_id = UINT16_MAX;
1097483181f7SDariusz Sosnowski 	int ret;
1098483181f7SDariusz Sosnowski 
1099483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->sh->config.dv_flow_en == 2);
1100483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
1101483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->representor);
1102483181f7SDariusz Sosnowski 	ret = rte_flow_pick_transfer_proxy(dev->data->port_id, &proxy_port_id, NULL);
1103483181f7SDariusz Sosnowski 	if (ret) {
1104483181f7SDariusz Sosnowski 		if (ret == -ENODEV)
1105483181f7SDariusz Sosnowski 			DRV_LOG(ERR, "Starting representor port %u is not allowed. Transfer "
1106483181f7SDariusz Sosnowski 				     "proxy port is not available.", dev->data->port_id);
1107483181f7SDariusz Sosnowski 		else
1108483181f7SDariusz Sosnowski 			DRV_LOG(ERR, "Failed to pick transfer proxy for port %u (ret = %d)",
1109483181f7SDariusz Sosnowski 				dev->data->port_id, ret);
1110483181f7SDariusz Sosnowski 		return ret;
1111483181f7SDariusz Sosnowski 	}
1112483181f7SDariusz Sosnowski 	proxy_dev = &rte_eth_devices[proxy_port_id];
1113483181f7SDariusz Sosnowski 	proxy_priv = proxy_dev->data->dev_private;
1114483181f7SDariusz Sosnowski 	if (proxy_priv->dr_ctx == NULL) {
1115483181f7SDariusz Sosnowski 		DRV_LOG(DEBUG, "Starting representor port %u is allowed, but default traffic flows"
1116483181f7SDariusz Sosnowski 			       " will not be created. Transfer proxy port must be configured"
1117483181f7SDariusz Sosnowski 			       " for HWS and started.",
1118483181f7SDariusz Sosnowski 			       dev->data->port_id);
1119483181f7SDariusz Sosnowski 		return 0;
1120483181f7SDariusz Sosnowski 	}
1121483181f7SDariusz Sosnowski 	if (!proxy_dev->data->dev_started) {
1122483181f7SDariusz Sosnowski 		DRV_LOG(ERR, "Failed to start port %u: transfer proxy (port %u) must be started",
1123483181f7SDariusz Sosnowski 			     dev->data->port_id, proxy_port_id);
1124483181f7SDariusz Sosnowski 		rte_errno = EAGAIN;
1125483181f7SDariusz Sosnowski 		return -rte_errno;
1126483181f7SDariusz Sosnowski 	}
1127483181f7SDariusz Sosnowski 	if (priv->sh->config.repr_matching && !priv->dr_ctx) {
1128483181f7SDariusz Sosnowski 		DRV_LOG(ERR, "Failed to start port %u: with representor matching enabled, port "
1129483181f7SDariusz Sosnowski 			     "must be configured for HWS", dev->data->port_id);
1130483181f7SDariusz Sosnowski 		rte_errno = EINVAL;
1131483181f7SDariusz Sosnowski 		return -rte_errno;
1132483181f7SDariusz Sosnowski 	}
1133483181f7SDariusz Sosnowski 	return 0;
1134483181f7SDariusz Sosnowski }
1135483181f7SDariusz Sosnowski 
1136483181f7SDariusz Sosnowski #endif
1137483181f7SDariusz Sosnowski 
11386a338ad4SOri Kam /**
1139e60fbd5bSAdrien Mazarguil  * DPDK callback to start the device.
1140e60fbd5bSAdrien Mazarguil  *
1141e60fbd5bSAdrien Mazarguil  * Simulate device start by attaching all configured flows.
1142e60fbd5bSAdrien Mazarguil  *
1143e60fbd5bSAdrien Mazarguil  * @param dev
1144e60fbd5bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
1145e60fbd5bSAdrien Mazarguil  *
1146e60fbd5bSAdrien Mazarguil  * @return
1147a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
11488e82ebe2SDariusz Sosnowski  *   The following error values are defined:
11498e82ebe2SDariusz Sosnowski  *
11508e82ebe2SDariusz Sosnowski  *   - -EAGAIN: If port representor cannot be started,
11518e82ebe2SDariusz Sosnowski  *     because transfer proxy port is not started.
1152e60fbd5bSAdrien Mazarguil  */
1153e60fbd5bSAdrien Mazarguil int
1154e60fbd5bSAdrien Mazarguil mlx5_dev_start(struct rte_eth_dev *dev)
1155e60fbd5bSAdrien Mazarguil {
115633860cfaSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
1157a6d83b6aSNélio Laranjeiro 	int ret;
1158efa79e68SOri Kam 	int fine_inline;
1159e60fbd5bSAdrien Mazarguil 
116024f653a7SYongseok Koh 	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
1161483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
1162483181f7SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2) {
1163*d46f3b52SGregory Etelson 		struct rte_flow_error error = { 0, };
1164*d46f3b52SGregory Etelson 
1165e38776c3SMaayan Kashani 		/* If no previous HWS configuration exists, initialize it here. */
1166e38776c3SMaayan Kashani 		if (!(priv->dr_ctx)) {
1167*d46f3b52SGregory Etelson 			ret = flow_hw_init(dev, &error);
1168*d46f3b52SGregory Etelson 			if (ret) {
1169*d46f3b52SGregory Etelson 				DRV_LOG(ERR, "Failed to start port %u %s: %s",
1170*d46f3b52SGregory Etelson 					dev->data->port_id, dev->data->name,
1171*d46f3b52SGregory Etelson 					error.message);
1172e38776c3SMaayan Kashani 				return ret;
1173e38776c3SMaayan Kashani 			}
1174*d46f3b52SGregory Etelson 		}
1175483181f7SDariusz Sosnowski 		/* If there is no E-Switch, then there are no start/stop order limitations. */
1176483181f7SDariusz Sosnowski 		if (!priv->sh->config.dv_esw_en)
1177483181f7SDariusz Sosnowski 			goto continue_dev_start;
1178483181f7SDariusz Sosnowski 		/* If master is being started, then it is always allowed. */
1179483181f7SDariusz Sosnowski 		if (priv->master)
1180483181f7SDariusz Sosnowski 			goto continue_dev_start;
1181483181f7SDariusz Sosnowski 		if (mlx5_hw_representor_port_allowed_start(dev))
1182483181f7SDariusz Sosnowski 			return -rte_errno;
1183483181f7SDariusz Sosnowski 	}
1184483181f7SDariusz Sosnowski continue_dev_start:
1185483181f7SDariusz Sosnowski #endif
1186efa79e68SOri Kam 	fine_inline = rte_mbuf_dynflag_lookup
1187efa79e68SOri Kam 		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
1188042540e4SThomas Monjalon 	if (fine_inline >= 0)
1189efa79e68SOri Kam 		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
1190efa79e68SOri Kam 	else
1191efa79e68SOri Kam 		rte_net_mlx5_dynf_inline_mask = 0;
1192606d6905SShiri Kuzin 	if (dev->data->nb_rx_queues > 0) {
1193b9f1f4c2SGregory Etelson 		uint32_t max_lro_msg_size = priv->max_lro_msg_size;
1194b9f1f4c2SGregory Etelson 
1195b9f1f4c2SGregory Etelson 		if (max_lro_msg_size < MLX5_LRO_SEG_CHUNK_SIZE) {
1196b9f1f4c2SGregory Etelson 			uint32_t i;
1197b9f1f4c2SGregory Etelson 			struct mlx5_rxq_priv *rxq;
1198b9f1f4c2SGregory Etelson 
1199b9f1f4c2SGregory Etelson 			for (i = 0; i != priv->rxqs_n; ++i) {
1200b9f1f4c2SGregory Etelson 				rxq = mlx5_rxq_get(dev, i);
1201b9f1f4c2SGregory Etelson 				if (rxq && rxq->ctrl && rxq->ctrl->rxq.lro) {
1202b9f1f4c2SGregory Etelson 					DRV_LOG(ERR, "port %u invalid max LRO size",
1203b9f1f4c2SGregory Etelson 						dev->data->port_id);
1204b9f1f4c2SGregory Etelson 					rte_errno = EINVAL;
1205b9f1f4c2SGregory Etelson 					return -rte_errno;
1206b9f1f4c2SGregory Etelson 				}
1207b9f1f4c2SGregory Etelson 			}
1208b9f1f4c2SGregory Etelson 		}
120963bd1629SOri Kam 		ret = mlx5_dev_configure_rss_reta(dev);
121063bd1629SOri Kam 		if (ret) {
121163bd1629SOri Kam 			DRV_LOG(ERR, "port %u reta config failed: %s",
121263bd1629SOri Kam 				dev->data->port_id, strerror(rte_errno));
121363bd1629SOri Kam 			return -rte_errno;
121463bd1629SOri Kam 		}
1215606d6905SShiri Kuzin 	}
1216d133f4cdSViacheslav Ovsiienko 	ret = mlx5_txpp_start(dev);
1217d133f4cdSViacheslav Ovsiienko 	if (ret) {
1218d133f4cdSViacheslav Ovsiienko 		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
1219d133f4cdSViacheslav Ovsiienko 			dev->data->port_id, strerror(rte_errno));
1220d133f4cdSViacheslav Ovsiienko 		goto error;
1221d133f4cdSViacheslav Ovsiienko 	}
1222c4b86201SMichael Baum 	if (mlx5_devx_obj_ops_en(priv->sh) &&
122387af0d1eSMichael Baum 	    priv->obj_ops.lb_dummy_queue_create) {
122423233fd6SBing Zhao 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
122523233fd6SBing Zhao 		if (ret)
122623233fd6SBing Zhao 			goto error;
122723233fd6SBing Zhao 	}
1228a6d83b6aSNélio Laranjeiro 	ret = mlx5_txq_start(dev);
1229a6d83b6aSNélio Laranjeiro 	if (ret) {
1230a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
12310f99970bSNélio Laranjeiro 			dev->data->port_id, strerror(rte_errno));
1232d133f4cdSViacheslav Ovsiienko 		goto error;
12336e78005aSNélio Laranjeiro 	}
1234e8482187SBing Zhao 	if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
123587af0d1eSMichael Baum 		if (!priv->sh->dev_cap.vf && !priv->sh->dev_cap.sf &&
1236e8482187SBing Zhao 		    !priv->representor) {
1237e8482187SBing Zhao 			ret = mlx5_get_flag_dropless_rq(dev);
1238e8482187SBing Zhao 			if (ret < 0)
1239e8482187SBing Zhao 				DRV_LOG(WARNING,
1240e8482187SBing Zhao 					"port %u cannot query dropless flag",
1241e8482187SBing Zhao 					dev->data->port_id);
1242e8482187SBing Zhao 			else if (!ret)
1243e8482187SBing Zhao 				DRV_LOG(WARNING,
1244e8482187SBing Zhao 					"port %u dropless_rq OFF, no rearming",
1245e8482187SBing Zhao 					dev->data->port_id);
1246e8482187SBing Zhao 		} else {
1247e8482187SBing Zhao 			DRV_LOG(DEBUG,
1248e8482187SBing Zhao 				"port %u doesn't support dropless_rq flag",
1249e8482187SBing Zhao 				dev->data->port_id);
1250e8482187SBing Zhao 		}
1251e8482187SBing Zhao 	}
1252a6d83b6aSNélio Laranjeiro 	ret = mlx5_rxq_start(dev);
1253a6d83b6aSNélio Laranjeiro 	if (ret) {
1254a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
12550f99970bSNélio Laranjeiro 			dev->data->port_id, strerror(rte_errno));
1256d133f4cdSViacheslav Ovsiienko 		goto error;
1257a1366b1aSNélio Laranjeiro 	}
1258aa8bea0eSBing Zhao 	/*
1259aa8bea0eSBing Zhao 	 * This step is skipped if there is no hairpin Tx queue configured
1260aa8bea0eSBing Zhao 	 * with an Rx peer queue on the same device.
1261aa8bea0eSBing Zhao 	 */
126237cd4501SBing Zhao 	ret = mlx5_hairpin_auto_bind(dev);
12636a338ad4SOri Kam 	if (ret) {
1264aa8bea0eSBing Zhao 		DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
12656a338ad4SOri Kam 			dev->data->port_id, strerror(rte_errno));
1266d133f4cdSViacheslav Ovsiienko 		goto error;
12676a338ad4SOri Kam 	}
1268e7bfa359SBing Zhao 	/* Set the started flag here; following steps such as control flow setup rely on it. */
126924f653a7SYongseok Koh 	dev->data->dev_started = 1;
1270a6d83b6aSNélio Laranjeiro 	ret = mlx5_rx_intr_vec_enable(dev);
1271a6d83b6aSNélio Laranjeiro 	if (ret) {
1272a170a30dSNélio Laranjeiro 		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
12730f99970bSNélio Laranjeiro 			dev->data->port_id);
1274e1016cb7SAdrien Mazarguil 		goto error;
12753c7d44afSShahaf Shuler 	}
127673bf9235SOphir Munk 	mlx5_os_stats_init(dev);
12775c078fceSDmitry Kozlyuk 	/*
12785c078fceSDmitry Kozlyuk 	 * Attach indirection table objects detached on port stop.
12795c078fceSDmitry Kozlyuk 	 * They may be needed to create RSS in non-isolated mode.
12805c078fceSDmitry Kozlyuk 	 */
12815c078fceSDmitry Kozlyuk 	ret = mlx5_action_handle_attach(dev);
12825c078fceSDmitry Kozlyuk 	if (ret) {
12835c078fceSDmitry Kozlyuk 		DRV_LOG(ERR,
12845c078fceSDmitry Kozlyuk 			"port %u failed to attach indirect actions: %s",
12855c078fceSDmitry Kozlyuk 			dev->data->port_id, rte_strerror(rte_errno));
12865c078fceSDmitry Kozlyuk 		goto error;
12875c078fceSDmitry Kozlyuk 	}
1288f1fecffaSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
1289f1fecffaSDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2) {
1290f1fecffaSDariusz Sosnowski 		ret = flow_hw_table_update(dev, NULL);
1291f1fecffaSDariusz Sosnowski 		if (ret) {
1292f1fecffaSDariusz Sosnowski 			DRV_LOG(ERR, "port %u failed to update HWS tables",
1293f1fecffaSDariusz Sosnowski 				dev->data->port_id);
1294f1fecffaSDariusz Sosnowski 			goto error;
1295f1fecffaSDariusz Sosnowski 		}
1296f1fecffaSDariusz Sosnowski 	}
1297f1fecffaSDariusz Sosnowski #endif
12987ba5320bSNélio Laranjeiro 	ret = mlx5_traffic_enable(dev);
1299a6d83b6aSNélio Laranjeiro 	if (ret) {
13008db7e3b6SBing Zhao 		DRV_LOG(ERR, "port %u failed to set defaults flows",
1301e313ef4cSShahaf Shuler 			dev->data->port_id);
1302e313ef4cSShahaf Shuler 		goto error;
1303e313ef4cSShahaf Shuler 	}
1304fca8cba4SDavid Marchand 	/* Set dynamic fields and flags into Rx queues. */
1305fca8cba4SDavid Marchand 	mlx5_flow_rxq_dynf_set(dev);
1306a2854c4dSViacheslav Ovsiienko 	/* Set flags and context to convert Rx timestamps. */
1307a2854c4dSViacheslav Ovsiienko 	mlx5_rxq_timestamp_set(dev);
1308a2854c4dSViacheslav Ovsiienko 	/* Set the mask and offset for timestamp-based scheduling in Tx queues. */
13093172c471SViacheslav Ovsiienko 	mlx5_txq_dynf_timestamp_set(dev);
13108db7e3b6SBing Zhao 	/*
13118db7e3b6SBing Zhao 	 * In non-cached mode, only the default mreg copy action needs to be
13128db7e3b6SBing Zhao 	 * started, since no application-created flow exists at this point.
13138db7e3b6SBing Zhao 	 * The interface is wrapped anyway for future usage.
13148db7e3b6SBing Zhao 	 */
13158db7e3b6SBing Zhao 	ret = mlx5_flow_start_default(dev);
13167ba5320bSNélio Laranjeiro 	if (ret) {
13178db7e3b6SBing Zhao 		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
13188db7e3b6SBing Zhao 			dev->data->port_id, strerror(rte_errno));
13197ba5320bSNélio Laranjeiro 		goto error;
13207ba5320bSNélio Laranjeiro 	}
1321fec28ca0SDmitry Kozlyuk 	if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
1322fec28ca0SDmitry Kozlyuk 		DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s",
1323fec28ca0SDmitry Kozlyuk 			dev->data->port_id, rte_strerror(rte_errno));
1324fec28ca0SDmitry Kozlyuk 		goto error;
1325fec28ca0SDmitry Kozlyuk 	}
13262aac5b5dSYongseok Koh 	rte_wmb();
13277ba5320bSNélio Laranjeiro 	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
13287ba5320bSNélio Laranjeiro 	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
13292aac5b5dSYongseok Koh 	/* Enable datapath on secondary process. */
13302e86c4e5SOphir Munk 	mlx5_mp_os_req_start_rxtx(dev);
1331d61138d4SHarman Kalra 	if (rte_intr_fd_get(priv->sh->intr_handle) >= 0) {
133291389890SOphir Munk 		priv->sh->port[priv->dev_port - 1].ih_port_id =
133333860cfaSSuanming Mou 					(uint32_t)dev->data->port_id;
133433860cfaSSuanming Mou 	} else {
133517f95513SDmitry Kozlyuk 		DRV_LOG(INFO, "port %u starts without RMV interrupts.",
133617f95513SDmitry Kozlyuk 			dev->data->port_id);
133717f95513SDmitry Kozlyuk 		dev->data->dev_conf.intr_conf.rmv = 0;
133817f95513SDmitry Kozlyuk 	}
133917f95513SDmitry Kozlyuk 	if (rte_intr_fd_get(priv->sh->intr_handle_nl) >= 0) {
134017f95513SDmitry Kozlyuk 		priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
134117f95513SDmitry Kozlyuk 					(uint32_t)dev->data->port_id;
134217f95513SDmitry Kozlyuk 	} else {
134317f95513SDmitry Kozlyuk 		DRV_LOG(INFO, "port %u starts without LSC interrupts.",
134433860cfaSSuanming Mou 			dev->data->port_id);
134533860cfaSSuanming Mou 		dev->data->dev_conf.intr_conf.lsc = 0;
134633860cfaSSuanming Mou 	}
1347d61138d4SHarman Kalra 	if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
134891389890SOphir Munk 		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
134933860cfaSSuanming Mou 					(uint32_t)dev->data->port_id;
1350c8d4ee50SNélio Laranjeiro 	return 0;
1351c8d4ee50SNélio Laranjeiro error:
1352a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
1353e60fbd5bSAdrien Mazarguil 	/* Rollback. */
1354272733b5SNélio Laranjeiro 	dev->data->dev_started = 0;
13558db7e3b6SBing Zhao 	mlx5_flow_stop_default(dev);
1356af4f09f2SNélio Laranjeiro 	mlx5_traffic_disable(dev);
1357af4f09f2SNélio Laranjeiro 	mlx5_txq_stop(dev);
1358af4f09f2SNélio Laranjeiro 	mlx5_rxq_stop(dev);
135923233fd6SBing Zhao 	if (priv->obj_ops.lb_dummy_queue_release)
136023233fd6SBing Zhao 		priv->obj_ops.lb_dummy_queue_release(dev);
1361d133f4cdSViacheslav Ovsiienko 	mlx5_txpp_stop(dev); /* Stop last. */
1362a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
1363a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1364e60fbd5bSAdrien Mazarguil }
1365e60fbd5bSAdrien Mazarguil 
1366483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
1367483181f7SDariusz Sosnowski /**
1368483181f7SDariusz Sosnowski  * Check if stopping transfer proxy port is allowed.
1369483181f7SDariusz Sosnowski  *
1370483181f7SDariusz Sosnowski  * If transfer proxy port is configured for HWS, then it is allowed to stop it
1371483181f7SDariusz Sosnowski  * if and only if all other representor ports are stopped.
1372483181f7SDariusz Sosnowski  *
1373483181f7SDariusz Sosnowski  * @param dev
1374483181f7SDariusz Sosnowski  *   Pointer to Ethernet device structure.
1375483181f7SDariusz Sosnowski  *
1376483181f7SDariusz Sosnowski  * @return
1377483181f7SDariusz Sosnowski  *   If stopping transfer proxy port is allowed, then 0 is returned.
1378483181f7SDariusz Sosnowski  *   Otherwise rte_errno is set, and negative errno value is returned.
1379483181f7SDariusz Sosnowski  */
1380483181f7SDariusz Sosnowski static int
1381483181f7SDariusz Sosnowski mlx5_hw_proxy_port_allowed_stop(struct rte_eth_dev *dev)
1382483181f7SDariusz Sosnowski {
1383483181f7SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1384483181f7SDariusz Sosnowski 	bool representor_started = false;
1385483181f7SDariusz Sosnowski 	uint16_t port_id;
1386483181f7SDariusz Sosnowski 
1387483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->sh->config.dv_flow_en == 2);
1388483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
1389483181f7SDariusz Sosnowski 	MLX5_ASSERT(priv->master);
1390483181f7SDariusz Sosnowski 	/* If transfer proxy port was not configured for HWS, then stopping it is allowed. */
1391483181f7SDariusz Sosnowski 	if (!priv->dr_ctx)
1392483181f7SDariusz Sosnowski 		return 0;
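	/* Check whether any sibling port in the same switch domain is still started. */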
1393483181f7SDariusz Sosnowski 	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
1394483181f7SDariusz Sosnowski 		const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id];
1395483181f7SDariusz Sosnowski 		const struct mlx5_priv *port_priv = port_dev->data->dev_private;
1396483181f7SDariusz Sosnowski 
1397483181f7SDariusz Sosnowski 		if (port_id != dev->data->port_id &&
1398483181f7SDariusz Sosnowski 		    port_priv->domain_id == priv->domain_id &&
1399483181f7SDariusz Sosnowski 		    port_dev->data->dev_started)
1400483181f7SDariusz Sosnowski 			representor_started = true;
1401483181f7SDariusz Sosnowski 	}
1402483181f7SDariusz Sosnowski 	if (representor_started) {
1403f359b715SDariusz Sosnowski 		DRV_LOG(ERR, "Failed to stop port %u: attached representor ports"
1404483181f7SDariusz Sosnowski 			     " must be stopped before stopping transfer proxy port",
1405483181f7SDariusz Sosnowski 			     dev->data->port_id);
1406483181f7SDariusz Sosnowski 		rte_errno = EBUSY;
1407483181f7SDariusz Sosnowski 		return -rte_errno;
1408483181f7SDariusz Sosnowski 	}
1409483181f7SDariusz Sosnowski 	return 0;
1410483181f7SDariusz Sosnowski }
1411483181f7SDariusz Sosnowski #endif
1412483181f7SDariusz Sosnowski 
1413e60fbd5bSAdrien Mazarguil /**
1414e60fbd5bSAdrien Mazarguil  * DPDK callback to stop the device.
1415e60fbd5bSAdrien Mazarguil  *
1416e60fbd5bSAdrien Mazarguil  * Simulate device stop by detaching all configured flows.
1417e60fbd5bSAdrien Mazarguil  *
1418e60fbd5bSAdrien Mazarguil  * @param dev
1419e60fbd5bSAdrien Mazarguil  *   Pointer to Ethernet device structure.
14208e82ebe2SDariusz Sosnowski  *
14218e82ebe2SDariusz Sosnowski  * @return
14228e82ebe2SDariusz Sosnowski  *   0 on success, a negative errno value otherwise and rte_errno is set.
14238e82ebe2SDariusz Sosnowski  *   The following error values are defined:
14248e82ebe2SDariusz Sosnowski  *
14258e82ebe2SDariusz Sosnowski  *   - -EBUSY: If transfer proxy port cannot be stopped,
14268e82ebe2SDariusz Sosnowski  *     because other port representors are still running.
1427e60fbd5bSAdrien Mazarguil  */
142862024eb8SIvan Ilchenko int
1429e60fbd5bSAdrien Mazarguil mlx5_dev_stop(struct rte_eth_dev *dev)
1430e60fbd5bSAdrien Mazarguil {
1431dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1432e60fbd5bSAdrien Mazarguil 
1433483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
1434483181f7SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2) {
1435483181f7SDariusz Sosnowski 		/* If there is no E-Switch, then there are no start/stop order limitations. */
1436483181f7SDariusz Sosnowski 		if (!priv->sh->config.dv_esw_en)
1437483181f7SDariusz Sosnowski 			goto continue_dev_stop;
1438483181f7SDariusz Sosnowski 		/* If representor is being stopped, then it is always allowed. */
1439483181f7SDariusz Sosnowski 		if (priv->representor)
1440483181f7SDariusz Sosnowski 			goto continue_dev_stop;
1441483181f7SDariusz Sosnowski 		if (mlx5_hw_proxy_port_allowed_stop(dev)) {
1442483181f7SDariusz Sosnowski 			dev->data->dev_started = 1;
1443483181f7SDariusz Sosnowski 			return -rte_errno;
1444483181f7SDariusz Sosnowski 		}
1445483181f7SDariusz Sosnowski 	}
1446483181f7SDariusz Sosnowski continue_dev_stop:
1447483181f7SDariusz Sosnowski #endif
14483f2fe392SNélio Laranjeiro 	dev->data->dev_started = 0;
14493f2fe392SNélio Laranjeiro 	/* Prevent crashes when queues are still in use. */
1450a41f593fSFerruh Yigit 	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1451a41f593fSFerruh Yigit 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
14523f2fe392SNélio Laranjeiro 	rte_wmb();
14532aac5b5dSYongseok Koh 	/* Disable datapath on secondary process. */
14542e86c4e5SOphir Munk 	mlx5_mp_os_req_stop_rxtx(dev);
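	/* Sleep about 1 ms per Rx queue so that in-flight bursts can drain before queues are released. */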
145520698c9fSOphir Munk 	rte_delay_us_sleep(1000 * priv->rxqs_n);
145624f653a7SYongseok Koh 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
14578db7e3b6SBing Zhao 	mlx5_flow_stop_default(dev);
14588db7e3b6SBing Zhao 	/* Control flows for default traffic can be removed first. */
1459af4f09f2SNélio Laranjeiro 	mlx5_traffic_disable(dev);
14608db7e3b6SBing Zhao 	/* All RX queue flags will be cleared in the flush interface. */
1461b4edeaf3SSuanming Mou 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
1462ec962badSLi Zhang 	mlx5_flow_meter_rxq_flush(dev);
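	/* Detach indirect actions; they can be re-attached on the next start. */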
1463ec4e11d4SDmitry Kozlyuk 	mlx5_action_handle_detach(dev);
14649fa7c1cdSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
14659fa7c1cdSDariusz Sosnowski 	mlx5_flow_hw_cleanup_ctrl_rx_templates(dev);
14669fa7c1cdSDariusz Sosnowski #endif
1467af4f09f2SNélio Laranjeiro 	mlx5_rx_intr_vec_disable(dev);
146891389890SOphir Munk 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
146991389890SOphir Munk 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
147017f95513SDmitry Kozlyuk 	priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
1471af4f09f2SNélio Laranjeiro 	mlx5_txq_stop(dev);
1472af4f09f2SNélio Laranjeiro 	mlx5_rxq_stop(dev);
147323233fd6SBing Zhao 	if (priv->obj_ops.lb_dummy_queue_release)
147423233fd6SBing Zhao 		priv->obj_ops.lb_dummy_queue_release(dev);
1475d133f4cdSViacheslav Ovsiienko 	mlx5_txpp_stop(dev);
147662024eb8SIvan Ilchenko 
147762024eb8SIvan Ilchenko 	return 0;
1478e60fbd5bSAdrien Mazarguil }
1479272733b5SNélio Laranjeiro 
14801939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
14811939eb6fSDariusz Sosnowski 
14821939eb6fSDariusz Sosnowski static int
14831939eb6fSDariusz Sosnowski mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
14841939eb6fSDariusz Sosnowski {
14851939eb6fSDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1486483181f7SDariusz Sosnowski 	struct mlx5_sh_config *config = &priv->sh->config;
14879fa7c1cdSDariusz Sosnowski 	uint64_t flags = 0;
14881939eb6fSDariusz Sosnowski 	unsigned int i;
14891939eb6fSDariusz Sosnowski 	int ret;
14901939eb6fSDariusz Sosnowski 
1491483181f7SDariusz Sosnowski 	/*
1492483181f7SDariusz Sosnowski 	 * With extended metadata enabled, the Tx metadata copy is handled by default
1493483181f7SDariusz Sosnowski 	 * Tx tagging flow rules, so default Tx flow rule is not needed. It is only
1494483181f7SDariusz Sosnowski 	 * required when representor matching is disabled.
1495483181f7SDariusz Sosnowski 	 */
1496483181f7SDariusz Sosnowski 	if (config->dv_esw_en &&
1497483181f7SDariusz Sosnowski 	    !config->repr_matching &&
1498483181f7SDariusz Sosnowski 	    config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
1499483181f7SDariusz Sosnowski 	    priv->master) {
1500ddb68e47SBing Zhao 		if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev))
1501ddb68e47SBing Zhao 			goto error;
15021939eb6fSDariusz Sosnowski 	}
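	/* Create per-Tx-queue control rules: E-Switch SQ miss rules and representor matching rules. */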
15031939eb6fSDariusz Sosnowski 	for (i = 0; i < priv->txqs_n; ++i) {
15041939eb6fSDariusz Sosnowski 		struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
15051939eb6fSDariusz Sosnowski 		uint32_t queue;
15061939eb6fSDariusz Sosnowski 
15071939eb6fSDariusz Sosnowski 		if (!txq)
15081939eb6fSDariusz Sosnowski 			continue;
150926e1eaf2SDariusz Sosnowski 		queue = mlx5_txq_get_sqn(txq);
1510cf9a91c6SDariusz Sosnowski 		if ((priv->representor || priv->master) &&
1511cf9a91c6SDariusz Sosnowski 		    config->dv_esw_en &&
1512cf9a91c6SDariusz Sosnowski 		    config->fdb_def_rule) {
1513f37c184aSSuanming Mou 			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) {
15141939eb6fSDariusz Sosnowski 				mlx5_txq_release(dev, i);
15151939eb6fSDariusz Sosnowski 				goto error;
15161939eb6fSDariusz Sosnowski 			}
15171939eb6fSDariusz Sosnowski 		}
1518483181f7SDariusz Sosnowski 		if (config->dv_esw_en && config->repr_matching) {
1519f37c184aSSuanming Mou 			if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) {
1520483181f7SDariusz Sosnowski 				mlx5_txq_release(dev, i);
1521483181f7SDariusz Sosnowski 				goto error;
1522483181f7SDariusz Sosnowski 			}
1523483181f7SDariusz Sosnowski 		}
15241939eb6fSDariusz Sosnowski 		mlx5_txq_release(dev, i);
15251939eb6fSDariusz Sosnowski 	}
1526483181f7SDariusz Sosnowski 	if (config->fdb_def_rule) {
1527483181f7SDariusz Sosnowski 		if ((priv->master || priv->representor) && config->dv_esw_en) {
152826e1eaf2SDariusz Sosnowski 			if (!mlx5_flow_hw_esw_create_default_jump_flow(dev))
152926e1eaf2SDariusz Sosnowski 				priv->fdb_def_rule = 1;
153026e1eaf2SDariusz Sosnowski 			else
15311939eb6fSDariusz Sosnowski 				goto error;
15321939eb6fSDariusz Sosnowski 		}
153326e1eaf2SDariusz Sosnowski 	} else {
153426e1eaf2SDariusz Sosnowski 		DRV_LOG(INFO, "port %u FDB default rule is disabled", dev->data->port_id);
153526e1eaf2SDariusz Sosnowski 	}
15369fa7c1cdSDariusz Sosnowski 	if (priv->isolated)
15371939eb6fSDariusz Sosnowski 		return 0;
153887e4384dSBing Zhao 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master)
153949dffadfSBing Zhao 		if (mlx5_flow_hw_lacp_rx_flow(dev))
154049dffadfSBing Zhao 			goto error;
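	/* Translate the current Rx mode into control flow flags for HWS. */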
15419fa7c1cdSDariusz Sosnowski 	if (dev->data->promiscuous)
15429fa7c1cdSDariusz Sosnowski 		flags |= MLX5_CTRL_PROMISCUOUS;
15439fa7c1cdSDariusz Sosnowski 	if (dev->data->all_multicast)
15449fa7c1cdSDariusz Sosnowski 		flags |= MLX5_CTRL_ALL_MULTICAST;
15459fa7c1cdSDariusz Sosnowski 	else
15469fa7c1cdSDariusz Sosnowski 		flags |= MLX5_CTRL_BROADCAST | MLX5_CTRL_IPV4_MULTICAST | MLX5_CTRL_IPV6_MULTICAST;
15479fa7c1cdSDariusz Sosnowski 	flags |= MLX5_CTRL_DMAC;
15489fa7c1cdSDariusz Sosnowski 	if (priv->vlan_filter_n)
15499fa7c1cdSDariusz Sosnowski 		flags |= MLX5_CTRL_VLAN_FILTER;
15509fa7c1cdSDariusz Sosnowski 	return mlx5_flow_hw_ctrl_flows(dev, flags);
15511939eb6fSDariusz Sosnowski error:
15521939eb6fSDariusz Sosnowski 	ret = rte_errno;
15531939eb6fSDariusz Sosnowski 	mlx5_flow_hw_flush_ctrl_flows(dev);
15541939eb6fSDariusz Sosnowski 	rte_errno = ret;
15551939eb6fSDariusz Sosnowski 	return -rte_errno;
15561939eb6fSDariusz Sosnowski }
15571939eb6fSDariusz Sosnowski 
15581939eb6fSDariusz Sosnowski #endif
15591939eb6fSDariusz Sosnowski 
1560272733b5SNélio Laranjeiro /**
1561272733b5SNélio Laranjeiro  * Enable traffic flows configured by control plane
1562272733b5SNélio Laranjeiro  *
1563af4f09f2SNélio Laranjeiro  * @param dev
1564272733b5SNélio Laranjeiro  *   Pointer to Ethernet device structure.
1565272733b5SNélio Laranjeiro  *
1566272733b5SNélio Laranjeiro  * @return
1567a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1568272733b5SNélio Laranjeiro  */
1569272733b5SNélio Laranjeiro int
1570af4f09f2SNélio Laranjeiro mlx5_traffic_enable(struct rte_eth_dev *dev)
1571272733b5SNélio Laranjeiro {
1572dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1573272733b5SNélio Laranjeiro 	struct rte_flow_item_eth bcast = {
1574e0d947a1SFerruh Yigit 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
1575272733b5SNélio Laranjeiro 	};
1576272733b5SNélio Laranjeiro 	struct rte_flow_item_eth ipv6_multi_spec = {
1577e0d947a1SFerruh Yigit 		.hdr.dst_addr.addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 },
1578272733b5SNélio Laranjeiro 	};
1579272733b5SNélio Laranjeiro 	struct rte_flow_item_eth ipv6_multi_mask = {
1580e0d947a1SFerruh Yigit 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
1581272733b5SNélio Laranjeiro 	};
1582272733b5SNélio Laranjeiro 	struct rte_flow_item_eth unicast = {
1583e0d947a1SFerruh Yigit 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
1584272733b5SNélio Laranjeiro 	};
1585272733b5SNélio Laranjeiro 	struct rte_flow_item_eth unicast_mask = {
1586e0d947a1SFerruh Yigit 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
1587272733b5SNélio Laranjeiro 	};
1588272733b5SNélio Laranjeiro 	const unsigned int vlan_filter_n = priv->vlan_filter_n;
15896d13ea8eSOlivier Matz 	const struct rte_ether_addr cmp = {
1590e0d947a1SFerruh Yigit 		.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
1591272733b5SNélio Laranjeiro 	};
1592272733b5SNélio Laranjeiro 	unsigned int i;
1593272733b5SNélio Laranjeiro 	unsigned int j;
1594272733b5SNélio Laranjeiro 	int ret;
1595272733b5SNélio Laranjeiro 
15961939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
15971939eb6fSDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
15981939eb6fSDariusz Sosnowski 		return mlx5_traffic_enable_hws(dev);
15991939eb6fSDariusz Sosnowski #endif
16003c84f34eSOri Kam 	/*
16013c84f34eSOri Kam 	 * The hairpin Tx queue default flow should be created regardless of
16023c84f34eSOri Kam 	 * isolation mode. Otherwise all packets to be sent would go out
16033c84f34eSOri Kam 	 * directly without the Tx flow actions, e.g. encapsulation.
16043c84f34eSOri Kam 	 */
16053c84f34eSOri Kam 	for (i = 0; i != priv->txqs_n; ++i) {
16063c84f34eSOri Kam 		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
16073c84f34eSOri Kam 		if (!txq_ctrl)
16083c84f34eSOri Kam 			continue;
1609aa8bea0eSBing Zhao 		/* Only Tx implicit mode requires the default Tx flow. */
1610c06f77aeSMichael Baum 		if (txq_ctrl->is_hairpin &&
1611aa8bea0eSBing Zhao 		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
1612aa8bea0eSBing Zhao 		    txq_ctrl->hairpin_conf.peers[0].port ==
1613aa8bea0eSBing Zhao 		    priv->dev_data->port_id) {
161426e1eaf2SDariusz Sosnowski 			ret = mlx5_ctrl_flow_source_queue(dev,
161526e1eaf2SDariusz Sosnowski 					mlx5_txq_get_sqn(txq_ctrl));
16163c84f34eSOri Kam 			if (ret) {
16173c84f34eSOri Kam 				mlx5_txq_release(dev, i);
16183c84f34eSOri Kam 				goto error;
16193c84f34eSOri Kam 			}
16203c84f34eSOri Kam 		}
1621a13ec19cSMichael Baum 		if (priv->sh->config.dv_esw_en) {
162226e1eaf2SDariusz Sosnowski 			uint32_t q = mlx5_txq_get_sqn(txq_ctrl);
162326e1eaf2SDariusz Sosnowski 
162426e1eaf2SDariusz Sosnowski 			if (mlx5_flow_create_devx_sq_miss_flow(dev, q) == 0) {
162526e1eaf2SDariusz Sosnowski 				mlx5_txq_release(dev, i);
1626686d05b6SXueming Li 				DRV_LOG(ERR,
1627686d05b6SXueming Li 					"Port %u Tx queue %u SQ create representor devx default miss rule failed.",
1628686d05b6SXueming Li 					dev->data->port_id, i);
1629686d05b6SXueming Li 				goto error;
1630686d05b6SXueming Li 			}
1631686d05b6SXueming Li 		}
16323c84f34eSOri Kam 		mlx5_txq_release(dev, i);
16333c84f34eSOri Kam 	}
16341939eb6fSDariusz Sosnowski 	if (priv->sh->config.fdb_def_rule) {
1635a13ec19cSMichael Baum 		if (priv->sh->config.dv_esw_en) {
1636fbde4331SMatan Azrad 			if (mlx5_flow_create_esw_table_zero_flow(dev))
1637fbde4331SMatan Azrad 				priv->fdb_def_rule = 1;
1638fbde4331SMatan Azrad 			else
16391939eb6fSDariusz Sosnowski 				DRV_LOG(INFO, "port %u FDB default rule cannot be configured - only Eswitch group 0 flows are supported.",
16401939eb6fSDariusz Sosnowski 					dev->data->port_id);
16411939eb6fSDariusz Sosnowski 		}
16421939eb6fSDariusz Sosnowski 	} else {
16431939eb6fSDariusz Sosnowski 		DRV_LOG(INFO, "port %u FDB default rule is disabled",
16441939eb6fSDariusz Sosnowski 			dev->data->port_id);
1645fbde4331SMatan Azrad 	}
164687e4384dSBing Zhao 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
16470f0ae73aSShiri Kuzin 		ret = mlx5_flow_lacp_miss(dev);
16480f0ae73aSShiri Kuzin 		if (ret)
16490f0ae73aSShiri Kuzin 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
16500f0ae73aSShiri Kuzin 				"forward LACP to kernel.", dev->data->port_id);
16510f0ae73aSShiri Kuzin 		else
165287e4384dSBing Zhao 			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
165387e4384dSBing Zhao 				dev->data->port_id);
16540f0ae73aSShiri Kuzin 	}
1655f8cb4b57SNélio Laranjeiro 	if (priv->isolated)
1656f8cb4b57SNélio Laranjeiro 		return 0;
1657f8cb4b57SNélio Laranjeiro 	if (dev->data->promiscuous) {
1658f8cb4b57SNélio Laranjeiro 		struct rte_flow_item_eth promisc = {
1659e0d947a1SFerruh Yigit 			.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
1660e0d947a1SFerruh Yigit 			.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
16618275d5fcSThomas Monjalon 			.hdr.ether_type = 0,
1662f8cb4b57SNélio Laranjeiro 		};
1663f8cb4b57SNélio Laranjeiro 
1664a6d83b6aSNélio Laranjeiro 		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
1665a6d83b6aSNélio Laranjeiro 		if (ret)
1666a6d83b6aSNélio Laranjeiro 			goto error;
1667f8cb4b57SNélio Laranjeiro 	}
1668f8cb4b57SNélio Laranjeiro 	if (dev->data->all_multicast) {
1669f8cb4b57SNélio Laranjeiro 		struct rte_flow_item_eth multicast = {
1670e0d947a1SFerruh Yigit 			.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
1671e0d947a1SFerruh Yigit 			.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
16728275d5fcSThomas Monjalon 			.hdr.ether_type = 0,
1673f8cb4b57SNélio Laranjeiro 		};
1674f8cb4b57SNélio Laranjeiro 
1675a6d83b6aSNélio Laranjeiro 		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
1676a6d83b6aSNélio Laranjeiro 		if (ret)
1677a6d83b6aSNélio Laranjeiro 			goto error;
1678f8cb4b57SNélio Laranjeiro 	} else {
1679f8cb4b57SNélio Laranjeiro 		/* Add broadcast/multicast flows. */
1680f8cb4b57SNélio Laranjeiro 		for (i = 0; i != vlan_filter_n; ++i) {
1681f8cb4b57SNélio Laranjeiro 			uint16_t vlan = priv->vlan_filter[i];
1682f8cb4b57SNélio Laranjeiro 
1683f8cb4b57SNélio Laranjeiro 			struct rte_flow_item_vlan vlan_spec = {
16848275d5fcSThomas Monjalon 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
1685f8cb4b57SNélio Laranjeiro 			};
16862bc98393SNelio Laranjeiro 			struct rte_flow_item_vlan vlan_mask =
16872bc98393SNelio Laranjeiro 				rte_flow_item_vlan_mask;
1688f8cb4b57SNélio Laranjeiro 
1689f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
1690f8cb4b57SNélio Laranjeiro 						  &vlan_spec, &vlan_mask);
1691f8cb4b57SNélio Laranjeiro 			if (ret)
1692f8cb4b57SNélio Laranjeiro 				goto error;
1693f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
1694f8cb4b57SNélio Laranjeiro 						  &ipv6_multi_mask,
1695f8cb4b57SNélio Laranjeiro 						  &vlan_spec, &vlan_mask);
1696f8cb4b57SNélio Laranjeiro 			if (ret)
1697f8cb4b57SNélio Laranjeiro 				goto error;
1698f8cb4b57SNélio Laranjeiro 		}
1699f8cb4b57SNélio Laranjeiro 		if (!vlan_filter_n) {
1700f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
1701f8cb4b57SNélio Laranjeiro 			if (ret)
1702f8cb4b57SNélio Laranjeiro 				goto error;
1703f8cb4b57SNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
1704f8cb4b57SNélio Laranjeiro 					     &ipv6_multi_mask);
1705084de7a1STal Shnaiderman 			if (ret) {
1706084de7a1STal Shnaiderman 				/* Do not fail on IPv6 broadcast creation failure. */
1707084de7a1STal Shnaiderman 				DRV_LOG(WARNING,
1708084de7a1STal Shnaiderman 					"IPv6 broadcast is not supported");
1709084de7a1STal Shnaiderman 				ret = 0;
1710084de7a1STal Shnaiderman 			}
1711f8cb4b57SNélio Laranjeiro 		}
1712f8cb4b57SNélio Laranjeiro 	}
1713f8cb4b57SNélio Laranjeiro 	/* Add MAC address flows. */
1714272733b5SNélio Laranjeiro 	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
17156d13ea8eSOlivier Matz 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
1716272733b5SNélio Laranjeiro 
1717272733b5SNélio Laranjeiro 		if (!memcmp(mac, &cmp, sizeof(*mac)))
1718272733b5SNélio Laranjeiro 			continue;
17198275d5fcSThomas Monjalon 		memcpy(&unicast.hdr.dst_addr.addr_bytes,
1720272733b5SNélio Laranjeiro 		       mac->addr_bytes,
172135b2d13fSOlivier Matz 		       RTE_ETHER_ADDR_LEN);
1722272733b5SNélio Laranjeiro 		for (j = 0; j != vlan_filter_n; ++j) {
1723272733b5SNélio Laranjeiro 			uint16_t vlan = priv->vlan_filter[j];
1724272733b5SNélio Laranjeiro 
1725272733b5SNélio Laranjeiro 			struct rte_flow_item_vlan vlan_spec = {
17268275d5fcSThomas Monjalon 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
1727272733b5SNélio Laranjeiro 			};
17282bc98393SNelio Laranjeiro 			struct rte_flow_item_vlan vlan_mask =
17292bc98393SNelio Laranjeiro 				rte_flow_item_vlan_mask;
1730272733b5SNélio Laranjeiro 
1731272733b5SNélio Laranjeiro 			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
1732272733b5SNélio Laranjeiro 						  &unicast_mask,
1733272733b5SNélio Laranjeiro 						  &vlan_spec,
1734272733b5SNélio Laranjeiro 						  &vlan_mask);
1735272733b5SNélio Laranjeiro 			if (ret)
1736272733b5SNélio Laranjeiro 				goto error;
1737272733b5SNélio Laranjeiro 		}
1738272733b5SNélio Laranjeiro 		if (!vlan_filter_n) {
1739a6d83b6aSNélio Laranjeiro 			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
1740272733b5SNélio Laranjeiro 			if (ret)
1741272733b5SNélio Laranjeiro 				goto error;
1742272733b5SNélio Laranjeiro 		}
1743272733b5SNélio Laranjeiro 	}
1744272733b5SNélio Laranjeiro 	return 0;
1745272733b5SNélio Laranjeiro error:
1746a6d83b6aSNélio Laranjeiro 	ret = rte_errno; /* Save rte_errno before cleanup. */
174786d09686SDariusz Sosnowski 	mlx5_traffic_disable_legacy(dev);
1748a6d83b6aSNélio Laranjeiro 	rte_errno = ret; /* Restore rte_errno. */
1749a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1750272733b5SNélio Laranjeiro }
1751272733b5SNélio Laranjeiro 
175286d09686SDariusz Sosnowski static void
175386d09686SDariusz Sosnowski mlx5_traffic_disable_legacy(struct rte_eth_dev *dev)
175486d09686SDariusz Sosnowski {
175586d09686SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
175686d09686SDariusz Sosnowski 	struct mlx5_ctrl_flow_entry *entry;
175786d09686SDariusz Sosnowski 	struct mlx5_ctrl_flow_entry *tmp;
175886d09686SDariusz Sosnowski 
175986d09686SDariusz Sosnowski 	/*
176086d09686SDariusz Sosnowski 	 * Free registered control flow rules first,
176186d09686SDariusz Sosnowski 	 * to free the memory allocated for list entries
176286d09686SDariusz Sosnowski 	 */
176386d09686SDariusz Sosnowski 	entry = LIST_FIRST(&priv->hw_ctrl_flows);
176486d09686SDariusz Sosnowski 	while (entry != NULL) {
176586d09686SDariusz Sosnowski 		tmp = LIST_NEXT(entry, next);
176686d09686SDariusz Sosnowski 		mlx5_legacy_ctrl_flow_destroy(dev, entry);
176786d09686SDariusz Sosnowski 		entry = tmp;
176886d09686SDariusz Sosnowski 	}
176986d09686SDariusz Sosnowski 
177086d09686SDariusz Sosnowski 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
177186d09686SDariusz Sosnowski }
1772272733b5SNélio Laranjeiro 
1773272733b5SNélio Laranjeiro /**
1774272733b5SNélio Laranjeiro  * Disable traffic flows configured by control plane
1775272733b5SNélio Laranjeiro  *
1776272733b5SNélio Laranjeiro  * @param dev
1777af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device private data.
1778272733b5SNélio Laranjeiro  */
1779925061b5SNélio Laranjeiro void
1780af4f09f2SNélio Laranjeiro mlx5_traffic_disable(struct rte_eth_dev *dev)
1781272733b5SNélio Laranjeiro {
17821939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
17831939eb6fSDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
17841939eb6fSDariusz Sosnowski 
17851939eb6fSDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
17861939eb6fSDariusz Sosnowski 		mlx5_flow_hw_flush_ctrl_flows(dev);
17871939eb6fSDariusz Sosnowski 	else
17881939eb6fSDariusz Sosnowski #endif
178986d09686SDariusz Sosnowski 		mlx5_traffic_disable_legacy(dev);
1790272733b5SNélio Laranjeiro }
1791272733b5SNélio Laranjeiro 
1792272733b5SNélio Laranjeiro /**
1793272733b5SNélio Laranjeiro  * Restart traffic flows configured by control plane
1794272733b5SNélio Laranjeiro  *
1795272733b5SNélio Laranjeiro  * @param dev
1796af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device private data.
1797272733b5SNélio Laranjeiro  *
1798272733b5SNélio Laranjeiro  * @return
1799a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1800272733b5SNélio Laranjeiro  */
1801272733b5SNélio Laranjeiro int
1802272733b5SNélio Laranjeiro mlx5_traffic_restart(struct rte_eth_dev *dev)
1803272733b5SNélio Laranjeiro {
1804af4f09f2SNélio Laranjeiro 	if (dev->data->dev_started) {
1805af4f09f2SNélio Laranjeiro 		mlx5_traffic_disable(dev);
18069fa7c1cdSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
18079fa7c1cdSDariusz Sosnowski 		mlx5_flow_hw_cleanup_ctrl_rx_templates(dev);
18089fa7c1cdSDariusz Sosnowski #endif
1809a6d83b6aSNélio Laranjeiro 		return mlx5_traffic_enable(dev);
1810af4f09f2SNélio Laranjeiro 	}
1811272733b5SNélio Laranjeiro 	return 0;
1812272733b5SNélio Laranjeiro }
1813d9f28495SDariusz Sosnowski 
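/**
 * Check whether unicast DMAC control flow rules should be updated
 * on MAC address or VLAN filter changes. They are maintained only
 * when the port is started and neither promiscuous nor isolated
 * mode is enabled.
 */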
1814d9f28495SDariusz Sosnowski static bool
1815d9f28495SDariusz Sosnowski mac_flows_update_needed(struct rte_eth_dev *dev)
1816d9f28495SDariusz Sosnowski {
1817d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1818d9f28495SDariusz Sosnowski 
1819d9f28495SDariusz Sosnowski 	if (!dev->data->dev_started)
1820d9f28495SDariusz Sosnowski 		return false;
1821d9f28495SDariusz Sosnowski 	if (dev->data->promiscuous)
1822d9f28495SDariusz Sosnowski 		return false;
1823d9f28495SDariusz Sosnowski 	if (priv->isolated)
1824d9f28495SDariusz Sosnowski 		return false;
1825d9f28495SDariusz Sosnowski 
1826d9f28495SDariusz Sosnowski 	return true;
1827d9f28495SDariusz Sosnowski }
1828d9f28495SDariusz Sosnowski 
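/*
 * The helpers below dispatch unicast DMAC (and DMAC+VLAN) control flow
 * creation/removal to either the HWS path (dv_flow_en == 2) or the
 * legacy flow engine.
 */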
1829d9f28495SDariusz Sosnowski static int
1830d9f28495SDariusz Sosnowski traffic_dmac_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
1831d9f28495SDariusz Sosnowski {
1832d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1833d9f28495SDariusz Sosnowski 
1834d9f28495SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
1835d9f28495SDariusz Sosnowski 		return mlx5_flow_hw_ctrl_flow_dmac(dev, addr);
1836d9f28495SDariusz Sosnowski 	else
1837d9f28495SDariusz Sosnowski 		return mlx5_legacy_dmac_flow_create(dev, addr);
1838d9f28495SDariusz Sosnowski }
1839d9f28495SDariusz Sosnowski 
1840d9f28495SDariusz Sosnowski static int
1841d9f28495SDariusz Sosnowski traffic_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
1842d9f28495SDariusz Sosnowski {
1843d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1844d9f28495SDariusz Sosnowski 
1845d9f28495SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
1846d9f28495SDariusz Sosnowski 		return mlx5_flow_hw_ctrl_flow_dmac_destroy(dev, addr);
1847d9f28495SDariusz Sosnowski 	else
1848d9f28495SDariusz Sosnowski 		return mlx5_legacy_dmac_flow_destroy(dev, addr);
1849d9f28495SDariusz Sosnowski }
1850d9f28495SDariusz Sosnowski 
1851d9f28495SDariusz Sosnowski static int
1852d9f28495SDariusz Sosnowski traffic_dmac_vlan_create(struct rte_eth_dev *dev,
1853d9f28495SDariusz Sosnowski 			 const struct rte_ether_addr *addr,
1854d9f28495SDariusz Sosnowski 			 const uint16_t vid)
1855d9f28495SDariusz Sosnowski {
1856d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1857d9f28495SDariusz Sosnowski 
1858d9f28495SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
1859d9f28495SDariusz Sosnowski 		return mlx5_flow_hw_ctrl_flow_dmac_vlan(dev, addr, vid);
1860d9f28495SDariusz Sosnowski 	else
1861d9f28495SDariusz Sosnowski 		return mlx5_legacy_dmac_vlan_flow_create(dev, addr, vid);
1862d9f28495SDariusz Sosnowski }
1863d9f28495SDariusz Sosnowski 
1864d9f28495SDariusz Sosnowski static int
1865d9f28495SDariusz Sosnowski traffic_dmac_vlan_destroy(struct rte_eth_dev *dev,
1866d9f28495SDariusz Sosnowski 			 const struct rte_ether_addr *addr,
1867d9f28495SDariusz Sosnowski 			 const uint16_t vid)
1868d9f28495SDariusz Sosnowski {
1869d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1870d9f28495SDariusz Sosnowski 
1871d9f28495SDariusz Sosnowski 	if (priv->sh->config.dv_flow_en == 2)
1872d9f28495SDariusz Sosnowski 		return mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(dev, addr, vid);
1873d9f28495SDariusz Sosnowski 	else
1874d9f28495SDariusz Sosnowski 		return mlx5_legacy_dmac_vlan_flow_destroy(dev, addr, vid);
1875d9f28495SDariusz Sosnowski }
1876d9f28495SDariusz Sosnowski 
1877d9f28495SDariusz Sosnowski /**
1878d9f28495SDariusz Sosnowski  * Adjust Rx control flow rules to allow traffic on provided MAC address.
1879d9f28495SDariusz Sosnowski  */
1880d9f28495SDariusz Sosnowski int
1881d9f28495SDariusz Sosnowski mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
1882d9f28495SDariusz Sosnowski {
1883d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1884d9f28495SDariusz Sosnowski 
1885d9f28495SDariusz Sosnowski 	if (!mac_flows_update_needed(dev))
1886d9f28495SDariusz Sosnowski 		return 0;
1887d9f28495SDariusz Sosnowski 
1888d9f28495SDariusz Sosnowski 	if (priv->vlan_filter_n > 0) {
1889d9f28495SDariusz Sosnowski 		unsigned int i;
1890d9f28495SDariusz Sosnowski 
1891d9f28495SDariusz Sosnowski 		for (i = 0; i < priv->vlan_filter_n; ++i) {
1892d9f28495SDariusz Sosnowski 			uint16_t vlan = priv->vlan_filter[i];
1893d9f28495SDariusz Sosnowski 			int ret;
1894d9f28495SDariusz Sosnowski 
1895d9f28495SDariusz Sosnowski 			if (mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
1896d9f28495SDariusz Sosnowski 				continue;
1897d9f28495SDariusz Sosnowski 
1898d9f28495SDariusz Sosnowski 			ret = traffic_dmac_vlan_create(dev, addr, vlan);
1899d9f28495SDariusz Sosnowski 			if (ret != 0)
1900d9f28495SDariusz Sosnowski 				return ret;
1901d9f28495SDariusz Sosnowski 		}
1902d9f28495SDariusz Sosnowski 
1903d9f28495SDariusz Sosnowski 		return 0;
1904d9f28495SDariusz Sosnowski 	}
1905d9f28495SDariusz Sosnowski 
1906d9f28495SDariusz Sosnowski 	if (mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
1907d9f28495SDariusz Sosnowski 		return 0;
1908d9f28495SDariusz Sosnowski 
1909d9f28495SDariusz Sosnowski 	return traffic_dmac_create(dev, addr);
1910d9f28495SDariusz Sosnowski }
1911d9f28495SDariusz Sosnowski 
1912d9f28495SDariusz Sosnowski /**
1913d9f28495SDariusz Sosnowski  * Adjust Rx control flow rules to disallow traffic with removed MAC address.
1914d9f28495SDariusz Sosnowski  */
1915d9f28495SDariusz Sosnowski int
1916d9f28495SDariusz Sosnowski mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
1917d9f28495SDariusz Sosnowski {
1918d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1919d9f28495SDariusz Sosnowski 
1920d9f28495SDariusz Sosnowski 	if (!mac_flows_update_needed(dev))
1921d9f28495SDariusz Sosnowski 		return 0;
1922d9f28495SDariusz Sosnowski 
1923d9f28495SDariusz Sosnowski 	if (priv->vlan_filter_n > 0) {
1924d9f28495SDariusz Sosnowski 		unsigned int i;
1925d9f28495SDariusz Sosnowski 
1926d9f28495SDariusz Sosnowski 		for (i = 0; i < priv->vlan_filter_n; ++i) {
1927d9f28495SDariusz Sosnowski 			uint16_t vlan = priv->vlan_filter[i];
1928d9f28495SDariusz Sosnowski 			int ret;
1929d9f28495SDariusz Sosnowski 
1930d9f28495SDariusz Sosnowski 			if (!mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
1931d9f28495SDariusz Sosnowski 				continue;
1932d9f28495SDariusz Sosnowski 
1933d9f28495SDariusz Sosnowski 			ret = traffic_dmac_vlan_destroy(dev, addr, vlan);
1934d9f28495SDariusz Sosnowski 			if (ret != 0)
1935d9f28495SDariusz Sosnowski 				return ret;
1936d9f28495SDariusz Sosnowski 		}
1937d9f28495SDariusz Sosnowski 
1938d9f28495SDariusz Sosnowski 		return 0;
1939d9f28495SDariusz Sosnowski 	}
1940d9f28495SDariusz Sosnowski 
1941d9f28495SDariusz Sosnowski 	if (!mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
1942d9f28495SDariusz Sosnowski 		return 0;
1943d9f28495SDariusz Sosnowski 
1944d9f28495SDariusz Sosnowski 	return traffic_dmac_destroy(dev, addr);
1945d9f28495SDariusz Sosnowski }
1946d9f28495SDariusz Sosnowski 
1947d9f28495SDariusz Sosnowski /**
1948d9f28495SDariusz Sosnowski  * Adjust Rx control flow rules to allow traffic on provided VLAN.
1949d9f28495SDariusz Sosnowski  *
1950d9f28495SDariusz Sosnowski  * Assumptions:
1951d9f28495SDariusz Sosnowski  * - Called when VLAN is added.
1952d9f28495SDariusz Sosnowski  * - At least one VLAN is enabled before function call.
1953d9f28495SDariusz Sosnowski  *
1954d9f28495SDariusz Sosnowski  * This function assumes that the VLAN is new and was not included in
1955d9f28495SDariusz Sosnowski  * Rx control flow rules set up before calling it.
1956d9f28495SDariusz Sosnowski  */
1957d9f28495SDariusz Sosnowski int
1958d9f28495SDariusz Sosnowski mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid)
1959d9f28495SDariusz Sosnowski {
1960d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
1961d9f28495SDariusz Sosnowski 	unsigned int i;
1962d9f28495SDariusz Sosnowski 	int ret;
1963d9f28495SDariusz Sosnowski 
1964d9f28495SDariusz Sosnowski 	if (!mac_flows_update_needed(dev))
1965d9f28495SDariusz Sosnowski 		return 0;
1966d9f28495SDariusz Sosnowski 
1967d9f28495SDariusz Sosnowski 	/* Add all unicast DMAC flow rules with new VLAN attached. */
1968d9f28495SDariusz Sosnowski 	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
1969d9f28495SDariusz Sosnowski 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
1970d9f28495SDariusz Sosnowski 
1971d9f28495SDariusz Sosnowski 		if (rte_is_zero_ether_addr(mac))
1972d9f28495SDariusz Sosnowski 			continue;
1973d9f28495SDariusz Sosnowski 
1974d9f28495SDariusz Sosnowski 		ret = traffic_dmac_vlan_create(dev, mac, vid);
1975d9f28495SDariusz Sosnowski 		if (ret != 0)
1976d9f28495SDariusz Sosnowski 			return ret;
1977d9f28495SDariusz Sosnowski 	}
1978d9f28495SDariusz Sosnowski 
1979d9f28495SDariusz Sosnowski 	if (priv->vlan_filter_n == 1) {
1980d9f28495SDariusz Sosnowski 		/*
1981d9f28495SDariusz Sosnowski 		 * Adding first VLAN. Need to remove unicast DMAC rules before adding new rules.
1982d9f28495SDariusz Sosnowski 		 * Removing after creating VLAN rules so that traffic "gap" is not introduced.
1983d9f28495SDariusz Sosnowski 		 */
1984d9f28495SDariusz Sosnowski 
1985d9f28495SDariusz Sosnowski 		for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
1986d9f28495SDariusz Sosnowski 			struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
1987d9f28495SDariusz Sosnowski 
1988d9f28495SDariusz Sosnowski 			if (rte_is_zero_ether_addr(mac))
1989d9f28495SDariusz Sosnowski 				continue;
1990d9f28495SDariusz Sosnowski 
1991d9f28495SDariusz Sosnowski 			ret = traffic_dmac_destroy(dev, mac);
1992d9f28495SDariusz Sosnowski 			if (ret != 0)
1993d9f28495SDariusz Sosnowski 				return ret;
1994d9f28495SDariusz Sosnowski 		}
1995d9f28495SDariusz Sosnowski 	}
1996d9f28495SDariusz Sosnowski 
1997d9f28495SDariusz Sosnowski 	return 0;
1998d9f28495SDariusz Sosnowski }
1999d9f28495SDariusz Sosnowski 
2000d9f28495SDariusz Sosnowski /**
2001d9f28495SDariusz Sosnowski  * Adjust Rx control flow rules to disallow traffic with removed VLAN.
2002d9f28495SDariusz Sosnowski  *
2003d9f28495SDariusz Sosnowski  * Assumptions:
2004d9f28495SDariusz Sosnowski  *
2005d9f28495SDariusz Sosnowski  * - VLAN was really removed.
2006d9f28495SDariusz Sosnowski  */
2007d9f28495SDariusz Sosnowski int
2008d9f28495SDariusz Sosnowski mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid)
2009d9f28495SDariusz Sosnowski {
2010d9f28495SDariusz Sosnowski 	struct mlx5_priv *priv = dev->data->dev_private;
2011d9f28495SDariusz Sosnowski 	unsigned int i;
2012d9f28495SDariusz Sosnowski 	int ret;
2013d9f28495SDariusz Sosnowski 
2014d9f28495SDariusz Sosnowski 	if (!mac_flows_update_needed(dev))
2015d9f28495SDariusz Sosnowski 		return 0;
2016d9f28495SDariusz Sosnowski 
2017d9f28495SDariusz Sosnowski 	if (priv->vlan_filter_n == 0) {
2018d9f28495SDariusz Sosnowski 		/*
2019d9f28495SDariusz Sosnowski 		 * If there are no VLANs as a result, unicast DMAC flow rules must be recreated.
2020d9f28495SDariusz Sosnowski 		 * Recreating first to ensure no traffic "gap".
2021d9f28495SDariusz Sosnowski 		 */
2022d9f28495SDariusz Sosnowski 
2023d9f28495SDariusz Sosnowski 		for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
2024d9f28495SDariusz Sosnowski 			struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
2025d9f28495SDariusz Sosnowski 
2026d9f28495SDariusz Sosnowski 			if (rte_is_zero_ether_addr(mac))
2027d9f28495SDariusz Sosnowski 				continue;
2028d9f28495SDariusz Sosnowski 
2029d9f28495SDariusz Sosnowski 			ret = traffic_dmac_create(dev, mac);
2030d9f28495SDariusz Sosnowski 			if (ret != 0)
2031d9f28495SDariusz Sosnowski 				return ret;
2032d9f28495SDariusz Sosnowski 		}
2033d9f28495SDariusz Sosnowski 	}
2034d9f28495SDariusz Sosnowski 
2035d9f28495SDariusz Sosnowski 	/* Remove all unicast DMAC flow rules with this VLAN. */
2036d9f28495SDariusz Sosnowski 	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
2037d9f28495SDariusz Sosnowski 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
2038d9f28495SDariusz Sosnowski 
2039d9f28495SDariusz Sosnowski 		if (rte_is_zero_ether_addr(mac))
2040d9f28495SDariusz Sosnowski 			continue;
2041d9f28495SDariusz Sosnowski 
2042d9f28495SDariusz Sosnowski 		ret = traffic_dmac_vlan_destroy(dev, mac, vid);
2043d9f28495SDariusz Sosnowski 		if (ret != 0)
2044d9f28495SDariusz Sosnowski 			return ret;
2045d9f28495SDariusz Sosnowski 	}
2046d9f28495SDariusz Sosnowski 
2047d9f28495SDariusz Sosnowski 	return 0;
2048d9f28495SDariusz Sosnowski }
2049