/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		struct mlx5_txq_data *txq_data;
		uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;

		if (!txq_ctrl)
			continue;
		txq_data = &txq_ctrl->txq;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			txq_alloc_elts(txq_ctrl);
		MLX5_ASSERT(!txq_ctrl->obj);
		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
					    0, txq_ctrl->socket);
		if (!txq_ctrl->obj) {
			DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
				"memory resources.", dev->data->port_id,
				txq_data->idx);
			rte_errno = ENOMEM;
			goto error;
		}
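		/*
		 * Create the hardware queue object through the per-device
		 * callback table; the backend behind obj_ops (e.g. Verbs or
		 * DevX) is selected earlier, when the device is spawned and
		 * configured.
		 */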
		ret = priv->obj_ops.txq_obj_new(dev, i);
		if (ret < 0) {
			mlx5_free(txq_ctrl->obj);
			txq_ctrl->obj = NULL;
			goto error;
		}
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
			txq_data->fcqs = mlx5_malloc(flags, size,
						     RTE_CACHE_LINE_SIZE,
						     txq_ctrl->socket);
			if (!txq_data->fcqs) {
				DRV_LOG(ERR, "Port %u Tx queue %u cannot "
					"allocate memory (FCQ).",
					dev->data->port_id, i);
				rte_errno = ENOMEM;
				goto error;
			}
		}
		DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
			dev->data->port_id, i, (void *)&txq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
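	/*
	 * Release queues in reverse order, from the one that failed (index i,
	 * still referenced by mlx5_txq_get() above) back down to 0, undoing
	 * the references and objects created so far.
	 */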
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
			/* Pre-register Rx mempool. */
			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
				" having %u chunks.", dev->data->port_id,
				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
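			/*
			 * Registering the mempool with the queue MR cache
			 * here, before the queue is started, means the Rx
			 * burst path normally finds the buffer memory already
			 * registered and avoids the slow MR lookup.
			 */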
			ret = rxq_alloc_elts(rxq_ctrl);
			if (ret)
				goto error;
		}
		MLX5_ASSERT(!rxq_ctrl->obj);
		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
					    sizeof(*rxq_ctrl->obj), 0,
					    rxq_ctrl->socket);
		if (!rxq_ctrl->obj) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u can't allocate resources.",
				dev->data->port_id, (*priv->rxqs)[i]->idx);
			rte_errno = ENOMEM;
			goto error;
		}
		ret = priv->obj_ops.rxq_obj_new(dev, i);
		if (ret) {
			mlx5_free(rxq_ctrl->obj);
			goto error;
		}
		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Bind Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

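	/*
	 * For every hairpin Tx queue, look up its peer Rx queue and move both
	 * hardware queues out of reset: the SQ is switched RST -> RDY with
	 * the peer RQ id/VHCA filled in, and the RQ is switched RST -> RDY
	 * pointing back at the SQ.
	 */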
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
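	/*
	 * Look up the "fine granularity inline" dynamic mbuf flag; if an
	 * application registered it, convert the bit number into the mask
	 * checked by the Tx data path, otherwise keep the mask cleared.
	 */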
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
352 		DRV_LOG(ERR, "port %u failed to set defaults flows",
353 			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to be
	 * started, as no flow created by the application exists at this
	 * point. The interface is still wrapped for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
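	/*
	 * Make sure all queue and flow state written above is globally
	 * visible before the real burst functions are published and the
	 * secondary processes are told to start their datapath.
	 */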
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_os_req_start_rxtx(dev);
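	/*
	 * Record this port in the shared context tables so the shared
	 * interrupt handlers can route events (link state, removal, DevX
	 * async) to the right ethdev port.
	 */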
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
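	/*
	 * Give datapath threads a moment (roughly 1 ms per Rx queue) to
	 * observe the dummy burst functions installed above before the
	 * queues start being torn down.
	 */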
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flow should be created regardless of
	 * isolation mode. Otherwise, all packets to be sent would go out
	 * directly without the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
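	/*
	 * In isolated mode the application owns the flow rules entirely,
	 * so no default broadcast/multicast/MAC flows are installed below.
	 */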
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
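	/*
	 * Control flows exist only while the port is started; when it is
	 * stopped they will be re-created by the next mlx5_dev_start().
	 */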
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}