/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
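		/*
		 * Hairpin queues are set up with DevX objects; regular Tx
		 * queues get their elements allocated and are set up with
		 * Verbs objects.
		 */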
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
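	/* Release queue i (the one that failed) and all queues before it. */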
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
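	/*
	 * DevX RQ objects require DV flows, DevX and TIR destination
	 * support; otherwise fall back to Verbs objects.
	 */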
	enum mlx5_rxq_obj_type obj_type =
			priv->config.dv_flow_en && priv->config.devx &&
			priv->config.dest_tir ?
			MLX5_RXQ_OBJ_TYPE_DEVX_RQ : MLX5_RXQ_OBJ_TYPE_IBV;

	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
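	/* Release queue i (the one that failed) and all queues before it. */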
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Bind Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d cannot be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
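		/* Move the SQ from RESET to READY and attach the peer RQ. */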
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
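		/* Likewise move the RQ from RESET to READY with the peer SQ. */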
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
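 *
 * A minimal usage sketch (hypothetical application code; the port number is
 * an assumption). Applications normally reach this callback through the
 * generic rte_eth_dev_start() API:
 *
 * @code
 * uint16_t port_id = 0; // assumed port number
 *
 * if (rte_eth_dev_start(port_id) < 0)
 *         rte_exit(EXIT_FAILURE, "cannot start port %u\n", port_id);
 * @endcode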
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	/*
	 * Set the started flag here for subsequent steps, such as the
	 * creation of control flows.
	 */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to be
	 * started; no flows created by an application exist at this point.
	 * But it is worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
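	/*
	 * Make sure all the configuration stores above are visible before
	 * the burst function pointers are published below.
	 */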
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
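	/*
	 * Give in-flight bursts time to finish with the replaced handlers:
	 * roughly one millisecond per configured Rx queue.
	 */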
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
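	/* 33:33:xx:xx:xx:xx is the IPv6 multicast destination MAC prefix. */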
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The default flow of a hairpin Tx queue must be created regardless
	 * of isolation mode. Otherwise, all packets to be sent are sent out
	 * directly, bypassing the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
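		/* An all-zero spec and mask matches every packet. */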
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
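		/* Match only the multicast bit of the destination MAC. */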
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

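		/* Skip MAC entries that are not set (all zeroes). */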
		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}