/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
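		/*
		 * Hairpin queues are backed by a DevX object only; regular
		 * queues also need their software elements (elts) allocated
		 * before the Verbs object is created.
		 */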
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
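	/* Release the failed queue and every queue set up before it. */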
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_data *rxq = NULL;

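	/*
	 * LRO is configured through DevX RQ objects, so a single queue
	 * with LRO enabled forces every Rx queue object to be created
	 * through DevX.
	 */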
	for (i = 0; i < priv->rxqs_n; ++i) {
		rxq = (*priv->rxqs)[i];

		if (rxq && rxq->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
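		/*
		 * Hairpin queues carry no mbufs, so skip mempool
		 * registration and element allocation; only the DevX
		 * object is needed.
		 */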
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Bind Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %u",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %u can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
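		/*
		 * Bind the pair: move the SQ from RST to RDY pointing at
		 * the peer RQ, then move the RQ from RST to RDY pointing
		 * back at the SQ, both under this device's VHCA id.
		 */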
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	ret = mlx5_dev_configure_rss_reta(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u reta config failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		mlx5_rxq_stop(dev);
		return -rte_errno;
	}
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	ret = mlx5_flow_start(dev, &priv->flows);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set flows",
			dev->data->port_id);
		goto error;
	}
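	/*
	 * Make sure the queue and flow state set up above is globally
	 * visible before the burst functions are exposed.
	 */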
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
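	/*
	 * Give lcores still inside the datapath time to observe the
	 * removed burst functions: roughly 1 ms per Rx queue.
	 */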
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by control plane
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
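	/*
	 * 33:33:xx:xx:xx:xx is the IPv6 multicast MAC prefix (RFC 2464);
	 * the mask below matches on its first two bytes only.
	 */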
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The default flow for a hairpin Tx queue must be created even in
	 * isolated mode; otherwise packets are sent out directly, bypassing
	 * the Tx flow actions (e.g. encapsulation).
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf)
		if (!mlx5_flow_create_esw_table_zero_flow(dev))
			goto error;
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
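	/*
	 * Rebuild the control flows in place; callers (e.g. MAC, VLAN or
	 * promiscuous mode updates) use this instead of a full restart.
	 */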
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}