xref: /dpdk/drivers/net/mlx5/mlx5_trigger.c (revision 925061b58b487fba57f55847b1447417fed715fb)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <errno.h>
#include <string.h>
#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
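		/* Pre-fill this queue's MP->MR translation cache from the
		 * already registered memory regions, up to its capacity. */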
		LIST_FOREACH(mr, &priv->mr, next) {
			mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
		if (!txq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
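	/* Remap UAR (doorbell) pages so Tx queues remain usable from
	 * secondary processes. */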
	ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
	if (ret)
		goto error;
	return ret;
error:
	mlx5_txq_stop(dev);
	return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);

		if (!rxq_ctrl)
			continue;
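		/* Allocate mbufs for the Rx ring before creating the Verbs
		 * objects. */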
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
		if (!rxq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return ret;
error:
	mlx5_rxq_stop(dev);
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno on error.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int err;

	dev->data->dev_started = 1;
	err = mlx5_flow_create_drop_queue(dev);
	if (err) {
		ERROR("%p: Drop queue allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
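	/* Register a memory region for each existing mempool. */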
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	err = mlx5_txq_start(dev);
	if (err) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = mlx5_rxq_start(dev);
	if (err) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = mlx5_rx_intr_vec_enable(dev);
	if (err) {
		ERROR("%p: RX interrupt vector creation failed",
		      (void *)dev);
		goto error;
	}
	mlx5_xstats_init(dev);
	/* Update link status and Tx/Rx callbacks for the first time. */
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	INFO("Forcing port %u link to be up", dev->data->port_id);
	err = mlx5_force_link_status_change(dev, ETH_LINK_UP);
	if (err) {
		DEBUG("Failed to set port %u link to be up",
		      dev->data->port_id);
		goto error;
	}
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
	/* Rollback. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		mlx5_mr_release(mr);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_flow_delete_drop_queue(dev);
	return err;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
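	/* Make the burst function swap visible to all lcores, then give
	 * in-flight Rx/Tx bursts time to drain (about 1 ms per Rx queue). */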
	rte_wmb();
	usleep(1000 * priv->rxqs_n);
	DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
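	/* Release every memory region registered while the port was started. */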
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		mlx5_mr_release(mr);
	mlx5_flow_delete_drop_queue(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno on error.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
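	/* Match templates for control flows; 33:33:xx:xx:xx:xx is the
	 * IPv6 multicast MAC prefix. */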
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	if (priv->isolated)
		return 0;
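	/* Promiscuous mode: a single flow with an empty match (spec and
	 * mask all zeroes) catches all traffic. */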
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
		return 0;
	}
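	/* All-multicast: match only the group bit of the destination MAC. */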
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

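		/* Skip unset (all-zero) MAC address entries. */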
		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast,
					     &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		mlx5_traffic_enable(dev);
	}
	return 0;
}