/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <string.h>
#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

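/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to private structure.
 */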
static void
priv_txq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_priv_txq_release(priv, i);
}

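/**
 * Start traffic on Tx queues, registering mempool memory regions and
 * creating the Verbs objects for each queue.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */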
static int
priv_txq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

		if (!txq_ctrl)
			continue;
		LIST_FOREACH(mr, &priv->mr, next) {
			priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
		if (!txq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
	if (ret)
		goto error;
	return ret;
error:
	priv_txq_stop(priv);
	return ret;
}

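/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to private structure.
 */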
static void
priv_rxq_stop(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_priv_rxq_release(priv, i);
}

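/**
 * Start traffic on Rx queues, allocating mbufs and creating the Verbs
 * objects for each queue.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */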
static int
priv_rxq_start(struct priv *priv)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

		if (!rxq_ctrl)
			continue;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
		if (!rxq_ctrl->ibv) {
			ret = ENOMEM;
			goto error;
		}
	}
	return ret;
error:
	priv_rxq_stop(priv);
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Allocate and start the Tx and Rx queues, register mempool memory
 * regions, enable Rx interrupts and attach the configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int err;

	dev->data->dev_started = 1;
	priv_lock(priv);
	err = priv_flow_create_drop_queue(priv);
	if (err) {
		ERROR("%p: Drop queue allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	DEBUG("%p: allocating and configuring Tx and Rx queues", (void *)dev);
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	err = priv_txq_start(priv);
	if (err) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rxq_start(priv);
	if (err) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	err = priv_rx_intr_vec_enable(priv);
	if (err) {
		ERROR("%p: RX interrupt vector creation failed",
		      (void *)dev);
		goto error;
	}
	priv_xstats_init(priv);
	/* Update link status and Tx/Rx callbacks for the first time. */
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	INFO("Forcing port %u link to be up", dev->data->port_id);
	err = priv_force_link_status_change(priv, ETH_LINK_UP);
	if (err) {
		DEBUG("Failed to set port %u link to be up",
		      dev->data->port_id);
		goto error;
	}
	priv_dev_interrupt_handler_install(priv, dev);
	priv_unlock(priv);
	return 0;
error:
	/* Rollback. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
	return -err;
}

/**
 * DPDK callback to stop the device.
 *
 * Detach all configured flows, disable Rx interrupts and release the
 * Tx and Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	priv_lock(priv);
	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
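	/* Give in-flight Rx/Tx bursts time to notice the updated burst
	 * functions before their queues are released. */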
	usleep(1000 * priv->rxqs_n);
	DEBUG("%p: cleaning up and destroying Tx and Rx queues", (void *)dev);
	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	priv_rx_intr_vec_disable(priv);
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_txq_stop(priv);
	priv_rxq_stop(priv);
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		priv_mr_release(priv, mr);
	priv_flow_delete_drop_queue(priv);
	priv_unlock(priv);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
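	/* IPv6 multicast frames use destination MACs with the 33:33
	 * prefix (RFC 2464); match on the first two bytes only. */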
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	if (priv->isolated)
		return 0;
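	/* In promiscuous mode, a single all-zero spec/mask flow matching
	 * every packet is enough. */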
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
		return 0;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

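		/* An all-zero address marks an unused entry in mac_addrs[]. */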
		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast,
					     &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
{
	(void)dev;
	priv_flow_flush(priv, &priv->ctrl_flows);
	return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		priv_dev_traffic_disable(priv, dev);
		priv_dev_traffic_enable(priv, dev);
	}
	return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	priv_dev_traffic_restart(priv, dev);
	priv_unlock(priv);
	return 0;
}