xref: /dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 6WIND S.A.
3  * Copyright 2019 Mellanox Technologies, Ltd
4  */
5 
6 #include <stdio.h>
7 #include <time.h>
8 
9 #include <rte_eal.h>
10 #include <ethdev_driver.h>
11 #include <rte_string_fns.h>
12 
13 #include <mlx5_common_mp.h>
14 #include <mlx5_common_mr.h>
15 #include <mlx5_malloc.h>
16 
17 #include "mlx5.h"
18 #include "mlx5_rxtx.h"
19 #include "mlx5_rx.h"
20 #include "mlx5_tx.h"
21 #include "mlx5_utils.h"
22 
23 /**
24  * Handle a port-agnostic message.
25  *
26  * @return
27  *   0 on success, 1 when message is not port-agnostic, (-1) on error.
28  */
29 static int
30 mlx5_mp_os_handle_port_agnostic(const struct rte_mp_msg *mp_msg,
31 				const void *peer)
32 {
33 	struct rte_mp_msg mp_res;
34 	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
35 	const struct mlx5_mp_param *param =
36 		(const struct mlx5_mp_param *)mp_msg->param;
37 	const struct mlx5_mp_arg_mempool_reg *mpr;
38 	struct mlx5_mp_id mp_id;
39 
40 	switch (param->type) {
41 	case MLX5_MP_REQ_MEMPOOL_REGISTER:
42 		mlx5_mp_id_init(&mp_id, param->port_id);
43 		mp_init_msg(&mp_id, &mp_res, param->type);
44 		mpr = &param->args.mempool_reg;
45 		res->result = mlx5_mr_mempool_register(mpr->share_cache,
46 						       mpr->pd, mpr->mempool,
47 						       NULL);
48 		return rte_mp_reply(&mp_res, peer);
49 	case MLX5_MP_REQ_MEMPOOL_UNREGISTER:
50 		mlx5_mp_id_init(&mp_id, param->port_id);
51 		mp_init_msg(&mp_id, &mp_res, param->type);
52 		mpr = &param->args.mempool_reg;
53 		res->result = mlx5_mr_mempool_unregister(mpr->share_cache,
54 							 mpr->mempool, NULL);
55 		return rte_mp_reply(&mp_res, peer);
56 	default:
57 		return 1;
58 	}
59 	return -1;
60 }
61 
/**
 * IPC message handler of the primary process.
 *
 * Serves port-agnostic requests first, then port-specific requests
 * (MR creation, Verbs command FD sharing, queue state changes) issued
 * by secondary processes, and replies to the requesting peer.
 *
 * @param[in] mp_msg
 *   Received IPC message carrying a struct mlx5_mp_param payload.
 * @param[in] peer
 *   Pointer to the peer socket path, used to address the reply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_res;
	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
	const struct mlx5_mp_param *param =
		(const struct mlx5_mp_param *)mp_msg->param;
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_common_device *cdev;
	struct mr_cache_entry entry;
	uint32_t lkey;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Port-agnostic messages. */
	ret = mlx5_mp_os_handle_port_agnostic(mp_msg, peer);
	if (ret <= 0)
		return ret;
	/* Port-specific messages. */
	if (!rte_eth_dev_is_valid_port(param->port_id)) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
		return -rte_errno;
	}
	dev = &rte_eth_devices[param->port_id];
	priv = dev->data->dev_private;
	cdev = priv->sh->cdev;
	switch (param->type) {
	case MLX5_MP_REQ_CREATE_MR:
		/* Create an MR for the address a secondary failed to map. */
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		lkey = mlx5_mr_create_primary(cdev->pd, &cdev->mr_scache,
					      &entry, param->args.addr,
					      cdev->config.mr_ext_memseg_en);
		/* NOTE(review): the success path does not set res->result
		 * explicitly; it appears to rely on mp_init_msg() zeroing
		 * the parameter area — confirm against mp_init_msg().
		 */
		if (lkey == UINT32_MAX)
			res->result = -rte_errno;
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_VERBS_CMD_FD:
		/* Pass the Verbs command FD over IPC FD-passing. */
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		mp_res.num_fds = 1;
		mp_res.fds[0] = ((struct ibv_context *)cdev->ctx)->cmd_fd;
		res->result = 0;
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_STATE_MODIFY:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_queue_state_modify_primary
					(dev, &param->args.state_modify);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_RX_STOP:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_rx_queue_stop_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_RX_START:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_rx_queue_start_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_TX_STOP:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_tx_queue_stop_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_TX_START:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_tx_queue_start_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	default:
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u invalid mp request type",
			dev->data->port_id);
		return -rte_errno;
	}
	return ret;
}
145 
146 /**
147  * IPC message handler of a secondary process.
148  *
149  * @param[in] dev
150  *   Pointer to Ethernet structure.
151  * @param[in] peer
152  *   Pointer to the peer socket path.
153  *
154  * @return
155  *   0 on success, a negative errno value otherwise and rte_errno is set.
156  */
157 int
158 mlx5_mp_os_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
159 {
160 struct rte_mp_msg mp_res;
161 	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
162 	const struct mlx5_mp_param *param =
163 		(const struct mlx5_mp_param *)mp_msg->param;
164 	struct rte_eth_dev *dev;
165 	struct mlx5_proc_priv *ppriv;
166 	struct mlx5_priv *priv;
167 	int ret;
168 
169 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
170 	if (!rte_eth_dev_is_valid_port(param->port_id)) {
171 		rte_errno = ENODEV;
172 		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
173 		return -rte_errno;
174 	}
175 	dev = &rte_eth_devices[param->port_id];
176 	priv = dev->data->dev_private;
177 	switch (param->type) {
178 	case MLX5_MP_REQ_START_RXTX:
179 		DRV_LOG(INFO, "port %u starting datapath", dev->data->port_id);
180 		dev->rx_pkt_burst = mlx5_select_rx_function(dev);
181 		dev->tx_pkt_burst = mlx5_select_tx_function(dev);
182 		ppriv = (struct mlx5_proc_priv *)dev->process_private;
183 		/* If Tx queue number changes, re-initialize UAR. */
184 		if (ppriv->uar_table_sz != priv->txqs_n) {
185 			mlx5_tx_uar_uninit_secondary(dev);
186 			mlx5_proc_priv_uninit(dev);
187 			ret = mlx5_proc_priv_init(dev);
188 			if (ret)
189 				return -rte_errno;
190 			ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]);
191 			if (ret) {
192 				mlx5_proc_priv_uninit(dev);
193 				return -rte_errno;
194 			}
195 		}
196 		rte_mb();
197 		mp_init_msg(&priv->mp_id, &mp_res, param->type);
198 		res->result = 0;
199 		ret = rte_mp_reply(&mp_res, peer);
200 		break;
201 	case MLX5_MP_REQ_STOP_RXTX:
202 		DRV_LOG(INFO, "port %u stopping datapath", dev->data->port_id);
203 		dev->rx_pkt_burst = removed_rx_burst;
204 		dev->tx_pkt_burst = removed_tx_burst;
205 		rte_mb();
206 		mp_init_msg(&priv->mp_id, &mp_res, param->type);
207 		res->result = 0;
208 		ret = rte_mp_reply(&mp_res, peer);
209 		break;
210 	default:
211 		rte_errno = EINVAL;
212 		DRV_LOG(ERR, "port %u invalid mp request type",
213 			dev->data->port_id);
214 		return -rte_errno;
215 	}
216 	return ret;
217 }
218 
219 /**
220  * Broadcast request of stopping/starting data-path to secondary processes.
221  *
222  * @param[in] dev
223  *   Pointer to Ethernet structure.
224  * @param[in] type
225  *   Request type.
226  */
227 static void
228 mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
229 {
230 	struct rte_mp_msg mp_req;
231 	struct rte_mp_msg *mp_res;
232 	struct rte_mp_reply mp_rep;
233 	struct mlx5_mp_param *res;
234 	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
235 	struct mlx5_priv *priv = dev->data->dev_private;
236 	int ret;
237 	int i;
238 
239 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
240 	if (!mlx5_shared_data->secondary_cnt)
241 		return;
242 	if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) {
243 		DRV_LOG(ERR, "port %u unknown request (req_type %d)",
244 			dev->data->port_id, type);
245 		return;
246 	}
247 	mp_init_msg(&priv->mp_id, &mp_req, type);
248 	if (type == MLX5_MP_REQ_START_RXTX) {
249 		mp_req.num_fds = 1;
250 		mp_req.fds[0] =
251 			((struct ibv_context *)priv->sh->cdev->ctx)->cmd_fd;
252 	}
253 	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
254 	if (ret) {
255 		if (rte_errno != ENOTSUP)
256 			DRV_LOG(ERR, "port %u failed to request stop/start Rx/Tx (%d)",
257 				dev->data->port_id, type);
258 		goto exit;
259 	}
260 	if (mp_rep.nb_sent != mp_rep.nb_received) {
261 		DRV_LOG(ERR,
262 			"port %u not all secondaries responded (req_type %d)",
263 			dev->data->port_id, type);
264 		goto exit;
265 	}
266 	for (i = 0; i < mp_rep.nb_received; i++) {
267 		mp_res = &mp_rep.msgs[i];
268 		res = (struct mlx5_mp_param *)mp_res->param;
269 		if (res->result) {
270 			DRV_LOG(ERR, "port %u request failed on secondary #%d",
271 				dev->data->port_id, i);
272 			goto exit;
273 		}
274 	}
275 exit:
276 	mlx5_free(mp_rep.msgs);
277 }
278 
/**
 * Broadcast request of starting data-path to secondary processes. The request
 * is synchronous: it waits for all attached secondaries to reply or for the
 * IPC timeout to expire.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 */
void
mlx5_mp_os_req_start_rxtx(struct rte_eth_dev *dev)
{
	mp_req_on_rxtx(dev, MLX5_MP_REQ_START_RXTX);
}
291 
/**
 * Broadcast request of stopping data-path to secondary processes. The request
 * is synchronous: it waits for all attached secondaries to reply or for the
 * IPC timeout to expire.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 */
void
mlx5_mp_os_req_stop_rxtx(struct rte_eth_dev *dev)
{
	mp_req_on_rxtx(dev, MLX5_MP_REQ_STOP_RXTX);
}
304 
305 /**
306  * Request Verbs Rx/Tx queue stop or start to the primary process.
307  *
308  * @param[in] dev
309  *   Pointer to Ethernet structure.
310  * @param queue_id
311  *   Queue ID to control.
312  * @param req_type
313  *   request type
314  *     MLX5_MP_REQ_QUEUE_RX_START - start Rx queue
315  *     MLX5_MP_REQ_QUEUE_TX_START - stop Tx queue
316  *     MLX5_MP_REQ_QUEUE_RX_STOP - stop Rx queue
317  *     MLX5_MP_REQ_QUEUE_TX_STOP - stop Tx queue
318  * @return
319  *   0 on success, a negative errno value otherwise and
320  *     rte_errno is set.
321  */
322 int
323 mlx5_mp_os_req_queue_control(struct rte_eth_dev *dev, uint16_t queue_id,
324 			  enum mlx5_mp_req_type req_type)
325 {
326 	struct rte_mp_msg mp_req;
327 	struct rte_mp_msg *mp_res;
328 	struct rte_mp_reply mp_rep;
329 	struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
330 	struct mlx5_mp_param *res;
331 	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
332 	struct mlx5_priv *priv;
333 	int ret;
334 
335 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
336 	priv = dev->data->dev_private;
337 	mp_init_msg(&priv->mp_id, &mp_req, req_type);
338 	req->args.queue_id.queue_id = queue_id;
339 	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
340 	if (ret) {
341 		DRV_LOG(ERR, "port %u request to primary process failed",
342 			dev->data->port_id);
343 		return -rte_errno;
344 	}
345 	MLX5_ASSERT(mp_rep.nb_received == 1);
346 	mp_res = &mp_rep.msgs[0];
347 	res = (struct mlx5_mp_param *)mp_res->param;
348 	ret = res->result;
349 	free(mp_rep.msgs);
350 	return ret;
351 }
352