xref: /dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c (revision bc5d8fdb7008210e2698fa1f91e51d7dfba00f77)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 6WIND S.A.
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include <rte_eal.h>
#include <ethdev_driver.h>
#include <rte_string_fns.h>

#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_utils.h"

/**
 * Handle a port-agnostic message.
 *
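 * @param[in] mp_msg
 *   Pointer to the received message.
 * @param[in] peer
 *   Pointer to the peer socket path.
 *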
 * @return
 *   0 on success, 1 when message is not port-agnostic, (-1) on error.
 */
static int
mlx5_mp_os_handle_port_agnostic(const struct rte_mp_msg *mp_msg,
				const void *peer)
{
	struct rte_mp_msg mp_res;
	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
	const struct mlx5_mp_param *param =
		(const struct mlx5_mp_param *)mp_msg->param;
	const struct mlx5_mp_arg_mr_manage *mng = &param->args.mr_manage;
	struct mr_cache_entry entry;
	uint32_t lkey;

	switch (param->type) {
	case MLX5_MP_REQ_CREATE_MR:
		mp_init_port_agnostic_msg(&mp_res, param->type);
		lkey = mlx5_mr_create(mng->cdev, &mng->cdev->mr_scache, &entry,
				      mng->addr);
		if (lkey == UINT32_MAX)
			res->result = -rte_errno;
		return rte_mp_reply(&mp_res, peer);
	case MLX5_MP_REQ_MEMPOOL_REGISTER:
		mp_init_port_agnostic_msg(&mp_res, param->type);
		res->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool,
						       mng->is_extmem);
		return rte_mp_reply(&mp_res, peer);
	case MLX5_MP_REQ_MEMPOOL_UNREGISTER:
		mp_init_port_agnostic_msg(&mp_res, param->type);
		res->result = mlx5_mr_mempool_unregister(mng->cdev,
							 mng->mempool);
		return rte_mp_reply(&mp_res, peer);
	default:
		return 1;
	}
	return -1;
}

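/**
 * IPC message handler of the primary process.
 *
 * @param[in] mp_msg
 *   Pointer to the received message.
 * @param[in] peer
 *   Pointer to the peer socket path.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */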
int
mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_res;
	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
	const struct mlx5_mp_param *param =
		(const struct mlx5_mp_param *)mp_msg->param;
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_common_device *cdev;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Port-agnostic messages. */
	ret = mlx5_mp_os_handle_port_agnostic(mp_msg, peer);
	if (ret <= 0)
		return ret;
	/* Port-specific messages. */
	if (!rte_eth_dev_is_valid_port(param->port_id)) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
		return -rte_errno;
	}
	dev = &rte_eth_devices[param->port_id];
	priv = dev->data->dev_private;
	cdev = priv->sh->cdev;
	switch (param->type) {
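	/* Share the Verbs command FD of the device with the peer process. */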
	case MLX5_MP_REQ_VERBS_CMD_FD:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		mp_res.num_fds = 1;
		mp_res.fds[0] = ((struct ibv_context *)cdev->ctx)->cmd_fd;
		res->result = 0;
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_STATE_MODIFY:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_queue_state_modify_primary
					(dev, &param->args.state_modify);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_RX_STOP:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_rx_queue_stop_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_RX_START:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_rx_queue_start_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_TX_STOP:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_tx_queue_stop_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_QUEUE_TX_START:
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = mlx5_tx_queue_start_primary
					(dev, param->args.queue_id.queue_id);
		ret = rte_mp_reply(&mp_res, peer);
		break;
	default:
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u invalid mp request type",
			dev->data->port_id);
		return -rte_errno;
	}
	return ret;
}

/**
 * IPC message handler of a secondary process.
 *
 * @param[in] mp_msg
 *   Pointer to the received message.
 * @param[in] peer
 *   Pointer to the peer socket path.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mp_os_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_res;
	struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
	const struct mlx5_mp_param *param =
		(const struct mlx5_mp_param *)mp_msg->param;
	struct rte_eth_dev *dev;
	struct mlx5_proc_priv *ppriv;
	struct mlx5_priv *priv;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	if (!rte_eth_dev_is_valid_port(param->port_id)) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
		return -rte_errno;
	}
	dev = &rte_eth_devices[param->port_id];
	priv = dev->data->dev_private;
	switch (param->type) {
	case MLX5_MP_REQ_START_RXTX:
		DRV_LOG(INFO, "port %u starting datapath", dev->data->port_id);
		dev->rx_pkt_burst = mlx5_select_rx_function(dev);
		dev->tx_pkt_burst = mlx5_select_tx_function(dev);
		ppriv = (struct mlx5_proc_priv *)dev->process_private;
		/* If Tx queue number changes, re-initialize UAR. */
		if (ppriv->uar_table_sz != priv->txqs_n) {
			mlx5_tx_uar_uninit_secondary(dev);
			mlx5_proc_priv_uninit(dev);
			ret = mlx5_proc_priv_init(dev);
			if (ret) {
				close(mp_msg->fds[0]);
				return -rte_errno;
			}
			ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]);
			if (ret) {
				close(mp_msg->fds[0]);
				mlx5_proc_priv_uninit(dev);
				return -rte_errno;
			}
		}
		close(mp_msg->fds[0]);
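		/* Make the new burst functions visible before replying. */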
		rte_mb();
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = 0;
		ret = rte_mp_reply(&mp_res, peer);
		break;
	case MLX5_MP_REQ_STOP_RXTX:
		DRV_LOG(INFO, "port %u stopping datapath", dev->data->port_id);
		dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
		dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
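		/* Make the dummy burst functions visible before replying. */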
		rte_mb();
		mp_init_msg(&priv->mp_id, &mp_res, param->type);
		res->result = 0;
		ret = rte_mp_reply(&mp_res, peer);
		break;
	default:
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u invalid mp request type",
			dev->data->port_id);
		return -rte_errno;
	}
	return ret;
}

/**
 * Broadcast a request to stop or start the data-path to secondary processes.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 * @param[in] type
 *   Request type.
 */
static void
mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
{
	struct rte_mp_msg mp_req;
	struct rte_mp_msg *mp_res;
	struct rte_mp_reply mp_rep;
	struct mlx5_mp_param *res;
	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int i;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (!mlx5_shared_data->secondary_cnt)
		return;
	if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) {
		DRV_LOG(ERR, "port %u unknown request (req_type %d)",
			dev->data->port_id, type);
		return;
	}
	mp_init_msg(&priv->mp_id, &mp_req, type);
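	/*
	 * A start request also carries the Verbs command FD, which the
	 * secondary handler uses to re-initialize its Tx UAR mapping.
	 */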
	if (type == MLX5_MP_REQ_START_RXTX) {
		mp_req.num_fds = 1;
		mp_req.fds[0] =
			((struct ibv_context *)priv->sh->cdev->ctx)->cmd_fd;
	}
	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
	if (ret) {
		if (rte_errno != ENOTSUP)
			DRV_LOG(ERR, "port %u failed to request stop/start Rx/Tx (%d)",
				dev->data->port_id, type);
		goto exit;
	}
	if (mp_rep.nb_sent != mp_rep.nb_received) {
		DRV_LOG(ERR,
			"port %u not all secondaries responded (req_type %d)",
			dev->data->port_id, type);
		goto exit;
	}
	for (i = 0; i < mp_rep.nb_received; i++) {
		mp_res = &mp_rep.msgs[i];
		res = (struct mlx5_mp_param *)mp_res->param;
		if (res->result) {
			DRV_LOG(ERR, "port %u request failed on secondary #%d",
				dev->data->port_id, i);
			goto exit;
		}
	}
exit:
	mlx5_free(mp_rep.msgs);
}

/**
 * Broadcast a request to start the data-path to secondary processes. The
 * request is synchronous.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 */
void
mlx5_mp_os_req_start_rxtx(struct rte_eth_dev *dev)
{
	mp_req_on_rxtx(dev, MLX5_MP_REQ_START_RXTX);
}

/**
 * Broadcast a request to stop the data-path to secondary processes. The
 * request is synchronous.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 */
void
mlx5_mp_os_req_stop_rxtx(struct rte_eth_dev *dev)
{
	mp_req_on_rxtx(dev, MLX5_MP_REQ_STOP_RXTX);
}

/**
 * Request the primary process to stop or start a Verbs Rx/Tx queue.
 *
 * @param[in] dev
 *   Pointer to Ethernet structure.
 * @param queue_id
 *   Queue ID to control.
 * @param req_type
 *   Request type:
 *     MLX5_MP_REQ_QUEUE_RX_START - start Rx queue,
 *     MLX5_MP_REQ_QUEUE_TX_START - start Tx queue,
 *     MLX5_MP_REQ_QUEUE_RX_STOP - stop Rx queue,
 *     MLX5_MP_REQ_QUEUE_TX_STOP - stop Tx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mp_os_req_queue_control(struct rte_eth_dev *dev, uint16_t queue_id,
			     enum mlx5_mp_req_type req_type)
{
	struct rte_mp_msg mp_req;
	struct rte_mp_msg *mp_res;
	struct rte_mp_reply mp_rep;
	struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
	struct mlx5_mp_param *res;
	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
	struct mlx5_priv *priv;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	priv = dev->data->dev_private;
	mp_init_msg(&priv->mp_id, &mp_req, req_type);
	req->args.queue_id.queue_id = queue_id;
	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
	if (ret) {
		DRV_LOG(ERR, "port %u request to primary process failed",
			dev->data->port_id);
		return -rte_errno;
	}
	MLX5_ASSERT(mp_rep.nb_received == 1);
	mp_res = &mp_rep.msgs[0];
	res = (struct mlx5_mp_param *)mp_res->param;
	ret = res->result;
	free(mp_rep.msgs);
	return ret;
}
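
/*
 * For reference: a minimal sketch of how these handlers are typically hooked
 * into the EAL multi-process framework during driver initialization. The
 * helper names (mlx5_mp_init_primary/mlx5_mp_init_secondary from
 * mlx5_common_mp.h and MLX5_MP_NAME from mlx5.h) describe the surrounding
 * driver code and are assumptions, not part of this file:
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
 *					   mlx5_mp_os_primary_handle);
 *	else
 *		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
 *					     mlx5_mp_os_secondary_handle);
 */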