xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "ipsec_mb_private.h"
13 
14 #define IMB_MP_REQ_VER_STR "1.1.0"
15 
16 /** Configure device */
17 int
18 ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
19 		    __rte_unused struct rte_cryptodev_config *config)
20 {
21 	return 0;
22 }
23 
24 /** Start device */
25 int
26 ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
27 {
28 	return 0;
29 }
30 
31 /** Stop device */
32 void
33 ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
34 {
35 }
36 
37 /** Close device */
38 int
39 ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
40 {
41 	return 0;
42 }
43 
44 /** Get device statistics */
45 void
46 ipsec_mb_stats_get(struct rte_cryptodev *dev,
47 		struct rte_cryptodev_stats *stats)
48 {
49 	int qp_id;
50 
51 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
52 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
53 		if (qp == NULL) {
54 			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
55 			continue;
56 		}
57 
58 		stats->enqueued_count += qp->stats.enqueued_count;
59 		stats->dequeued_count += qp->stats.dequeued_count;
60 
61 		stats->enqueue_err_count += qp->stats.enqueue_err_count;
62 		stats->dequeue_err_count += qp->stats.dequeue_err_count;
63 	}
64 }
65 
66 /** Reset device statistics */
67 void
68 ipsec_mb_stats_reset(struct rte_cryptodev *dev)
69 {
70 	int qp_id;
71 
72 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
73 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
74 
75 		memset(&qp->stats, 0, sizeof(qp->stats));
76 	}
77 }
78 
79 /** Get device info */
80 void
81 ipsec_mb_info_get(struct rte_cryptodev *dev,
82 		struct rte_cryptodev_info *dev_info)
83 {
84 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
85 	struct ipsec_mb_internals *pmd_info =
86 		&ipsec_mb_pmds[internals->pmd_type];
87 
88 	if (dev_info != NULL) {
89 		dev_info->driver_id = dev->driver_id;
90 		dev_info->feature_flags = dev->feature_flags;
91 		dev_info->capabilities = pmd_info->caps;
92 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
93 		/* No limit of number of sessions */
94 		dev_info->sym.max_nb_sessions = 0;
95 	}
96 }
97 
98 static int
99 ipsec_mb_secondary_qp_op(int dev_id, int qp_id,
100 		const struct rte_cryptodev_qp_conf *qp_conf,
101 		int socket_id, enum ipsec_mb_mp_req_type op_type)
102 {
103 	int ret;
104 	struct rte_mp_msg qp_req_msg;
105 	struct rte_mp_msg *qp_resp_msg;
106 	struct rte_mp_reply qp_resp;
107 	struct ipsec_mb_mp_param *req_param;
108 	struct ipsec_mb_mp_param *resp_param;
109 	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};
110 
111 	memset(&qp_req_msg, 0, sizeof(IPSEC_MB_MP_MSG));
112 	memcpy(qp_req_msg.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
113 	req_param = (struct ipsec_mb_mp_param *)&qp_req_msg.param;
114 
115 	qp_req_msg.len_param = sizeof(struct ipsec_mb_mp_param);
116 	req_param->type = op_type;
117 	req_param->dev_id = dev_id;
118 	req_param->qp_id = qp_id;
119 	req_param->socket_id = socket_id;
120 	req_param->process_id = getpid();
121 	if (qp_conf) {
122 		req_param->nb_descriptors = qp_conf->nb_descriptors;
123 		req_param->mp_session = (void *)qp_conf->mp_session;
124 	}
125 
126 	qp_req_msg.num_fds = 0;
127 	ret = rte_mp_request_sync(&qp_req_msg, &qp_resp, &ts);
128 	if (ret) {
129 		IPSEC_MB_LOG(ERR, "Create MR request to primary process failed.");
130 		return -1;
131 	}
132 	qp_resp_msg = &qp_resp.msgs[0];
133 	resp_param = (struct ipsec_mb_mp_param *)qp_resp_msg->param;
134 
135 	return resp_param->result;
136 }
137 
138 /** Release queue pair */
139 int
140 ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
141 {
142 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
143 
144 	if (!qp)
145 		return 0;
146 
147 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
148 		rte_ring_free(rte_ring_lookup(qp->name));
149 
150 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
151 		if (qp->mb_mgr)
152 			free_mb_mgr(qp->mb_mgr);
153 #else
154 		if (qp->mb_mgr_mz) {
155 			rte_memzone_free(qp->mb_mgr_mz);
156 			qp->mb_mgr = NULL;
157 		}
158 #endif
159 		rte_free(qp);
160 		dev->data->queue_pairs[qp_id] = NULL;
161 	} else { /* secondary process */
162 		return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
163 					NULL, 0, RTE_IPSEC_MB_MP_REQ_QP_FREE);
164 	}
165 	return 0;
166 }
167 
168 /** Set a unique name for the queue pair */
169 int
170 ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
171 					   struct ipsec_mb_qp *qp)
172 {
173 	uint32_t n =
174 	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
175 		     dev->data->dev_id, qp->id);
176 
177 	if (n >= sizeof(qp->name))
178 		return -1;
179 
180 	return 0;
181 }
182 
183 /** Create a ring to place processed operations on */
184 static struct rte_ring
185 *ipsec_mb_qp_create_processed_ops_ring(
186 	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
187 {
188 	struct rte_ring *r;
189 	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
190 
191 	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));
192 
193 	if (n >= sizeof(ring_name))
194 		return NULL;
195 
196 	r = rte_ring_lookup(ring_name);
197 	if (r) {
198 		if (rte_ring_get_size(r) >= ring_size) {
199 			IPSEC_MB_LOG(
200 			    INFO, "Reusing existing ring %s for processed ops",
201 			    ring_name);
202 			return r;
203 		}
204 		IPSEC_MB_LOG(
205 		    ERR, "Unable to reuse existing ring %s for processed ops",
206 		    ring_name);
207 		return NULL;
208 	}
209 
210 	return rte_ring_create(ring_name, ring_size, socket_id,
211 			       RING_F_SP_ENQ | RING_F_SC_DEQ);
212 }
213 
214 #if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
215 static IMB_MGR *
216 ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
217 		const char *mb_mgr_mz_name)
218 {
219 	IMB_MGR *mb_mgr;
220 
221 	if (rte_eal_process_type() ==  RTE_PROC_PRIMARY) {
222 		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
223 		if (*mb_mgr_mz == NULL) {
224 			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
225 			imb_get_mb_mgr_size(),
226 			rte_socket_id(), 0);
227 		}
228 		if (*mb_mgr_mz == NULL) {
229 			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
230 					mb_mgr_mz_name);
231 			return NULL;
232 		}
233 		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
234 		init_mb_mgr_auto(mb_mgr, NULL);
235 	} else {
236 		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
237 		if (*mb_mgr_mz == NULL) {
238 			IPSEC_MB_LOG(ERR,
239 				"Secondary can't find %s mz, did primary create it?",
240 				mb_mgr_mz_name);
241 			return NULL;
242 		}
243 		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
244 	}
245 	return mb_mgr;
246 }
247 #endif
248 
249 /** Setup a queue pair */
250 int
251 ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
252 				const struct rte_cryptodev_qp_conf *qp_conf,
253 				int socket_id)
254 {
255 	struct ipsec_mb_qp *qp = NULL;
256 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
257 	struct ipsec_mb_internals *pmd_data =
258 		&ipsec_mb_pmds[internals->pmd_type];
259 	uint32_t qp_size;
260 	int ret;
261 
262 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
263 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
264 		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess,"
265 				"the minimum version required for this feature is %s.",
266 				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
267 		return -EINVAL;
268 #endif
269 		qp = dev->data->queue_pairs[qp_id];
270 		if (qp == NULL) {
271 			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
272 			return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
273 						qp_conf, socket_id,	RTE_IPSEC_MB_MP_REQ_QP_SET);
274 		}
275 	} else {
276 		/* Free memory prior to re-allocation if needed. */
277 		if (dev->data->queue_pairs[qp_id] != NULL)
278 			ipsec_mb_qp_release(dev, qp_id);
279 
280 		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
281 		/* Allocate the queue pair data structure. */
282 		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
283 					RTE_CACHE_LINE_SIZE, socket_id);
284 		if (qp == NULL)
285 			return -ENOMEM;
286 	}
287 
288 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
289 	qp->mb_mgr = alloc_init_mb_mgr();
290 #else
291 	char mz_name[IPSEC_MB_MAX_MZ_NAME];
292 	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
293 			dev->data->dev_id, qp_id);
294 	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
295 			mz_name);
296 #endif
297 	if (qp->mb_mgr == NULL) {
298 		ret = -ENOMEM;
299 		goto qp_setup_cleanup;
300 	}
301 
302 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
303 		return 0;
304 
305 	qp->id = qp_id;
306 	dev->data->queue_pairs[qp_id] = qp;
307 	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
308 		ret = -EINVAL;
309 		goto qp_setup_cleanup;
310 	}
311 
312 	qp->pmd_type = internals->pmd_type;
313 	qp->sess_mp = qp_conf->mp_session;
314 
315 	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
316 		qp_conf->nb_descriptors, socket_id);
317 	if (qp->ingress_queue == NULL) {
318 		ret = -EINVAL;
319 		goto qp_setup_cleanup;
320 	}
321 
322 	memset(&qp->stats, 0, sizeof(qp->stats));
323 
324 	if (pmd_data->queue_pair_configure) {
325 		ret = pmd_data->queue_pair_configure(qp);
326 		if (ret < 0)
327 			goto qp_setup_cleanup;
328 	}
329 
330 	return 0;
331 
332 qp_setup_cleanup:
333 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
334 	if (qp->mb_mgr)
335 		free_mb_mgr(qp->mb_mgr);
336 #else
337 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
338 		return ret;
339 	if (qp->mb_mgr_mz)
340 		rte_memzone_free(qp->mb_mgr_mz);
341 #endif
342 	rte_free(qp);
343 	return ret;
344 }
345 
346 int
347 ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
348 {
349 	struct rte_mp_msg ipc_resp;
350 	struct ipsec_mb_mp_param *resp_param =
351 		(struct ipsec_mb_mp_param *)ipc_resp.param;
352 	const struct ipsec_mb_mp_param *req_param =
353 		(const struct ipsec_mb_mp_param *)mp_msg->param;
354 
355 	int ret;
356 	struct rte_cryptodev *dev;
357 	struct ipsec_mb_qp *qp;
358 	struct rte_cryptodev_qp_conf queue_conf;
359 	int dev_id = req_param->dev_id;
360 	int qp_id = req_param->qp_id;
361 
362 	queue_conf.nb_descriptors = req_param->nb_descriptors;
363 	queue_conf.mp_session = (struct rte_mempool *)req_param->mp_session;
364 	memset(resp_param, 0, sizeof(struct ipsec_mb_mp_param));
365 	memcpy(ipc_resp.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
366 
367 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
368 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
369 		goto out;
370 	}
371 
372 	dev = rte_cryptodev_pmd_get_dev(dev_id);
373 	switch (req_param->type) {
374 	case RTE_IPSEC_MB_MP_REQ_QP_SET:
375 		qp = dev->data->queue_pairs[qp_id];
376 		if (qp)	{
377 			CDEV_LOG_DEBUG("qp %d on dev %d is initialised", qp_id, dev_id);
378 			goto out;
379 		}
380 
381 		ret = ipsec_mb_qp_setup(dev, qp_id,	&queue_conf, req_param->socket_id);
382 		if (!ret) {
383 			qp = dev->data->queue_pairs[qp_id];
384 			if (!qp) {
385 				CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
386 					qp_id, dev_id);
387 				goto out;
388 			}
389 			qp->qp_used_by_pid = req_param->process_id;
390 		}
391 		resp_param->result = ret;
392 		break;
393 	case RTE_IPSEC_MB_MP_REQ_QP_FREE:
394 		qp = dev->data->queue_pairs[qp_id];
395 		if (!qp) {
396 			CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
397 				qp_id, dev_id);
398 			goto out;
399 		}
400 
401 		if (qp->qp_used_by_pid != req_param->process_id) {
402 			CDEV_LOG_ERR("Unable to release qp_id=%d", qp_id);
403 			goto out;
404 		}
405 
406 		qp->qp_used_by_pid = 0;
407 		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
408 		break;
409 	default:
410 		CDEV_LOG_ERR("invalid mp request type");
411 	}
412 
413 out:
414 	ret = rte_mp_reply(&ipc_resp, peer);
415 	return ret;
416 }
417 
418 /** Return the size of the specific pmd session structure */
419 unsigned
420 ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
421 {
422 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
423 	struct ipsec_mb_internals *pmd_data =
424 		&ipsec_mb_pmds[internals->pmd_type];
425 
426 	return pmd_data->session_priv_size;
427 }
428 
429 /** Configure pmd specific multi-buffer session from a crypto xform chain */
430 int
431 ipsec_mb_sym_session_configure(
432 	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
433 	struct rte_cryptodev_sym_session *sess)
434 {
435 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
436 	struct ipsec_mb_internals *pmd_data =
437 		&ipsec_mb_pmds[internals->pmd_type];
438 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[0];
439 	IMB_MGR *mb_mgr;
440 	int ret = 0;
441 
442 	if (qp != NULL)
443 		mb_mgr = qp->mb_mgr;
444 	else
445 		mb_mgr = alloc_init_mb_mgr();
446 
447 	if (!mb_mgr)
448 		return -ENOMEM;
449 
450 	if (unlikely(sess == NULL)) {
451 		IPSEC_MB_LOG(ERR, "invalid session struct");
452 		if (qp == NULL)
453 			free_mb_mgr(mb_mgr);
454 		return -EINVAL;
455 	}
456 
457 	ret = (*pmd_data->session_configure)(mb_mgr,
458 			CRYPTODEV_GET_SYM_SESS_PRIV(sess), xform);
459 	if (ret != 0) {
460 		IPSEC_MB_LOG(ERR, "failed configure session parameters");
461 
462 		/* Return session to mempool */
463 		if (qp == NULL)
464 			free_mb_mgr(mb_mgr);
465 		return ret;
466 	}
467 
468 	if (qp == NULL)
469 		free_mb_mgr(mb_mgr);
470 	return 0;
471 }
472 
473 /** Clear the session memory */
474 void
475 ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
476 		struct rte_cryptodev_sym_session *sess __rte_unused)
477 {}
478