xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "ipsec_mb_private.h"
12 
13 #define IMB_MP_REQ_VER_STR "1.1.0"
14 
/** Configure device.
 *
 * No-op: this PMD keeps no device-level configuration state, so the
 * cryptodev config hook always succeeds.
 *
 * @return 0 always.
 */
int
ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
		    __rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}
22 
/** Start device.
 *
 * No-op: queue pairs are fully operational once set up; there is no
 * separate start step for this PMD.
 *
 * @return 0 always.
 */
int
ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
29 
/** Stop device.
 *
 * No-op: nothing to quiesce at device level for this PMD.
 */
void
ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
{
}
35 
/** Close device.
 *
 * No-op: per-queue-pair resources are released via the qp release hook,
 * so closing the device itself needs no work.
 *
 * @return 0 always.
 */
int
ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
42 
43 /** Get device statistics */
44 void
45 ipsec_mb_stats_get(struct rte_cryptodev *dev,
46 		struct rte_cryptodev_stats *stats)
47 {
48 	int qp_id;
49 
50 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
51 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
52 		if (qp == NULL) {
53 			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
54 			continue;
55 		}
56 
57 		stats->enqueued_count += qp->stats.enqueued_count;
58 		stats->dequeued_count += qp->stats.dequeued_count;
59 
60 		stats->enqueue_err_count += qp->stats.enqueue_err_count;
61 		stats->dequeue_err_count += qp->stats.dequeue_err_count;
62 	}
63 }
64 
65 /** Reset device statistics */
66 void
67 ipsec_mb_stats_reset(struct rte_cryptodev *dev)
68 {
69 	int qp_id;
70 
71 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
72 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
73 
74 		memset(&qp->stats, 0, sizeof(qp->stats));
75 	}
76 }
77 
78 /** Get device info */
79 void
80 ipsec_mb_info_get(struct rte_cryptodev *dev,
81 		struct rte_cryptodev_info *dev_info)
82 {
83 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
84 	struct ipsec_mb_internals *pmd_info =
85 		&ipsec_mb_pmds[internals->pmd_type];
86 
87 	if (dev_info != NULL) {
88 		dev_info->driver_id = dev->driver_id;
89 		dev_info->feature_flags = dev->feature_flags;
90 		dev_info->capabilities = pmd_info->caps;
91 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
92 		/* No limit of number of sessions */
93 		dev_info->sym.max_nb_sessions = 0;
94 	}
95 }
96 
97 static int
98 ipsec_mb_secondary_qp_op(int dev_id, int qp_id,
99 		const struct rte_cryptodev_qp_conf *qp_conf,
100 		int socket_id, enum ipsec_mb_mp_req_type op_type)
101 {
102 	int ret;
103 	struct rte_mp_msg qp_req_msg;
104 	struct rte_mp_msg *qp_resp_msg;
105 	struct rte_mp_reply qp_resp;
106 	struct ipsec_mb_mp_param *req_param;
107 	struct ipsec_mb_mp_param *resp_param;
108 	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};
109 
110 	memset(&qp_req_msg, 0, sizeof(IPSEC_MB_MP_MSG));
111 	memcpy(qp_req_msg.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
112 	req_param = (struct ipsec_mb_mp_param *)&qp_req_msg.param;
113 
114 	qp_req_msg.len_param = sizeof(struct ipsec_mb_mp_param);
115 	req_param->type = op_type;
116 	req_param->dev_id = dev_id;
117 	req_param->qp_id = qp_id;
118 	req_param->socket_id = socket_id;
119 	req_param->process_id = getpid();
120 	if (qp_conf) {
121 		req_param->nb_descriptors = qp_conf->nb_descriptors;
122 		req_param->mp_session = (void *)qp_conf->mp_session;
123 	}
124 
125 	qp_req_msg.num_fds = 0;
126 	ret = rte_mp_request_sync(&qp_req_msg, &qp_resp, &ts);
127 	if (ret) {
128 		RTE_LOG(ERR, USER1, "Create MR request to primary process failed.");
129 		return -1;
130 	}
131 	qp_resp_msg = &qp_resp.msgs[0];
132 	resp_param = (struct ipsec_mb_mp_param *)qp_resp_msg->param;
133 
134 	return resp_param->result;
135 }
136 
137 /** Release queue pair */
138 int
139 ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
140 {
141 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
142 	struct rte_ring *r = NULL;
143 
144 	if (qp != NULL)
145 		return 0;
146 
147 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
148 		r = rte_ring_lookup(qp->name);
149 		rte_ring_free(r);
150 
151 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
152 		if (qp->mb_mgr)
153 			free_mb_mgr(qp->mb_mgr);
154 #else
155 		if (qp->mb_mgr_mz) {
156 			rte_memzone_free(qp->mb_mgr_mz);
157 			qp->mb_mgr = NULL;
158 		}
159 #endif
160 		rte_free(qp);
161 		dev->data->queue_pairs[qp_id] = NULL;
162 	} else { /* secondary process */
163 		return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
164 					NULL, 0, RTE_IPSEC_MB_MP_REQ_QP_FREE);
165 	}
166 	return 0;
167 }
168 
169 /** Set a unique name for the queue pair */
170 int
171 ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
172 					   struct ipsec_mb_qp *qp)
173 {
174 	uint32_t n =
175 	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
176 		     dev->data->dev_id, qp->id);
177 
178 	if (n >= sizeof(qp->name))
179 		return -1;
180 
181 	return 0;
182 }
183 
184 /** Create a ring to place processed operations on */
185 static struct rte_ring
186 *ipsec_mb_qp_create_processed_ops_ring(
187 	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
188 {
189 	struct rte_ring *r;
190 	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
191 
192 	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));
193 
194 	if (n >= sizeof(ring_name))
195 		return NULL;
196 
197 	r = rte_ring_lookup(ring_name);
198 	if (r) {
199 		if (rte_ring_get_size(r) >= ring_size) {
200 			IPSEC_MB_LOG(
201 			    INFO, "Reusing existing ring %s for processed ops",
202 			    ring_name);
203 			return r;
204 		}
205 		IPSEC_MB_LOG(
206 		    ERR, "Unable to reuse existing ring %s for processed ops",
207 		    ring_name);
208 		return NULL;
209 	}
210 
211 	return rte_ring_create(ring_name, ring_size, socket_id,
212 			       RING_F_SP_ENQ | RING_F_SC_DEQ);
213 }
214 
#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
/**
 * Obtain an IMB_MGR backed by a named memzone so primary and secondary
 * processes can share the same manager memory.
 *
 * Primary: looks the memzone up, reserving it if absent, then lays the
 * manager out in it and initialises it. Secondary: only attaches to a
 * memzone the primary must already have created.
 *
 * @param mb_mgr_mz       Output: set to the backing memzone.
 * @param mb_mgr_mz_name  Memzone name to reserve/look up.
 * @return pointer to the manager, or NULL on failure.
 */
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
		const char *mb_mgr_mz_name)
{
	IMB_MGR *mb_mgr;

	if (rte_eal_process_type() ==  RTE_PROC_PRIMARY) {
		/* Reuse an existing memzone if present, else reserve one. */
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
			imb_get_mb_mgr_size(),
			rte_socket_id(), 0);
		}
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
					mb_mgr_mz_name);
			return NULL;
		}
		/* Third arg differs from the secondary call below (1 vs 0);
		 * presumably "reset state" — confirm against the IPSec MB
		 * imb_set_pointers_mb_mgr() API docs.
		 */
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
		init_mb_mgr_auto(mb_mgr, NULL);
	} else {
		/* Secondary must find the memzone created by the primary. */
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(ERR,
				"Secondary can't find %s mz, did primary create it?",
				mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
	}
	return mb_mgr;
}
#endif
249 
/** Setup a queue pair.
 *
 * Primary process: (re)allocates the qp structure plus the PMD-private
 * area, obtains an IMB manager (heap-allocated for intel-ipsec-mb
 * < 1.1.0, memzone-backed otherwise), names the qp, creates its ops
 * ring and runs the optional PMD-specific configure hook.
 * Secondary process: forwards the setup to the primary over MP IPC
 * (requires intel-ipsec-mb >= 1.1.0).
 *
 * @return 0 on success, negative errno-style value on failure.
 */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				const struct rte_cryptodev_qp_conf *qp_conf,
				int socket_id)
{
	struct ipsec_mb_qp *qp = NULL;
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	uint32_t qp_size;
	int ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		/* Multi-process support needs the shared-memzone manager
		 * introduced in intel-ipsec-mb 1.1.0.
		 */
		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess,"
				"the minimum version required for this feature is %s.",
				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
		return -EINVAL;
#endif
		qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			/* Ask the primary to create the qp on our behalf. */
			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
			return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
						qp_conf, socket_id,	RTE_IPSEC_MB_MP_REQ_QP_SET);
		}

		IPSEC_MB_LOG(ERR, "Queue pair already setup'ed.");
		return -EINVAL;
	} else {
		/* Free memory prior to re-allocation if needed. */
		if (dev->data->queue_pairs[qp_id] != NULL)
			ipsec_mb_qp_release(dev, qp_id);

		/* PMD-private data is laid out directly after the qp. */
		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
		/* Allocate the queue pair data structure. */
		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
					RTE_CACHE_LINE_SIZE, socket_id);
		if (qp == NULL)
			return -ENOMEM;
	}

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	qp->mb_mgr = alloc_init_mb_mgr();
#else
	/* 1.1.0+: per-qp manager lives in a named, shareable memzone. */
	char mz_name[IPSEC_MB_MAX_MZ_NAME];
	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
			dev->data->dev_id, qp_id);
	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
			mz_name);
#endif
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	/* NOTE(review): both secondary branches above already return, so
	 * this check looks defensive/unreachable — confirm before relying
	 * on it.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;
	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	qp->pmd_type = internals->pmd_type;
	qp->sess_mp = qp_conf->mp_session;

	/* Ring carrying ops between enqueue and dequeue. */
	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
		qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	memset(&qp->stats, 0, sizeof(qp->stats));

	/* Optional PMD-specific qp configuration hook. */
	if (pmd_data->queue_pair_configure) {
		ret = pmd_data->queue_pair_configure(qp);
		if (ret < 0)
			goto qp_setup_cleanup;
	}

	return 0;

qp_setup_cleanup:
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	if (qp->mb_mgr)
		free_mb_mgr(qp->mb_mgr);
#else
	/* A secondary must not free the primary-owned memzone or qp. */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return ret;
	if (qp->mb_mgr_mz)
		rte_memzone_free(qp->mb_mgr_mz);
#endif
	rte_free(qp);
	return ret;
}
349 
350 int
351 ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
352 {
353 	struct rte_mp_msg ipc_resp;
354 	struct ipsec_mb_mp_param *resp_param =
355 		(struct ipsec_mb_mp_param *)ipc_resp.param;
356 	const struct ipsec_mb_mp_param *req_param =
357 		(const struct ipsec_mb_mp_param *)mp_msg->param;
358 
359 	int ret;
360 	struct rte_cryptodev *dev;
361 	struct ipsec_mb_qp *qp;
362 	struct rte_cryptodev_qp_conf queue_conf;
363 	int dev_id = req_param->dev_id;
364 	int qp_id = req_param->qp_id;
365 
366 	queue_conf.nb_descriptors = req_param->nb_descriptors;
367 	queue_conf.mp_session = (struct rte_mempool *)req_param->mp_session;
368 	memset(resp_param, 0, sizeof(struct ipsec_mb_mp_param));
369 	memcpy(ipc_resp.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
370 
371 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
372 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
373 		goto out;
374 	}
375 
376 	dev = rte_cryptodev_pmd_get_dev(dev_id);
377 	switch (req_param->type) {
378 	case RTE_IPSEC_MB_MP_REQ_QP_SET:
379 		qp = dev->data->queue_pairs[qp_id];
380 		if (qp)	{
381 			CDEV_LOG_DEBUG("qp %d on dev %d is initialised", qp_id, dev_id);
382 			goto out;
383 		}
384 
385 		ret = ipsec_mb_qp_setup(dev, qp_id,	&queue_conf, req_param->socket_id);
386 		if (!ret) {
387 			qp = dev->data->queue_pairs[qp_id];
388 			if (!qp) {
389 				CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
390 					qp_id, dev_id);
391 				goto out;
392 			}
393 			qp->qp_used_by_pid = req_param->process_id;
394 		}
395 		resp_param->result = ret;
396 		break;
397 	case RTE_IPSEC_MB_MP_REQ_QP_FREE:
398 		qp = dev->data->queue_pairs[qp_id];
399 		if (!qp) {
400 			CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
401 				qp_id, dev_id);
402 			goto out;
403 		}
404 
405 		if (qp->qp_used_by_pid != req_param->process_id) {
406 			CDEV_LOG_ERR("Unable to release qp_id=%d", qp_id);
407 			goto out;
408 		}
409 
410 		qp->qp_used_by_pid = 0;
411 		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
412 		break;
413 	default:
414 		CDEV_LOG_ERR("invalid mp request type\n");
415 	}
416 
417 out:
418 	ret = rte_mp_reply(&ipc_resp, peer);
419 	return ret;
420 }
421 
422 /** Return the size of the specific pmd session structure */
423 unsigned
424 ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
425 {
426 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
427 	struct ipsec_mb_internals *pmd_data =
428 		&ipsec_mb_pmds[internals->pmd_type];
429 
430 	return pmd_data->session_priv_size;
431 }
432 
433 /** Configure pmd specific multi-buffer session from a crypto xform chain */
434 int
435 ipsec_mb_sym_session_configure(
436 	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
437 	struct rte_cryptodev_sym_session *sess)
438 {
439 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
440 	struct ipsec_mb_internals *pmd_data =
441 		&ipsec_mb_pmds[internals->pmd_type];
442 	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
443 	int ret = 0;
444 
445 	if (!mb_mgr)
446 		return -ENOMEM;
447 
448 	if (unlikely(sess == NULL)) {
449 		IPSEC_MB_LOG(ERR, "invalid session struct");
450 		free_mb_mgr(mb_mgr);
451 		return -EINVAL;
452 	}
453 
454 	ret = (*pmd_data->session_configure)(mb_mgr,
455 			CRYPTODEV_GET_SYM_SESS_PRIV(sess), xform);
456 	if (ret != 0) {
457 		IPSEC_MB_LOG(ERR, "failed configure session parameters");
458 
459 		/* Return session to mempool */
460 		free_mb_mgr(mb_mgr);
461 		return ret;
462 	}
463 
464 	free_mb_mgr(mb_mgr);
465 	return 0;
466 }
467 
/** Clear the session memory.
 *
 * Intentionally empty: this PMD keeps no session state outside the
 * session private-data area. NOTE(review): assumes the cryptodev layer
 * handles recycling that area — confirm against the session framework.
 */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess __rte_unused)
{}
473