/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "ipsec_mb_private.h"

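/* Minimum intel-ipsec-mb version required for multi-process support */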
#define IMB_MP_REQ_VER_STR "1.1.0"

/** Configure device */
int
ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
int
ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
void
ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
int
ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
			continue;
		}

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

		if (qp == NULL)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_info =
		&ipsec_mb_pmds[internals->pmd_type];

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = pmd_info->caps;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

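/* Forward a queue pair request from a secondary process to the primary
 * over the EAL multi-process channel and return the primary's result.
 */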
static int
ipsec_mb_secondary_qp_op(int dev_id, int qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, enum ipsec_mb_mp_req_type op_type)
{
	int ret;
	struct rte_mp_msg qp_req_msg;
	struct rte_mp_msg *qp_resp_msg;
	struct rte_mp_reply qp_resp;
	struct ipsec_mb_mp_param *req_param;
	struct ipsec_mb_mp_param *resp_param;
	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};

	memset(&qp_req_msg, 0, sizeof(qp_req_msg));
	memcpy(qp_req_msg.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
	req_param = (struct ipsec_mb_mp_param *)&qp_req_msg.param;

	qp_req_msg.len_param = sizeof(struct ipsec_mb_mp_param);
	req_param->type = op_type;
	req_param->dev_id = dev_id;
	req_param->qp_id = qp_id;
	req_param->socket_id = socket_id;
	req_param->process_id = getpid();
	if (qp_conf) {
		req_param->nb_descriptors = qp_conf->nb_descriptors;
		req_param->mp_session = (void *)qp_conf->mp_session;
	}

	qp_req_msg.num_fds = 0;
	ret = rte_mp_request_sync(&qp_req_msg, &qp_resp, &ts);
	if (ret) {
		RTE_LOG(ERR, USER1, "Create MP request to primary process failed.\n");
		return -1;
	}
	qp_resp_msg = &qp_resp.msgs[0];
	resp_param = (struct ipsec_mb_mp_param *)qp_resp_msg->param;
	ret = resp_param->result;
	/* Reply messages are allocated by rte_mp_request_sync(); free them. */
	free(qp_resp.msgs);

	return ret;
}

/** Release queue pair */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

	if (!qp)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_ring_free(rte_ring_lookup(qp->name));

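		/* Before intel-ipsec-mb 1.1.0 each process owns a heap-allocated
		 * MB_MGR; from 1.1.0 onwards it lives in a shared memzone.
		 */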
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
#else
		if (qp->mb_mgr_mz) {
			rte_memzone_free(qp->mb_mgr_mz);
			qp->mb_mgr = NULL;
		}
#endif
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	} else { /* secondary process */
		return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
					NULL, 0, RTE_IPSEC_MB_MP_REQ_QP_FREE);
	}
	return 0;
}

/** Set a unique name for the queue pair */
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
					   struct ipsec_mb_qp *qp)
{
	uint32_t n =
	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
		     dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
ipsec_mb_qp_create_processed_ops_ring(
	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));

	if (n >= sizeof(ring_name))
		return NULL;

	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			IPSEC_MB_LOG(
			    INFO, "Reusing existing ring %s for processed ops",
			    ring_name);
			return r;
		}
		IPSEC_MB_LOG(
		    ERR, "Unable to reuse existing ring %s for processed ops",
		    ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}

#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
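/* Allocate (primary) or attach to (secondary) an IMB_MGR held in a shared
 * memzone so that all processes use the same multi-buffer manager state.
 */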
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
		const char *mb_mgr_mz_name)
{
	IMB_MGR *mb_mgr;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
					imb_get_mb_mgr_size(),
					rte_socket_id(), 0);
		}
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
					mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
		init_mb_mgr_auto(mb_mgr, NULL);
	} else {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(ERR,
				"Secondary can't find %s mz, did primary create it?",
				mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
	}
	return mb_mgr;
}
#endif

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				const struct rte_cryptodev_qp_conf *qp_conf,
				int socket_id)
{
	struct ipsec_mb_qp *qp = NULL;
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	uint32_t qp_size;
	int ret;

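	/* A secondary process delegates qp creation to the primary via IPC
	 * unless the qp already exists, in which case it only re-attaches.
	 */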
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess, "
				"the minimum version required for this feature is %s.",
				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
		return -EINVAL;
#endif
		qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
			return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
						qp_conf, socket_id, RTE_IPSEC_MB_MP_REQ_QP_SET);
		}
	} else {
		/* Free memory prior to re-allocation if needed. */
		if (dev->data->queue_pairs[qp_id] != NULL)
			ipsec_mb_qp_release(dev, qp_id);

		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
		/* Allocate the queue pair data structure. */
		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
					RTE_CACHE_LINE_SIZE, socket_id);
		if (qp == NULL)
			return -ENOMEM;
	}

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	qp->mb_mgr = alloc_init_mb_mgr();
#else
	char mz_name[IPSEC_MB_MAX_MZ_NAME];
	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
			dev->data->dev_id, qp_id);
	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
			mz_name);
#endif
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

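	/* A secondary process only needed to attach to the shared MB_MGR;
	 * the ring and qp metadata were already set up by the primary.
	 */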
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;
	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	qp->pmd_type = internals->pmd_type;
	qp->sess_mp = qp_conf->mp_session;

	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
		qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	memset(&qp->stats, 0, sizeof(qp->stats));

	if (pmd_data->queue_pair_configure) {
		ret = pmd_data->queue_pair_configure(qp);
		if (ret < 0)
			goto qp_setup_cleanup;
	}

	return 0;

qp_setup_cleanup:
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	if (qp->mb_mgr)
		free_mb_mgr(qp->mb_mgr);
#else
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return ret;
	if (qp->mb_mgr_mz)
		rte_memzone_free(qp->mb_mgr_mz);
#endif
	rte_free(qp);
	return ret;
}

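/** Primary-side handler for queue pair IPC requests from secondary processes */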
int
ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg ipc_resp;
	struct ipsec_mb_mp_param *resp_param =
		(struct ipsec_mb_mp_param *)ipc_resp.param;
	const struct ipsec_mb_mp_param *req_param =
		(const struct ipsec_mb_mp_param *)mp_msg->param;

	int ret;
	struct rte_cryptodev *dev;
	struct ipsec_mb_qp *qp;
	struct rte_cryptodev_qp_conf queue_conf;
	int dev_id = req_param->dev_id;
	int qp_id = req_param->qp_id;

	queue_conf.nb_descriptors = req_param->nb_descriptors;
	queue_conf.mp_session = (struct rte_mempool *)req_param->mp_session;
	memset(resp_param, 0, sizeof(struct ipsec_mb_mp_param));
	memcpy(ipc_resp.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
	ipc_resp.len_param = sizeof(struct ipsec_mb_mp_param);
	ipc_resp.num_fds = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		resp_param->result = -EINVAL;
		goto out;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	switch (req_param->type) {
	case RTE_IPSEC_MB_MP_REQ_QP_SET:
		qp = dev->data->queue_pairs[qp_id];
		if (qp) {
			CDEV_LOG_DEBUG("qp %d on dev %d is initialised", qp_id, dev_id);
			goto out;
		}

		ret = ipsec_mb_qp_setup(dev, qp_id, &queue_conf, req_param->socket_id);
		if (!ret) {
			qp = dev->data->queue_pairs[qp_id];
			if (!qp) {
				CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
					qp_id, dev_id);
				goto out;
			}
			qp->qp_used_by_pid = req_param->process_id;
		}
		resp_param->result = ret;
		break;
	case RTE_IPSEC_MB_MP_REQ_QP_FREE:
		qp = dev->data->queue_pairs[qp_id];
		if (!qp) {
			CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
				qp_id, dev_id);
			goto out;
		}

		if (qp->qp_used_by_pid != req_param->process_id) {
			CDEV_LOG_ERR("Unable to release qp_id=%d", qp_id);
			resp_param->result = -EINVAL;
			goto out;
		}

		qp->qp_used_by_pid = 0;
		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
		break;
	default:
		CDEV_LOG_ERR("Invalid mp request type");
		resp_param->result = -EINVAL;
	}

out:
	ret = rte_mp_reply(&ipc_resp, peer);
	return ret;
}

/** Return the size of the specific pmd session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];

	return pmd_data->session_priv_size;
}

/** Configure pmd specific multi-buffer session from a crypto xform chain */
int
ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
	int ret = 0;

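	/* The mb_mgr is only needed while the session is being configured;
	 * every path below releases it before returning.
	 */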
	if (!mb_mgr)
		return -ENOMEM;

	if (unlikely(sess == NULL)) {
		IPSEC_MB_LOG(ERR, "invalid session struct");
		free_mb_mgr(mb_mgr);
		return -EINVAL;
	}

	ret = (*pmd_data->session_configure)(mb_mgr,
			CRYPTODEV_GET_SYM_SESS_PRIV(sess), xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "failed to configure session parameters");
		free_mb_mgr(mb_mgr);
		return ret;
	}

	free_mb_mgr(mb_mgr);
	return 0;
}

/** Clear the session memory */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess __rte_unused)
{}
468