xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c (revision 8484d74bd656bc0e951a3ed4e0816ee0fea5e593)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "ipsec_mb_private.h"
13 
14 /** Configure device */
15 int
16 ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
17 		    __rte_unused struct rte_cryptodev_config *config)
18 {
19 	return 0;
20 }
21 
22 /** Start device */
23 int
24 ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
25 {
26 	return 0;
27 }
28 
29 /** Stop device */
30 void
31 ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
32 {
33 }
34 
35 /** Close device */
36 int
37 ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
38 {
39 	return 0;
40 }
41 
42 /** Get device statistics */
43 void
44 ipsec_mb_stats_get(struct rte_cryptodev *dev,
45 		struct rte_cryptodev_stats *stats)
46 {
47 	int qp_id;
48 
49 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
50 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
51 		if (qp == NULL) {
52 			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
53 			continue;
54 		}
55 
56 		stats->enqueued_count += qp->stats.enqueued_count;
57 		stats->dequeued_count += qp->stats.dequeued_count;
58 
59 		stats->enqueue_err_count += qp->stats.enqueue_err_count;
60 		stats->dequeue_err_count += qp->stats.dequeue_err_count;
61 	}
62 }
63 
64 /** Reset device statistics */
65 void
66 ipsec_mb_stats_reset(struct rte_cryptodev *dev)
67 {
68 	int qp_id;
69 
70 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
71 		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
72 
73 		memset(&qp->stats, 0, sizeof(qp->stats));
74 	}
75 }
76 
77 /** Get device info */
78 void
79 ipsec_mb_info_get(struct rte_cryptodev *dev,
80 		struct rte_cryptodev_info *dev_info)
81 {
82 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
83 	struct ipsec_mb_internals *pmd_info =
84 		&ipsec_mb_pmds[internals->pmd_type];
85 
86 	if (dev_info != NULL) {
87 		dev_info->driver_id = dev->driver_id;
88 		dev_info->feature_flags = dev->feature_flags;
89 		dev_info->capabilities = pmd_info->caps;
90 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
91 		/* No limit of number of sessions */
92 		dev_info->sym.max_nb_sessions = 0;
93 	}
94 }
95 
96 static int
97 ipsec_mb_secondary_qp_op(int dev_id, int qp_id,
98 		const struct rte_cryptodev_qp_conf *qp_conf,
99 		int socket_id, enum ipsec_mb_mp_req_type op_type)
100 {
101 	int ret;
102 	struct rte_mp_msg qp_req_msg;
103 	struct rte_mp_msg *qp_resp_msg;
104 	struct rte_mp_reply qp_resp;
105 	struct ipsec_mb_mp_param *req_param;
106 	struct ipsec_mb_mp_param *resp_param;
107 	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};
108 
109 	memset(&qp_req_msg, 0, sizeof(IPSEC_MB_MP_MSG));
110 	memcpy(qp_req_msg.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
111 	req_param = (struct ipsec_mb_mp_param *)&qp_req_msg.param;
112 
113 	qp_req_msg.len_param = sizeof(struct ipsec_mb_mp_param);
114 	req_param->type = op_type;
115 	req_param->dev_id = dev_id;
116 	req_param->qp_id = qp_id;
117 	req_param->socket_id = socket_id;
118 	req_param->process_id = getpid();
119 	if (qp_conf) {
120 		req_param->nb_descriptors = qp_conf->nb_descriptors;
121 		req_param->mp_session = (void *)qp_conf->mp_session;
122 	}
123 
124 	qp_req_msg.num_fds = 0;
125 	ret = rte_mp_request_sync(&qp_req_msg, &qp_resp, &ts);
126 	if (ret) {
127 		IPSEC_MB_LOG(ERR, "Create MR request to primary process failed.");
128 		return -1;
129 	}
130 	qp_resp_msg = &qp_resp.msgs[0];
131 	resp_param = (struct ipsec_mb_mp_param *)qp_resp_msg->param;
132 
133 	return resp_param->result;
134 }
135 
136 /** Release queue pair */
137 int
138 ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
139 {
140 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
141 
142 	if (!qp)
143 		return 0;
144 
145 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
146 		rte_ring_free(rte_ring_lookup(qp->name));
147 
148 		if (qp->mb_mgr_mz) {
149 			rte_memzone_free(qp->mb_mgr_mz);
150 			qp->mb_mgr = NULL;
151 		}
152 		rte_free(qp);
153 		dev->data->queue_pairs[qp_id] = NULL;
154 	} else { /* secondary process */
155 		return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
156 					NULL, 0, RTE_IPSEC_MB_MP_REQ_QP_FREE);
157 	}
158 	return 0;
159 }
160 
161 /** Set a unique name for the queue pair */
162 int
163 ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
164 					   struct ipsec_mb_qp *qp)
165 {
166 	uint32_t n =
167 	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
168 		     dev->data->dev_id, qp->id);
169 
170 	if (n >= sizeof(qp->name))
171 		return -1;
172 
173 	return 0;
174 }
175 
176 /** Create a ring to place processed operations on */
177 static struct rte_ring
178 *ipsec_mb_qp_create_processed_ops_ring(
179 	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
180 {
181 	struct rte_ring *r;
182 	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
183 
184 	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));
185 
186 	if (n >= sizeof(ring_name))
187 		return NULL;
188 
189 	r = rte_ring_lookup(ring_name);
190 	if (r) {
191 		if (rte_ring_get_size(r) >= ring_size) {
192 			IPSEC_MB_LOG(
193 			    INFO, "Reusing existing ring %s for processed ops",
194 			    ring_name);
195 			return r;
196 		}
197 		IPSEC_MB_LOG(
198 		    ERR, "Unable to reuse existing ring %s for processed ops",
199 		    ring_name);
200 		return NULL;
201 	}
202 
203 	return rte_ring_create(ring_name, ring_size, socket_id,
204 			       RING_F_SP_ENQ | RING_F_SC_DEQ);
205 }
206 
207 static IMB_MGR *
208 ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
209 		const char *mb_mgr_mz_name)
210 {
211 	IMB_MGR *mb_mgr;
212 
213 	if (rte_eal_process_type() ==  RTE_PROC_PRIMARY) {
214 		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
215 		if (*mb_mgr_mz == NULL) {
216 			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
217 			imb_get_mb_mgr_size(),
218 			rte_socket_id(), 0);
219 		}
220 		if (*mb_mgr_mz == NULL) {
221 			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
222 					mb_mgr_mz_name);
223 			return NULL;
224 		}
225 		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
226 		init_mb_mgr_auto(mb_mgr, NULL);
227 	} else {
228 		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
229 		if (*mb_mgr_mz == NULL) {
230 			IPSEC_MB_LOG(ERR,
231 				"Secondary can't find %s mz, did primary create it?",
232 				mb_mgr_mz_name);
233 			return NULL;
234 		}
235 		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
236 	}
237 	return mb_mgr;
238 }
239 
240 /** Setup a queue pair */
241 int
242 ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
243 				const struct rte_cryptodev_qp_conf *qp_conf,
244 				int socket_id)
245 {
246 	struct ipsec_mb_qp *qp = NULL;
247 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
248 	struct ipsec_mb_internals *pmd_data =
249 		&ipsec_mb_pmds[internals->pmd_type];
250 	uint32_t qp_size;
251 	int ret;
252 
253 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
254 		qp = dev->data->queue_pairs[qp_id];
255 		if (qp == NULL) {
256 			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
257 			return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
258 						qp_conf, socket_id,	RTE_IPSEC_MB_MP_REQ_QP_SET);
259 		}
260 	} else {
261 		/* Free memory prior to re-allocation if needed. */
262 		if (dev->data->queue_pairs[qp_id] != NULL)
263 			ipsec_mb_qp_release(dev, qp_id);
264 
265 		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
266 		/* Allocate the queue pair data structure. */
267 		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
268 					RTE_CACHE_LINE_SIZE, socket_id);
269 		if (qp == NULL)
270 			return -ENOMEM;
271 	}
272 
273 	char mz_name[IPSEC_MB_MAX_MZ_NAME];
274 	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
275 			dev->data->dev_id, qp_id);
276 	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
277 			mz_name);
278 	if (qp->mb_mgr == NULL) {
279 		ret = -ENOMEM;
280 		goto qp_setup_cleanup;
281 	}
282 
283 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
284 		return 0;
285 
286 	qp->id = qp_id;
287 	dev->data->queue_pairs[qp_id] = qp;
288 	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
289 		ret = -EINVAL;
290 		goto qp_setup_cleanup;
291 	}
292 
293 	qp->pmd_type = internals->pmd_type;
294 	qp->sess_mp = qp_conf->mp_session;
295 
296 	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
297 		qp_conf->nb_descriptors, socket_id);
298 	if (qp->ingress_queue == NULL) {
299 		ret = -EINVAL;
300 		goto qp_setup_cleanup;
301 	}
302 
303 	memset(&qp->stats, 0, sizeof(qp->stats));
304 
305 	if (pmd_data->queue_pair_configure) {
306 		ret = pmd_data->queue_pair_configure(qp);
307 		if (ret < 0)
308 			goto qp_setup_cleanup;
309 	}
310 
311 	return 0;
312 
313 qp_setup_cleanup:
314 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
315 		return ret;
316 	rte_memzone_free(qp->mb_mgr_mz);
317 	rte_free(qp);
318 	return ret;
319 }
320 
321 int
322 ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
323 {
324 	struct rte_mp_msg ipc_resp;
325 	struct ipsec_mb_mp_param *resp_param =
326 		(struct ipsec_mb_mp_param *)ipc_resp.param;
327 	const struct ipsec_mb_mp_param *req_param =
328 		(const struct ipsec_mb_mp_param *)mp_msg->param;
329 
330 	int ret;
331 	struct rte_cryptodev *dev;
332 	struct ipsec_mb_qp *qp;
333 	struct rte_cryptodev_qp_conf queue_conf;
334 	int dev_id = req_param->dev_id;
335 	int qp_id = req_param->qp_id;
336 
337 	queue_conf.nb_descriptors = req_param->nb_descriptors;
338 	queue_conf.mp_session = (struct rte_mempool *)req_param->mp_session;
339 	memset(resp_param, 0, sizeof(struct ipsec_mb_mp_param));
340 	memcpy(ipc_resp.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
341 
342 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
343 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
344 		goto out;
345 	}
346 
347 	dev = rte_cryptodev_pmd_get_dev(dev_id);
348 	switch (req_param->type) {
349 	case RTE_IPSEC_MB_MP_REQ_QP_SET:
350 		qp = dev->data->queue_pairs[qp_id];
351 		if (qp)	{
352 			CDEV_LOG_DEBUG("qp %d on dev %d is initialised", qp_id, dev_id);
353 			goto out;
354 		}
355 
356 		ret = ipsec_mb_qp_setup(dev, qp_id,	&queue_conf, req_param->socket_id);
357 		if (!ret) {
358 			qp = dev->data->queue_pairs[qp_id];
359 			if (!qp) {
360 				CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
361 					qp_id, dev_id);
362 				goto out;
363 			}
364 			qp->qp_used_by_pid = req_param->process_id;
365 		}
366 		resp_param->result = ret;
367 		break;
368 	case RTE_IPSEC_MB_MP_REQ_QP_FREE:
369 		qp = dev->data->queue_pairs[qp_id];
370 		if (!qp) {
371 			CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
372 				qp_id, dev_id);
373 			goto out;
374 		}
375 
376 		if (qp->qp_used_by_pid != req_param->process_id) {
377 			CDEV_LOG_ERR("Unable to release qp_id=%d", qp_id);
378 			goto out;
379 		}
380 
381 		qp->qp_used_by_pid = 0;
382 		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
383 		break;
384 	default:
385 		CDEV_LOG_ERR("invalid mp request type");
386 	}
387 
388 out:
389 	ret = rte_mp_reply(&ipc_resp, peer);
390 	return ret;
391 }
392 
393 /** Return the size of the specific pmd session structure */
394 unsigned
395 ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
396 {
397 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
398 	struct ipsec_mb_internals *pmd_data =
399 		&ipsec_mb_pmds[internals->pmd_type];
400 
401 	return pmd_data->session_priv_size;
402 }
403 
404 /** Configure pmd specific multi-buffer session from a crypto xform chain */
405 int
406 ipsec_mb_sym_session_configure(
407 	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
408 	struct rte_cryptodev_sym_session *sess)
409 {
410 	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
411 	struct ipsec_mb_internals *pmd_data =
412 		&ipsec_mb_pmds[internals->pmd_type];
413 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[0];
414 	IMB_MGR *mb_mgr;
415 	int ret = 0;
416 
417 	if (qp != NULL)
418 		mb_mgr = qp->mb_mgr;
419 	else
420 		mb_mgr = alloc_init_mb_mgr();
421 
422 	if (!mb_mgr)
423 		return -ENOMEM;
424 
425 	if (unlikely(sess == NULL)) {
426 		IPSEC_MB_LOG(ERR, "invalid session struct");
427 		if (qp == NULL)
428 			free_mb_mgr(mb_mgr);
429 		return -EINVAL;
430 	}
431 
432 	ret = (*pmd_data->session_configure)(mb_mgr,
433 			CRYPTODEV_GET_SYM_SESS_PRIV(sess), xform);
434 	if (ret != 0) {
435 		IPSEC_MB_LOG(ERR, "failed configure session parameters");
436 
437 		/* Return session to mempool */
438 		if (qp == NULL)
439 			free_mb_mgr(mb_mgr);
440 		return ret;
441 	}
442 
443 	if (qp == NULL)
444 		free_mb_mgr(mb_mgr);
445 	return 0;
446 }
447 
448 /** Clear the session memory */
449 void
450 ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
451 		struct rte_cryptodev_sym_session *sess __rte_unused)
452 {}
453