/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "ipsec_mb_private.h"

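/* Minimum intel-ipsec-mb version required for multi-process support */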
#define IMB_MP_REQ_VER_STR "1.1.0"

/** Configure device */
int
ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
		    __rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
int
ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
void
ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
int
ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
			continue;
		}

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
			continue;
		}

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_info =
		&ipsec_mb_pmds[internals->pmd_type];

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = pmd_info->caps;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

/** Release queue pair */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL && rte_eal_process_type() == RTE_PROC_PRIMARY) {
		r = rte_ring_lookup(qp->name);
		rte_ring_free(r);

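		/*
		 * With intel-ipsec-mb < 1.1.0 the MB manager is process-local
		 * and freed directly; from 1.1.0 on it lives in a memzone
		 * shared with secondary processes, so the memzone is freed
		 * instead.
		 */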
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
#else
		if (qp->mb_mgr_mz) {
			rte_memzone_free(qp->mb_mgr_mz);
			qp->mb_mgr = NULL;
		}
#endif
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair */
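/* Generated names have the form "ipsec_mb_pmd_<dev_id>_qp_<qp_id>". */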
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
					   struct ipsec_mb_qp *qp)
{
	uint32_t n =
	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
		     dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
ipsec_mb_qp_create_processed_ops_ring(
	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));

	if (n >= sizeof(ring_name))
		return NULL;

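	/* Reuse a ring left over from an earlier setup if it is big enough */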
	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			IPSEC_MB_LOG(
			    INFO, "Reusing existing ring %s for processed ops",
			    ring_name);
			return r;
		}
		IPSEC_MB_LOG(
		    ERR, "Unable to reuse existing ring %s for processed ops",
		    ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}

#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
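/*
 * Allocate the IMB_MGR from a named memzone: the primary process reserves
 * the zone and fully initialises the manager, while secondary processes
 * look the zone up and only re-derive the per-process function pointers.
 */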
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
		const char *mb_mgr_mz_name)
{
	IMB_MGR *mb_mgr;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
			imb_get_mb_mgr_size(),
			rte_socket_id(), 0);
		}
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
					mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
		init_mb_mgr_auto(mb_mgr, NULL);
	} else {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(ERR,
				"Secondary can't find %s mz, did primary create it?",
				mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
	}
	return mb_mgr;
}
#endif

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				const struct rte_cryptodev_qp_conf *qp_conf,
				int socket_id)
{
	struct ipsec_mb_qp *qp = NULL;
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	uint32_t qp_size;
	int ret;

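	/*
	 * A secondary process reuses the queue pair the primary created and
	 * only re-attaches the MB manager from the shared memzone below.
	 */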
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multi-process, "
				"the minimum version required for this feature is %s.",
				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
		return -EINVAL;
#endif
		qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(ERR, "Primary process hasn't configured device qp.");
			return -EINVAL;
		}
	} else {
		/* Free memory prior to re-allocation if needed. */
		if (dev->data->queue_pairs[qp_id] != NULL)
			ipsec_mb_qp_release(dev, qp_id);

		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
		/* Allocate the queue pair data structure. */
		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
					RTE_CACHE_LINE_SIZE, socket_id);
		if (qp == NULL)
			return -ENOMEM;
	}

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	qp->mb_mgr = alloc_init_mb_mgr();
#else
	char mz_name[IPSEC_MB_MAX_MZ_NAME];
	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
			dev->data->dev_id, qp_id);
	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
			mz_name);
#endif
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;
	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	qp->pmd_type = internals->pmd_type;
	qp->sess_mp = qp_conf->mp_session;

	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
		qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	memset(&qp->stats, 0, sizeof(qp->stats));

	if (pmd_data->queue_pair_configure) {
		ret = pmd_data->queue_pair_configure(qp);
		if (ret < 0)
			goto qp_setup_cleanup;
	}

	return 0;

qp_setup_cleanup:
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	if (qp->mb_mgr)
		free_mb_mgr(qp->mb_mgr);
#else
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return ret;
	if (qp->mb_mgr_mz)
		rte_memzone_free(qp->mb_mgr_mz);
#endif
	rte_free(qp);
	return ret;
}
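
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the setup above is reached through the public cryptodev API, e.g.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id());
 */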

/** Return the size of the specific pmd session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];

	return pmd_data->session_priv_size;
}

/** Configure pmd specific multi-buffer session from a crypto xform chain */
int
ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
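	/* Temporary MB manager, used only while the session is configured */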
	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
	int ret = 0;

	if (!mb_mgr)
		return -ENOMEM;

	if (unlikely(sess == NULL)) {
		IPSEC_MB_LOG(ERR, "invalid session struct");
		free_mb_mgr(mb_mgr);
		return -EINVAL;
	}

	ret = (*pmd_data->session_configure)(mb_mgr,
			(void *)sess->driver_priv_data, xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "failed to configure session parameters");

		/* Free the temporary MB manager on the error path */
		free_mb_mgr(mb_mgr);
		return ret;
	}

	free_mb_mgr(mb_mgr);
	return 0;
}

/** Clear the session memory */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess __rte_unused)
{}
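
/*
 * Sketch of how these callbacks are typically plugged into the cryptodev
 * framework (illustrative only; the actual rte_cryptodev_ops table is not
 * shown in this excerpt):
 *
 *	static struct rte_cryptodev_ops ipsec_mb_pmd_ops = {
 *		.dev_configure = ipsec_mb_config,
 *		.dev_start = ipsec_mb_start,
 *		.dev_stop = ipsec_mb_stop,
 *		.dev_close = ipsec_mb_close,
 *		.stats_get = ipsec_mb_stats_get,
 *		.stats_reset = ipsec_mb_stats_reset,
 *		.dev_infos_get = ipsec_mb_info_get,
 *		.queue_pair_setup = ipsec_mb_qp_setup,
 *		.queue_pair_release = ipsec_mb_qp_release,
 *		.sym_session_get_size = ipsec_mb_sym_session_get_size,
 *		.sym_session_configure = ipsec_mb_sym_session_configure,
 *		.sym_session_clear = ipsec_mb_sym_session_clear
 *	};
 */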