/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#if defined(RTE_ARCH_ARM)
#include <ipsec-mb.h>
#else
#include <intel-ipsec-mb.h>
#endif
#include <cryptodev_pmd.h>
#include <bus_vdev_driver.h>

#include <rte_security.h>
#include <rte_security_driver.h>

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

/* ipsec mb multi-process queue pair config */
#define IPSEC_MB_MP_MSG "ipsec_mb_mp_msg"

enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0,
	IPSEC_MB_SSE,
	IPSEC_MB_AVX,
	IPSEC_MB_AVX2,
	IPSEC_MB_AVX512,
	IPSEC_MB_ARM64,
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer PMD aesni_mb device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer PMD aesni_gcm device name */

#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< IPSEC Multi buffer PMD kasumi device name */

#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< IPSEC Multi buffer PMD snow3g device name */

#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< IPSEC Multi buffer PMD zuc device name */

#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD crypto_chacha20_poly1305
/**< IPSEC Multi buffer PMD chacha20_poly1305 device name */

/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define RTE_LOGTYPE_IPSEC_MB ipsec_mb_logtype_driver
#define IPSEC_MB_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, IPSEC_MB, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

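/*
 * Illustrative usage of IPSEC_MB_LOG (a minimal sketch, not part of the
 * original header): the macro prepends the calling function name and line
 * number, so callers pass only a level and a printf-style format string.
 *
 *	IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR on lcore %u",
 *		rte_lcore_id());
 *
 * With level ERR inside foo(), this emits something like
 * "foo() line 123: Failed to allocate IMB_MGR on lcore 0".
 */
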
/** All supported device types */
enum ipsec_mb_pmd_types {
	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
	IPSEC_MB_PMD_TYPE_AESNI_GCM,
	IPSEC_MB_PMD_TYPE_KASUMI,
	IPSEC_MB_PMD_TYPE_SNOW3G,
	IPSEC_MB_PMD_TYPE_ZUC,
	IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305,
	IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;
extern uint8_t pmd_driver_id_kasumi;
extern uint8_t pmd_driver_id_snow3g;
extern uint8_t pmd_driver_id_zuc;
extern uint8_t pmd_driver_id_chacha20_poly1305;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
	switch (pmd_type) {
	case IPSEC_MB_PMD_TYPE_AESNI_MB:
		return pmd_driver_id_aesni_mb;
	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
		return pmd_driver_id_aesni_gcm;
	case IPSEC_MB_PMD_TYPE_KASUMI:
		return pmd_driver_id_kasumi;
	case IPSEC_MB_PMD_TYPE_SNOW3G:
		return pmd_driver_id_snow3g;
	case IPSEC_MB_PMD_TYPE_ZUC:
		return pmd_driver_id_zuc;
	case IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305:
		return pmd_driver_id_chacha20_poly1305;
	default:
		break;
	}
	return UINT8_MAX;
}

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint32_t max_nb_queue_pairs;
	/**< Max number of queue pairs supported by device */
	uint8_t priv[];
};

/** IPSEC Multi buffer queue pair data, common to all PMDs */
struct ipsec_mb_qp {
	uint16_t id;
	/**< Queue Pair Identifier */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	/**< Unique Queue Pair Name */
	struct rte_ring *ingress_queue;
	/**< Ring for placing operations ready for processing */
	struct rte_mempool *sess_mp;
	/**< Session Private Data Mempool */
	struct rte_cryptodev_stats stats;
	/**< Queue pair statistics */
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint8_t digest_idx;
	/**< Index of the next slot to be used in temp_digests
	 * to store the digest for a given operation
	 */
	uint16_t qp_used_by_pid;
	/**< The process id that owns this queue pair */
	IMB_MGR *mb_mgr;
	/**< Multi buffer manager */
	const struct rte_memzone *mb_mgr_mz;
	/**< Shared memzone for storing mb_mgr */
	__extension__ uint8_t additional_data[];
	/**< Storage for PMD specific additional data */
};

/** Request types for IPC. */
enum ipsec_mb_mp_req_type {
	RTE_IPSEC_MB_MP_REQ_NONE, /**< unknown request type */
	RTE_IPSEC_MB_MP_REQ_QP_SET, /**< Queue pair setup request */
	RTE_IPSEC_MB_MP_REQ_QP_FREE /**< Queue pair free request */
};

/** Parameters for IPC. */
struct ipsec_mb_mp_param {
	enum ipsec_mb_mp_req_type type; /**< IPC request type */
	int dev_id;
	/**< The identifier of the device */
	int qp_id;
	/**< The index of the queue pair to be configured */
	int socket_id;
	/**< Socket to allocate resources on */
	uint16_t process_id;
	/**< The pid of the process that sent the request */
	uint32_t nb_descriptors;
	/**< Number of descriptors per queue pair */
	void *mp_session;
	/**< The mempool for creating session in sessionless mode */
	int result;
	/**< The request result for response message */
};

int
ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer);

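/*
 * Illustrative sketch (not part of the original header): how a secondary
 * process might ask the primary to set up a queue pair over the
 * IPSEC_MB_MP_MSG channel. Field values here are hypothetical; the real
 * handler is ipsec_mb_ipc_request(), registered by the primary process.
 *
 *	struct rte_mp_msg req = { 0 };
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct ipsec_mb_mp_param *param =
 *		(struct ipsec_mb_mp_param *)req.param;
 *
 *	strlcpy(req.name, IPSEC_MB_MP_MSG, sizeof(req.name));
 *	req.len_param = sizeof(*param);
 *	param->type = RTE_IPSEC_MB_MP_REQ_QP_SET;
 *	param->dev_id = dev_id;
 *	param->qp_id = qp_id;
 *	param->socket_id = rte_socket_id();
 *	param->process_id = getpid();
 *	param->nb_descriptors = qp_conf->nb_descriptors;
 *	param->mp_session = (void *)qp_conf->mp_session;
 *
 *	if (rte_mp_request_sync(&req, &reply, &ts) == 0 &&
 *	    reply.nb_received == 1) {
 *		struct ipsec_mb_mp_param *resp =
 *			(struct ipsec_mb_mp_param *)reply.msgs[0].param;
 *		int ret = resp->result;
 *		free(reply.msgs);
 *	}
 */
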
static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
	return (void *)qp->additional_data;
}

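/*
 * A minimal sketch of assumed PMD-side usage (not from the original header):
 * a PMD sizes qp_priv_size to its own per-queue state and retrieves it from
 * the common queue pair; "struct hypothetical_pmd_qp_data" is a placeholder.
 *
 *	struct hypothetical_pmd_qp_data *priv =
 *		ipsec_mb_get_qp_private_data(qp);
 */
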
/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (unlikely(mb_mgr == NULL)) {
		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
		return NULL;
	}

	init_mb_mgr_auto(mb_mgr, NULL);

	return mb_mgr;
}

/** Helper function. Gets per thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

	return RTE_PER_LCORE(mb_mgr);
}

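/*
 * Illustrative datapath usage (a sketch, not part of the original header):
 * lazily fetch the lcore-local IMB_MGR before submitting jobs, so each
 * worker thread gets its own manager without locking.
 *
 *	IMB_MGR *mgr = get_per_thread_mb_mgr();
 *	if (unlikely(mgr == NULL))
 *		return 0;	// first-use allocation failed
 *	IMB_JOB *job = IMB_GET_NEXT_JOB(mgr);
 */
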
/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
			enum ipsec_mb_operation *mode,
			const struct rte_crypto_sym_xform **auth_xform,
			const struct rte_crypto_sym_xform **cipher_xform,
			const struct rte_crypto_sym_xform **aead_xform)
{
	if (xform == NULL) {
		*mode = IPSEC_MB_OP_NOT_SUPPORTED;
		return -ENOTSUP;
	}

	const struct rte_crypto_sym_xform *next = xform->next;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (next == NULL) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_ENCRYPT_ONLY;
				*cipher_xform = xform;
				*auth_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_DECRYPT_ONLY;
			*cipher_xform = xform;
			*auth_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
			*cipher_xform = xform;
			*auth_xform = xform->next;
			return 0;
		}
		if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
		*cipher_xform = xform;
		*auth_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (next == NULL) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_HASH_GEN_ONLY;
				*auth_xform = xform;
				*cipher_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
			*auth_xform = xform;
			*cipher_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
			*auth_xform = xform;
			*cipher_xform = xform->next;
			return 0;
		}
		if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
		*auth_xform = xform;
		*cipher_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires hashing first and ciphering later
			 * when encrypting, so the operation order is
			 * deliberately swapped for AES-CCM.
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
				*aead_xform = xform;
				return 0;
			}
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
			*aead_xform = xform;
			return 0;
		}
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
			*aead_xform = xform;
			return 0;
		}
		*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
		*aead_xform = xform;
		return 0;
	}

	*mode = IPSEC_MB_OP_NOT_SUPPORTED;
	return -ENOTSUP;
}

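/*
 * Illustrative usage of ipsec_mb_parse_xform() (a sketch, not part of the
 * original header): classify a chain during session setup.
 *
 *	enum ipsec_mb_operation mode;
 *	const struct rte_crypto_sym_xform *auth_xform = NULL;
 *	const struct rte_crypto_sym_xform *cipher_xform = NULL;
 *	const struct rte_crypto_sym_xform *aead_xform = NULL;
 *
 *	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
 *			&cipher_xform, &aead_xform);
 *	if (ret != 0)
 *		return ret;	// -ENOTSUP for unsupported chains
 *	// For CIPHER ENCRYPT chained to AUTH GENERATE, mode is now
 *	// IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN and both xform pointers are set.
 */
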
/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
		void *session_private,
		const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operation and data */
struct ipsec_mb_internals {
	uint8_t is_configured;
	dequeue_pkt_burst_t dequeue_burst;
	ipsec_mb_dev_configure_t dev_config;
	ipsec_mb_queue_pair_configure_t queue_pair_configure;
	ipsec_mb_session_configure_t session_configure;
	const struct rte_cryptodev_capabilities *caps;
	struct rte_cryptodev_ops *ops;
	struct rte_security_ops *security_ops;
	uint64_t feature_flags;
	uint32_t session_priv_size;
	uint32_t qp_priv_size;
	uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];

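/*
 * How this table is filled (a sketch modeled on the per-PMD source files;
 * names like example_caps and example_pmd_ops are hypothetical): each PMD
 * populates its ipsec_mb_pmds[] slot from an RTE_INIT constructor, then
 * registers a vdev driver that calls ipsec_mb_create() with its PMD type.
 *
 *	RTE_INIT(ipsec_mb_register_example)
 *	{
 *		struct ipsec_mb_internals *d =
 *			&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
 *
 *		d->caps = example_caps;
 *		d->dequeue_burst = example_dequeue_burst;
 *		d->ops = &example_pmd_ops;
 *		d->qp_priv_size = sizeof(struct example_qp_data);
 *		d->session_configure = example_session_configure;
 *		d->session_priv_size = sizeof(struct example_session);
 *	}
 */
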
int
ipsec_mb_config(struct rte_cryptodev *dev,
	struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int
ipsec_mb_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
				struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless, create a session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
	struct rte_cryptodev_sym_session *sess = NULL;
	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t sess_type = op->sess_type;
	void *_sess;
	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

	switch (sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		if (likely(sym_op->session != NULL))
			sess = sym_op->session;
		else
			goto error_exit;
		break;
	case RTE_CRYPTO_OP_SESSIONLESS:
		if (!qp->sess_mp ||
		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		sess = _sess;
		if (sess->sess_data_sz < pmd_data->session_priv_size) {
			rte_mempool_put(qp->sess_mp, _sess);
			goto error_exit;
		}

		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
			CRYPTODEV_GET_SYM_SESS_PRIV(sess), sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			goto error_exit;
		}

		sess->driver_id = driver_id;
		sym_op->session = sess;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
		/* Avoid dereferencing a NULL session below */
		goto error_exit;
	}

	return CRYPTODEV_GET_SYM_SESS_PRIV(sess);

error_exit:
	op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
	return NULL;
}

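/*
 * Illustrative enqueue-path usage (a sketch, not part of the original
 * header): resolve the session private data for each burst operation,
 * skipping an op when no session can be obtained;
 * "struct hypothetical_session" is a placeholder for a PMD session type.
 *
 *	struct hypothetical_session *sess =
 *		ipsec_mb_get_session_private(qp, op);
 *	if (unlikely(sess == NULL)) {
 *		qp->stats.enqueue_err_count++;
 *		continue;	// op->status already set to INVALID_SESSION
 *	}
 */
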
#endif /* _IPSEC_MB_PRIVATE_H_ */