xref: /dpdk/drivers/crypto/ipsec_mb/pmd_kasumi.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2021 Intel Corporation
3  */
4 
5 #include <bus_vdev_driver.h>
6 #include <rte_common.h>
7 #include <rte_cpuflags.h>
8 #include <rte_cryptodev.h>
9 #include <rte_hexdump.h>
10 #include <rte_malloc.h>
11 
12 #include "pmd_kasumi_priv.h"
13 
14 /** Parse crypto xform chain and set private session parameters. */
15 static int
16 kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
17 			  const struct rte_crypto_sym_xform *xform)
18 {
19 	const struct rte_crypto_sym_xform *auth_xform = NULL;
20 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
21 	enum ipsec_mb_operation mode;
22 	struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
23 	/* Select Crypto operation - hash then cipher / cipher then hash */
24 	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
25 				&cipher_xform, NULL);
26 
27 	if (ret)
28 		return ret;
29 
30 	if (cipher_xform) {
31 		/* Only KASUMI F8 supported */
32 		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
33 			IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
34 			return -ENOTSUP;
35 		}
36 
37 		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
38 		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
39 			IPSEC_MB_LOG(ERR, "Wrong IV length");
40 			return -EINVAL;
41 		}
42 
43 		/* Initialize key */
44 		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
45 					      cipher_xform->cipher.key.data,
46 					      &sess->pKeySched_cipher);
47 	}
48 
49 	if (auth_xform) {
50 		/* Only KASUMI F9 supported */
51 		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
52 			IPSEC_MB_LOG(ERR, "Unsupported authentication");
53 			return -ENOTSUP;
54 		}
55 
56 		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
57 			IPSEC_MB_LOG(ERR, "Wrong digest length");
58 			return -EINVAL;
59 		}
60 
61 		sess->auth_op = auth_xform->auth.op;
62 
63 		/* Initialize key */
64 		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
65 					      &sess->pKeySched_hash);
66 	}
67 
68 	sess->op = mode;
69 	return ret;
70 }
71 
72 /** Encrypt/decrypt mbufs with same cipher key. */
73 static uint8_t
74 process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
75 			  struct kasumi_session *session, uint8_t num_ops)
76 {
77 	unsigned int i;
78 	uint8_t processed_ops = 0;
79 	const void *src[num_ops];
80 	void *dst[num_ops];
81 	uint8_t *iv_ptr;
82 	uint64_t iv[num_ops];
83 	uint32_t num_bytes[num_ops];
84 
85 	for (i = 0; i < num_ops; i++) {
86 		src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
87 						 uint8_t *,
88 						 (ops[i]->sym->cipher.data.offset >> 3));
89 		dst[i] = ops[i]->sym->m_dst
90 			     ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
91 						       uint8_t *,
92 						       (ops[i]->sym->cipher.data.offset >> 3))
93 			     : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
94 						       uint8_t *,
95 						       (ops[i]->sym->cipher.data.offset >> 3));
96 		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
97 						    session->cipher_iv_offset);
98 		iv[i] = *((uint64_t *)(iv_ptr));
99 		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
100 
101 		processed_ops++;
102 	}
103 
104 	if (processed_ops != 0)
105 		IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
106 					iv, src, dst, num_bytes,
107 					processed_ops);
108 
109 	return processed_ops;
110 }
111 
112 /** Encrypt/decrypt mbuf (bit level function). */
113 static uint8_t
114 process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
115 			      struct kasumi_session *session)
116 {
117 	uint8_t *src, *dst;
118 	uint8_t *iv_ptr;
119 	uint64_t iv;
120 	uint32_t length_in_bits, offset_in_bits;
121 
122 	offset_in_bits = op->sym->cipher.data.offset;
123 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
124 	if (op->sym->m_dst == NULL)
125 		dst = src;
126 	else
127 		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
128 	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
129 					    session->cipher_iv_offset);
130 	iv = *((uint64_t *)(iv_ptr));
131 	length_in_bits = op->sym->cipher.data.length;
132 
133 	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
134 				    src, dst, length_in_bits, offset_in_bits);
135 
136 	return 1;
137 }
138 
139 /** Generate/verify hash from mbufs with same hash key. */
140 static int
141 process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
142 			struct kasumi_session *session, uint8_t num_ops)
143 {
144 	unsigned int i;
145 	uint8_t processed_ops = 0;
146 	uint8_t *src, *dst;
147 	uint32_t length_in_bits;
148 	uint32_t num_bytes;
149 	struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
150 
151 	for (i = 0; i < num_ops; i++) {
152 		/* Data must be byte aligned */
153 		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
154 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
155 			IPSEC_MB_LOG(ERR, "Invalid Offset");
156 			break;
157 		}
158 
159 		length_in_bits = ops[i]->sym->auth.data.length;
160 
161 		src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
162 					      (ops[i]->sym->auth.data.offset >> 3));
163 		/* Direction from next bit after end of message */
164 		num_bytes = length_in_bits >> 3;
165 
166 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
167 			dst = qp_data->temp_digest;
168 			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
169 						&session->pKeySched_hash, src,
170 						num_bytes, dst);
171 
172 			/* Verify digest. */
173 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
174 				    KASUMI_DIGEST_LENGTH)
175 			    != 0)
176 				ops[i]->status
177 				    = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
178 		} else {
179 			dst = ops[i]->sym->auth.digest.data;
180 
181 			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
182 						&session->pKeySched_hash, src,
183 						num_bytes, dst);
184 		}
185 		processed_ops++;
186 	}
187 
188 	return processed_ops;
189 }
190 
191 /** Process a batch of crypto ops which shares the same session. */
192 static int
193 process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
194 		struct ipsec_mb_qp *qp, uint8_t num_ops)
195 {
196 	unsigned int i;
197 	unsigned int processed_ops;
198 
199 	switch (session->op) {
200 	case IPSEC_MB_OP_ENCRYPT_ONLY:
201 	case IPSEC_MB_OP_DECRYPT_ONLY:
202 		processed_ops
203 		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
204 		break;
205 	case IPSEC_MB_OP_HASH_GEN_ONLY:
206 	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
207 		processed_ops
208 		    = process_kasumi_hash_op(qp, ops, session, num_ops);
209 		break;
210 	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
211 	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
212 		processed_ops
213 		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
214 		process_kasumi_hash_op(qp, ops, session, processed_ops);
215 		break;
216 	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
217 	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
218 		processed_ops
219 		    = process_kasumi_hash_op(qp, ops, session, num_ops);
220 		process_kasumi_cipher_op(qp, ops, session, processed_ops);
221 		break;
222 	default:
223 		/* Operation not supported. */
224 		processed_ops = 0;
225 	}
226 
227 	for (i = 0; i < num_ops; i++) {
228 		/*
229 		 * If there was no error/authentication failure,
230 		 * change status to successful.
231 		 */
232 		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
233 			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
234 		/* Free session if a session-less crypto op. */
235 		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
236 			memset(session, 0, sizeof(struct kasumi_session));
237 			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
238 			ops[i]->sym->session = NULL;
239 		}
240 	}
241 	return processed_ops;
242 }
243 
244 /** Process a crypto op with length/offset in bits. */
245 static int
246 process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
247 		struct ipsec_mb_qp *qp)
248 {
249 	unsigned int processed_op;
250 
251 	switch (session->op) {
252 		/* case KASUMI_OP_ONLY_CIPHER: */
253 	case IPSEC_MB_OP_ENCRYPT_ONLY:
254 	case IPSEC_MB_OP_DECRYPT_ONLY:
255 		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
256 		break;
257 	/* case KASUMI_OP_ONLY_AUTH: */
258 	case IPSEC_MB_OP_HASH_GEN_ONLY:
259 	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
260 		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
261 		break;
262 	/* case KASUMI_OP_CIPHER_AUTH: */
263 	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
264 		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
265 		if (processed_op == 1)
266 			process_kasumi_hash_op(qp, &op, session, 1);
267 		break;
268 	/* case KASUMI_OP_AUTH_CIPHER: */
269 	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
270 		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
271 		if (processed_op == 1)
272 			process_kasumi_cipher_op_bit(qp, op, session);
273 		break;
274 	default:
275 		/* Operation not supported. */
276 		processed_op = 0;
277 	}
278 
279 	/*
280 	 * If there was no error/authentication failure,
281 	 * change status to successful.
282 	 */
283 	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
284 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
285 
286 	/* Free session if a session-less crypto op. */
287 	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
288 		memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
289 			sizeof(struct kasumi_session));
290 		rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
291 		op->sym->session = NULL;
292 	}
293 	return processed_op;
294 }
295 
/**
 * Dequeue up to nb_ops crypto ops from the queue pair's ingress ring and
 * process them.  Ops sharing a session are batched (up to KASUMI_MAX_BURST)
 * for the N-buffer IMB call; any op with bit-level cipher length/offset is
 * processed on its own via the 1-buffer bit path.
 */
static uint16_t
kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	/* Staging array for ops that share the current session. */
	struct rte_crypto_op *c_ops[nb_ops];
	struct rte_crypto_op *curr_c_op = NULL;

	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
	struct ipsec_mb_qp *qp = queue_pair;
	unsigned int i;
	uint8_t burst_size = 0;		/* ops accumulated in c_ops */
	uint8_t processed_ops;
	unsigned int nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
					      (void **)ops, nb_ops, NULL);
	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
		/* Debug-only sanity check: this path needs flat buffers. */
		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
		    || (curr_c_op->sym->m_dst != NULL
			&& !rte_pktmbuf_is_contiguous(
			    curr_c_op->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				      "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
				      curr_c_op);
			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}
#endif

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = (struct kasumi_session *)
			ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL
			      || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			curr_c_op->status
			    = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level, process this buffer alone.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
		    || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
			/* Flush the pending batch before the lone bit-op. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op,
					curr_sess, qp);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			/* First op of a new batch. */
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == KASUMI_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess, qp,
					burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
	}

	/*
	 * NOTE(review): all dequeued ops are returned and counted, even ones
	 * that hit an error branch above (their status flags the failure) —
	 * presumably the caller inspects op->status; confirm against the
	 * cryptodev dequeue contract.
	 */
	qp->stats.dequeued_count += i;
	return i;
}
413 
/*
 * Cryptodev operations table: all entries are fulfilled by the shared
 * ipsec_mb framework helpers; this PMD adds no KASUMI-specific dev ops.
 */
struct rte_cryptodev_ops kasumi_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

/* Exported pointer to the ops table above. */
struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
434 
/**
 * Virtual-device probe callback: delegate device creation to the shared
 * ipsec_mb framework, identifying this PMD as the KASUMI type.
 */
static int
kasumi_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_KASUMI);
}
440 
/* vdev driver: probe is PMD-specific, remove is the shared framework's. */
static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
	.probe = kasumi_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver kasumi_crypto_drv;

/* Register the vdev driver, its legacy alias, and its vdev parameters. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
			       "max_nb_queue_pairs=<int> socket_id=<int>");
/* Register with the cryptodev layer and obtain the PMD driver id. */
RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
				cryptodev_kasumi_pmd_drv.driver,
				pmd_driver_id_kasumi);
455 
/* Constructor function to register kasumi PMD */
RTE_INIT(ipsec_mb_register_kasumi)
{
	/* Fill this PMD's slot in the shared ipsec_mb PMD table. */
	struct ipsec_mb_internals *kasumi_data
	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];

	kasumi_data->caps = kasumi_capabilities;
	kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
	/*
	 * NOTE(review): OOP_SGL_IN_LB_OUT is advertised, yet the debug-build
	 * dequeue path rejects non-contiguous source mbufs — confirm the
	 * segmented-input claim is accurate for this PMD.
	 */
	kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
				| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
				| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
				| RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
				| RTE_CRYPTODEV_FF_SYM_SESSIONLESS
				| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
	/* No device-private data beyond the framework's own. */
	kasumi_data->internals_priv_size = 0;
	kasumi_data->ops = &kasumi_pmd_ops;
	/* Per-queue-pair scratch (holds temp_digest for verify ops). */
	kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
	kasumi_data->session_configure = kasumi_session_configure;
	kasumi_data->session_priv_size = sizeof(struct kasumi_session);
}
476