/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

#include "pmd_aesni_gcm_priv.h"

static void
aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
{
	/* Set 128 bit function pointers. */
	ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
	ops[GCM_KEY_128].init = mb_mgr->gcm128_init;

	ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
	ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
	ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;

	ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
	ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
	ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;

	ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
	ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
	ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;

	/* Set 192 bit function pointers. */
	ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
	ops[GCM_KEY_192].init = mb_mgr->gcm192_init;

	ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
	ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
	ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;

	ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
	ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
	ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;

	ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
	ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
	ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;

	/* Set 256 bit function pointers. */
	ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
	ops[GCM_KEY_256].init = mb_mgr->gcm256_init;

	ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
	ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
	ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;

	ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
	ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
	ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;

	ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
	ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
	ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
}

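/*
 * Illustrative (hypothetical) AEAD xform an application might pass in for
 * AES-128-GCM; values such as IV_OFFSET and key are placeholders, not part
 * of this driver:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.aad_length = 16,
 *			.digest_length = 16,
 *		},
 *	};
 */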
static int
aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
			    const struct rte_crypto_sym_xform *xform)
{
	struct aesni_gcm_session *sess = session;
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *cipher_xform;
	const struct rte_crypto_sym_xform *aead_xform;

	uint8_t key_length;
	const uint8_t *key;
	enum ipsec_mb_operation mode;
	int ret = 0;

	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, &aead_xform);
	if (ret)
		return ret;

	sess->op = mode;

	switch (sess->op) {
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		/* AES-GMAC (authentication only) */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
			IPSEC_MB_LOG(ERR,
				"Only AES GMAC is supported as an authentication only algorithm");
			ret = -ENOTSUP;
			goto error_exit;
		}
		/* Set IV parameters */
		sess->iv.offset = auth_xform->auth.iv.offset;
		sess->iv.length = auth_xform->auth.iv.length;
		key_length = auth_xform->auth.key.length;
		key = auth_xform->auth.key.data;
		sess->req_digest_length =
			RTE_MIN(auth_xform->auth.digest_length,
				DIGEST_LENGTH_MAX);
		break;
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
		/* AES-GCM (AEAD) */
		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
			IPSEC_MB_LOG(ERR,
				"The only combined operation supported is AES GCM");
			ret = -ENOTSUP;
			goto error_exit;
		}
		/* Set IV parameters */
		sess->iv.offset = aead_xform->aead.iv.offset;
		sess->iv.length = aead_xform->aead.iv.length;
		key_length = aead_xform->aead.key.length;
		key = aead_xform->aead.key.data;
		sess->aad_length = aead_xform->aead.aad_length;
		sess->req_digest_length =
			RTE_MIN(aead_xform->aead.digest_length,
				DIGEST_LENGTH_MAX);
		break;
	default:
		IPSEC_MB_LOG(ERR,
			"Wrong xform type, has to be AEAD or authentication");
		ret = -ENOTSUP;
		goto error_exit;
	}

	/* Check key length, and calculate GCM pre-compute. */
	switch (key_length) {
	case 16:
		sess->key_length = GCM_KEY_128;
		mb_mgr->gcm128_pre(key, &sess->gdata_key);
		break;
	case 24:
		sess->key_length = GCM_KEY_192;
		mb_mgr->gcm192_pre(key, &sess->gdata_key);
		break;
	case 32:
		sess->key_length = GCM_KEY_256;
		mb_mgr->gcm256_pre(key, &sess->gdata_key);
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid key length");
		ret = -EINVAL;
		goto error_exit;
	}

	/* Digest check */
	if (sess->req_digest_length > 16) {
		IPSEC_MB_LOG(ERR, "Invalid digest length");
		ret = -EINVAL;
		goto error_exit;
	}
	/*
	 * If the requested digest is shorter than 4 bytes, generate the
	 * full 16-byte digest in a temporary location and memcpy only the
	 * requested number of bytes at operation completion.
	 */
	if (sess->req_digest_length < 4)
		sess->gen_digest_length = 16;
	else
		sess->gen_digest_length = sess->req_digest_length;

error_exit:
	return ret;
}

/**
 * Post-process a completed GCM crypto operation
 *
 * For decrypt/verify sessions the computed digest is compared against the
 * digest supplied in the operation; for generate sessions a truncated
 * digest is copied into place if one was requested. The operation status
 * is set accordingly.
 *
 * @param qp		Queue pair the operation was processed on
 * @param op		Completed crypto operation
 * @param session	AESNI-GCM session
 */
static void
post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	/* Verify digest if required */
	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
		uint8_t *digest;
		uint8_t *tag = qp_data->temp_digest;

		if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
			digest = op->sym->auth.digest.data;
		else
			digest = op->sym->aead.digest.data;

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				digest, session->req_digest_length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, session->req_digest_length);
#endif

		if (memcmp(tag, digest, session->req_digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
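		/* Digest generation: copy out a truncated digest if one was
		 * requested at session setup.
		 */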
		if (session->req_digest_length != session->gen_digest_length) {
			if (session->op ==
				IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
				memcpy(op->sym->aead.digest.data,
					qp_data->temp_digest,
					session->req_digest_length);
			else
				memcpy(op->sym->auth.digest.data,
					qp_data->temp_digest,
					session->req_digest_length);
		}
	}
}

/**
 * Process a completed GCM request
 *
 * @param qp		Queue pair to process
 * @param op		Crypto operation
 * @param sess		AESNI-GCM session
 */
static void
handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op,
		struct aesni_gcm_session *sess)
{
	post_process_gcm_crypto_op(qp, op, sess);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_gcm_session));
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}

/**
 * Process a crypto operation by calling the GCM API
 * of the IPsec multi-buffer library.
 *
 * @param	qp		queue pair
 * @param	op		symmetric crypto operation
 * @param	session		GCM session
 *
 * @return
 *  0 on success
 */
static int
process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset, data_offset, data_length;
	uint32_t part_len, total_len, data_len;
	uint8_t *tag;
	unsigned int oop = 0;
	struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];

	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
			session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
		offset = sym_op->aead.data.offset;
		data_offset = offset;
		data_length = sym_op->aead.data.length;
	} else {
		offset = sym_op->auth.data.offset;
		data_offset = offset;
		data_length = sym_op->auth.data.length;
	}

	RTE_ASSERT(m_src != NULL);

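	/* Advance to the segment containing the start of the data and make
	 * the offset relative to that segment.
	 */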
	while (offset >= m_src->data_len && data_length != 0) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	data_len = m_src->data_len - offset;
	part_len = (data_len < data_length) ? data_len : data_length;

	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
				rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	/* In-place */
	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
		dst = src;
	/* Out-of-place */
	else {
		oop = 1;
		/* Segmented destination buffer is not supported
		 * if operation is Out-of-place
		 */
		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
					data_offset);
	}

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);

	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
				dst, src, (uint64_t)part_len);
		total_len = data_length - part_len;

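		/* Walk the remaining source segments, ciphering each one. */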
		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->update_enc(&session->gdata_key,
					&qp_data->gcm_ctx_data,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp_data->temp_digest;
		else
			tag = sym_op->aead.digest.data;

		ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
				sym_op->aead.aad.data,
				(uint64_t)session->aad_length);

		ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
				dst, src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			if (oop)
				dst += part_len;
			else
				dst = src;
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->update_dec(&session->gdata_key,
					&qp_data->gcm_ctx_data,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

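		/* For decryption the tag is always computed into a temporary
		 * buffer; it is compared against the supplied digest during
		 * post-processing.
		 */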
		tag = qp_data->temp_digest;
		ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
				iv_ptr, session->iv.length);

		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
				src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->gmac_update(&session->gdata_key,
					&qp_data->gcm_ctx_data, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		if (session->req_digest_length != session->gen_digest_length)
			tag = qp_data->temp_digest;
		else
			tag = sym_op->auth.digest.data;

		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	} else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
				iv_ptr, session->iv.length);

		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
				src, (uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			ops->gmac_update(&session->gdata_key,
					&qp_data->gcm_ctx_data, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		tag = qp_data->temp_digest;

		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
				tag, session->gen_digest_length);
	}
	return 0;
}

/** Get GCM session */
static inline struct aesni_gcm_session *
aesni_gcm_get_session(struct ipsec_mb_qp *qp,
	     struct rte_crypto_op *op)
{
	struct rte_cryptodev_sym_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(sym_op->session != NULL))
			sess = sym_op->session;
	} else {
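		/* Session-less operation: take a session object from the
		 * mempool and configure it from the op's xform chain.
		 */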
		if (rte_mempool_get(qp->sess_mp, (void **)&sess))
			return NULL;

		if (unlikely(sess->sess_data_sz <
				sizeof(struct aesni_gcm_session))) {
			rte_mempool_put(qp->sess_mp, sess);
			return NULL;
		}

		if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
				CRYPTODEV_GET_SYM_SESS_PRIV(sess),
				sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, sess);
			sess = NULL;
		}
		sym_op->session = sess;
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return CRYPTODEV_GET_SYM_SESS_PRIV(sess);
}

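/*
 * In the ipsec_mb framework, enqueue_burst() only stages operations on the
 * queue pair's ingress ring; the actual crypto processing happens here, at
 * dequeue time.
 */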
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct ipsec_mb_qp *qp = queue_pair;

	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(qp, ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i], sess);
	}

	qp->stats.dequeued_count += i;

	return i;
}

static inline void
aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
			  int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
				     struct gcm_context_data *gdata_ctx,
				     uint8_t *digest, struct aesni_gcm_ops ops)
{
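	/* The library always emits gen_digest_length bytes: when the session
	 * requested a shorter tag, finalize into a stack buffer and copy out
	 * only the requested bytes.
	 */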
	if (s->req_digest_length != s->gen_digest_length) {
		uint8_t tmpdigest[s->gen_digest_length];

		ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
				s->gen_digest_length);
		memcpy(digest, tmpdigest, s->req_digest_length);
	} else {
		ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
				s->gen_digest_length);
	}

	return 0;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
				     struct gcm_context_data *gdata_ctx,
				     uint8_t *digest, struct aesni_gcm_ops ops)
{
	uint8_t tmpdigest[s->gen_digest_length];

	ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
			s->gen_digest_length);

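	/* A tag mismatch is reported as (positive) EBADMSG in the caller's
	 * vec->status entry.
	 */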
	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
								    : EBADMSG;
}

static inline void
aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
			     struct gcm_context_data *gdata_ctx,
			     struct rte_crypto_sgl *sgl, void *iv, void *aad,
			     struct aesni_gcm_ops ops)
{
	uint32_t i;

	/* init crypto operation */
	ops.init(&s->gdata_key, gdata_ctx, iv, aad,
		    (uint64_t)s->aad_length);

	/* update with sgl data */
	for (i = 0; i < sgl->num; i++) {
		struct rte_crypto_vec *vec = &sgl->vec[i];

		switch (s->op) {
		case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
			ops.update_enc(&s->gdata_key, gdata_ctx,
			      vec->base, vec->base, vec->len);
			break;
		case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
			ops.update_dec(&s->gdata_key, gdata_ctx,
			      vec->base, vec->base, vec->len);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid session op");
			break;
		}
	}
}

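/*
 * GMAC is routed through the GCM init path: the single SGL segment is
 * passed as the AAD argument and the tag is produced at finalize time.
 */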
static inline void
aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
			      struct gcm_context_data *gdata_ctx,
			      struct rte_crypto_sgl *sgl, void *iv,
			      struct aesni_gcm_ops ops)
{
	ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
		    sgl->vec[0].len);
}

static inline uint32_t
aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
		      struct gcm_context_data *gdata_ctx,
		      struct rte_crypto_sym_vec *vec,
		      struct aesni_gcm_ops ops)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
					     vec->iv[i].va, vec->aad[i].va,
					     ops);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
		    s, gdata_ctx, vec->digest[i].va, ops);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
		      struct gcm_context_data *gdata_ctx,
		      struct rte_crypto_sym_vec *vec,
		      struct aesni_gcm_ops ops)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
					     vec->iv[i].va, vec->aad[i].va,
					     ops);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
		    s, gdata_ctx, vec->digest[i].va, ops);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
			struct gcm_context_data *gdata_ctx,
			struct rte_crypto_sym_vec *vec,
			struct aesni_gcm_ops ops)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
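		/* Multi-segment source buffers are not supported for GMAC. */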
		if (vec->src_sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
					      vec->iv[i].va, ops);
		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
		    s, gdata_ctx, vec->digest[i].va, ops);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

static inline uint32_t
aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
		      struct gcm_context_data *gdata_ctx,
		      struct rte_crypto_sym_vec *vec,
		      struct aesni_gcm_ops ops)
{
	uint32_t i, processed;

	processed = 0;
	for (i = 0; i < vec->num; ++i) {
		if (vec->src_sgl[i].num != 1) {
			vec->status[i] = ENOTSUP;
			continue;
		}

		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
					      vec->iv[i].va, ops);
		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
		    s, gdata_ctx, vec->digest[i].va, ops);
		processed += (vec->status[i] == 0);
	}

	return processed;
}

/** Process CPU crypto bulk operations */
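/*
 * Illustrative caller sketch (hypothetical variable names), assuming a
 * configured device and a fully populated rte_crypto_sym_vec:
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *			ofs, &vec);
 *
 * 'done' counts the elements whose vec.status[i] == 0.
 */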
static uint32_t
aesni_gcm_process_bulk(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_sym_session *sess,
			__rte_unused union rte_crypto_sym_ofs ofs,
			struct rte_crypto_sym_vec *vec)
{
	struct aesni_gcm_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	struct gcm_context_data gdata_ctx;
	IMB_MGR *mb_mgr;

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = get_per_thread_mb_mgr();
	if (unlikely(mb_mgr == NULL))
		return 0;

	/* Check if function pointers have been set for this thread's ops. */
	if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
		aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);

	switch (s->op) {
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
				RTE_PER_LCORE(gcm_ops)[s->key_length]);
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
				RTE_PER_LCORE(gcm_ops)[s->key_length]);
	case IPSEC_MB_OP_HASH_GEN_ONLY:
		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
				RTE_PER_LCORE(gcm_ops)[s->key_length]);
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
				RTE_PER_LCORE(gcm_ops)[s->key_length]);
	default:
		aesni_gcm_fill_error_code(vec, EINVAL);
		return 0;
	}
}

static int
aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				const struct rte_cryptodev_qp_conf *qp_conf,
				int socket_id)
{
	int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
	if (ret < 0)
		return ret;

	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
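	/* Cache the GCM function pointers for this queue pair's MB manager. */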
	aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
	return 0;
}

struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = aesni_gcm_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_cpu_process = aesni_gcm_process_bulk,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);
}

static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver aesni_gcm_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,
		      cryptodev_aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
			      "max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
			       cryptodev_aesni_gcm_pmd_drv.driver,
			       pmd_driver_id_aesni_gcm);

/* Constructor function to register aesni-gcm PMD */
RTE_INIT(ipsec_mb_register_aesni_gcm)
{
	struct ipsec_mb_internals *aesni_gcm_data =
		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];

	aesni_gcm_data->caps = aesni_gcm_capabilities;
	aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	aesni_gcm_data->feature_flags =
		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
	aesni_gcm_data->internals_priv_size = 0;
	aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
	aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
	aesni_gcm_data->queue_pair_configure = NULL;
	aesni_gcm_data->session_configure = aesni_gcm_session_configure;
	aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
}