xref: /dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c (revision 3da59f30a23f2e795d2315f3d949e1b3e0ce0c3d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2021 Intel Corporation
3  */
4 
5 #include <unistd.h>
6 
7 #include "pmd_aesni_mb_priv.h"
8 
/* Per-lcore cache of the current process ID; written at session configure
 * time (see aesni_mb_session_configure) — presumably so the datapath can
 * detect a session being reused from a forked process. TODO confirm against
 * the enqueue/dequeue path.
 */
RTE_DEFINE_PER_LCORE(pid_t, pid);
10 
/* Cursor over an mbuf chain while processing an operation's data.
 * NOTE(review): exact use not visible in this chunk — the fields suggest a
 * segment pointer plus a byte offset within it; verify against the SGL
 * processing code.
 */
struct aesni_mb_op_buf_data {
	struct rte_mbuf *m;	/* current mbuf segment */
	uint32_t offset;	/* byte offset, presumably into @m */
};
15 
16 /**
17  * Calculate the authentication pre-computes
18  *
19  * @param one_block_hash	Function pointer
20  *				to calculate digest on ipad/opad
21  * @param ipad			Inner pad output byte array
22  * @param opad			Outer pad output byte array
23  * @param hkey			Authentication key
24  * @param hkey_len		Authentication key length
25  * @param blocksize		Block size of selected hash algo
26  */
27 static void
28 calculate_auth_precomputes(hash_one_block_t one_block_hash,
29 		uint8_t *ipad, uint8_t *opad,
30 		const uint8_t *hkey, uint16_t hkey_len,
31 		uint16_t blocksize)
32 {
33 	uint32_t i, length;
34 
35 	uint8_t ipad_buf[blocksize] __rte_aligned(16);
36 	uint8_t opad_buf[blocksize] __rte_aligned(16);
37 
38 	/* Setup inner and outer pads */
39 	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
40 	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
41 
42 	/* XOR hash key with inner and outer pads */
43 	length = hkey_len > blocksize ? blocksize : hkey_len;
44 
45 	for (i = 0; i < length; i++) {
46 		ipad_buf[i] ^= hkey[i];
47 		opad_buf[i] ^= hkey[i];
48 	}
49 
50 	/* Compute partial hashes */
51 	(*one_block_hash)(ipad_buf, ipad);
52 	(*one_block_hash)(opad_buf, opad);
53 
54 	/* Clean up stack */
55 	memset(ipad_buf, 0, blocksize);
56 	memset(opad_buf, 0, blocksize);
57 }
58 
59 static inline int
60 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
61 {
62 	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
63 		hash_alg == IMB_AUTH_AES_CCM ||
64 		cipher_mode == IMB_CIPHER_GCM);
65 }
66 
/** Set session authentication parameters
 *
 * Translate an auth xform into the hash-related fields of the session's
 * template job: hash algorithm, auth tag length, and any expanded or
 * precomputed authentication keys.
 *
 * @param mb_mgr	IMB manager providing key-expansion primitives
 * @param sess		Session being configured (output)
 * @param xform		Auth transform, or NULL for no authentication
 *
 * @return 0 on success; -1 if the xform is not an auth xform; negative
 *	errno value on other errors
 */
static int
aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn = NULL;
	/* Set when an HMAC key longer than the block size has been
	 * pre-hashed into hashed_key (RFC 2104 long-key rule).
	 */
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	/* Cleared for plain (non-HMAC) SHA, which needs no ipad/opad */
	uint32_t auth_precompute = 1;

	/* No auth xform: hash stage becomes a no-op */
	if (xform == NULL) {
		sess->template_job.hash_alg = IMB_AUTH_NULL;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) {
		sess->template_job.hash_alg = IMB_AUTH_NULL;
		sess->template_job.auth_tag_output_len_in_bytes = 0;
		return 0;
	}

	/* AES-XCBC-MAC: expand K1 and derive subkeys K2/K3 */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->template_job.hash_alg = IMB_AUTH_AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded;
		sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2;
		sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3;
		return 0;
	}

	/* AES-CMAC: expand the key and generate subkeys skey1/skey2 */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		/* Scratch for the decrypt key schedule, which CMAC never
		 * uses but IMB_AES_KEYEXP_128 always produces.
		 */
		uint32_t dust[4*15];

		sess->template_job.hash_alg = IMB_AUTH_AES_CMAC;

		uint16_t cmac_digest_len =
				get_digest_byte_length(IMB_AUTH_AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in version 0.49.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len;
		else
			sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey;
		sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1;
		sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2;
		return 0;
	}

	/* AES-GMAC: driven through the cipher machinery, hence the cipher
	 * direction and key-length fields are set here as well.
	 */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->template_job.cipher_direction = IMB_DIR_ENCRYPT;
			sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH;
		} else
			sess->template_job.cipher_direction = IMB_DIR_DECRYPT;

		if (sess->auth.req_digest_len >
			get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;
		sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		/* Pick GMAC variant and precompute the GHASH key by
		 * authentication key length.
		 */
		switch (xform->auth.key.length) {
		case IMB_KEY_128_BYTES:
			sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128;
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES;
			break;
		case IMB_KEY_192_BYTES:
			sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192;
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES;
			break;
		case IMB_KEY_256_BYTES:
			sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256;
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES;
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}
		sess->template_job.u.GMAC._key = &sess->cipher.gcm_key;

		return 0;
	}

	/* ZUC-EIA3: 16-byte key selects EIA3, 32-byte key ZUC-256 EIA3 */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if (xform->auth.key.length == 16) {
			sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN;

			if (sess->auth.req_digest_len != 4) {
				IPSEC_MB_LOG(ERR, "Invalid digest size\n");
				return -EINVAL;
			}
		} else if (xform->auth.key.length == 32) {
			sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN;
/* Newer IMB releases support 4/8/16-byte ZUC-256 tags, older only 4 */
#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
			if (sess->auth.req_digest_len != 4 &&
					sess->auth.req_digest_len != 8 &&
					sess->auth.req_digest_len != 16) {
#else
			if (sess->auth.req_digest_len != 4) {
#endif
				IPSEC_MB_LOG(ERR, "Invalid digest size\n");
				return -EINVAL;
			}
		} else {
			IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}

		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data,
			xform->auth.key.length);
		sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key;
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		/* SNOW3G UIA2: precompute the key schedule */
		sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN;
		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(
						IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
		sess->template_job.u.SNOW3G_UIA2._key = (void *)
			&sess->auth.pKeySched_snow3g_auth;
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		/* KASUMI F9: precompute the key schedule */
		sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1;
		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
		sess->template_job.u.KASUMI_UIA1._key = (void *)
			&sess->auth.pKeySched_kasumi_auth;
		return 0;
	}

	/* Remaining algorithms: HMAC family (pick the one-block hash
	 * function for ipad/opad precomputation; keys longer than the
	 * block size are hashed first) and plain SHA (no precompute).
	 */
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->template_job.hash_alg = IMB_AUTH_SHA_1;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->template_job.hash_alg = IMB_AUTH_SHA_224;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->template_job.hash_alg = IMB_AUTH_SHA_256;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->template_job.hash_alg = IMB_AUTH_SHA_384;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(
				IMB_AUTH_HMAC_SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->template_job.hash_alg = IMB_AUTH_SHA_512;
		auth_precompute = 0;
		break;
	default:
		IPSEC_MB_LOG(ERR,
			"Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->template_job.hash_alg);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->template_job.hash_alg);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		IPSEC_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	/* For a digest size that is neither the truncated nor the full
	 * length, generate the full digest and truncate at copy time.
	 */
	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->template_job.auth_tag_output_len_in_bytes = full_digest_size;
	else
		sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

	/* Plain SHA does not require precompute key */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->template_job.hash_alg));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->template_job.hash_alg));
	}
	sess->template_job.u.HMAC._hashed_auth_key_xor_ipad =
		sess->auth.pads.inner;
	sess->template_job.u.HMAC._hashed_auth_key_xor_opad =
		sess->auth.pads.outer;

	return 0;
}
399 
/** Set session cipher parameters
 *
 * Translate a cipher xform into the cipher-related fields of the
 * session's template job: mode, direction, IV offset/length and the
 * expanded (or scheduled) cipher keys.
 *
 * @param mb_mgr	IMB manager providing key-expansion primitives
 * @param sess		Session being configured (output)
 * @param xform		Cipher transform, or NULL for no cipher
 *
 * @return 0 on success, negative errno value on error
 */
static int
aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	/* Flags selecting which per-family key setup runs below; the
	 * final else branch handles single DES.
	 */
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;

	/* No cipher xform: cipher stage becomes a no-op */
	if (xform == NULL) {
		sess->template_job.cipher_mode = IMB_CIPHER_NULL;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->template_job.cipher_direction = IMB_DIR_ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->template_job.cipher_direction = IMB_DIR_DECRYPT;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->template_job.cipher_mode = IMB_CIPHER_CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->template_job.cipher_mode = IMB_CIPHER_CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->template_job.cipher_mode = IMB_CIPHER_DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->template_job.cipher_mode = IMB_CIPHER_DES3;
		is_3DES = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->template_job.cipher_mode = IMB_CIPHER_ECB;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		/* NULL cipher needs no key setup; record IV and return */
		sess->template_job.cipher_mode = IMB_CIPHER_NULL;
		sess->template_job.key_len_in_bytes = 0;
		sess->iv.offset = xform->cipher.iv.offset;
		sess->template_job.iv_len_in_bytes = xform->cipher.iv.length;
		return 0;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->template_job.iv_len_in_bytes = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_192_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode;
		sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode;
	} else if (is_docsis) {
		/* DOCSIS BPI: AES with 128/256-bit keys only (no 192) */
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode;
		sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode;
	} else if (is_3DES) {
		/* 3DES: schedule up to three DES keys; shorter keys reuse
		 * schedules per the keying-option rules below.
		 */
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case  24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr;
		sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr;
		/* IMB expects the full 3-key length regardless of input */
		sess->template_job.key_len_in_bytes = 24;
	} else if (is_zuc) {
		/* ZUC-EEA3: raw key copied as-is (16B) or ZUC-256 (32B) */
		if (xform->cipher.key.length != 16 &&
				xform->cipher.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->template_job.key_len_in_bytes = xform->cipher.key.length;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			xform->cipher.key.length);
		sess->template_job.enc_keys = sess->cipher.zuc_cipher_key;
		sess->template_job.dec_keys = sess->cipher.zuc_cipher_key;
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->template_job.key_len_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
		sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher;
		sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher;
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->template_job.key_len_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
		sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher;
		sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher;
	} else {
		/* Single DES (DES-CBC / DOCSIS-DES): 8-byte key, the AES
		 * key buffers are reused to hold the DES schedules.
		 */
		if (xform->cipher.key.length != 8) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->template_job.key_len_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
				xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
				xform->cipher.key.data);
		sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode;
		sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode;
	}

	return 0;
}
635 
/** Set session AEAD parameters
 *
 * Translate an AEAD xform (CCM, GCM or Chacha20-Poly1305) into both the
 * cipher and hash fields of the session's template job.
 *
 * @param mb_mgr	IMB manager providing key-expansion primitives
 * @param sess		Session being configured (output)
 * @param xform		AEAD transform (must not be NULL)
 *
 * @return 0 on success, negative errno value on error
 */
static int
aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	/* AEAD op selects both cipher direction and auth generate/verify */
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->template_job.cipher_direction = IMB_DIR_ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->template_job.cipher_direction = IMB_DIR_DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->template_job.iv_len_in_bytes = xform->aead.iv.length;

	/* Set digest sizes */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->template_job.cipher_mode = IMB_CIPHER_CCM;
		sess->template_job.hash_alg = IMB_AUTH_AES_CCM;
		sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode;
		sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode;
		/* CCM digests must be between 4 and 16 and an even number */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->template_job.cipher_mode = IMB_CIPHER_GCM;
		sess->template_job.hash_alg = IMB_AUTH_AES_GMAC;
		sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length;

		/* GCM: precompute the GHASH key per AES key size */
		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_192_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_256_BYTES:
			sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->template_job.enc_keys = &sess->cipher.gcm_key;
		sess->template_job.dec_keys = &sess->cipher.gcm_key;
		/* GCM digest size must be between 1 and 16 */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
		sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305;
		sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes =
			xform->aead.aad_length;

		if (xform->aead.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid key length");
			return -EINVAL;
		}
		sess->template_job.key_len_in_bytes = 32;
		memcpy(sess->cipher.expanded_aes_keys.encode,
			xform->aead.key.data, 32);
		sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode;
		/* NOTE(review): the raw key is copied into .encode only, yet
		 * dec_keys points at the .decode buffer, which is never
		 * written here. Presumably the IMB library reads enc_keys for
		 * Chacha20-Poly1305 in both directions — confirm, otherwise
		 * dec_keys should also point at .encode.
		 */
		sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode;
		if (sess->auth.req_digest_len != 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	return 0;
}
762 
763 /** Configure a aesni multi-buffer session from a crypto xform chain */
764 static int
765 aesni_mb_session_configure(IMB_MGR *mb_mgr,
766 		void *priv_sess,
767 		const struct rte_crypto_sym_xform *xform)
768 {
769 	const struct rte_crypto_sym_xform *auth_xform = NULL;
770 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
771 	const struct rte_crypto_sym_xform *aead_xform = NULL;
772 	enum ipsec_mb_operation mode;
773 	struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
774 	int ret;
775 
776 	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
777 				&cipher_xform, &aead_xform);
778 	if (ret)
779 		return ret;
780 
781 	/* Select Crypto operation - hash then cipher / cipher then hash */
782 	switch (mode) {
783 	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
784 		sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER;
785 		break;
786 	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
787 	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
788 		sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH;
789 		break;
790 	case IPSEC_MB_OP_HASH_GEN_ONLY:
791 	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
792 	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
793 		sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER;
794 		break;
795 	/*
796 	 * Multi buffer library operates only at two modes,
797 	 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER.
798 	 * When doing ciphering only, chain order depends
799 	 * on cipher operation: encryption is always
800 	 * the first operation and decryption the last one.
801 	 */
802 	case IPSEC_MB_OP_ENCRYPT_ONLY:
803 		sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH;
804 		break;
805 	case IPSEC_MB_OP_DECRYPT_ONLY:
806 		sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER;
807 		break;
808 	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
809 		sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH;
810 		break;
811 	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
812 		sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER;
813 		break;
814 	case IPSEC_MB_OP_NOT_SUPPORTED:
815 	default:
816 		IPSEC_MB_LOG(ERR,
817 			"Unsupported operation chain order parameter");
818 		return -ENOTSUP;
819 	}
820 
821 	/* Default IV length = 0 */
822 	sess->template_job.iv_len_in_bytes = 0;
823 
824 	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
825 	if (ret != 0) {
826 		IPSEC_MB_LOG(ERR,
827 			"Invalid/unsupported authentication parameters");
828 		return ret;
829 	}
830 
831 	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
832 			cipher_xform);
833 	if (ret != 0) {
834 		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
835 		return ret;
836 	}
837 
838 	if (aead_xform) {
839 		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
840 				aead_xform);
841 		if (ret != 0) {
842 			IPSEC_MB_LOG(ERR,
843 				"Invalid/unsupported aead parameters");
844 			return ret;
845 		}
846 	}
847 
848 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
849 	sess->session_id = imb_set_session(mb_mgr, &sess->template_job);
850 	sess->pid = getpid();
851 	RTE_PER_LCORE(pid) = sess->pid;
852 #endif
853 
854 	return 0;
855 }
856 
857 /** Check DOCSIS security session configuration is valid */
858 static int
859 check_docsis_sec_session(struct rte_security_session_conf *conf)
860 {
861 	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
862 	struct rte_security_docsis_xform *docsis = &conf->docsis;
863 
864 	/* Downlink: CRC generate -> Cipher encrypt */
865 	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
866 
867 		if (crypto_sym != NULL &&
868 		    crypto_sym->type ==	RTE_CRYPTO_SYM_XFORM_CIPHER &&
869 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
870 		    crypto_sym->cipher.algo ==
871 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
872 		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
873 		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
874 		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
875 		    crypto_sym->next == NULL) {
876 			return 0;
877 		}
878 	/* Uplink: Cipher decrypt -> CRC verify */
879 	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
880 
881 		if (crypto_sym != NULL &&
882 		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
883 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
884 		    crypto_sym->cipher.algo ==
885 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
886 		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
887 		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
888 		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
889 		    crypto_sym->next == NULL) {
890 			return 0;
891 		}
892 	}
893 
894 	return -EINVAL;
895 }
896 
897 /** Set DOCSIS security session auth (CRC) parameters */
898 static int
899 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
900 		struct rte_security_docsis_xform *xform)
901 {
902 	if (xform == NULL) {
903 		IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
904 		return -EINVAL;
905 	}
906 
907 	/* Select CRC generate/verify */
908 	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
909 		sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32;
910 		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
911 	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
912 		sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32;
913 		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
914 	} else {
915 		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
916 		return -ENOTSUP;
917 	}
918 
919 	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
920 	sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN;
921 
922 	return 0;
923 }
924 
925 /**
926  * Parse DOCSIS security session configuration and set private session
927  * parameters
928  */
929 static int
930 aesni_mb_set_docsis_sec_session_parameters(
931 		__rte_unused struct rte_cryptodev *dev,
932 		struct rte_security_session_conf *conf,
933 		void *sess)
934 {
935 	IMB_MGR  *mb_mgr = alloc_init_mb_mgr();
936 	struct rte_security_docsis_xform *docsis_xform;
937 	struct rte_crypto_sym_xform *cipher_xform;
938 	struct aesni_mb_session *ipsec_sess = sess;
939 	int ret = 0;
940 
941 	if (!mb_mgr)
942 		return -ENOMEM;
943 
944 	ret = check_docsis_sec_session(conf);
945 	if (ret) {
946 		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
947 		goto error_exit;
948 	}
949 
950 	switch (conf->docsis.direction) {
951 	case RTE_SECURITY_DOCSIS_UPLINK:
952 		ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH;
953 		docsis_xform = &conf->docsis;
954 		cipher_xform = conf->crypto_xform;
955 		break;
956 	case RTE_SECURITY_DOCSIS_DOWNLINK:
957 		ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER;
958 		cipher_xform = conf->crypto_xform;
959 		docsis_xform = &conf->docsis;
960 		break;
961 	default:
962 		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
963 		ret = -EINVAL;
964 		goto error_exit;
965 	}
966 
967 	/* Default IV length = 0 */
968 	ipsec_sess->template_job.iv_len_in_bytes = 0;
969 
970 	ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
971 			docsis_xform);
972 	if (ret != 0) {
973 		IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
974 		goto error_exit;
975 	}
976 
977 	ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
978 			ipsec_sess, cipher_xform);
979 
980 	if (ret != 0) {
981 		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
982 		goto error_exit;
983 	}
984 
985 #if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
986 	ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job);
987 #endif
988 
989 error_exit:
990 	free_mb_mgr(mb_mgr);
991 	return ret;
992 }
993 
/*
 * Compute the hash start offset to program into a job.
 *
 * For in-place jobs, hash-then-cipher chain orders, or linearized SGL
 * buffers, this is simply auth_offset. For out-of-place cipher-then-hash
 * jobs the hash is computed over the destination buffer, so any bytes that
 * lie inside the auth range but outside the cipher range are first copied
 * from source to destination, and the returned value is the distance from
 * the source base to the destination auth start (two's-complement wrapped
 * when dst < src, since the IPsec MB library only accepts a positive
 * offset that is added modulo 2^64).
 */
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop, const uint32_t auth_offset,
		const uint32_t cipher_offset, const uint32_t auth_length,
		const uint32_t cipher_length, uint8_t lb_sgl)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl)
		return auth_offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	/* Destination address where hashing actually starts */
	u_dst = (uintptr_t)p_dst + auth_offset;

	/**
	 * Copy the content between cipher offset and auth offset for generating
	 * correct digest.
	 */
	if (cipher_offset > auth_offset)
		memcpy(p_dst + auth_offset,
				p_src + auth_offset,
				cipher_offset -
				auth_offset);

	/**
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating correct digest
	 */
	cipher_end = cipher_offset + cipher_length;
	auth_end = auth_offset + auth_length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/**
	 * Since intel-ipsec-mb only supports positive values,
	 * we need to deduct the correct offset between src and dst.
	 */

	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}
1045 
/*
 * Fill an IMB_JOB for the CPU (synchronous) crypto API from a flat buffer.
 *
 * The job is first cloned from the session's template, then per-operation
 * fields (IV, AAD, digest output, data pointers and lengths derived from
 * the cipher/auth head/tail offsets in @sofs) are filled in.
 */
static inline void
set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
{
	memcpy(job, &session->template_job, sizeof(IMB_JOB));

	/* Set authentication parameters */
	job->iv = iv->va;

	switch (job->hash_alg) {
	case IMB_AUTH_AES_CCM:
		/* CCM AAD is preceded by an 18-byte B0/flags block; skip it */
		job->u.CCM.aad = (uint8_t *)aad->va + 18;
		/* CCM nonce starts one byte into the IV field */
		job->iv++;
		break;

	case IMB_AUTH_AES_GMAC:
		job->u.GCM.aad = aad->va;
		break;

	case IMB_AUTH_AES_GMAC_128:
	case IMB_AUTH_AES_GMAC_192:
	case IMB_AUTH_AES_GMAC_256:
		job->u.GMAC._iv = iv->va;
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = aad->va;
		break;
	default:
		/* HMAC-style algos use the precomputed ipad/opad hashes */
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;
	}

	/*
	 * Multi-buffer library current only support returning a truncated
	 * digest length as specified in the relevant IPsec RFCs
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;

	/* Data Parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
		sofs.ofs.auth.tail;
	job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
		sofs.ofs.cipher.tail;

	job->user_data = udata;
}
1103 
/*
 * Submit one AEAD SGL update job covering the next contiguous chunk of the
 * source (and, out-of-place, destination) mbuf chains.
 *
 * @param total_len	Remaining bytes to process; decremented by the chunk
 *			size, or used to mark the job IMB_SGL_COMPLETE when 0.
 * @param src_data	Source cursor (current mbuf + offset), advanced here.
 * @param dst_data	Destination cursor, or dst_data->m == NULL for
 *			in-place processing.
 * @return 0 on success, -EINVAL on a truncated mbuf chain.
 */
static int
handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
		uint32_t *total_len,
		struct aesni_mb_op_buf_data *src_data,
		struct aesni_mb_op_buf_data *dst_data)
{
	uint32_t data_len, part_len;

	/* All data consumed: submit the final (COMPLETE) stage */
	if (*total_len == 0) {
		job->sgl_state = IMB_SGL_COMPLETE;
		return 0;
	}

	if (src_data->m == NULL) {
		IPSEC_MB_LOG(ERR, "Invalid source buffer");
		return -EINVAL;
	}

	job->sgl_state = IMB_SGL_UPDATE;

	/* Bytes left in the current source segment */
	data_len = src_data->m->data_len - src_data->offset;

	job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
			src_data->offset);

	if (dst_data->m != NULL) {
		/* Skip exhausted destination segments */
		if (dst_data->m->data_len - dst_data->offset == 0) {
			dst_data->m = dst_data->m->next;
			if (dst_data->m == NULL) {
				IPSEC_MB_LOG(ERR, "Invalid destination buffer");
				return -EINVAL;
			}
			dst_data->offset = 0;
		}
		/* Chunk is bounded by both src and dst segment remainders */
		part_len = RTE_MIN(data_len, (dst_data->m->data_len -
				dst_data->offset));
		job->dst = rte_pktmbuf_mtod_offset(dst_data->m,
				uint8_t *, dst_data->offset);
		dst_data->offset += part_len;
	} else {
		/* In-place: write back into the source segment */
		part_len = RTE_MIN(data_len, *total_len);
		job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
			src_data->offset);
	}

	job->msg_len_to_cipher_in_bytes = part_len;
	job->msg_len_to_hash_in_bytes = part_len;

	job = IMB_SUBMIT_JOB(mb_mgr);

	*total_len -= part_len;

	/* Advance the source cursor; move to next segment when drained */
	if (part_len != data_len) {
		src_data->offset += part_len;
	} else {
		src_data->m = src_data->m->next;
		src_data->offset = 0;
	}

	return 0;
}
1165 
1166 static uint64_t
1167 sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len)
1168 {
1169 	uint64_t cipher_len;
1170 
1171 	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
1172 			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
1173 		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
1174 				(job->cipher_start_src_offset_in_bits >> 3);
1175 	else
1176 		cipher_len = job->msg_len_to_cipher_in_bytes +
1177 				job->cipher_start_src_offset_in_bytes;
1178 
1179 	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
1180 			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
1181 		*auth_len = (job->msg_len_to_hash_in_bits >> 3) +
1182 				job->hash_start_src_offset_in_bytes;
1183 	else
1184 		*auth_len = job->msg_len_to_hash_in_bytes +
1185 				job->hash_start_src_offset_in_bytes;
1186 
1187 	return RTE_MAX(*auth_len, cipher_len);
1188 }
1189 
1190 static int
1191 handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset,
1192 		struct aesni_mb_session *session)
1193 {
1194 	uint64_t auth_len, total_len;
1195 	uint8_t *src, *linear_buf = NULL;
1196 	int lb_offset = 0;
1197 	struct rte_mbuf *src_seg;
1198 	uint16_t src_len;
1199 
1200 	total_len = sgl_linear_cipher_auth_len(job, &auth_len);
1201 	linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0);
1202 	if (linear_buf == NULL) {
1203 		IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer\n");
1204 		return -1;
1205 	}
1206 
1207 	for (src_seg = op->sym->m_src; (src_seg != NULL) &&
1208 			(total_len - lb_offset > 0);
1209 			src_seg = src_seg->next) {
1210 		src = rte_pktmbuf_mtod(src_seg, uint8_t *);
1211 		src_len =  RTE_MIN(src_seg->data_len, total_len - lb_offset);
1212 		rte_memcpy(linear_buf + lb_offset, src, src_len);
1213 		lb_offset += src_len;
1214 	}
1215 
1216 	job->src = linear_buf;
1217 	job->dst = linear_buf + dst_offset;
1218 	job->user_data2 = linear_buf;
1219 
1220 	if (job->hash_alg == IMB_AUTH_AES_GMAC)
1221 		job->u.GCM.aad = linear_buf;
1222 
1223 	if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1224 		job->auth_tag_output = linear_buf + lb_offset;
1225 	else
1226 		job->auth_tag_output = linear_buf + auth_len;
1227 
1228 	return 0;
1229 }
1230 
1231 static inline int
1232 imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
1233 {
1234 	if (alg == IMB_CIPHER_CHACHA20_POLY1305 ||
1235 			alg == IMB_CIPHER_CHACHA20_POLY1305_SGL ||
1236 			alg == IMB_CIPHER_GCM_SGL ||
1237 			alg == IMB_CIPHER_GCM)
1238 		return 1;
1239 	return 0;
1240 }
1241 
1242 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
/*
 * Describe an entire SGL AEAD operation as one job (IMB_SGL_ALL) by
 * building an array of in/out IOV segments from the source and (optionally)
 * destination mbuf chains.
 *
 * @param oop		Non-zero for out-of-place (separate m_dst) operation.
 * @param offset	Byte offset of the AEAD data within the chain(s).
 * @param sgl_segs	Caller-provided segment array; must hold at least
 *			m_src->nb_segs entries.
 * @return 0 on success, -EINVAL on a truncated mbuf chain.
 */
static inline int
single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
		int oop, uint32_t offset, struct rte_mbuf *m_src,
		struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs)
{
	uint32_t num_segs = 0;
	struct aesni_mb_op_buf_data src_sgl = {0};
	struct aesni_mb_op_buf_data dst_sgl = {0};
	uint32_t total_len;

	job->sgl_state = IMB_SGL_ALL;

	src_sgl.m = m_src;
	src_sgl.offset = offset;

	/* Skip whole segments until the starting offset is reached */
	while (src_sgl.offset >= src_sgl.m->data_len) {
		src_sgl.offset -= src_sgl.m->data_len;
		src_sgl.m = src_sgl.m->next;

		RTE_ASSERT(src_sgl.m != NULL);
	}

	if (oop) {
		dst_sgl.m = m_dst;
		dst_sgl.offset = offset;

		/* Same skip for the destination chain */
		while (dst_sgl.offset >= dst_sgl.m->data_len) {
			dst_sgl.offset -= dst_sgl.m->data_len;
			dst_sgl.m = dst_sgl.m->next;

			RTE_ASSERT(dst_sgl.m != NULL);
		}
	}
	total_len = op->sym->aead.data.length;

	/* Emit one IOV entry per contiguous src/dst overlap */
	while (total_len != 0) {
		uint32_t data_len, part_len;

		if (src_sgl.m == NULL) {
			IPSEC_MB_LOG(ERR, "Invalid source buffer");
			return -EINVAL;
		}

		data_len = src_sgl.m->data_len - src_sgl.offset;

		sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *,
				src_sgl.offset);

		if (dst_sgl.m != NULL) {
			/* Skip exhausted destination segments */
			if (dst_sgl.m->data_len - dst_sgl.offset == 0) {
				dst_sgl.m = dst_sgl.m->next;
				if (dst_sgl.m == NULL) {
					IPSEC_MB_LOG(ERR, "Invalid destination buffer");
					return -EINVAL;
				}
				dst_sgl.offset = 0;
			}
			part_len = RTE_MIN(data_len, (dst_sgl.m->data_len -
					dst_sgl.offset));
			sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m,
					uint8_t *, dst_sgl.offset);
			dst_sgl.offset += part_len;
		} else {
			/* In-place: output written back into the source */
			part_len = RTE_MIN(data_len, total_len);
			sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *,
				src_sgl.offset);
		}

		sgl_segs[num_segs].len = part_len;

		total_len -= part_len;

		if (part_len != data_len) {
			src_sgl.offset += part_len;
		} else {
			src_sgl.m = src_sgl.m->next;
			src_sgl.offset = 0;
		}
		num_segs++;
	}
	job->num_sgl_io_segs = num_segs;
	job->sgl_io_segs = sgl_segs;
	return 0;
}
1327 #endif
1328 
/*
 * Process an SGL AEAD operation as a sequence of jobs using the library's
 * INIT -> UPDATE... -> COMPLETE protocol: the initial job (already filled
 * by the caller) is submitted with IMB_SGL_INIT, then one job per buffer
 * chunk is cloned from it and submitted via handle_aead_sgl_job() until
 * the operation reaches IMB_SGL_COMPLETE.
 *
 * @return 0 on success, negative errno from handle_aead_sgl_job() on error.
 */
static inline int
multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
		int oop, uint32_t offset, struct rte_mbuf *m_src,
		struct rte_mbuf *m_dst, IMB_MGR *mb_mgr)
{
	int ret;
	IMB_JOB base_job;
	struct aesni_mb_op_buf_data src_sgl = {0};
	struct aesni_mb_op_buf_data dst_sgl = {0};
	uint32_t total_len;

	/* Keep a template copy: the manager owns the submitted job */
	base_job = *job;
	job->sgl_state = IMB_SGL_INIT;
	job = IMB_SUBMIT_JOB(mb_mgr);
	total_len = op->sym->aead.data.length;

	src_sgl.m = m_src;
	src_sgl.offset = offset;

	/* Skip whole segments until the starting offset is reached */
	while (src_sgl.offset >= src_sgl.m->data_len) {
		src_sgl.offset -= src_sgl.m->data_len;
		src_sgl.m = src_sgl.m->next;

		RTE_ASSERT(src_sgl.m != NULL);
	}

	if (oop) {
		dst_sgl.m = m_dst;
		dst_sgl.offset = offset;

		/* Same skip for the destination chain */
		while (dst_sgl.offset >= dst_sgl.m->data_len) {
			dst_sgl.offset -= dst_sgl.m->data_len;
			dst_sgl.m = dst_sgl.m->next;

			RTE_ASSERT(dst_sgl.m != NULL);
		}
	}

	while (job->sgl_state != IMB_SGL_COMPLETE) {
		job = IMB_GET_NEXT_JOB(mb_mgr);
		*job = base_job;
		ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
			&src_sgl, &dst_sgl);
		if (ret < 0)
			return ret;
	}
	return 0;
}
1377 
/*
 * Fill job parameters for an AES-GCM operation (fast path split out of
 * set_mb_job_params).
 *
 * @param sgl	Non-zero when src or dst is multi-segment: switches the job
 *		to the GCM SGL cipher/hash modes and delegates the data
 *		description to single_sgl_job()/multi_sgl_job().
 * @param oop	Non-zero for out-of-place operation.
 * @return 0 on success, negative errno on SGL chain errors.
 */
static inline int
set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
	struct aesni_mb_qp_data *qp_data,
	struct rte_crypto_op *op, uint8_t *digest_idx,
	const struct aesni_mb_session *session,
	struct rte_mbuf *m_src, struct rte_mbuf *m_dst,
	const int oop)
{
	const uint32_t m_offset = op->sym->aead.data.offset;

	job->u.GCM.aad = op->sym->aead.aad.data;
	if (sgl) {
		/* Per-queue-pair GCM context used across the SGL stages */
		job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
		job->cipher_mode = IMB_CIPHER_GCM_SGL;
		job->hash_alg = IMB_AUTH_GCM_SGL;
		/* Lengths/offsets are carried per-chunk, not on this job */
		job->hash_start_src_offset_in_bytes = 0;
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
		job->cipher_start_src_offset_in_bytes = 0;
#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
		/* Re-derive the session id after changing cipher/hash mode */
		imb_set_session(mb_mgr, job);
#endif
	} else {
		job->hash_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes =
				op->sym->aead.data.length;
		job->cipher_start_src_offset_in_bytes =
			op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
	}

	/* Verify: write the tag to scratch space and compare afterwards */
	if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp_data->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
	} else {
		job->auth_tag_output = op->sym->aead.digest.data;
	}

	job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	if (sgl) {
		job->src = NULL;
		job->dst = NULL;

#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
		/* Prefer the single-job IOV path when the chain fits */
		if (m_src->nb_segs <= MAX_NUM_SEGS)
			return single_sgl_job(job, op, oop,
					m_offset, m_src, m_dst,
					qp_data->sgl_segs);
		else
#endif
			return multi_sgl_job(job, op, oop,
					m_offset, m_src, m_dst, mb_mgr);
	} else {
		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
	}

	return 0;
}
1443 
/**
 * Check if conditions are met for digest-appended operations
 *
 * If the digest location provided by the application falls inside the
 * region that will be overwritten by the cipher, the digest must instead be
 * written immediately after the auth region in the source buffer. Returns
 * that in-source location, or NULL when the normal digest pointer can be
 * used. Bit-based algorithms have their offsets/lengths converted to bytes.
 */
static uint8_t *
aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job,
		uint32_t oop)
{
	unsigned int auth_size, cipher_size;
	uint8_t *end_cipher;
	uint8_t *start_cipher;

	if (job->cipher_mode == IMB_CIPHER_NULL)
		return NULL;

	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 ||
		job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
		job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		/* Offsets/lengths are in bits for these ciphers */
		cipher_size = (op->sym->cipher.data.offset >> 3) +
			(op->sym->cipher.data.length >> 3);
	} else {
		cipher_size = (op->sym->cipher.data.offset) +
			(op->sym->cipher.data.length);
	}
	if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN ||
		job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
		job->hash_alg == IMB_AUTH_KASUMI_UIA1 ||
		job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) {
		/* Offsets/lengths are in bits for these hash algos */
		auth_size = (op->sym->auth.data.offset >> 3) +
			(op->sym->auth.data.length >> 3);
	} else {
		auth_size = (op->sym->auth.data.offset) +
			(op->sym->auth.data.length);
	}

	/* Cipher output lands in m_src in-place, or m_dst out-of-place */
	if (!oop) {
		end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size);
		start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	} else {
		end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size);
		start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	}

	/*
	 * NOTE(review): the replacement location is always taken from m_src,
	 * even when the overlap was detected against m_dst (oop) — presumably
	 * because the digest is appended to the source data; confirm for the
	 * out-of-place case.
	 */
	if (start_cipher < op->sym->auth.digest.data &&
		op->sym->auth.digest.data < end_cipher) {
		return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size);
	} else {
		return NULL;
	}
}
1491 
1492 /**
1493  * Process a crypto operation and complete a IMB_JOB job structure for
1494  * submission to the multi buffer library for processing.
1495  *
1496  * @param	qp		queue pair
1497  * @param	job		IMB_JOB structure to fill
1498  * @param	op		crypto op to process
1499  * @param	digest_idx	ID for digest to use
1500  *
1501  * @return
1502  * - 0 on success, the IMB_JOB will be filled
1503  * - -1 if invalid session or errors allocationg SGL linear buffer,
1504  *   IMB_JOB will not be filled
1505  */
static inline int
set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx,
		IMB_MGR *mb_mgr, pid_t pid)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct aesni_mb_session *session;
	uint32_t m_offset;
	int oop;
	uint32_t auth_off_in_bytes;
	uint32_t ciph_off_in_bytes;
	uint32_t auth_len_in_bytes;
	uint32_t ciph_len_in_bytes;
	uint8_t sgl = 0;
	uint8_t lb_sgl = 0;

#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM
	/* pid is only used for session-id revalidation on newer libs */
	(void) pid;
#endif

	session = ipsec_mb_get_session_private(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	const IMB_CIPHER_MODE cipher_mode =
			session->template_job.cipher_mode;

#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
	/*
	 * If the session was created in a different process, re-register it
	 * with this process' manager; otherwise only reload the template when
	 * the job does not already carry this session's id.
	 */
	if (session->pid != pid) {
		memcpy(job, &session->template_job, sizeof(IMB_JOB));
		imb_set_session(mb_mgr, job);
	} else if (job->session_id != session->session_id)
#endif
		memcpy(job, &session->template_job, sizeof(IMB_JOB));

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

	/* Multi-segment buffers need SGL handling; algorithms without
	 * native SGL support fall back to a linearized buffer (lb_sgl).
	 */
	if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) {
		sgl = 1;
		if (!imb_lib_support_sgl_algo(cipher_mode))
			lb_sgl = 1;
	}

	/* GCM has its own dedicated fast path */
	if (cipher_mode == IMB_CIPHER_GCM)
		return set_gcm_job(mb_mgr, job, sgl, qp_data,
				op, digest_idx, session, m_src, m_dst, oop);

	/* Set authentication parameters */
	const int aead = is_aead_algo(job->hash_alg, cipher_mode);

	switch (job->hash_alg) {
	case IMB_AUTH_AES_CCM:
		/* CCM AAD is preceded by an 18-byte B0/flags block */
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		break;

	case IMB_AUTH_AES_GMAC:
		job->u.GCM.aad = op->sym->aead.aad.data;
		if (sgl) {
			job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
			job->cipher_mode = IMB_CIPHER_GCM_SGL;
			job->hash_alg = IMB_AUTH_GCM_SGL;
#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
			/* Mode changed: re-derive the library session */
			imb_set_session(mb_mgr, job);
#endif
		}
		break;
	case IMB_AUTH_AES_GMAC_128:
	case IMB_AUTH_AES_GMAC_192:
	case IMB_AUTH_AES_GMAC_256:
		job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_ZUC_EIA3_BITLEN:
	case IMB_AUTH_ZUC256_EIA3_BITLEN:
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._iv =
			rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
		if (sgl) {
			job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
			job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
			job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
			/* Mode changed: re-derive the library session */
			imb_set_session(mb_mgr, job);
#endif
		}
		break;
	default:
		break;
	}

	if (aead)
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

	/* ZUC cipher offset is in bits; SNOW3G/KASUMI take dst at base */
	if (cipher_mode == IMB_CIPHER_ZUC_EEA3)
		m_offset >>= 3;
	else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN)
		m_offset = 0;
	else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
		m_offset = 0;

	/* Set digest output location */
	if (job->hash_alg != IMB_AUTH_NULL &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		/* Verify: write to scratch digest, compare on dequeue */
		job->auth_tag_output = qp_data->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
	} else {
		if (aead)
			job->auth_tag_output = op->sym->aead.digest.data;
		else {
			/* Relocate digest if it overlaps the cipher output */
			job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop);
			if (job->auth_tag_output == NULL) {
				job->auth_tag_output = op->sym->auth.digest.data;
			}
		}
		/* Truncated digests are generated into scratch space and
		 * copied out at the requested length on dequeue.
		 */
		if (session->auth.req_digest_len !=
				job->auth_tag_output_len_in_bytes) {
			job->auth_tag_output =
				qp_data->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
		}
	}
	/*
	 * Multi-buffer library current only support returning a truncated
	 * digest length as specified in the relevant IPsec RFCs
	 */

	/* Data Parameters */
	if (sgl) {
		job->src = NULL;
		job->dst = NULL;
	} else {
		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
	}

	/* Per-algorithm hash offsets/lengths and IV location */
	switch (job->hash_alg) {
	case IMB_AUTH_AES_CCM:
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		/* CCM nonce starts one byte into the IV field */
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case IMB_AUTH_AES_GMAC:
		job->hash_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes =
				op->sym->aead.data.length;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;
	case IMB_AUTH_AES_GMAC_128:
	case IMB_AUTH_AES_GMAC_192:
	case IMB_AUTH_AES_GMAC_256:
		job->hash_start_src_offset_in_bytes =
				op->sym->auth.data.offset;
		job->msg_len_to_hash_in_bytes =
				op->sym->auth.data.length;
		break;

	case IMB_AUTH_GCM_SGL:
	case IMB_AUTH_CHACHA20_POLY1305_SGL:
		/* Lengths are carried per-chunk by the SGL stages */
		job->hash_start_src_offset_in_bytes = 0;
		job->msg_len_to_hash_in_bytes = 0;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->hash_start_src_offset_in_bytes =
			op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;
	/* ZUC and SNOW3G require length in bits and offset in bytes */
	case IMB_AUTH_ZUC_EIA3_BITLEN:
	case IMB_AUTH_ZUC256_EIA3_BITLEN:
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
		auth_len_in_bytes = op->sym->auth.data.length >> 3;
		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, auth_off_in_bytes,
				ciph_off_in_bytes, auth_len_in_bytes,
				ciph_len_in_bytes, lb_sgl);
		job->msg_len_to_hash_in_bits = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	/* KASUMI requires lengths and offset in bytes */
	case IMB_AUTH_KASUMI_UIA1:
		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
		auth_len_in_bytes = op->sym->auth.data.length >> 3;
		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, auth_off_in_bytes,
				ciph_off_in_bytes, auth_len_in_bytes,
				ciph_len_in_bytes, lb_sgl);
		job->msg_len_to_hash_in_bytes = auth_len_in_bytes;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	default:
		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, op->sym->auth.data.offset,
				op->sym->cipher.data.offset,
				op->sym->auth.data.length,
				op->sym->cipher.data.length, lb_sgl);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

	/* Per-mode cipher offsets/lengths */
	switch (job->cipher_mode) {
	/* ZUC requires length and offset in bytes */
	case IMB_CIPHER_ZUC_EEA3:
		job->cipher_start_src_offset_in_bytes =
					op->sym->cipher.data.offset >> 3;
		job->msg_len_to_cipher_in_bytes =
					op->sym->cipher.data.length >> 3;
		break;
	/* ZUC and SNOW3G require length and offset in bits */
	case IMB_CIPHER_SNOW3G_UEA2_BITLEN:
	case IMB_CIPHER_KASUMI_UEA1_BITLEN:
		job->cipher_start_src_offset_in_bits =
					op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bits =
					op->sym->cipher.data.length;
		break;
	case IMB_CIPHER_GCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		break;
	case IMB_CIPHER_CCM:
	case IMB_CIPHER_CHACHA20_POLY1305:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		break;
	case IMB_CIPHER_GCM_SGL:
	case IMB_CIPHER_CHACHA20_POLY1305_SGL:
		/* Lengths are carried per-chunk by the SGL stages */
		job->msg_len_to_cipher_in_bytes = 0;
		job->cipher_start_src_offset_in_bytes = 0;
		break;
	default:
		job->cipher_start_src_offset_in_bytes =
					op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
	}

	/* NULL cipher out-of-place still has to move the payload */
	if (cipher_mode == IMB_CIPHER_NULL && oop) {
		memcpy(job->dst + job->cipher_start_src_offset_in_bytes,
			job->src + job->cipher_start_src_offset_in_bytes,
			job->msg_len_to_cipher_in_bytes);
	}

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	if (sgl) {

		if (lb_sgl)
			return handle_sgl_linear(job, op, m_offset, session);

#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
		/* Prefer the single-job IOV path when the chain fits */
		if (m_src->nb_segs <= MAX_NUM_SEGS)
			return single_sgl_job(job, op, oop,
					m_offset, m_src, m_dst,
					qp_data->sgl_segs);
		else
#endif
			return multi_sgl_job(job, op, oop,
					m_offset, m_src, m_dst, mb_mgr);
	}

	return 0;
}
1820 
1821 /**
1822  * Process a crypto operation containing a security op and complete a
1823  * IMB_JOB job structure for submission to the multi buffer library for
1824  * processing.
1825  */
static inline int
set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
			struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session = NULL;

	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	session = SECURITY_GET_SESS_PRIV(op->sym->session);

	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	/* Only DOCSIS protocol operations supported now */
	if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Start from the session's preconfigured job template */
	memcpy(job, &session->template_job, sizeof(IMB_JOB));

	/* Set cipher parameters */
	job->enc_keys = session->cipher.expanded_aes_keys.encode;
	job->dec_keys = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set digest output location: always scratch space, since the CRC
	 * is compared (uplink) or inserted (downlink) on dequeue
	 */
	job->auth_tag_output = qp_data->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}
1892 
1893 static inline void
1894 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
1895 {
1896 	uint16_t crc_offset;
1897 	uint8_t *crc;
1898 
1899 	if (!job->msg_len_to_hash_in_bytes)
1900 		return;
1901 
1902 	crc_offset = job->hash_start_src_offset_in_bytes +
1903 			job->msg_len_to_hash_in_bytes -
1904 			job->cipher_start_src_offset_in_bytes;
1905 	crc = job->dst + crc_offset;
1906 
1907 	/* Verify CRC (at the end of the message) */
1908 	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1909 		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1910 }
1911 
1912 static inline void
1913 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status)
1914 {
1915 	/* Verify digest if required */
1916 	if (memcmp(job->auth_tag_output, digest, len) != 0)
1917 		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1918 }
1919 
1920 static inline void
1921 generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
1922 		struct aesni_mb_session *sess)
1923 {
1924 	/* No extra copy needed */
1925 	if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes))
1926 		return;
1927 
1928 	/*
1929 	 * This can only happen for HMAC, so only digest
1930 	 * for authentication algos is required
1931 	 */
1932 	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1933 			sess->auth.req_digest_len);
1934 }
1935 
1936 static void
1937 post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job,
1938 		struct aesni_mb_session *sess, uint8_t *linear_buf)
1939 {
1940 
1941 	int lb_offset = 0;
1942 	struct rte_mbuf *m_dst = op->sym->m_dst == NULL ?
1943 			op->sym->m_src : op->sym->m_dst;
1944 	uint16_t total_len, dst_len;
1945 	uint64_t auth_len;
1946 	uint8_t *dst;
1947 
1948 	total_len = sgl_linear_cipher_auth_len(job, &auth_len);
1949 
1950 	if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY)
1951 		total_len += job->auth_tag_output_len_in_bytes;
1952 
1953 	for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) {
1954 		dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
1955 		dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset);
1956 		rte_memcpy(dst, linear_buf + lb_offset, dst_len);
1957 		lb_offset += dst_len;
1958 	}
1959 }
1960 
/**
 * Process a completed job and return rte_mbuf which job processed
 *
 * @param qp	Queue Pair to process
 * @param job	IMB_JOB job to process
 *
 * @return
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = NULL;
	uint8_t *linear_buf = NULL;
	int sgl = 0;
	uint8_t oop = 0;
	uint8_t is_docsis_sec = 0;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assuming at this point that if it's a security type op, that
		 * this is for DOCSIS
		 */
		is_docsis_sec = 1;
		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
	} else
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case IMB_STATUS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			/*
			 * Multi-segment ops for algos without native SGL
			 * support were processed in a linear scratch buffer
			 * (job->user_data2); copy the result back into the
			 * mbuf chain and free the buffer below.
			 */
			if ((op->sym->m_src->nb_segs > 1 ||
					(op->sym->m_dst != NULL &&
					op->sym->m_dst->nb_segs > 1)) &&
					!imb_lib_support_sgl_algo(job->cipher_mode)) {
				linear_buf = (uint8_t *) job->user_data2;
				sgl = 1;

				post_process_sgl_linear(op, job, sess, linear_buf);
			}

			if (job->hash_alg == IMB_AUTH_NULL)
				break;

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (is_aead_algo(job->hash_alg,
						job->cipher_mode))
					verify_digest(job,
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
						&op->status);
				else if (is_docsis_sec)
					verify_docsis_sec_crc(job,
						&op->status);
				else
					verify_digest(job,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
						&op->status);
			} else {
				if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) {
					/* in-place operation */
					oop = 0;
				} else { /* out-of-place operation */
					oop = 1;
				}

				/* Enable digest check */
				if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL
				&& !is_aead_algo(job->hash_alg,	sess->template_job.cipher_mode) &&
				aesni_mb_digest_appended_in_src(op, job, oop) != NULL) {
					unsigned int auth_size, cipher_size;
					int unencrypted_bytes = 0;
					/* Bit-length algos: offsets/lengths are in bits */
					if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
						job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN ||
						job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
						cipher_size = (op->sym->cipher.data.offset >> 3) +
							(op->sym->cipher.data.length >> 3);
					} else {
						cipher_size = (op->sym->cipher.data.offset) +
							(op->sym->cipher.data.length);
					}
					if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN ||
						job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
						job->hash_alg == IMB_AUTH_KASUMI_UIA1 ||
						job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) {
						auth_size = (op->sym->auth.data.offset >> 3) +
							(op->sym->auth.data.length >> 3);
					} else {
						auth_size = (op->sym->auth.data.offset) +
						(op->sym->auth.data.length);
					}
					/* Check for unencrypted bytes in partial digest cases */
					if (job->cipher_mode != IMB_CIPHER_NULL) {
						unencrypted_bytes = auth_size +
						job->auth_tag_output_len_in_bytes - cipher_size;
					}
					/* Copy authenticated-only tail bytes to the dst buffer */
					if (unencrypted_bytes > 0)
						rte_memcpy(
						rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *,
						cipher_size),
						rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *,
						cipher_size),
						unencrypted_bytes);
				}
				generate_digest(job, op, sess);
			}
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		if (sgl)
			rte_free(linear_buf);
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}
2089 
2090 static inline void
2091 post_process_mb_sync_job(IMB_JOB *job)
2092 {
2093 	uint32_t *st;
2094 
2095 	st = job->user_data;
2096 	st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
2097 }
2098 
2099 static inline uint32_t
2100 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
2101 {
2102 	uint32_t i;
2103 
2104 	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
2105 		post_process_mb_sync_job(job);
2106 
2107 	return i;
2108 }
2109 
2110 static inline uint32_t
2111 flush_mb_sync_mgr(IMB_MGR *mb_mgr)
2112 {
2113 	IMB_JOB *job;
2114 
2115 	job = IMB_FLUSH_JOB(mb_mgr);
2116 	return handle_completed_sync_jobs(job, mb_mgr);
2117 }
2118 
2119 static inline IMB_JOB *
2120 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
2121 {
2122 	job->chain_order = IMB_ORDER_HASH_CIPHER;
2123 	job->cipher_mode = IMB_CIPHER_NULL;
2124 	job->hash_alg = IMB_AUTH_NULL;
2125 	job->cipher_direction = IMB_DIR_DECRYPT;
2126 
2127 	/* Set user data to be crypto operation data struct */
2128 	job->user_data = op;
2129 
2130 	return job;
2131 }
2132 
2133 #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
/**
 * Dequeue burst using the intel-ipsec-mb burst API (library > v1.2).
 *
 * Pulls ops from the qp ingress ring, submits them to the MB manager in
 * bursts of at most IMB_MAX_BURST_SIZE, post-processes any jobs the
 * library returns, and flushes when no job completed in a pass.
 *
 * @param queue_pair	Queue pair (struct ipsec_mb_qp *)
 * @param ops		Output array of processed crypto ops
 * @param nb_ops	Maximum number of ops to dequeue
 *
 * @return Number of processed ops written to @ops
 */
static uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	IMB_MGR *mb_mgr = qp->mb_mgr;
	struct rte_crypto_op *op;
	struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE];
	IMB_JOB *job;
	int retval, processed_jobs = 0;
	uint16_t i, nb_jobs;
	IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL};
	pid_t pid;

	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
		return 0;

	uint8_t digest_idx = qp->digest_idx;
	uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ?
		IMB_MAX_BURST_SIZE : nb_ops;

	/*
	 * If nb_ops is greater than the max supported
	 * ipsec_mb burst size, then process in bursts of
	 * IMB_MAX_BURST_SIZE until all operations are submitted
	 */
	while (nb_ops) {
		uint16_t nb_submit_ops;
		uint16_t n = (nb_ops / burst_sz) ?
			burst_sz : nb_ops;

		if (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) {
			/*
			 * Not enough free jobs in the queue
			 * Flush n jobs until enough jobs available
			 */
			nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);
			for (i = 0; i < nb_jobs; i++) {
				job = jobs[i];

				op = post_process_mb_job(qp, job);
				if (op) {
					ops[processed_jobs++] = op;
					qp->stats.dequeued_count++;
				} else {
					qp->stats.dequeue_err_count++;
					break;
				}
			}
			/* i jobs were drained; retry submission for the rest */
			nb_ops -= i;
			continue;
		}

		/* Cache the pid per lcore; used for fork detection downstream */
		if (!RTE_PER_LCORE(pid))
			RTE_PER_LCORE(pid) = getpid();

		pid = RTE_PER_LCORE(pid);

		/*
		 * Get the next operations to process from ingress queue.
		 * There is no need to return the job to the IMB_MGR
		 * if there are no more operations to process, since
		 * the IMB_MGR can use that pointer again in next
		 * get_next calls.
		 */
		nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue,
						(void **)deqd_ops, n, NULL);
		for (i = 0; i < nb_submit_ops; i++) {
			job = jobs[i];
			op = deqd_ops[i];

			if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
				retval = set_sec_mb_job_params(job, qp, op,
							       &digest_idx);
			else
				retval = set_mb_job_params(job, qp, op,
							   &digest_idx, mb_mgr, pid);

			/* On setup failure, run the job as a no-op instead */
			if (unlikely(retval != 0)) {
				qp->stats.dequeue_err_count++;
				set_job_null_op(job, op);
			}
		}

		/* Submit jobs to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		int err = 0;

		nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs);
		err = imb_get_errno(mb_mgr);
		if (err)
			IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err));
#else
		nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr,
						   nb_submit_ops, jobs);
#endif
		for (i = 0; i < nb_jobs; i++) {
			job = jobs[i];

			op = post_process_mb_job(qp, job);
			if (op) {
				ops[processed_jobs++] = op;
				qp->stats.dequeued_count++;
			} else {
				qp->stats.dequeue_err_count++;
				break;
			}
		}

		qp->digest_idx = digest_idx;

		/* Nothing completed yet: force a flush so callers make progress */
		if (processed_jobs < 1) {
			nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);

			for (i = 0; i < nb_jobs; i++) {
				job = jobs[i];

				op = post_process_mb_job(qp, job);
				if (op) {
					ops[processed_jobs++] = op;
					qp->stats.dequeued_count++;
				} else {
					qp->stats.dequeue_err_count++;
					break;
				}
			}
		}
		nb_ops -= n;
	}

	return processed_jobs;
}
2266 #else
2267 
2268 /**
2269  * Process a completed IMB_JOB job and keep processing jobs until
2270  * get_completed_job return NULL
2271  *
2272  * @param qp		Queue Pair to process
2273  * @param mb_mgr	IMB_MGR to use
2274  * @param job		IMB_JOB job
2275  * @param ops		crypto ops to fill
2276  * @param nb_ops	number of crypto ops
2277  *
2278  * @return
2279  * - Number of processed jobs
2280  */
2281 static unsigned
2282 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
2283 		IMB_JOB *job, struct rte_crypto_op **ops,
2284 		uint16_t nb_ops)
2285 {
2286 	struct rte_crypto_op *op = NULL;
2287 	uint16_t processed_jobs = 0;
2288 
2289 	while (job != NULL) {
2290 		op = post_process_mb_job(qp, job);
2291 
2292 		if (op) {
2293 			ops[processed_jobs++] = op;
2294 			qp->stats.dequeued_count++;
2295 		} else {
2296 			qp->stats.dequeue_err_count++;
2297 			break;
2298 		}
2299 		if (processed_jobs == nb_ops)
2300 			break;
2301 
2302 		job = IMB_GET_COMPLETED_JOB(mb_mgr);
2303 	}
2304 
2305 	return processed_jobs;
2306 }
2307 
2308 static inline uint16_t
2309 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
2310 		struct rte_crypto_op **ops, uint16_t nb_ops)
2311 {
2312 	int processed_ops = 0;
2313 
2314 	/* Flush the remaining jobs */
2315 	IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
2316 
2317 	if (job)
2318 		processed_ops += handle_completed_jobs(qp, mb_mgr, job,
2319 				&ops[processed_ops], nb_ops - processed_ops);
2320 
2321 	return processed_ops;
2322 }
2323 
/**
 * Dequeue burst using the single-job intel-ipsec-mb API (library <= v1.2).
 *
 * Repeatedly takes one op from the ingress ring, sets up and submits a
 * job, and post-processes whatever completed job the submit returns;
 * flushes the manager when starved of free jobs or when nothing has
 * completed by the end.
 *
 * @param queue_pair	Queue pair (struct ipsec_mb_qp *)
 * @param ops		Output array of processed crypto ops
 * @param nb_ops	Maximum number of ops to dequeue
 *
 * @return Number of processed ops written to @ops
 */
static uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	IMB_MGR *mb_mgr = qp->mb_mgr;
	struct rte_crypto_op *op;
	IMB_JOB *job;
	int retval, processed_jobs = 0;
	/* NOTE(review): pid stays 0 on this path; presumably ignored by
	 * set_mb_job_params for this library version — confirm.
	 */
	pid_t pid = 0;

	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
		return 0;

	uint8_t digest_idx = qp->digest_idx;

	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp, mb_mgr,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(mb_mgr);
		}

		/*
		 * Get next operation to process from ingress queue.
		 * There is no need to return the job to the IMB_MGR
		 * if there are no more operations to process, since the IMB_MGR
		 * can use that pointer again in next get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
						&digest_idx);
		else
			retval = set_mb_job_params(job, qp, op,
				&digest_idx, mb_mgr, pid);

		/* On setup failure, run the job as a no-op instead */
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, mb_mgr,
					job, &ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	/* Nothing completed: force a flush so callers make progress */
	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp, mb_mgr,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
2403 #endif
2404 static inline int
2405 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
2406 {
2407 	/* no multi-seg support with current AESNI-MB PMD */
2408 	if (sgl->num != 1)
2409 		return -ENOTSUP;
2410 	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
2411 		return -EINVAL;
2412 	return 0;
2413 }
2414 
/** Submit a sync job to the MB manager (error-checked in debug builds only) */
static inline IMB_JOB *
submit_sync_job(IMB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}
2424 
2425 static inline uint32_t
2426 generate_sync_dgst(struct rte_crypto_sym_vec *vec,
2427 	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2428 {
2429 	uint32_t i, k;
2430 
2431 	for (i = 0, k = 0; i != vec->num; i++) {
2432 		if (vec->status[i] == 0) {
2433 			memcpy(vec->digest[i].va, dgst[i], len);
2434 			k++;
2435 		}
2436 	}
2437 
2438 	return k;
2439 }
2440 
2441 static inline uint32_t
2442 verify_sync_dgst(struct rte_crypto_sym_vec *vec,
2443 	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2444 {
2445 	uint32_t i, k;
2446 
2447 	for (i = 0, k = 0; i != vec->num; i++) {
2448 		if (vec->status[i] == 0) {
2449 			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
2450 				vec->status[i] = EBADMSG;
2451 			else
2452 				k++;
2453 		}
2454 	}
2455 
2456 	return k;
2457 }
2458 
/**
 * CPU-crypto (synchronous) bulk processing entry point.
 *
 * Submits one job per vector element to a per-thread MB manager, flushes
 * until every submitted job has completed, then generates or verifies the
 * digests for all successful elements.
 *
 * @return Number of successfully processed elements of @vec
 */
static uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
{
	int32_t ret;
	/* i: vec index, j: jobs submitted, k: jobs completed */
	uint32_t i, j, k, len;
	void *buf;
	IMB_JOB *job;
	IMB_MGR *mb_mgr;
	struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	/* VLA sized by caller-supplied vec->num — assumed bounded by the
	 * framework's burst limits; TODO confirm no stack-overflow risk.
	 */
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = get_per_thread_mb_mgr();
	if (unlikely(mb_mgr == NULL))
		return 0;

	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
		if (ret != 0) {
			vec->status[i] = ret;
			continue;
		}

		buf = vec->src_sgl[i].vec[0].base;
		len = vec->src_sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (job == NULL) {
			/* Manager starved: flush to free a job slot */
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);
		}

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
		job = submit_sync_job(mb_mgr);
		j++;

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);
	}

	/* flush remaining jobs */
	while (k != j)
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
	if (k != 0) {
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
	}

	return k;
}
2522 
/** Crypto device operations exported by the AESNI-MB PMD */
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	/* Synchronous CPU-crypto path (no queue pair involved) */
	.sym_cpu_process = aesni_mb_process_bulk,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};
2543 
2544 /**
2545  * Configure a aesni multi-buffer session from a security session
2546  * configuration
2547  */
2548 static int
2549 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
2550 		struct rte_security_session *sess)
2551 {
2552 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
2553 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2554 	int ret;
2555 
2556 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2557 			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2558 		IPSEC_MB_LOG(ERR, "Invalid security protocol");
2559 		return -EINVAL;
2560 	}
2561 
2562 	ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
2563 			sess_private_data);
2564 
2565 	if (ret != 0) {
2566 		IPSEC_MB_LOG(ERR, "Failed to configure session parameters");
2567 		return ret;
2568 	}
2569 
2570 	return ret;
2571 }
2572 
2573 /** Clear the memory of session so it does not leave key material behind */
2574 static int
2575 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
2576 		struct rte_security_session *sess)
2577 {
2578 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
2579 
2580 	if (sess_priv) {
2581 		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
2582 	}
2583 	return 0;
2584 }
2585 
/** Return the size of the private data needed by a security session */
static unsigned int
aesni_mb_pmd_sec_sess_get_size(void *device __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}
2591 
/** Get security capabilities for aesni multi-buffer */
static const struct rte_security_capability *
aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
{
	return aesni_mb_pmd_security_cap;
}
2598 
/** Security (rte_security) operations exported for DOCSIS support */
static struct rte_security_ops aesni_mb_pmd_sec_ops = {
		.session_create = aesni_mb_pmd_sec_sess_create,
		.session_update = NULL,
		.session_get_size = aesni_mb_pmd_sec_sess_get_size,
		.session_stats_get = NULL,
		.session_destroy = aesni_mb_pmd_sec_sess_destroy,
		.set_pkt_metadata = NULL,
		.capabilities_get = aesni_mb_pmd_sec_capa_get
};

/* Exported pointer used when wiring up the device's security context */
struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
2610 
2611 static int
2612 aesni_mb_configure_dev(struct rte_cryptodev *dev)
2613 {
2614 	struct rte_security_ctx *security_instance;
2615 
2616 	security_instance = rte_malloc("aesni_mb_sec",
2617 				sizeof(struct rte_security_ctx),
2618 				RTE_CACHE_LINE_SIZE);
2619 	if (security_instance != NULL) {
2620 		security_instance->device = (void *)dev;
2621 		security_instance->ops = rte_aesni_mb_pmd_sec_ops;
2622 		security_instance->sess_cnt = 0;
2623 		dev->security_ctx = security_instance;
2624 
2625 		return 0;
2626 	}
2627 
2628 	return -ENOMEM;
2629 }
2630 
/** Probe callback: create an AESNI-MB crypto device for the vdev */
static int
aesni_mb_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
}
2636 
/* Virtual device driver hooks for the AESNI-MB crypto PMD */
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = aesni_mb_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

/* Register the vdev driver, its alias, accepted devargs and driver id */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
	cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
			"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(
	aesni_mb_crypto_drv,
	cryptodev_aesni_mb_pmd_drv.driver,
	pmd_driver_id_aesni_mb);
2653 
/* Constructor function to register aesni-mb PMD */
RTE_INIT(ipsec_mb_register_aesni_mb)
{
	struct ipsec_mb_internals *aesni_mb_data =
		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];

	/* Capabilities, datapath entry points and feature flags */
	aesni_mb_data->caps = aesni_mb_capabilities;
	aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
	aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;

	/* Per-object private data sizes and ops tables */
	aesni_mb_data->internals_priv_size = 0;
	aesni_mb_data->ops = &aesni_mb_pmd_ops;
	aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
	aesni_mb_data->queue_pair_configure = NULL;
	aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops;
	aesni_mb_data->dev_config = aesni_mb_configure_dev;
	aesni_mb_data->session_configure = aesni_mb_session_configure;
	aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session);
}
2684