1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2022 Intel Corporation
3  */
4 
5 #define OPENSSL_API_COMPAT 0x10100000L
6 
7 #ifdef RTE_QAT_OPENSSL
8 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
9 #include <openssl/aes.h>	/* Needed to calculate pre-compute values */
10 #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
11 #include <openssl/evp.h>	/* Needed for bpi runt block processing */
12 #endif
13 
14 #ifndef RTE_QAT_OPENSSL
15 #ifndef RTE_ARCH_ARM
16 #include <intel-ipsec-mb.h>
17 #endif
18 #endif
19 
20 #include <rte_memcpy.h>
21 #include <rte_common.h>
22 #include <rte_spinlock.h>
23 #include <rte_byteorder.h>
24 #include <rte_log.h>
25 #include <rte_malloc.h>
26 #include <rte_crypto_sym.h>
27 #include <rte_security_driver.h>
28 #include <rte_ether.h>
29 
30 #include "qat_logs.h"
31 #include "qat_sym_session.h"
32 #include "qat_sym.h"
33 
34 #ifdef RTE_QAT_OPENSSL
35 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
36 #include <openssl/provider.h>
37 
38 static OSSL_PROVIDER *legacy_lib;
39 static OSSL_PROVIDER *default_lib;
40 
41 /* Some cryptographic algorithms such as MD5 and DES are now considered legacy
42  * and are not enabled by default in OpenSSL 3.0. Load the legacy provider, as
43  * MD5 and DES are needed in QAT pre-computes and secure session creation.
44  */
45 static int ossl_legacy_provider_load(void)
46 {
47 	/* Load multiple providers into the default (NULL) library context */
48 	legacy_lib = OSSL_PROVIDER_load(NULL, "legacy");
49 	if (legacy_lib == NULL)
50 		return -EINVAL;
51 
52 	default_lib = OSSL_PROVIDER_load(NULL, "default");
53 	if (default_lib == NULL) {
54 		OSSL_PROVIDER_unload(legacy_lib);
55 		return -EINVAL;
56 	}
57 
58 	return 0;
59 }
60 
61 static void ossl_legacy_provider_unload(void)
62 {
63 	OSSL_PROVIDER_unload(legacy_lib);
64 	OSSL_PROVIDER_unload(default_lib);
65 }
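
/*
 * Illustrative sketch only, not used by the driver: how the two provider
 * helpers above are meant to bracket a legacy-algorithm operation on an
 * OpenSSL 3.x build. Computing an MD5 digest via EVP is just an example
 * workload here.
 */
static int __rte_unused
ossl_legacy_provider_example(const uint8_t *in, size_t len,
		uint8_t out[MD5_DIGEST_LENGTH])
{
	unsigned int outl = 0;
	int ret = -EINVAL;

	if (ossl_legacy_provider_load() != 0)
		return -EINVAL;
	/* Run the digest while both providers are loaded */
	if (EVP_Digest(in, len, out, &outl, EVP_md5(), NULL) == 1)
		ret = 0;
	ossl_legacy_provider_unload();
	return ret;
}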
66 #endif
67 #endif
68 
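/*
 * Standard IEEE 802.3 (Ethernet FCS) CRC-32 parameters: generator
 * polynomial 0x04C11DB7 with an all-ones initial value and final XOR.
 * The _BE variants hold the same constants in big-endian byte order.
 */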
69 #define ETH_CRC32_POLYNOMIAL    0x04c11db7
70 #define ETH_CRC32_INIT_VAL      0xffffffff
71 #define ETH_CRC32_XOR_OUT       0xffffffff
72 #define ETH_CRC32_POLYNOMIAL_BE RTE_BE32(ETH_CRC32_POLYNOMIAL)
73 #define ETH_CRC32_INIT_VAL_BE   RTE_BE32(ETH_CRC32_INIT_VAL)
74 #define ETH_CRC32_XOR_OUT_BE    RTE_BE32(ETH_CRC32_XOR_OUT)
75 
76 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS std 180-2 */
77 static const uint8_t sha1InitialState[] = {
78 	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
79 	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
80 
81 /* SHA 224 - 32 bytes - Initialiser state can be found in FIPS std 180-2 */
82 static const uint8_t sha224InitialState[] = {
83 	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
84 	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
85 	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
86 
87 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS std 180-2 */
88 static const uint8_t sha256InitialState[] = {
89 	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
90 	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
91 	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
92 
93 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */
94 static const uint8_t sha384InitialState[] = {
95 	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
96 	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
97 	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
98 	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
99 	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
100 	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
101 
102 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */
103 static const uint8_t sha512InitialState[] = {
104 	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
105 	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
106 	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
107 	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
108 	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
109 	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
110 
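/* SM3 - 32 bytes - Initialiser state specified in GB/T 32905-2016 */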
111 static uint8_t sm3InitialState[] = {
112 	0x73, 0x80, 0x16, 0x6f, 0x49, 0x14, 0xb2, 0xb9,
113 	0x17, 0x24, 0x42, 0xd7, 0xda, 0x8a, 0x06, 0x00,
114 	0xa9, 0x6f, 0x30, 0xbc, 0x16, 0x31, 0x38, 0xaa,
115 	0xe3, 0x8d, 0xee, 0x4d, 0xb0, 0xfb, 0x0e, 0x4e
116 };
117 
118 static int
119 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
120 						const uint8_t *enckey,
121 						uint32_t enckeylen);
122 
123 static int
124 qat_sym_cd_crc_set(struct qat_sym_session *cdesc,
125 					enum qat_device_gen qat_dev_gen);
126 
127 static int
128 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
129 	const uint8_t *authkey,
130 	uint32_t authkeylen,
131 	uint32_t aad_length,
132 	uint32_t digestsize,
133 	unsigned int operation,
134 	enum qat_device_gen qat_dev_gen);
135 
136 static void
137 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
138 
139 static void
140 qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
141 
142 /* Req/cd init functions */
143 
144 static void
145 qat_sym_session_finalize(struct qat_sym_session *session)
146 {
147 	qat_sym_session_init_common_hdr(session);
148 }
149 
150 #ifdef RTE_QAT_OPENSSL
151 /** Frees a context previously created
152  *  Depends on openssl libcrypto
153  */
154 static void
155 bpi_cipher_ctx_free(void *bpi_ctx)
156 {
157 	if (bpi_ctx != NULL)
158 		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
159 }
160 
161 /** Creates either an AES or a DES context in ECB mode
162  *  Depends on openssl libcrypto
163  */
164 static int
165 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
166 		enum rte_crypto_cipher_operation direction __rte_unused,
167 		const uint8_t *key, uint16_t key_length, void **ctx)
168 {
169 	const EVP_CIPHER *algo = NULL;
170 	int ret;
171 	*ctx = EVP_CIPHER_CTX_new();
172 
173 	if (*ctx == NULL) {
174 		ret = -ENOMEM;
175 		goto ctx_init_err;
176 	}
177 
178 	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
179 		algo = EVP_des_ecb();
180 	else
181 		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
182 			algo = EVP_aes_128_ecb();
183 		else
184 			algo = EVP_aes_256_ecb();
185 
186 	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
187 	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
188 		ret = -EINVAL;
189 		goto ctx_init_err;
190 	}
191 
192 	return 0;
193 
194 ctx_init_err:
195 	if (*ctx != NULL) {
196 		EVP_CIPHER_CTX_free(*ctx);
197 		*ctx = NULL;
198 	}
199 	return ret;
200 }
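
/*
 * Illustrative usage sketch only, not called by the driver: pairing
 * bpi_cipher_ctx_init() with bpi_cipher_ctx_free() for an AES-128 DOCSIS
 * BPI key. The all-zero key is a placeholder.
 */
static int __rte_unused
bpi_cipher_ctx_example(void)
{
	static const uint8_t key[ICP_QAT_HW_AES_128_KEY_SZ];
	void *ctx = NULL;
	int ret;

	ret = bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, sizeof(key), &ctx);
	if (ret != 0)
		return ret;
	bpi_cipher_ctx_free(ctx);
	return 0;
}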
201 #endif
202 
203 #ifndef RTE_QAT_OPENSSL
204 /** Creates either an AES or a DES context in ECB mode
205  */
206 static int
207 ipsec_mb_ctx_init(const uint8_t *key, uint16_t key_length,
208 		enum rte_crypto_cipher_algorithm cryptodev_algo,
209 		uint64_t *expkey, uint32_t *dust, IMB_MGR **m)
210 {
211 	int ret;
212 
213 	*m = alloc_mb_mgr(0);
214 	if (*m == NULL)
215 		return -ENOMEM;
216 
217 	init_mb_mgr_auto(*m, NULL);
218 
219 	if (cryptodev_algo == RTE_CRYPTO_CIPHER_AES_DOCSISBPI) {
220 		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
221 			IMB_AES_KEYEXP_128(*m, key, expkey, dust);
222 		else if (key_length == ICP_QAT_HW_AES_256_KEY_SZ)
223 			IMB_AES_KEYEXP_256(*m, key, expkey, dust);
224 		else {
225 			ret = -EFAULT;
226 			goto error_out;
227 		}
228 	} else if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) {
229 		if (key_length == ICP_QAT_HW_DES_KEY_SZ)
230 			IMB_DES_KEYSCHED(*m, (uint64_t *)expkey, key);
231 		else {
232 			ret = -EFAULT;
233 			goto error_out;
234 		}
235 	}
236 	return 0;
237 
238 error_out:
239 	if (*m) {
240 		free_mb_mgr(*m);
241 		*m = NULL;
242 	}
243 	return ret;
244 }
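
/*
 * Illustrative usage sketch only, not called by the driver: expanding an
 * AES-128 DOCSIS BPI key with ipsec_mb_ctx_init() and releasing the
 * manager afterwards. The all-zero key is a placeholder; the buffer sizes
 * (15 round keys of 16 bytes) are large enough for AES-256.
 */
static int __rte_unused
ipsec_mb_ctx_example(void)
{
	static const uint8_t key[ICP_QAT_HW_AES_128_KEY_SZ];
	DECLARE_ALIGNED(uint64_t expkey[2 * 15], 16);
	DECLARE_ALIGNED(uint32_t dust[4 * 15], 16);
	IMB_MGR *m = NULL;
	int ret;

	ret = ipsec_mb_ctx_init(key, sizeof(key),
			RTE_CRYPTO_CIPHER_AES_DOCSISBPI, expkey, dust, &m);
	if (ret != 0)
		return ret;
	free_mb_mgr(m);
	return 0;
}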
245 #endif
246 
247 static int
248 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
249 		struct qat_cryptodev_private *internals)
250 {
251 	int i = 0;
252 	const struct rte_cryptodev_capabilities *capability;
253 
254 	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
255 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
256 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
257 			continue;
258 
259 		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
260 			continue;
261 
262 		if (capability->sym.cipher.algo == algo)
263 			return 1;
264 	}
265 	return 0;
266 }
267 
268 static int
269 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
270 		struct qat_cryptodev_private *internals)
271 {
272 	int i = 0;
273 	const struct rte_cryptodev_capabilities *capability;
274 
275 	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
276 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
277 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
278 			continue;
279 
280 		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
281 			continue;
282 
283 		if (capability->sym.auth.algo == algo)
284 			return 1;
285 	}
286 	return 0;
287 }
288 
289 void
290 qat_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
291 		struct rte_cryptodev_sym_session *sess)
292 {
293 	struct qat_sym_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
294 
295 #ifdef RTE_QAT_OPENSSL
296 	if (s->bpi_ctx)
297 		bpi_cipher_ctx_free(s->bpi_ctx);
298 #else
299 	if (s->mb_mgr)
300 		free_mb_mgr(s->mb_mgr);
301 #endif
302 }
303 
304 static int
305 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
306 {
307 	/* Cipher Only */
308 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
309 		return ICP_QAT_FW_LA_CMD_CIPHER;
310 
311 	/* Authentication Only */
312 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
313 		return ICP_QAT_FW_LA_CMD_AUTH;
314 
315 	/* AEAD */
316 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
317 		/* AES-GCM and AES-CCM work in different orders: GCM first
318 		 * encrypts and then generates the hash, whereas AES-CCM first
319 		 * generates the hash and then encrypts. A similar relation
320 		 * applies to decryption.
321 		 */
322 		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
323 			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
324 				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
325 			else
326 				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
327 		else
328 			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
329 				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
330 			else
331 				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
332 	}
333 
334 	if (xform->next == NULL)
335 		return -1;
336 
337 	/* Cipher then Authenticate */
338 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
339 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
340 		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
341 
342 	/* Authenticate then Cipher */
343 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
344 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
345 		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
346 
347 	return -1;
348 }
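
/*
 * Illustrative sketch only: how an AEAD xform maps to a firmware command
 * id via qat_get_cmd_id(). The minimal xform below is a placeholder.
 */
static int __rte_unused
qat_cmd_id_example(void)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		},
	};

	/* GCM encrypt ciphers first, then hashes */
	return qat_get_cmd_id(&xform);	/* ICP_QAT_FW_LA_CMD_CIPHER_HASH */
}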
349 
350 static struct rte_crypto_auth_xform *
351 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
352 {
353 	do {
354 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
355 			return &xform->auth;
356 
357 		xform = xform->next;
358 	} while (xform);
359 
360 	return NULL;
361 }
362 
363 static struct rte_crypto_cipher_xform *
364 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
365 {
366 	do {
367 		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
368 			return &xform->cipher;
369 
370 		xform = xform->next;
371 	} while (xform);
372 
373 	return NULL;
374 }
375 
376 int
377 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
378 		struct rte_crypto_sym_xform *xform,
379 		struct qat_sym_session *session)
380 {
381 	struct qat_cryptodev_private *internals = dev->data->dev_private;
382 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
383 	enum qat_device_gen qat_dev_gen =
384 				internals->qat_dev->qat_dev_gen;
385 	int ret, is_wireless = 0;
386 	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
387 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
388 
389 	/* Get cipher xform from crypto xform chain */
390 	cipher_xform = qat_get_cipher_xform(xform);
391 
392 	session->cipher_iv.offset = cipher_xform->iv.offset;
393 	session->cipher_iv.length = cipher_xform->iv.length;
394 
395 	switch (cipher_xform->algo) {
396 	case RTE_CRYPTO_CIPHER_AES_CBC:
397 		if (qat_sym_validate_aes_key(cipher_xform->key.length,
398 				&session->qat_cipher_alg) != 0) {
399 			QAT_LOG(ERR, "Invalid AES cipher key size");
400 			ret = -EINVAL;
401 			goto error_out;
402 		}
403 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
404 		break;
405 	case RTE_CRYPTO_CIPHER_AES_CTR:
406 		if (qat_sym_validate_aes_key(cipher_xform->key.length,
407 				&session->qat_cipher_alg) != 0) {
408 			QAT_LOG(ERR, "Invalid AES cipher key size");
409 			ret = -EINVAL;
410 			goto error_out;
411 		}
412 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
413 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
414 				qat_dev_gen == QAT_VQAT)
415 			session->is_ucs = 1;
416 		break;
417 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
418 		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
419 					&session->qat_cipher_alg) != 0) {
420 			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
421 			ret = -EINVAL;
422 			goto error_out;
423 		}
424 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
425 		if (internals->qat_dev->options.has_wireless_slice)
426 			is_wireless = 1;
427 		break;
428 	case RTE_CRYPTO_CIPHER_NULL:
429 		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
430 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
431 		break;
432 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
433 		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
434 					&session->qat_cipher_alg) != 0) {
435 			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
436 			ret = -EINVAL;
437 			goto error_out;
438 		}
439 		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
440 		break;
441 	case RTE_CRYPTO_CIPHER_3DES_CBC:
442 		if (qat_sym_validate_3des_key(cipher_xform->key.length,
443 				&session->qat_cipher_alg) != 0) {
444 			QAT_LOG(ERR, "Invalid 3DES cipher key size");
445 			ret = -EINVAL;
446 			goto error_out;
447 		}
448 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
449 		break;
450 	case RTE_CRYPTO_CIPHER_DES_CBC:
451 		if (qat_sym_validate_des_key(cipher_xform->key.length,
452 				&session->qat_cipher_alg) != 0) {
453 			QAT_LOG(ERR, "Invalid DES cipher key size");
454 			ret = -EINVAL;
455 			goto error_out;
456 		}
457 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
458 		break;
459 	case RTE_CRYPTO_CIPHER_3DES_CTR:
460 		if (qat_sym_validate_3des_key(cipher_xform->key.length,
461 				&session->qat_cipher_alg) != 0) {
462 			QAT_LOG(ERR, "Invalid 3DES cipher key size");
463 			ret = -EINVAL;
464 			goto error_out;
465 		}
466 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
467 		break;
468 	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
469 #ifdef RTE_QAT_OPENSSL
470 		ret = bpi_cipher_ctx_init(
471 					cipher_xform->algo,
472 					cipher_xform->op,
473 					cipher_xform->key.data,
474 					cipher_xform->key.length,
475 					&session->bpi_ctx);
476 #else
477 		session->docsis_key_len = cipher_xform->key.length;
478 		ret = ipsec_mb_ctx_init(
479 					cipher_xform->key.data,
480 					cipher_xform->key.length,
481 					cipher_xform->algo,
482 					session->expkey,
483 					session->dust,
484 					&session->mb_mgr);
485 #endif
486 		if (ret != 0) {
487 			QAT_LOG(ERR, "failed to create DES BPI ctx");
488 			goto error_out;
489 		}
490 		if (qat_sym_validate_des_key(cipher_xform->key.length,
491 				&session->qat_cipher_alg) != 0) {
492 			QAT_LOG(ERR, "Invalid DES cipher key size");
493 			ret = -EINVAL;
494 			goto error_out;
495 		}
496 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
497 		break;
498 	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
499 #ifdef RTE_QAT_OPENSSL
500 		ret = bpi_cipher_ctx_init(
501 					cipher_xform->algo,
502 					cipher_xform->op,
503 					cipher_xform->key.data,
504 					cipher_xform->key.length,
505 					&session->bpi_ctx);
506 #else
507 		session->docsis_key_len = cipher_xform->key.length;
508 		ret = ipsec_mb_ctx_init(
509 					cipher_xform->key.data,
510 					cipher_xform->key.length,
511 					cipher_xform->algo,
512 					session->expkey,
513 					session->dust,
514 					&session->mb_mgr);
515 #endif
516 		if (ret != 0) {
517 			QAT_LOG(ERR, "failed to create AES BPI ctx");
518 			goto error_out;
519 		}
520 		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
521 				&session->qat_cipher_alg) != 0) {
522 			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
523 			ret = -EINVAL;
524 			goto error_out;
525 		}
526 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
527 		break;
528 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
529 		if (!qat_is_cipher_alg_supported(
530 			cipher_xform->algo, internals)) {
531 			QAT_LOG(ERR, "%s not supported on this device",
532 				rte_cryptodev_get_cipher_algo_string(
533 					cipher_xform->algo));
534 			ret = -ENOTSUP;
535 			goto error_out;
536 		}
537 		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
538 				&session->qat_cipher_alg) != 0) {
539 			QAT_LOG(ERR, "Invalid ZUC cipher key size");
540 			ret = -EINVAL;
541 			goto error_out;
542 		}
543 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
544 		if (cipher_xform->key.length == ICP_QAT_HW_ZUC_256_KEY_SZ)
545 			session->is_zuc256 = 1;
546 		if (internals->qat_dev->options.has_wireless_slice)
547 			is_wireless = 1;
548 		break;
549 	case RTE_CRYPTO_CIPHER_AES_XTS:
550 		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
551 			QAT_LOG(ERR, "AES-XTS-192 not supported");
552 			ret = -EINVAL;
553 			goto error_out;
554 		}
555 		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
556 				&session->qat_cipher_alg) != 0) {
557 			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
558 			ret = -EINVAL;
559 			goto error_out;
560 		}
561 		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
562 		break;
563 	case RTE_CRYPTO_CIPHER_SM4_ECB:
564 		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_SM4;
565 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
566 		break;
567 	case RTE_CRYPTO_CIPHER_SM4_CBC:
568 		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_SM4;
569 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
570 		break;
571 	case RTE_CRYPTO_CIPHER_SM4_CTR:
572 		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_SM4;
573 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
574 		break;
575 	case RTE_CRYPTO_CIPHER_3DES_ECB:
576 	case RTE_CRYPTO_CIPHER_AES_ECB:
577 	case RTE_CRYPTO_CIPHER_AES_F8:
578 	case RTE_CRYPTO_CIPHER_ARC4:
579 		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
580 				cipher_xform->algo);
581 		ret = -ENOTSUP;
582 		goto error_out;
583 	default:
584 		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u",
585 				cipher_xform->algo);
586 		ret = -EINVAL;
587 		goto error_out;
588 	}
589 
590 	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
591 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
592 	else
593 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
594 
595 	if (qat_sym_cd_cipher_set(session,
596 						cipher_xform->key.data,
597 						cipher_xform->key.length)) {
598 		ret = -EINVAL;
599 		goto error_out;
600 	}
601 
602 	if (is_wireless) {
603 		/* Set the Use Extended Protocol Flags bit in LW 1 */
604 		ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
605 				header->ext_flags,
606 				QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
607 		/* Force usage of Wireless Cipher slice */
608 		ICP_QAT_FW_USE_WCP_SLICE_SET(header->ext_flags,
609 				QAT_LA_USE_WCP_SLICE);
610 		session->is_wireless = 1;
611 	}
612 
613 	return 0;
614 
615 error_out:
616 #ifdef RTE_QAT_OPENSSL
617 	if (session->bpi_ctx) {
618 		bpi_cipher_ctx_free(session->bpi_ctx);
619 		session->bpi_ctx = NULL;
620 	}
621 #else
622 	if (session->mb_mgr) {
623 		free_mb_mgr(session->mb_mgr);
624 		session->mb_mgr = NULL;
625 	}
626 
627 #endif
628 	return ret;
629 }
630 
631 int
632 qat_sym_session_configure(struct rte_cryptodev *dev,
633 		struct rte_crypto_sym_xform *xform,
634 		struct rte_cryptodev_sym_session *sess)
635 {
636 	int ret;
637 
638 #ifdef RTE_QAT_OPENSSL
639 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
640 	ossl_legacy_provider_load();
641 #endif
642 #endif
643 	ret = qat_sym_session_set_parameters(dev, xform,
644 			CRYPTODEV_GET_SYM_SESS_PRIV(sess),
645 			CRYPTODEV_GET_SYM_SESS_PRIV_IOVA(sess));
646 	if (ret != 0) {
647 		QAT_LOG(ERR,
648 		    "Crypto QAT PMD: failed to configure session parameters");
649 
650 		return ret;
651 	}
652 
653 #ifdef RTE_QAT_OPENSSL
654 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
655 	ossl_legacy_provider_unload();
656 #endif
657 #endif
658 	return 0;
659 }
660 
661 int
662 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
663 		struct rte_crypto_sym_xform *xform, void *session_private,
664 		rte_iova_t session_paddr)
665 {
666 	struct qat_sym_session *session = session_private;
667 	struct qat_cryptodev_private *internals = dev->data->dev_private;
668 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
669 	int ret;
670 	int qat_cmd_id;
671 
672 	/* Verify the session physical address is known */
673 	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
674 		QAT_LOG(ERR,
675 			"Session physical address unknown. Bad memory pool.");
676 		return -EINVAL;
677 	}
678 
679 	memset(session, 0, sizeof(*session));
680 	/* Set context descriptor physical address */
681 	session->cd_paddr = session_paddr +
682 			offsetof(struct qat_sym_session, cd);
683 	session->prefix_paddr = session_paddr +
684 			offsetof(struct qat_sym_session, prefix_state);
685 
686 	session->dev_id = internals->dev_id;
687 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
688 	session->is_ucs = 0;
689 
690 	/* Get requested QAT command id */
691 	qat_cmd_id = qat_get_cmd_id(xform);
692 	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
693 		QAT_LOG(ERR, "Unsupported xform chain requested");
694 		return -ENOTSUP;
695 	}
696 	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
697 	switch (session->qat_cmd) {
698 	case ICP_QAT_FW_LA_CMD_CIPHER:
699 		ret = qat_sym_session_configure_cipher(dev, xform, session);
700 		if (ret < 0)
701 			return ret;
702 		break;
703 	case ICP_QAT_FW_LA_CMD_AUTH:
704 		ret = qat_sym_session_configure_auth(dev, xform, session);
705 		if (ret < 0)
706 			return ret;
707 		session->is_single_pass_gmac =
708 			       qat_dev_gen == QAT_GEN3 &&
709 			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
710 			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
711 		break;
712 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
713 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
714 			ret = qat_sym_session_configure_aead(dev, xform,
715 					session);
716 			if (ret < 0)
717 				return ret;
718 		} else {
719 			ret = qat_sym_session_configure_cipher(dev,
720 					xform, session);
721 			if (ret < 0)
722 				return ret;
723 			ret = qat_sym_session_configure_auth(dev,
724 					xform, session);
725 			if (ret < 0)
726 				return ret;
727 		}
728 		break;
729 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
730 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
731 			ret = qat_sym_session_configure_aead(dev, xform,
732 					session);
733 			if (ret < 0)
734 				return ret;
735 		} else {
736 			ret = qat_sym_session_configure_auth(dev,
737 					xform, session);
738 			if (ret < 0)
739 				return ret;
740 			ret = qat_sym_session_configure_cipher(dev,
741 					xform, session);
742 			if (ret < 0)
743 				return ret;
744 		}
745 		break;
746 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
747 	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
748 	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
749 	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
750 	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
751 	case ICP_QAT_FW_LA_CMD_MGF1:
752 	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
753 	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
754 	case ICP_QAT_FW_LA_CMD_CIPHER_CRC:
755 	case ICP_QAT_FW_LA_CMD_DELIMITER:
756 		QAT_LOG(ERR, "Unsupported Service %u",
757 				session->qat_cmd);
758 		return -ENOTSUP;
759 	default:
760 		QAT_LOG(ERR, "Unsupported Service %u",
761 				session->qat_cmd);
762 		return -ENOTSUP;
763 	}
764 
765 	if (qat_dev_gen == QAT_GEN_LCE) {
766 		qat_sym_session_init_gen_lce_hdr(session);
767 		return 0;
768 	}
769 
770 	qat_sym_session_finalize(session);
771 
772 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
773 			(void *)session);
774 }
775 
776 int
777 qat_cipher_crc_cap_msg_sess_prepare(struct qat_sym_session *session,
778 					rte_iova_t session_paddr,
779 					const uint8_t *cipherkey,
780 					uint32_t cipherkeylen,
781 					enum qat_device_gen qat_dev_gen)
782 {
783 	int ret;
784 
785 	/* Set content descriptor physical address */
786 	session->cd_paddr = session_paddr +
787 				offsetof(struct qat_sym_session, cd);
788 
789 	/* Set up some pre-requisite variables */
790 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
791 	session->is_ucs = 0;
792 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_CRC;
793 	session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
794 	session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
795 	session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
796 	session->is_auth = 1;
797 	session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
798 	session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
799 	session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
800 	session->digest_length = RTE_ETHER_CRC_LEN;
801 
802 	ret = qat_sym_cd_cipher_set(session, cipherkey, cipherkeylen);
803 	if (ret < 0)
804 		return -EINVAL;
805 
806 	ret = qat_sym_cd_crc_set(session, qat_dev_gen);
807 	if (ret < 0)
808 		return -EINVAL;
809 
810 	qat_sym_session_finalize(session);
811 
812 	return 0;
813 }
814 
815 static int
816 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
817 		const struct rte_crypto_aead_xform *aead_xform)
818 {
819 	session->is_single_pass = 1;
820 	session->is_auth = 1;
821 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
822 	/* Chacha-Poly is a special case that uses QAT CTR mode */
823 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
824 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
825 	else
826 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
827 
828 	session->cipher_iv.offset = aead_xform->iv.offset;
829 	session->cipher_iv.length = aead_xform->iv.length;
830 	session->aad_len = aead_xform->aad_length;
831 	session->digest_length = aead_xform->digest_length;
832 
833 	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
834 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
835 		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
836 	} else {
837 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
838 		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
839 	}
840 
841 	return 0;
842 }
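
/*
 * Summary of the single-pass mode selection above (illustrative):
 *   AES-GCM           -> ICP_QAT_HW_CIPHER_AEAD_MODE
 *   CHACHA20-POLY1305 -> ICP_QAT_HW_CIPHER_CTR_MODE
 */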
843 
844 int
845 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
846 				struct rte_crypto_sym_xform *xform,
847 				struct qat_sym_session *session)
848 {
849 	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
850 	struct qat_cryptodev_private *internals = dev->data->dev_private;
851 	const uint8_t *key_data = auth_xform->key.data;
852 	uint16_t key_length = auth_xform->key.length;
853 	enum qat_device_gen qat_dev_gen =
854 			internals->qat_dev->qat_dev_gen;
855 	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
856 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
857 	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
858 			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
859 			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
860 	uint8_t hash_flag = 0;
861 	int is_wireless = 0;
862 
863 	session->aes_cmac = 0;
864 	session->auth_key_length = auth_xform->key.length;
865 	session->auth_iv.offset = auth_xform->iv.offset;
866 	session->auth_iv.length = auth_xform->iv.length;
867 	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
868 	session->is_auth = 1;
869 	session->digest_length = auth_xform->digest_length;
870 
871 	switch (auth_xform->algo) {
872 	case RTE_CRYPTO_AUTH_SM3:
873 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SM3;
874 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
875 		break;
876 	case RTE_CRYPTO_AUTH_SM3_HMAC:
877 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SM3;
878 		session->auth_mode = ICP_QAT_HW_AUTH_MODE2;
879 		break;
880 	case RTE_CRYPTO_AUTH_SHA1:
881 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
882 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
883 		break;
884 	case RTE_CRYPTO_AUTH_SHA224:
885 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
886 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
887 		break;
888 	case RTE_CRYPTO_AUTH_SHA256:
889 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
890 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
891 		break;
892 	case RTE_CRYPTO_AUTH_SHA384:
893 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
894 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
895 		break;
896 	case RTE_CRYPTO_AUTH_SHA512:
897 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
898 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
899 		break;
900 	case RTE_CRYPTO_AUTH_SHA3_224:
901 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA3_224;
902 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
903 		break;
904 	case RTE_CRYPTO_AUTH_SHA3_256:
905 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA3_256;
906 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
907 		break;
908 	case RTE_CRYPTO_AUTH_SHA3_384:
909 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA3_384;
910 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
911 		break;
912 	case RTE_CRYPTO_AUTH_SHA3_512:
913 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA3_512;
914 		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
915 		break;
916 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
917 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
918 		break;
919 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
920 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
921 		break;
922 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
923 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
924 		break;
925 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
926 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
927 		break;
928 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
929 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
930 		break;
931 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
932 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
933 		break;
934 	case RTE_CRYPTO_AUTH_AES_CMAC:
935 		session->aes_cmac = 1;
936 		if (!internals->qat_dev->options.has_wireless_slice) {
937 			session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
938 			break;
939 		}
940 		is_wireless = 1;
941 		session->is_wireless = 1;
942 		switch (key_length) {
943 		case ICP_QAT_HW_AES_128_KEY_SZ:
944 			session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_128_CMAC;
945 			break;
946 		default:
947 			QAT_LOG(ERR, "Invalid key length: %d", key_length);
948 			return -ENOTSUP;
949 		}
950 		break;
951 	case RTE_CRYPTO_AUTH_AES_GMAC:
952 		if (qat_sym_validate_aes_key(auth_xform->key.length,
953 				&session->qat_cipher_alg) != 0) {
954 			QAT_LOG(ERR, "Invalid AES key size");
955 			return -EINVAL;
956 		}
957 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
958 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
959 		if (session->auth_iv.length == 0)
960 			session->auth_iv.length = AES_GCM_J0_LEN;
961 		else
962 			session->is_iv12B = 1;
963 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
964 				qat_dev_gen == QAT_VQAT) {
965 			session->is_cnt_zero = 1;
966 			session->is_ucs = 1;
967 		}
968 		break;
969 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
970 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
971 		if (internals->qat_dev->options.has_wireless_slice) {
972 			is_wireless = 1;
973 			session->is_wireless = 1;
974 			hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS;
975 		}
976 		break;
977 	case RTE_CRYPTO_AUTH_MD5_HMAC:
978 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
979 		break;
980 	case RTE_CRYPTO_AUTH_NULL:
981 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
982 		break;
983 	case RTE_CRYPTO_AUTH_KASUMI_F9:
984 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
985 		break;
986 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
987 		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
988 			QAT_LOG(ERR, "%s not supported on this device",
989 				rte_cryptodev_get_auth_algo_string(auth_xform->algo));
990 			return -ENOTSUP;
991 		}
992 		if (key_length == ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ)
993 			session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
994 		else if (key_length == ICP_QAT_HW_ZUC_256_KEY_SZ) {
995 			switch (auth_xform->digest_length) {
996 			case 4:
997 				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32;
998 				break;
999 			case 8:
1000 				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64;
1001 				break;
1002 			case 16:
1003 				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128;
1004 				break;
1005 			default:
1006 				QAT_LOG(ERR, "Invalid digest length: %d",
1007 						auth_xform->digest_length);
1008 				return -ENOTSUP;
1009 			}
1010 			session->is_zuc256 = 1;
1011 		} else {
1012 			QAT_LOG(ERR, "Invalid key length: %d", key_length);
1013 			return -ENOTSUP;
1014 		}
1015 		if (internals->qat_dev->options.has_wireless_slice) {
1016 			is_wireless = 1;
1017 			session->is_wireless = 1;
1018 			hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS;
1019 		} else
1020 			session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
1021 		break;
1022 	case RTE_CRYPTO_AUTH_MD5:
1023 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1024 		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
1025 				auth_xform->algo);
1026 		return -ENOTSUP;
1027 	default:
1028 		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
1029 				auth_xform->algo);
1030 		return -EINVAL;
1031 	}
1032 
1033 	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
1034 		session->is_gmac = 1;
1035 		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
1036 			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
1037 			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
1038 			/*
1039 			 * The cipher descriptor content must be created
1040 			 * first, then the authentication content
1041 			 */
1042 			if (qat_sym_cd_cipher_set(session,
1043 						auth_xform->key.data,
1044 						auth_xform->key.length))
1045 				return -EINVAL;
1046 
1047 			if (qat_sym_cd_auth_set(session,
1048 						key_data,
1049 						key_length,
1050 						0,
1051 						auth_xform->digest_length,
1052 						auth_xform->op,
1053 						qat_dev_gen))
1054 				return -EINVAL;
1055 		} else {
1056 			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
1057 			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
1058 			/*
1059 			 * The authentication descriptor content must be
1060 			 * created first, then the cipher content
1061 			 */
1062 
1063 			if (qat_sym_cd_auth_set(session,
1064 					key_data,
1065 					key_length,
1066 					0,
1067 					auth_xform->digest_length,
1068 					auth_xform->op,
1069 					qat_dev_gen))
1070 				return -EINVAL;
1071 
1072 			if (qat_sym_cd_cipher_set(session,
1073 						auth_xform->key.data,
1074 						auth_xform->key.length))
1075 				return -EINVAL;
1076 		}
1077 	} else {
1078 		if (qat_sym_cd_auth_set(session,
1079 				key_data,
1080 				key_length,
1081 				0,
1082 				auth_xform->digest_length,
1083 				auth_xform->op,
1084 				qat_dev_gen))
1085 			return -EINVAL;
1086 	}
1087 
1088 	if (is_wireless) {
1089 		if (!session->aes_cmac) {
1090 			/* Set the Use Extended Protocol Flags bit in LW 1 */
1091 			ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
1092 					header->ext_flags,
1093 					QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
1094 
1095 			/* Set Hash Flags in LW 28 */
1096 			cd_ctrl->hash_flags |= hash_flag;
1097 		}
1098 		/* Force usage of Wireless Auth slice */
1099 		ICP_QAT_FW_USE_WAT_SLICE_SET(header->ext_flags,
1100 				QAT_LA_USE_WAT_SLICE);
1101 	}
1102 
1103 	return 0;
1104 }
1105 
1106 int
1107 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
1108 				struct rte_crypto_sym_xform *xform,
1109 				struct qat_sym_session *session)
1110 {
1111 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1112 	enum rte_crypto_auth_operation crypto_operation;
1113 	struct qat_cryptodev_private *internals =
1114 			dev->data->dev_private;
1115 	enum qat_device_gen qat_dev_gen =
1116 			internals->qat_dev->qat_dev_gen;
1117 	if (qat_dev_gen == QAT_GEN_LCE) {
1118 		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1119 		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
1120 
1121 		key_buff->keybuff = session->key_paddr;
1122 	}
1123 
1124 	/*
1125 	 * Store AEAD IV parameters as cipher IV,
1126 	 * to avoid unnecessary memory usage
1127 	 */
1128 	session->cipher_iv.offset = xform->aead.iv.offset;
1129 	session->cipher_iv.length = xform->aead.iv.length;
1130 
1131 	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
1132 	session->is_auth = 1;
1133 	session->digest_length = aead_xform->digest_length;
1134 
1135 	session->is_single_pass = 0;
1136 	switch (aead_xform->algo) {
1137 	case RTE_CRYPTO_AEAD_AES_GCM:
1138 		if (qat_sym_validate_aes_key(aead_xform->key.length,
1139 				&session->qat_cipher_alg) != 0) {
1140 			QAT_LOG(ERR, "Invalid AES key size");
1141 			return -EINVAL;
1142 		}
1143 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
1144 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
1145 
1146 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
1147 				qat_dev_gen == QAT_VQAT)
1148 			session->is_ucs = 1;
1149 		if (session->cipher_iv.length == 0) {
1150 			session->cipher_iv.length = AES_GCM_J0_LEN;
1151 			break;
1152 		}
1153 		session->is_iv12B = 1;
1154 		if (qat_dev_gen < QAT_GEN3)
1155 			break;
1156 		qat_sym_session_handle_single_pass(session,
1157 				aead_xform);
1158 		break;
1159 	case RTE_CRYPTO_AEAD_AES_CCM:
1160 		if (qat_sym_validate_aes_key(aead_xform->key.length,
1161 				&session->qat_cipher_alg) != 0) {
1162 			QAT_LOG(ERR, "Invalid AES key size");
1163 			return -EINVAL;
1164 		}
1165 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
1166 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
1167 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
1168 				qat_dev_gen == QAT_VQAT)
1169 			session->is_ucs = 1;
1170 		break;
1171 	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1172 		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
1173 			return -EINVAL;
1174 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
1175 				qat_dev_gen == QAT_VQAT)
1176 			session->is_ucs = 1;
1177 		session->qat_cipher_alg =
1178 				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
1179 		qat_sym_session_handle_single_pass(session,
1180 						aead_xform);
1181 		break;
1182 	default:
1183 		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u",
1184 				aead_xform->algo);
1185 		return -EINVAL;
1186 	}
1187 
1188 	if (session->is_single_pass) {
1189 		if (qat_dev_gen != QAT_GEN_LCE) {
1190 			if (qat_sym_cd_cipher_set(session,
1191 					aead_xform->key.data, aead_xform->key.length))
1192 				return -EINVAL;
1193 		} else {
1194 			session->auth_key_length = aead_xform->key.length;
1195 			memcpy(session->key_array, aead_xform->key.data, aead_xform->key.length);
1196 		}
1197 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
1198 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
1199 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
1200 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
1201 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
1202 		/*
1203 		 * The cipher descriptor content must be created first,
1204 		 * then the authentication content
1205 		 */
1206 		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
1207 			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
1208 
1209 		if (qat_sym_cd_cipher_set(session,
1210 					aead_xform->key.data,
1211 					aead_xform->key.length))
1212 			return -EINVAL;
1213 
1214 		if (qat_sym_cd_auth_set(session,
1215 					aead_xform->key.data,
1216 					aead_xform->key.length,
1217 					aead_xform->aad_length,
1218 					aead_xform->digest_length,
1219 					crypto_operation,
1220 					qat_dev_gen))
1221 			return -EINVAL;
1222 	} else {
1223 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
1224 		/*
1225 		 * The authentication descriptor content must be created
1226 		 * first, then the cipher content
1227 		 */
1228 
1229 		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
1230 			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
1231 
1232 		if (qat_sym_cd_auth_set(session,
1233 					aead_xform->key.data,
1234 					aead_xform->key.length,
1235 					aead_xform->aad_length,
1236 					aead_xform->digest_length,
1237 					crypto_operation,
1238 					qat_dev_gen))
1239 			return -EINVAL;
1240 
1241 		if (qat_sym_cd_cipher_set(session,
1242 					aead_xform->key.data,
1243 					aead_xform->key.length))
1244 			return -EINVAL;
1245 	}
1246 
1247 	return 0;
1248 }
1249 
1250 unsigned int qat_sym_session_get_private_size(
1251 		struct rte_cryptodev *dev __rte_unused)
1252 {
1253 	return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1254 }
1255 
1256 /* returns block size in bytes per cipher algo */
1257 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1258 {
1259 	switch (qat_cipher_alg) {
1260 	case ICP_QAT_HW_CIPHER_ALGO_DES:
1261 		return ICP_QAT_HW_DES_BLK_SZ;
1262 	case ICP_QAT_HW_CIPHER_ALGO_3DES:
1263 		return ICP_QAT_HW_3DES_BLK_SZ;
1264 	case ICP_QAT_HW_CIPHER_ALGO_AES128:
1265 	case ICP_QAT_HW_CIPHER_ALGO_AES192:
1266 	case ICP_QAT_HW_CIPHER_ALGO_AES256:
1267 		return ICP_QAT_HW_AES_BLK_SZ;
1268 	default:
1269 		QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1270 		return -EFAULT;
1271 	};
1272 	return -EFAULT;
1273 }
1274 
1275 /*
1276  * Returns the size in bytes per hash algo for the state1 size field in cd_ctrl.
1277  * This is the digest size rounded up to the nearest quadword.
1278  */
1279 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1280 {
1281 	switch (qat_hash_alg) {
1282 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1283 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1284 						QAT_HW_DEFAULT_ALIGNMENT);
1285 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1286 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1287 						QAT_HW_DEFAULT_ALIGNMENT);
1288 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1289 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1290 						QAT_HW_DEFAULT_ALIGNMENT);
1291 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1292 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1293 						QAT_HW_DEFAULT_ALIGNMENT);
1294 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1295 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1296 						QAT_HW_DEFAULT_ALIGNMENT);
1297 	case ICP_QAT_HW_AUTH_ALGO_SHA3_224:
1298 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA3_224_STATE1_SZ,
1299 						QAT_HW_DEFAULT_ALIGNMENT);
1300 	case ICP_QAT_HW_AUTH_ALGO_SHA3_256:
1301 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA3_256_STATE1_SZ,
1302 						QAT_HW_DEFAULT_ALIGNMENT);
1303 	case ICP_QAT_HW_AUTH_ALGO_SHA3_384:
1304 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA3_384_STATE1_SZ,
1305 						QAT_HW_DEFAULT_ALIGNMENT);
1306 	case ICP_QAT_HW_AUTH_ALGO_SHA3_512:
1307 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA3_512_STATE1_SZ,
1308 						QAT_HW_DEFAULT_ALIGNMENT);
1309 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1310 		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1311 						QAT_HW_DEFAULT_ALIGNMENT);
1312 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1313 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1314 		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1315 						QAT_HW_DEFAULT_ALIGNMENT);
1316 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1317 		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1318 						QAT_HW_DEFAULT_ALIGNMENT);
1319 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
1320 		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_32_STATE1_SZ,
1321 						QAT_HW_DEFAULT_ALIGNMENT);
1322 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
1323 		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_64_STATE1_SZ,
1324 						QAT_HW_DEFAULT_ALIGNMENT);
1325 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
1326 		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_128_STATE1_SZ,
1327 						QAT_HW_DEFAULT_ALIGNMENT);
1328 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1329 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1330 						QAT_HW_DEFAULT_ALIGNMENT);
1331 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1332 		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1333 						QAT_HW_DEFAULT_ALIGNMENT);
1334 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1335 		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1336 						QAT_HW_DEFAULT_ALIGNMENT);
1337 	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1338 		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1339 						QAT_HW_DEFAULT_ALIGNMENT);
1340 	case ICP_QAT_HW_AUTH_ALGO_SM3:
1341 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SM3_STATE1_SZ,
1342 						QAT_HW_DEFAULT_ALIGNMENT);
1343 	case ICP_QAT_HW_AUTH_ALGO_NULL:
1344 		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1345 						QAT_HW_DEFAULT_ALIGNMENT);
1346 	case ICP_QAT_HW_AUTH_ALGO_AES_128_CMAC:
1347 		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CMAC_STATE1_SZ,
1348 						QAT_HW_DEFAULT_ALIGNMENT);
1349 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1350 		/* return maximum state1 size in this case */
1351 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1352 						QAT_HW_DEFAULT_ALIGNMENT);
1353 	default:
1354 		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1355 		return -EFAULT;
1356 	};
1357 	return -EFAULT;
1358 }
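
/*
 * Worked example (illustrative): SHA-1 has a 20-byte digest, so its state1
 * size rounds up to the next quadword boundary:
 * QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, QAT_HW_DEFAULT_ALIGNMENT)
 * == QAT_HW_ROUND_UP(20, 8) == 24.
 */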
1359 
1360 /* returns digest size in bytes per hash algo */
1361 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1362 {
1363 	switch (qat_hash_alg) {
1364 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1365 		return ICP_QAT_HW_SHA1_STATE1_SZ;
1366 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1367 		return ICP_QAT_HW_SHA224_STATE1_SZ;
1368 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1369 		return ICP_QAT_HW_SHA256_STATE1_SZ;
1370 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1371 		return ICP_QAT_HW_SHA384_STATE1_SZ;
1372 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1373 		return ICP_QAT_HW_SHA512_STATE1_SZ;
1374 	case ICP_QAT_HW_AUTH_ALGO_SHA3_224:
1375 		return ICP_QAT_HW_SHA3_224_STATE1_SZ;
1376 	case ICP_QAT_HW_AUTH_ALGO_SHA3_256:
1377 		return ICP_QAT_HW_SHA3_256_STATE1_SZ;
1378 	case ICP_QAT_HW_AUTH_ALGO_SHA3_384:
1379 		return ICP_QAT_HW_SHA3_384_STATE1_SZ;
1380 	case ICP_QAT_HW_AUTH_ALGO_SHA3_512:
1381 		return ICP_QAT_HW_SHA3_512_STATE1_SZ;
1382 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1383 		return ICP_QAT_HW_MD5_STATE1_SZ;
1384 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1385 	case ICP_QAT_HW_AUTH_ALGO_AES_128_CMAC:
1386 		return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1387 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1388 		/* return maximum digest size in this case */
1389 		return ICP_QAT_HW_SHA512_STATE1_SZ;
1390 	default:
1391 		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1392 		return -EFAULT;
1393 	};
1394 	return -EFAULT;
1395 }
1396 
1397 /* returns block size in bytes per hash algo */
1398 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1399 {
1400 	switch (qat_hash_alg) {
1401 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1402 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1403 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1404 		return QAT_SHA_CBLOCK;
1405 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1406 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1407 		return QAT_SHA512_CBLOCK;
1408 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1409 		return 16;
1410 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1411 		return ICP_QAT_HW_AES_BLK_SZ;
1412 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1413 		return QAT_MD5_CBLOCK;
1414 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
1415 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
1416 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
1417 		return ICP_QAT_HW_ZUC_256_BLK_SZ;
1418 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1419 		/* return maximum block size in this case */
1420 		return QAT_SHA512_CBLOCK;
1421 	case ICP_QAT_HW_AUTH_ALGO_SM3:
1422 		return QAT_SM3_BLOCK_SIZE;
1423 	default:
1424 		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1425 		return -EFAULT;
1426 	};
1427 	return -EFAULT;
1428 }
1429 
1430 #define HMAC_IPAD_VALUE	0x36
1431 #define HMAC_OPAD_VALUE	0x5c
1432 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1433 
1434 #ifdef RTE_QAT_OPENSSL
1435 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1436 {
1437 	SHA_CTX ctx;
1438 
1439 	if (!SHA1_Init(&ctx))
1440 		return -EFAULT;
1441 	SHA1_Transform(&ctx, data_in);
1442 	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1443 	return 0;
1444 }
1445 
1446 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1447 {
1448 	SHA256_CTX ctx;
1449 
1450 	if (!SHA224_Init(&ctx))
1451 		return -EFAULT;
1452 	SHA256_Transform(&ctx, data_in);
1453 	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1454 	return 0;
1455 }
1456 
1457 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1458 {
1459 	SHA256_CTX ctx;
1460 
1461 	if (!SHA256_Init(&ctx))
1462 		return -EFAULT;
1463 	SHA256_Transform(&ctx, data_in);
1464 	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1465 	return 0;
1466 }
1467 
1468 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1469 {
1470 	SHA512_CTX ctx;
1471 
1472 	if (!SHA384_Init(&ctx))
1473 		return -EFAULT;
1474 	SHA512_Transform(&ctx, data_in);
1475 	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1476 	return 0;
1477 }
1478 
1479 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1480 {
1481 	SHA512_CTX ctx;
1482 
1483 	if (!SHA512_Init(&ctx))
1484 		return -EFAULT;
1485 	SHA512_Transform(&ctx, data_in);
1486 	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1487 	return 0;
1488 }
1489 
1490 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1491 {
1492 	MD5_CTX ctx;
1493 
1494 	if (!MD5_Init(&ctx))
1495 		return -EFAULT;
1496 	MD5_Transform(&ctx, data_in);
1497 	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1498 
1499 	return 0;
1500 }
1501 
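/*
 * CMAC subkey derivation per NIST SP 800-38B / RFC 4493: the derived key
 * is the base key doubled in GF(2^128), i.e. shifted left by one bit and,
 * when the shifted-out MSB was set, XORed with the constant Rb (0x87) in
 * the last byte.
 */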
1502 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1503 {
1504 	int i;
1505 
1506 	derived[0] = base[0] << 1;
1507 	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1508 		derived[i] = base[i] << 1;
1509 		derived[i - 1] |= base[i] >> 7;
1510 	}
1511 
1512 	if (base[0] & 0x80)
1513 		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1514 }
1515 
1516 static int
1517 partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1518 		uint8_t *data_in, uint8_t *data_out)
1519 {
1520 	int digest_size;
1521 	uint8_t digest[qat_hash_get_digest_size(
1522 			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1523 	uint32_t *hash_state_out_be32;
1524 	uint64_t *hash_state_out_be64;
1525 	int i;
1526 
1527 	/* Initialize to avoid gcc warning */
1528 	memset(digest, 0, sizeof(digest));
1529 
1530 	digest_size = qat_hash_get_digest_size(hash_alg);
1531 	if (digest_size <= 0)
1532 		return -EFAULT;
1533 
1534 	hash_state_out_be32 = (uint32_t *)data_out;
1535 	hash_state_out_be64 = (uint64_t *)data_out;
1536 
1537 	switch (hash_alg) {
1538 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1539 		if (partial_hash_sha1(data_in, digest))
1540 			return -EFAULT;
1541 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1542 			*hash_state_out_be32 =
1543 				rte_bswap32(*(((uint32_t *)digest)+i));
1544 		break;
1545 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1546 		if (partial_hash_sha224(data_in, digest))
1547 			return -EFAULT;
1548 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1549 			*hash_state_out_be32 =
1550 				rte_bswap32(*(((uint32_t *)digest)+i));
1551 		break;
1552 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1553 		if (partial_hash_sha256(data_in, digest))
1554 			return -EFAULT;
1555 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1556 			*hash_state_out_be32 =
1557 				rte_bswap32(*(((uint32_t *)digest)+i));
1558 		break;
1559 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1560 		if (partial_hash_sha384(data_in, digest))
1561 			return -EFAULT;
1562 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1563 			*hash_state_out_be64 =
1564 				rte_bswap64(*(((uint64_t *)digest)+i));
1565 		break;
1566 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1567 		if (partial_hash_sha512(data_in, digest))
1568 			return -EFAULT;
1569 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1570 			*hash_state_out_be64 =
1571 				rte_bswap64(*(((uint64_t *)digest)+i));
1572 		break;
1573 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1574 		if (partial_hash_md5(data_in, data_out))
1575 			return -EFAULT;
1576 		break;
1577 	default:
1578 		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1579 		return -EFAULT;
1580 	}
1581 
1582 	return 0;
1583 }
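
/*
 * Note on the byte swapping above (illustrative): the OpenSSL *_Transform
 * routines leave the hash state in host-endian words, while QAT expects the
 * state1 prefix in big-endian word order; the unconditional rte_bswap32()/
 * rte_bswap64() calls therefore assume a little-endian host.
 */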
1584 
1585 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1586 
1587 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1588 				const uint8_t *auth_key,
1589 				uint16_t auth_keylen,
1590 				uint8_t *p_state_buf,
1591 				uint16_t *p_state_len,
1592 				uint8_t aes_cmac)
1593 {
1594 	int block_size;
1595 	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1596 	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1597 	int i;
1598 
1599 	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1600 
1601 		/* CMAC */
1602 		if (aes_cmac) {
1603 			AES_KEY enc_key;
1604 			uint8_t *in = NULL;
1605 			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1606 			uint8_t *k1, *k2;
1607 
1608 			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1609 
1610 			in = rte_zmalloc("AES CMAC K1",
1611 					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
1612 
1613 			if (in == NULL) {
1614 				QAT_LOG(ERR, "Failed to alloc memory");
1615 				return -ENOMEM;
1616 			}
1617 
1618 			rte_memcpy(in, AES_CMAC_SEED,
1619 				   ICP_QAT_HW_AES_128_KEY_SZ);
1620 			rte_memcpy(p_state_buf, auth_key, auth_keylen);
1621 
1622 			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1623 				&enc_key) != 0) {
1624 				rte_free(in);
1625 				return -EFAULT;
1626 			}
1627 
1628 			AES_encrypt(in, k0, &enc_key);
1629 
1630 			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1631 			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1632 
1633 			aes_cmac_key_derive(k0, k1);
1634 			aes_cmac_key_derive(k1, k2);
1635 
1636 			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1637 			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1638 			rte_free(in);
1639 			goto out;
1640 		} else {
1641 			static uint8_t qat_aes_xcbc_key_seed[
1642 					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1643 				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1644 				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1645 				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1646 				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1647 				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1648 				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1649 			};
1650 
1651 			uint8_t *in = NULL;
1652 			uint8_t *out = p_state_buf;
1653 			int x;
1654 			AES_KEY enc_key;
1655 
1656 			in = rte_zmalloc("working mem for key",
1657 					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1658 			if (in == NULL) {
1659 				QAT_LOG(ERR, "Failed to alloc memory");
1660 				return -ENOMEM;
1661 			}
1662 
1663 			rte_memcpy(in, qat_aes_xcbc_key_seed,
1664 					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1665 			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1666 				if (AES_set_encrypt_key(auth_key,
1667 							auth_keylen << 3,
1668 							&enc_key) != 0) {
1669 					rte_free(in -
1670 					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1671 					memset(out -
1672 					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1673 					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1674 					return -EFAULT;
1675 				}
1676 				AES_encrypt(in, out, &enc_key);
1677 				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1678 				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1679 			}
1680 			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1681 			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1682 			goto out;
1683 		}
1684 
1685 	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1686 		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1687 		uint8_t *in = NULL;
1688 		uint8_t *out = p_state_buf;
1689 		AES_KEY enc_key;
1690 
1691 		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1692 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
1693 				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1694 		in = rte_zmalloc("working mem for key",
1695 				ICP_QAT_HW_GALOIS_H_SZ, 16);
1696 		if (in == NULL) {
1697 			QAT_LOG(ERR, "Failed to alloc memory");
1698 			return -ENOMEM;
1699 		}
1700 
1701 		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1702 		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1703 			&enc_key) != 0) {
			rte_free(in);
1704 			return -EFAULT;
1705 		}
1706 		AES_encrypt(in, out, &enc_key);
1707 		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1708 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
1709 				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1710 		rte_free(in);
1711 		return 0;
1712 	}
1713 
1714 	block_size = qat_hash_get_block_size(hash_alg);
1715 	if (block_size < 0)
1716 		return block_size;
1717 	/* init ipad and opad from key and xor with fixed values */
1718 	memset(ipad, 0, block_size);
1719 	memset(opad, 0, block_size);
1720 
1721 	if (auth_keylen > (unsigned int)block_size) {
1722 		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1723 		return -EFAULT;
1724 	}
1725 
1726 	RTE_VERIFY(auth_keylen <= sizeof(ipad));
1727 	RTE_VERIFY(auth_keylen <= sizeof(opad));
1728 
1729 	rte_memcpy(ipad, auth_key, auth_keylen);
1730 	rte_memcpy(opad, auth_key, auth_keylen);
1731 
1732 	for (i = 0; i < block_size; i++) {
1733 		uint8_t *ipad_ptr = ipad + i;
1734 		uint8_t *opad_ptr = opad + i;
1735 		*ipad_ptr ^= HMAC_IPAD_VALUE;
1736 		*opad_ptr ^= HMAC_OPAD_VALUE;
1737 	}
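
	/*
	 * This is the standard HMAC precompute (FIPS 198):
	 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 * Storing the one-block partial hashes of (K ^ ipad) and
	 * (K ^ opad) lets the hardware resume the inner and outer
	 * hashes per packet without ever holding the key itself.
	 */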
1738 
1739 	/* do partial hash of ipad and copy to state1 */
1740 	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1741 		memset(ipad, 0, block_size);
1742 		memset(opad, 0, block_size);
1743 		QAT_LOG(ERR, "ipad precompute failed");
1744 		return -EFAULT;
1745 	}
1746 
1747 	/*
1748 	 * State1 length is a multiple of 8, so it may be larger than the
1749 	 * digest. Put the partial hash of opad state_len bytes after state1.
1750 	 */
1751 	*p_state_len = qat_hash_get_state1_size(hash_alg);
1752 	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1753 		memset(ipad, 0, block_size);
1754 		memset(opad, 0, block_size);
1755 		QAT_LOG(ERR, "opad precompute failed");
1756 		return -EFAULT;
1757 	}
1758 
1759 	/* don't leave data lying around */
1760 	memset(ipad, 0, block_size);
1761 	memset(opad, 0, block_size);
1762 out:
1763 	return 0;
1764 }
1765 
1766 #else
1767 
1768 static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
1769 		const uint8_t *key, uint16_t auth_keylen)
1770 {
1771 	int err;
1772 	struct IMB_JOB *job;
1773 	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
1774 	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
1775 
1776 	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
1777 		IMB_AES_KEYEXP_128(m, key, expkey, dust);
1778 	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
1779 		IMB_AES_KEYEXP_192(m, key, expkey, dust);
1780 	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
1781 		IMB_AES_KEYEXP_256(m, key, expkey, dust);
1782 	else
1783 		return -EFAULT;
1784 
1785 	job = IMB_GET_NEXT_JOB(m);
1786 
1787 	job->src = in;
1788 	job->dst = out;
1789 	job->enc_keys = expkey;
1790 	job->key_len_in_bytes = auth_keylen;
1791 	job->msg_len_to_cipher_in_bytes = 16;
1792 	job->iv_len_in_bytes = 0;
1793 	job->cipher_direction = IMB_DIR_ENCRYPT;
1794 	job->cipher_mode = IMB_CIPHER_ECB;
1795 	job->hash_alg = IMB_AUTH_NULL;
1796 
1797 	while (IMB_FLUSH_JOB(m) != NULL)
1798 		;
1799 
1800 	job = IMB_SUBMIT_JOB(m);
1801 	if (job != NULL && job->status == IMB_STATUS_COMPLETED)
1802 		return 0;
1805 
1806 	err = imb_get_errno(m);
1807 	if (err)
1808 		QAT_LOG(ERR, "Error: %s!", imb_get_strerror(err));
1809 
1810 	return -EFAULT;
1811 }
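
/*
 * Minimal usage sketch for aes_ipsecmb_job() (values illustrative):
 * encrypt one 16-byte block with AES-128 in ECB mode, as the precompute
 * paths below do for the XCBC seed blocks and the GHASH hash key.
 *
 *	uint8_t in[16] = { 0 }, out[16];
 *
 *	if (aes_ipsecmb_job(in, out, m, key, ICP_QAT_HW_AES_128_KEY_SZ))
 *		return -EFAULT;
 */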
1812 
1813 static int
1814 partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
1815 		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
1816 {
1817 	int digest_size;
1818 	uint8_t digest[qat_hash_get_digest_size(
1819 			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1820 	uint32_t *hash_state_out_be32;
1821 	uint64_t *hash_state_out_be64;
1822 	int i;
1823 
1824 	/* Initialize to avoid gcc warning */
1825 	memset(digest, 0, sizeof(digest));
1826 
1827 	digest_size = qat_hash_get_digest_size(hash_alg);
1828 	if (digest_size <= 0)
1829 		return -EFAULT;
1830 
1831 	hash_state_out_be32 = (uint32_t *)data_out;
1832 	hash_state_out_be64 = (uint64_t *)data_out;
1833 
1834 	switch (hash_alg) {
1835 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1836 		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
1837 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1838 			*hash_state_out_be32 =
1839 				rte_bswap32(*(((uint32_t *)digest)+i));
1840 		break;
1841 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1842 		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
1843 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1844 			*hash_state_out_be32 =
1845 				rte_bswap32(*(((uint32_t *)digest)+i));
1846 		break;
1847 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1848 		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
1849 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1850 			*hash_state_out_be32 =
1851 				rte_bswap32(*(((uint32_t *)digest)+i));
1852 		break;
1853 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1854 		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
1855 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1856 			*hash_state_out_be64 =
1857 				rte_bswap64(*(((uint64_t *)digest)+i));
1858 		break;
1859 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1860 		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
1861 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1862 			*hash_state_out_be64 =
1863 				rte_bswap64(*(((uint64_t *)digest)+i));
1864 		break;
1865 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1866 		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
1867 		break;
1868 	default:
1869 		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1870 		return -EFAULT;
1871 	}
1872 
1873 	return 0;
1874 }
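
/*
 * The word-wise byte swaps above store the partial digest in big-endian
 * word order, matching the canonical SHA state representation that QAT
 * expects (cf. the sha1InitialState etc. tables at the top of this
 * file). MD5 is the exception: its state words are little-endian by
 * specification, so the digest is written out unswapped.
 */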
1875 
1876 static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
1877 				const uint8_t *auth_key,
1878 				uint16_t auth_keylen,
1879 				uint8_t *p_state_buf,
1880 				uint16_t *p_state_len,
1881 				uint8_t aes_cmac)
1882 {
1883 	int block_size = 0;
1884 	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1885 	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1886 	int i, ret = 0;
1887 	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];
1888 
1889 	IMB_MGR *m;
1890 	m = alloc_mb_mgr(0);
1891 	if (m == NULL)
1892 		return -ENOMEM;
1893 
1894 	init_mb_mgr_auto(m, NULL);
1895 	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1896 	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1897 
1898 		/* CMAC */
1899 		if (aes_cmac) {
1900 			uint8_t *k1, *k2;
1901 			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1902 			rte_memcpy(p_state_buf, auth_key, auth_keylen);
1903 
1904 			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
1905 			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
1906 			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
1907 			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1908 			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1909 
1910 			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
1911 			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1912 			goto out;
1913 		}
1914 
1915 		static uint8_t qat_aes_xcbc_key_seed[
1916 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1917 			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1918 			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1919 			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1920 			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1921 			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1922 			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1923 		};
1924 
1925 		uint8_t *input = in;
1926 		uint8_t *out = p_state_buf;
1927 		rte_memcpy(input, qat_aes_xcbc_key_seed,
1928 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1929 		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
1930 			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
1931 				memset(input -
1932 				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1933 				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1934 				ret = -EFAULT;
1935 				goto out;
1936 			}
1937 
1938 			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1939 			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1940 		}
1941 		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1942 		goto out;
1943 
1944 	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1945 		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1946 		uint8_t *out = p_state_buf;
1947 
1948 		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1949 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
1950 				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1951 		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
1952 			ret = -EFAULT;
1953 			goto out;
1954 		}
1955 
1956 		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1957 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
1958 				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1959 		goto out;
1960 	}
1961 
1962 	block_size = qat_hash_get_block_size(hash_alg);
1963 	if (block_size < 0) {
1964 		free_mb_mgr(m);
1965 		return block_size;
1966 	}
1967 
1968 	if (auth_keylen > (unsigned int)block_size) {
1969 		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1970 		ret = -EFAULT;
1971 		goto out;
1972 	}
1973 	/* init ipad and opad from key and xor with fixed values */
1974 	memset(ipad, 0, block_size);
1975 	memset(opad, 0, block_size);
1976 	RTE_VERIFY(auth_keylen <= sizeof(ipad));
1977 	RTE_VERIFY(auth_keylen <= sizeof(opad));
1978 	rte_memcpy(ipad, auth_key, auth_keylen);
1979 	rte_memcpy(opad, auth_key, auth_keylen);
1980 
1981 	for (i = 0; i < block_size; i++) {
1982 		uint8_t *ipad_ptr = ipad + i;
1983 		uint8_t *opad_ptr = opad + i;
1984 		*ipad_ptr ^= HMAC_IPAD_VALUE;
1985 		*opad_ptr ^= HMAC_OPAD_VALUE;
1986 	}
1987 
1988 	/* do partial hash of ipad and copy to state1 */
1989 	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
1990 		QAT_LOG(ERR, "ipad precompute failed");
1991 		ret = -EFAULT;
1992 		goto out;
1993 	}
1994 
1995 	/*
1996 	 * State1 length is a multiple of 8, so it may be larger than the
1997 	 * digest. Put the partial hash of opad state_len bytes after state1.
1998 	 */
1999 	*p_state_len = qat_hash_get_state1_size(hash_alg);
2000 	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
2001 				p_state_buf + *p_state_len, m)) {
2002 		QAT_LOG(ERR, "opad precompute failed");
2003 		ret = -EFAULT;
2004 		goto out;
2005 	}
2006 
2007 out:
2008 	/* don't leave data lying around */
2009 	memset(ipad, 0, block_size);
2010 	memset(opad, 0, block_size);
2011 	free_mb_mgr(m);
2012 	return ret;
2013 }
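
/*
 * The intel-ipsec-mb path above and the OpenSSL path
 * (qat_sym_do_precomputes) are build-time alternatives selected via
 * RTE_QAT_OPENSSL; both derive the same state1/state2 blobs for the
 * content descriptor, only the software crypto backend differs.
 */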
2014 #endif
2015 
2016 static void
2017 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
2018 {
2019 	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
2020 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
2021 	enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
2022 	uint32_t slice_flags = session->slice_types;
2023 
2024 	header->hdr_flags =
2025 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
2026 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
2027 	header->service_cmd_id = session->qat_cmd;
2028 	header->comn_req_flags =
2029 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
2030 					QAT_COMN_PTR_TYPE_FLAT);
2031 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
2032 				  ICP_QAT_FW_LA_PARTIAL_NONE);
2033 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
2034 					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
2035 
2036 	switch (proto_flags) {
2037 	case QAT_CRYPTO_PROTO_FLAG_NONE:
2038 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
2039 					ICP_QAT_FW_LA_NO_PROTO);
2040 		break;
2041 	case QAT_CRYPTO_PROTO_FLAG_CCM:
2042 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
2043 					ICP_QAT_FW_LA_CCM_PROTO);
2044 		break;
2045 	case QAT_CRYPTO_PROTO_FLAG_GCM:
2046 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
2047 					ICP_QAT_FW_LA_GCM_PROTO);
2048 		break;
2049 	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
2050 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
2051 					ICP_QAT_FW_LA_SNOW_3G_PROTO);
2052 		break;
2053 	case QAT_CRYPTO_PROTO_FLAG_ZUC:
2054 		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
2055 			ICP_QAT_FW_LA_ZUC_3G_PROTO);
2056 		break;
2057 	}
2058 
2059 	/* More than one of the following flags can be set at once */
2060 	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
2061 		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
2062 			header->serv_specif_flags,
2063 			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
2064 	}
2065 	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
2066 		ICP_QAT_FW_LA_SLICE_TYPE_SET(
2067 			header->serv_specif_flags,
2068 			ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
2069 	}
2070 
2071 	if (session->is_auth) {
2072 		if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
2073 			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
2074 					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
2075 			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
2076 					ICP_QAT_FW_LA_CMP_AUTH_RES);
2077 		} else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
2078 			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
2079 						ICP_QAT_FW_LA_RET_AUTH_RES);
2080 			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
2081 						ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
2082 		}
2083 	} else {
2084 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
2085 					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
2086 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
2087 					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
2088 	}
2089 
2090 	if (session->is_iv12B) {
2091 		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
2092 			header->serv_specif_flags,
2093 			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
2094 	}
2095 
2096 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
2097 					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
2098 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
2099 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
2100 }
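
/*
 * serv_specif_flags is a packed bitfield: each ICP_QAT_FW_LA_*_SET
 * macro masks and ORs its value in at a fixed bit position (cf.
 * QAT_FIELD_SET used elsewhere in this file), so the setters above are
 * order-independent. Schematic expansion, with SHIFT/MASK illustrative:
 *
 *	flags = (flags & ~(MASK << SHIFT)) | ((val & MASK) << SHIFT);
 */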
2101 
2102 static void
2103 qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
2104 {
2105 	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
2106 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
2107 
2108 	/*
2109 	 * GEN_LCE specifies a separate command id for AEAD operations, but the
2110 	 * Cryptodev API processes AEAD operations as single-pass crypto
2111 	 * operations. Hence, even for GEN_LCE, the session algo command id
2112 	 * remains CIPHER while the session algo mode is AEAD.
2113 	 */
2114 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
2115 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
2116 	header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
2117 			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
2118 	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
2119 			QAT_COMN_KEY_BUFFER_USED);
2120 
2121 	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags, QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
2122 	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
2123 	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
2124 			ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
2125 
2126 	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
2127 		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_DECRYPT);
2128 	} else {
2129 		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_ENCRYPT);
2130 	}
2131 }
2132 
2133 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
2134 						const uint8_t *cipherkey,
2135 						uint32_t cipherkeylen)
2136 {
2137 	struct icp_qat_hw_cipher_algo_blk *cipher;
2138 	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
2139 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
2140 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
2141 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
2142 	void *ptr = &req_tmpl->cd_ctrl;
2143 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
2144 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
2145 	enum icp_qat_hw_cipher_convert key_convert;
2146 	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
2147 			(struct icp_qat_fw_la_cipher_20_req_params *)
2148 			&cdesc->fw_req.serv_specif_rqpars;
2149 	struct icp_qat_fw_la_cipher_req_params *req_cipher =
2150 			(struct icp_qat_fw_la_cipher_req_params *)
2151 			&cdesc->fw_req.serv_specif_rqpars;
2152 	uint32_t total_key_size;
2153 	uint16_t cipher_offset, cd_size;
2154 	uint32_t wordIndex = 0;
2155 	uint32_t *temp_key = NULL;
2156 
2157 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
2158 		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2159 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
2160 					ICP_QAT_FW_SLICE_CIPHER);
2161 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
2162 					ICP_QAT_FW_SLICE_DRAM_WR);
2163 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
2164 					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
2165 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
2166 					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
2167 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
2168 	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
2169 		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2170 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
2171 					ICP_QAT_FW_SLICE_CIPHER);
2172 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
2173 					ICP_QAT_FW_SLICE_AUTH);
2174 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
2175 					ICP_QAT_FW_SLICE_AUTH);
2176 		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
2177 					ICP_QAT_FW_SLICE_DRAM_WR);
2178 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
2179 	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_CRC) {
2180 		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2181 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
2182 	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
2183 		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
2184 		return -EFAULT;
2185 	}
2186 
2187 	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
2188 		/*
2189 		 * CTR streaming ciphers are a special case: decrypt = encrypt,
2190 		 * so override the direction values previously set.
2191 		 * Chacha20-Poly1305 is a further special case: CTR but
2192 		 * single-pass, so both directions need to be handled.
2193 		 */
2194 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
2195 		if (cdesc->qat_cipher_alg ==
2196 			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
2197 			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
2198 				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
2199 		}
2200 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
2201 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
2202 		|| cdesc->qat_cipher_alg ==
2203 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3
2204 			|| cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
2205 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
2206 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
2207 	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
2208 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
2209 	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
2210 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
2211 	else
2212 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
2213 
2214 	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
2215 		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
2216 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
2217 		cipher_cd_ctrl->cipher_state_sz =
2218 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
2219 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
2220 
2221 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
2222 		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
2223 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
2224 		cipher_cd_ctrl->cipher_padding_sz =
2225 					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
2226 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
2227 		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
2228 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
2229 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
2230 		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
2231 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
2232 	} else if (cdesc->qat_cipher_alg ==
2233 		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
2234 		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
2235 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2236 		cipher_cd_ctrl->cipher_state_sz =
2237 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2238 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
2239 	} else if (cdesc->qat_cipher_alg ==
2240 			ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
2241 		if (cdesc->cipher_iv.length != 23 && cdesc->cipher_iv.length != 25) {
2242 			QAT_LOG(ERR, "Invalid IV length for ZUC256, must be 23 or 25.");
2243 			return -EINVAL;
2244 		}
2245 		total_key_size = ICP_QAT_HW_ZUC_256_KEY_SZ +
2246 			ICP_QAT_HW_ZUC_256_IV_SZ;
2247 		cipher_cd_ctrl->cipher_state_sz =
2248 			ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
2249 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
2250 	} else {
2251 		total_key_size = cipherkeylen;
2252 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
2253 	}
2254 	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
2255 	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
2256 
2257 	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
2258 	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
2259 	cipher->cipher_config.val =
2260 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
2261 					cdesc->qat_cipher_alg, key_convert,
2262 					cdesc->qat_dir);
2263 
2264 	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
2265 		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
2266 					sizeof(struct icp_qat_hw_cipher_config)
2267 					+ cipherkeylen);
2268 		memcpy(cipher->key, cipherkey, cipherkeylen);
2269 		memcpy(temp_key, cipherkey, cipherkeylen);
2270 
2271 		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
2272 		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
2273 								wordIndex++)
2274 			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
2275 
2276 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
2277 					cipherkeylen + cipherkeylen;
2278 	} else if (cdesc->is_ucs) {
2279 		const uint8_t *final_key = cipherkey;
2280 
2281 		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
2282 		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
2283 			ICP_QAT_HW_AES_128_KEY_SZ);
2284 		cipher20->cipher_config.reserved[0] = 0;
2285 		cipher20->cipher_config.reserved[1] = 0;
2286 		cipher20->cipher_config.reserved[2] = 0;
2287 
2288 		rte_memcpy(cipher20->key, final_key, cipherkeylen);
2289 		cdesc->cd_cur_ptr +=
2290 			sizeof(struct icp_qat_hw_ucs_cipher_config) +
2291 					cipherkeylen;
2292 	} else {
2293 		memcpy(cipher->key, cipherkey, cipherkeylen);
2294 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
2295 					cipherkeylen;
2296 	}
2297 
2298 	if (cdesc->is_single_pass) {
2299 		QAT_FIELD_SET(cipher->cipher_config.val,
2300 			cdesc->digest_length,
2301 			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
2302 			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
2303 		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
2304 		cdesc->cd.cipher.cipher_config.reserved =
2305 				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
2306 					cdesc->aad_len);
2307 		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
2308 	}
2309 
2310 	if (total_key_size > cipherkeylen) {
2311 		uint32_t padding_size =  total_key_size-cipherkeylen;
2312 		uint32_t padding_size = total_key_size - cipherkeylen;

2313 			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
2314 			/* K3 not provided so use K1 = K3*/
2315 			/* K3 not provided, so use K3 = K1 */
2316 		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
2317 			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
2318 			/* K2 and K3 not provided, so use K2 = K3 = K1 */
2319 			memcpy(cdesc->cd_cur_ptr, cipherkey,
2320 				cipherkeylen);
2321 			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
2322 				cipherkey, cipherkeylen);
2323 		} else
2324 			memset(cdesc->cd_cur_ptr, 0, padding_size);
2325 
2326 		cdesc->cd_cur_ptr += padding_size;
2327 	}
2328 	if (cdesc->is_ucs) {
2329 		/*
2330 		 * These fields occupy the same positions as the
2331 		 * corresponding auth slice request fields.
2332 		 */
2333 		req_ucs->spc_auth_res_sz = cdesc->digest_length;
2334 		if (!cdesc->is_gmac) {
2335 			req_ucs->spc_aad_sz = cdesc->aad_len;
2336 			req_ucs->spc_aad_offset = 0;
2337 		}
2338 	} else if (cdesc->is_single_pass) {
2339 		req_cipher->spc_aad_sz = cdesc->aad_len;
2340 		req_cipher->spc_auth_res_sz = cdesc->digest_length;
2341 	}
2342 	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
2343 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2344 	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
2345 
2346 	return 0;
2347 }
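
/*
 * The content descriptor is filled in incrementally through cd_cur_ptr,
 * and hardware sizes/offsets are expressed in 8-byte quadwords, hence
 * the ">> 3" conversions above. For example, cd_size = 48 bytes gives
 * content_desc_params_sz = RTE_ALIGN_CEIL(48, 8) >> 3 = 6 quadwords.
 */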
2348 
2349 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
2350 		const uint8_t *authkey,
2351 		uint32_t authkeylen,
2352 		uint32_t aad_length,
2353 		uint32_t digestsize,
2354 		unsigned int operation,
2355 		enum qat_device_gen qat_dev_gen)
2356 {
2357 	struct icp_qat_hw_auth_setup *hash, *hash_2 = NULL;
2358 	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
2359 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
2360 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
2361 	void *ptr = &req_tmpl->cd_ctrl;
2362 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
2363 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
2364 	struct icp_qat_fw_la_auth_req_params *auth_param =
2365 		(struct icp_qat_fw_la_auth_req_params *)
2366 		((char *)&req_tmpl->serv_specif_rqpars +
2367 		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
2368 	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
2369 	uint16_t hash_offset, cd_size;
2370 	uint32_t *aad_len = NULL;
2371 	uint32_t wordIndex = 0;
2372 	uint32_t *pTempKey;
2373 	uint8_t *prefix = NULL;
2374 	int ret = 0;
2375 
2376 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
2377 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
2378 					ICP_QAT_FW_SLICE_AUTH);
2379 		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
2380 					ICP_QAT_FW_SLICE_DRAM_WR);
2381 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
2382 	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
2383 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
2384 				ICP_QAT_FW_SLICE_AUTH);
2385 		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
2386 				ICP_QAT_FW_SLICE_CIPHER);
2387 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
2388 				ICP_QAT_FW_SLICE_CIPHER);
2389 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
2390 				ICP_QAT_FW_SLICE_DRAM_WR);
2391 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
2392 	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
2393 		QAT_LOG(ERR, "Invalid param, must be a hash command.");
2394 		return -EFAULT;
2395 	}
2396 
2397 	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
2398 		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
2399 	else
2400 		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
2401 
2402 	/*
2403 	 * Setup the inner hash config
2404 	 */
2405 	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
2406 	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
2407 	hash->auth_config.reserved = 0;
2408 	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
2409 		hash->auth_config.config =
2410 			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
2411 				cdesc->qat_hash_alg, 4);
2412 	else
2413 		hash->auth_config.config =
2414 			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
2415 				cdesc->qat_hash_alg, digestsize);
2416 
2417 	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
2418 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
2419 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
2420 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
2421 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32
2422 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64
2423 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128
2424 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
2425 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_128_CMAC
2426 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
2427 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
2428 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SM3
2429 		|| cdesc->is_cnt_zero)
2431 		hash->auth_counter.counter = 0;
2432 	else {
2433 		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
2434 
2435 		if (block_size < 0)
2436 			return block_size;
2437 		hash->auth_counter.counter = rte_bswap32(block_size);
2438 	}
2439 
2440 	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2441 	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
2442 	switch (cdesc->qat_hash_alg) {
2443 	case ICP_QAT_HW_AUTH_ALGO_SM3:
2444 		rte_memcpy(cdesc->cd_cur_ptr, sm3InitialState,
2445 				sizeof(sm3InitialState));
2446 		state1_size = qat_hash_get_state1_size(
2447 				cdesc->qat_hash_alg);
2448 		state2_size = ICP_QAT_HW_SM3_STATE2_SZ;
2449 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0)
2450 			break;
2451 		hash_2 = (struct icp_qat_hw_auth_setup *)(cdesc->cd_cur_ptr +
2452 			state1_size + state2_size);
2453 		hash_2->auth_config.config =
2454 			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE2,
2455 				cdesc->qat_hash_alg, digestsize);
2456 		rte_memcpy(cdesc->cd_cur_ptr + state1_size + state2_size +
2457 			sizeof(*hash_2), sm3InitialState,
2458 			sizeof(sm3InitialState));
2459 		hash_cd_ctrl->inner_state1_sz = state1_size;
2460 		hash_cd_ctrl->inner_state2_sz = state2_size;
2461 		hash_cd_ctrl->inner_state2_offset =
2462 			hash_cd_ctrl->hash_cfg_offset +
2463 			((sizeof(struct icp_qat_hw_auth_setup) +
2464 			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
2465 		hash_cd_ctrl->outer_config_offset =
2466 			hash_cd_ctrl->inner_state2_offset +
2467 			((hash_cd_ctrl->inner_state2_sz) >> 3);
2468 		hash_cd_ctrl->outer_state1_sz = state1_size;
2469 		hash_cd_ctrl->outer_res_sz = state2_size;
2470 		hash_cd_ctrl->outer_prefix_sz =
2471 			qat_hash_get_block_size(cdesc->qat_hash_alg);
2472 		hash_cd_ctrl->outer_prefix_offset =
2473 			qat_hash_get_block_size(cdesc->qat_hash_alg) >> 3;
2474 		auth_param->u2.inner_prefix_sz =
2475 			qat_hash_get_block_size(cdesc->qat_hash_alg);
2476 		auth_param->hash_state_sz = digestsize;
2477 		if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5 ||
2478 				qat_dev_gen == QAT_VQAT) {
2479 			ICP_QAT_FW_HASH_FLAG_MODE2_SET(
2480 				hash_cd_ctrl->hash_flags,
2481 				QAT_FW_LA_MODE2);
2482 		} else {
2483 			hash_cd_ctrl->hash_flags |=
2484 				ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED;
2485 		}
2486 		prefix = cdesc->prefix_state;
2487 		rte_memcpy(prefix, authkey, authkeylen);
2488 		rte_memcpy(prefix + QAT_PREFIX_SIZE, authkey,
2489 			authkeylen);
2490 		cd_extra_size += sizeof(struct icp_qat_hw_auth_setup) +
2491 			state1_size + state2_size;
2492 		break;
2493 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
2494 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2495 			/* Plain SHA-1 */
2496 			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
2497 					sizeof(sha1InitialState));
2498 			state1_size = qat_hash_get_state1_size(
2499 					cdesc->qat_hash_alg);
2500 			break;
2501 		}
2502 		/* SHA-1 HMAC */
2503 #ifdef RTE_QAT_OPENSSL
2504 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
2505 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2506 			cdesc->aes_cmac);
2507 
2508 #else
2509 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
2510 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2511 			cdesc->aes_cmac);
2512 #endif
2513 
2514 		if (ret) {
2515 			QAT_LOG(ERR, "(SHA)precompute failed");
2516 			return -EFAULT;
2517 		}
2518 		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
2519 		break;
2520 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
2521 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2522 			/* Plain SHA-224 */
2523 			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
2524 					sizeof(sha224InitialState));
2525 			state1_size = qat_hash_get_state1_size(
2526 					cdesc->qat_hash_alg);
2527 			break;
2528 		}
2529 		/* SHA-224 HMAC */
2530 #ifdef RTE_QAT_OPENSSL
2531 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
2532 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2533 			cdesc->aes_cmac);
2534 #else
2535 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
2536 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2537 			cdesc->aes_cmac);
2538 #endif
2539 		if (ret) {
2540 			QAT_LOG(ERR, "(SHA)precompute failed");
2541 			return -EFAULT;
2542 		}
2543 		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
2544 		break;
2545 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
2546 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2547 			/* Plain SHA-256 */
2548 			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
2549 					sizeof(sha256InitialState));
2550 			state1_size = qat_hash_get_state1_size(
2551 					cdesc->qat_hash_alg);
2552 			break;
2553 		}
2554 		/* SHA-256 HMAC */
2555 #ifdef RTE_QAT_OPENSSL
2556 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
2557 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2558 			cdesc->aes_cmac);
2559 #else
2560 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
2561 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2562 			cdesc->aes_cmac);
2563 #endif
2564 		if (ret) {
2565 			QAT_LOG(ERR, "(SHA)precompute failed");
2566 			return -EFAULT;
2567 		}
2568 		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
2569 		break;
2570 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
2571 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2572 			/* Plain SHA-384 */
2573 			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
2574 					sizeof(sha384InitialState));
2575 			state1_size = qat_hash_get_state1_size(
2576 					cdesc->qat_hash_alg);
2577 			break;
2578 		}
2579 		/* SHA-384 HMAC */
2580 #ifdef RTE_QAT_OPENSSL
2581 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
2582 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2583 			cdesc->aes_cmac);
2584 #else
2585 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
2586 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2587 			cdesc->aes_cmac);
2588 #endif
2589 		if (ret) {
2590 			QAT_LOG(ERR, "(SHA)precompute failed");
2591 			return -EFAULT;
2592 		}
2593 		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
2594 		break;
2595 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
2596 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2597 			/* Plain SHA-512 */
2598 			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
2599 					sizeof(sha512InitialState));
2600 			state1_size = qat_hash_get_state1_size(
2601 					cdesc->qat_hash_alg);
2602 			break;
2603 		}
2604 		/* SHA-512 HMAC */
2605 #ifdef RTE_QAT_OPENSSL
2606 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
2607 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2608 			cdesc->aes_cmac);
2609 #else
2610 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
2611 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2612 			cdesc->aes_cmac);
2613 #endif
2614 		if (ret) {
2615 			QAT_LOG(ERR, "(SHA)precompute failed");
2616 			return -EFAULT;
2617 		}
2618 		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
2619 		break;
2620 	case ICP_QAT_HW_AUTH_ALGO_SHA3_224:
2621 		/* Plain SHA3-224 */
2622 		state1_size = qat_hash_get_state1_size(
2623 				cdesc->qat_hash_alg);
2624 		memset(cdesc->cd_cur_ptr, 0, state1_size);
2625 		break;
2626 	case ICP_QAT_HW_AUTH_ALGO_SHA3_256:
2627 		/* Plain SHA3-256 */
2628 		state1_size = qat_hash_get_state1_size(
2629 				cdesc->qat_hash_alg);
2630 		memset(cdesc->cd_cur_ptr, 0, state1_size);
2631 		break;
2632 	case ICP_QAT_HW_AUTH_ALGO_SHA3_384:
2633 		/* Plain SHA3-384 */
2634 		state1_size = qat_hash_get_state1_size(
2635 				cdesc->qat_hash_alg);
2636 		memset(cdesc->cd_cur_ptr, 0, state1_size);
2637 		break;
2638 	case ICP_QAT_HW_AUTH_ALGO_SHA3_512:
2639 		/* Plain SHA3-512 */
2640 		state1_size = qat_hash_get_state1_size(
2641 				cdesc->qat_hash_alg);
2642 		memset(cdesc->cd_cur_ptr, 0, state1_size);
2643 		break;
2644 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
2645 		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
2646 
2647 		if (cdesc->aes_cmac)
2648 			memset(cdesc->cd_cur_ptr, 0, state1_size);
2649 #ifdef RTE_QAT_OPENSSL
2650 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
2651 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
2652 			&state2_size, cdesc->aes_cmac);
2653 #else
2654 		ret = qat_sym_do_precomputes_ipsec_mb(
2655 			ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
2656 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
2657 			&state2_size, cdesc->aes_cmac);
2658 #endif
2659 		if (ret) {
2660 			QAT_LOG(ERR, "(%s)precompute failed",
2661 				cdesc->aes_cmac ? "CMAC" : "XCBC");
2662 			return -EFAULT;
2663 		}
2664 		break;
2665 	case ICP_QAT_HW_AUTH_ALGO_AES_128_CMAC:
2666 		state1_size = ICP_QAT_HW_AES_CMAC_STATE1_SZ;
2667 		memset(cdesc->cd_cur_ptr, 0, state1_size);
2668 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2669 		state2_size = ICP_QAT_HW_AES_128_CMAC_STATE2_SZ;
2670 		break;
2671 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
2672 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
2673 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
2674 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
2675 #ifdef RTE_QAT_OPENSSL
2676 		ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
2677 			authkeylen, cdesc->cd_cur_ptr + state1_size,
2678 			&state2_size, cdesc->aes_cmac);
2679 #else
2680 		ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
2681 			authkeylen, cdesc->cd_cur_ptr + state1_size,
2682 			&state2_size, cdesc->aes_cmac);
2683 #endif
2684 		if (ret) {
2685 			QAT_LOG(ERR, "(GCM)precompute failed");
2686 			return -EFAULT;
2687 		}
2688 		/*
2689 		 * Write the AAD length into bytes 16-19 of state2 in
2690 		 * big-endian format; the underlying field is 8 bytes wide.
2691 		 */
2692 		auth_param->u2.aad_sz =
2693 				RTE_ALIGN_CEIL(aad_length, 16);
2694 		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
2695 
2696 		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
2697 					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
2698 					ICP_QAT_HW_GALOIS_H_SZ);
2699 		*aad_len = rte_bswap32(aad_length);
2700 		cdesc->aad_len = aad_length;
2701 		break;
2702 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
2703 		if (!cdesc->is_wireless)
2704 			cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
2705 		state1_size = qat_hash_get_state1_size(
2706 				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
2707 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
2708 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2709 
2710 		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
2711 				(cdesc->cd_cur_ptr + state1_size + state2_size);
2712 		cipherconfig->cipher_config.val =
2713 		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
2714 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
2715 			ICP_QAT_HW_CIPHER_KEY_CONVERT,
2716 			ICP_QAT_HW_CIPHER_ENCRYPT);
2717 		memcpy(cipherconfig->key, authkey, authkeylen);
2718 		memset(cipherconfig->key + authkeylen,
2719 				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
2720 		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
2721 				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
2722 		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
2723 		break;
2724 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
2725 		if (!cdesc->is_wireless) {
2726 			hash->auth_config.config =
2727 				ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
2728 					cdesc->qat_hash_alg, digestsize);
2729 			cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
2730 		}
2731 		state1_size = qat_hash_get_state1_size(
2732 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
2733 		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
2734 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
2735 			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
2736 
2737 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2738 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2739 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2740 
2741 		break;
2742 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
2743 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
2744 	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
2745 		state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
2746 		state2_size = ICP_QAT_HW_ZUC_256_STATE2_SZ;
2747 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
2748 			+ ICP_QAT_HW_ZUC_256_IV_SZ);
2749 
2750 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2751 		cd_extra_size += ICP_QAT_HW_ZUC_256_IV_SZ;
2752 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
2753 		break;
2754 	case ICP_QAT_HW_AUTH_ALGO_MD5:
2755 #ifdef RTE_QAT_OPENSSL
2756 		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2757 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
2758 			cdesc->aes_cmac);
2759 #else
2760 		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
2761 			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2762 			cdesc->aes_cmac);
2763 #endif
2764 		if (ret) {
2765 			QAT_LOG(ERR, "(MD5)precompute failed");
2766 			return -EFAULT;
2767 		}
2768 		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2769 		break;
2770 	case ICP_QAT_HW_AUTH_ALGO_NULL:
2771 		state1_size = qat_hash_get_state1_size(
2772 				ICP_QAT_HW_AUTH_ALGO_NULL);
2773 		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2774 		break;
2775 	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2776 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2777 		state1_size = qat_hash_get_state1_size(
2778 				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2779 		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2780 				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2781 
2782 		if (aad_length > 0) {
2783 			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2784 			ICP_QAT_HW_CCM_AAD_LEN_INFO;
2785 			auth_param->u2.aad_sz =
2786 			RTE_ALIGN_CEIL(aad_length,
2787 			ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2788 		} else {
2789 			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2790 		}
2791 		cdesc->aad_len = aad_length;
2792 		hash->auth_counter.counter = 0;
2793 
2794 		hash_cd_ctrl->outer_prefix_sz = digestsize;
2795 		auth_param->hash_state_sz = digestsize;
2796 
2797 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2798 		break;
2799 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2800 		state1_size = qat_hash_get_state1_size(
2801 				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2802 		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2803 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2804 		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2805 							+ authkeylen);
2806 		/*
2807 		 * The Inner Hash Initial State2 block must contain IK
2808 		 * (Initialisation Key), followed by IK XOR-ed with KM
2809 		 * (Key Modifier): IK||(IK^KM).
2810 		 */
2811 		/* write the auth key */
2812 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2813 		/* initialise temp key with auth key */
2814 		memcpy(pTempKey, authkey, authkeylen);
2815 		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2816 		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2817 			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2818 		break;
2819 	default:
2820 		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2821 		return -EFAULT;
2822 	}
2823 
2824 	/* Auth CD config setup */
2825 	hash_cd_ctrl->hash_flags |= ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2826 	hash_cd_ctrl->inner_state1_sz = state1_size;
2827 	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
2828 		hash_cd_ctrl->inner_res_sz = 4;
2829 		hash_cd_ctrl->final_sz = 4;
2830 		auth_param->auth_res_sz = 4;
2831 	} else {
2832 		hash_cd_ctrl->inner_res_sz = digestsize;
2833 		hash_cd_ctrl->final_sz = digestsize;
2834 		auth_param->auth_res_sz = digestsize;
2835 	}
2836 
2837 	hash_cd_ctrl->inner_state2_sz = state2_size;
2838 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2839 			((sizeof(struct icp_qat_hw_auth_setup) +
2840 			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2841 					>> 3);
2842 	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2843 	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
2844 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2845 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2846 	return 0;
2847 }
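
/*
 * Resulting layout of the auth portion of the content descriptor built
 * above (state1 padded up to a multiple of 8 bytes):
 *
 *	hash_cfg_offset -> struct icp_qat_hw_auth_setup
 *	                   inner state1
 *	                   inner state2
 *	                   algorithm-specific extra data (cipher config,
 *	                   IV space, outer hash setup, ...)
 */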
2848 
2849 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2850 {
2851 	switch (key_len) {
2852 	case ICP_QAT_HW_AES_128_KEY_SZ:
2853 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2854 		break;
2855 	case ICP_QAT_HW_AES_192_KEY_SZ:
2856 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2857 		break;
2858 	case ICP_QAT_HW_AES_256_KEY_SZ:
2859 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2860 		break;
2861 	default:
2862 		return -EINVAL;
2863 	}
2864 	return 0;
2865 }
2866 
2867 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2868 		enum icp_qat_hw_cipher_algo *alg)
2869 {
2870 	switch (key_len) {
2871 	case ICP_QAT_HW_AES_128_KEY_SZ:
2872 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2873 		break;
2874 	case ICP_QAT_HW_AES_256_KEY_SZ:
2875 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2876 		break;
2877 	default:
2878 		return -EINVAL;
2879 	}
2880 	return 0;
2881 }
2882 
2883 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2884 {
2885 	switch (key_len) {
2886 	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2887 		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2888 		break;
2889 	default:
2890 		return -EINVAL;
2891 	}
2892 	return 0;
2893 }
2894 
2895 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2896 {
2897 	switch (key_len) {
2898 	case ICP_QAT_HW_KASUMI_KEY_SZ:
2899 		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2900 		break;
2901 	default:
2902 		return -EINVAL;
2903 	}
2904 	return 0;
2905 }
2906 
2907 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2908 {
2909 	switch (key_len) {
2910 	case ICP_QAT_HW_DES_KEY_SZ:
2911 		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2912 		break;
2913 	default:
2914 		return -EINVAL;
2915 	}
2916 	return 0;
2917 }
2918 
2919 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2920 {
2921 	switch (key_len) {
2922 	case QAT_3DES_KEY_SZ_OPT1:
2923 	case QAT_3DES_KEY_SZ_OPT2:
2924 	case QAT_3DES_KEY_SZ_OPT3:
2925 		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2926 		break;
2927 	default:
2928 		return -EINVAL;
2929 	}
2930 	return 0;
2931 }
2932 
2933 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2934 {
2935 	switch (key_len) {
2936 	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2937 		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2938 		break;
2939 	case ICP_QAT_HW_ZUC_256_KEY_SZ:
2940 		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_256;
2941 		break;
2942 	default:
2943 		return -EINVAL;
2944 	}
2945 	return 0;
2946 }
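
/*
 * Typical caller pattern for the qat_sym_validate_*_key() helpers above
 * (hypothetical sketch; the real callers live in the session setup
 * paths):
 *
 *	enum icp_qat_hw_cipher_algo alg;
 *
 *	if (qat_sym_validate_aes_key(cipher_xform->key.length, &alg) != 0)
 *		return -EINVAL;
 *	session->qat_cipher_alg = alg;
 */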
2947 
2948 static int
2949 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2950 {
2951 	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2952 	struct rte_security_docsis_xform *docsis = &conf->docsis;
2953 
2954 	/* CRC generate -> Cipher encrypt */
2955 	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2956 
2957 		if (crypto_sym != NULL &&
2958 		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2959 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2960 		    crypto_sym->cipher.algo ==
2961 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2962 		    (crypto_sym->cipher.key.length ==
2963 					ICP_QAT_HW_AES_128_KEY_SZ ||
2964 		     crypto_sym->cipher.key.length ==
2965 					ICP_QAT_HW_AES_256_KEY_SZ) &&
2966 		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2967 		    crypto_sym->next == NULL) {
2968 			return 0;
2969 		}
2970 	/* Cipher decrypt -> CRC verify */
2971 	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2972 
2973 		if (crypto_sym != NULL &&
2974 		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2975 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2976 		    crypto_sym->cipher.algo ==
2977 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2978 		    (crypto_sym->cipher.key.length ==
2979 					ICP_QAT_HW_AES_128_KEY_SZ ||
2980 		     crypto_sym->cipher.key.length ==
2981 					ICP_QAT_HW_AES_256_KEY_SZ) &&
2982 		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2983 		    crypto_sym->next == NULL) {
2984 			return 0;
2985 		}
2986 	}
2987 
2988 	return -EINVAL;
2989 }
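
/*
 * Summary of the DOCSIS constraints enforced above: exactly one
 * AES-DOCSISBPI cipher xform (crypto_sym->next must be NULL), a 128- or
 * 256-bit key, a 16-byte IV, encrypt on the downlink (CRC generate ->
 * cipher encrypt) and decrypt on the uplink (cipher decrypt -> CRC
 * verify).
 */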
2990 
2991 static int
2992 qat_sym_cd_crc_set(struct qat_sym_session *cdesc,
2993 		enum qat_device_gen qat_dev_gen)
2994 {
2995 	struct icp_qat_hw_gen2_crc_cd *crc_cd_gen2;
2996 	struct icp_qat_hw_gen3_crc_cd *crc_cd_gen3;
2997 	struct icp_qat_hw_gen4_crc_cd *crc_cd_gen4;
2998 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
2999 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
3000 	void *ptr = &req_tmpl->cd_ctrl;
3001 	struct icp_qat_fw_auth_cd_ctrl_hdr *crc_cd_ctrl = ptr;
3002 	struct icp_qat_fw_la_auth_req_params *crc_param =
3003 				(struct icp_qat_fw_la_auth_req_params *)
3004 				((char *)&req_tmpl->serv_specif_rqpars +
3005 				ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
3006 	struct icp_qat_fw_ucs_slice_cipher_config crc_cfg;
3007 	uint16_t crc_cfg_offset, cd_size;
3008 
3009 	crc_cfg_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
3010 
3011 	switch (qat_dev_gen) {
3012 	case QAT_GEN2:
3013 		crc_cd_gen2 =
3014 			(struct icp_qat_hw_gen2_crc_cd *)cdesc->cd_cur_ptr;
3015 		crc_cd_gen2->flags = 0;
3016 		crc_cd_gen2->initial_crc = 0;
3017 		memset(&crc_cd_gen2->reserved1,
3018 			0,
3019 			sizeof(crc_cd_gen2->reserved1));
3020 		memset(&crc_cd_gen2->reserved2,
3021 			0,
3022 			sizeof(crc_cd_gen2->reserved2));
3023 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen2_crc_cd);
3024 		break;
3025 	case QAT_GEN3:
3026 		crc_cd_gen3 =
3027 			(struct icp_qat_hw_gen3_crc_cd *)cdesc->cd_cur_ptr;
3028 		crc_cd_gen3->flags = ICP_QAT_HW_GEN3_CRC_FLAGS_BUILD(1, 1);
3029 		crc_cd_gen3->polynomial = ETH_CRC32_POLYNOMIAL;
3030 		crc_cd_gen3->initial_crc = ETH_CRC32_INIT_VAL;
3031 		crc_cd_gen3->xor_val = ETH_CRC32_XOR_OUT;
3032 		memset(&crc_cd_gen3->reserved1,
3033 			0,
3034 			sizeof(crc_cd_gen3->reserved1));
3035 		memset(&crc_cd_gen3->reserved2,
3036 			0,
3037 			sizeof(crc_cd_gen3->reserved2));
3038 		crc_cd_gen3->reserved3 = 0;
3039 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen3_crc_cd);
3040 		break;
3041 	case QAT_GEN4:
3042 	case QAT_GEN5:
3043 		crc_cfg.mode = ICP_QAT_HW_CIPHER_ECB_MODE;
3044 		crc_cfg.algo = ICP_QAT_HW_CIPHER_ALGO_NULL;
3045 		crc_cfg.hash_cmp_val = 0;
3046 		crc_cfg.dir = ICP_QAT_HW_CIPHER_ENCRYPT;
3047 		crc_cfg.associated_data_len_in_bytes = 0;
3048 		crc_cfg.crc_reflect_out =
3049 				ICP_QAT_HW_CIPHER_UCS_REFLECT_OUT_ENABLED;
3050 		crc_cfg.crc_reflect_in =
3051 				ICP_QAT_HW_CIPHER_UCS_REFLECT_IN_ENABLED;
3052 		crc_cfg.crc_encoding = ICP_QAT_HW_CIPHER_UCS_CRC32;
3053 
3054 		crc_cd_gen4 =
3055 			(struct icp_qat_hw_gen4_crc_cd *)cdesc->cd_cur_ptr;
3056 		crc_cd_gen4->ucs_config[0] =
3057 			ICP_QAT_HW_UCS_CIPHER_GEN4_BUILD_CONFIG_LOWER(crc_cfg);
3058 		crc_cd_gen4->ucs_config[1] =
3059 			ICP_QAT_HW_UCS_CIPHER_GEN4_BUILD_CONFIG_UPPER(crc_cfg);
3060 		crc_cd_gen4->polynomial = ETH_CRC32_POLYNOMIAL_BE;
3061 		crc_cd_gen4->initial_crc = ETH_CRC32_INIT_VAL_BE;
3062 		crc_cd_gen4->xor_val = ETH_CRC32_XOR_OUT_BE;
3063 		crc_cd_gen4->reserved1 = 0;
3064 		crc_cd_gen4->reserved2 = 0;
3065 		crc_cd_gen4->reserved3 = 0;
3066 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen4_crc_cd);
3067 		break;
3068 	default:
3069 		return -EINVAL;
3070 	}
3071 
3072 	crc_cd_ctrl->hash_cfg_offset = crc_cfg_offset >> 3;
3073 	crc_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
3074 	crc_cd_ctrl->inner_res_sz = cdesc->digest_length;
3075 	crc_cd_ctrl->final_sz = cdesc->digest_length;
3076 	crc_cd_ctrl->inner_state1_sz = 0;
3077 	crc_cd_ctrl->inner_state2_sz  = 0;
3078 	crc_cd_ctrl->inner_state2_offset = 0;
3079 	crc_cd_ctrl->outer_prefix_sz = 0;
3080 	crc_cd_ctrl->outer_config_offset = 0;
3081 	crc_cd_ctrl->outer_state1_sz = 0;
3082 	crc_cd_ctrl->outer_res_sz = 0;
3083 	crc_cd_ctrl->outer_prefix_offset = 0;
3084 
3085 	crc_param->auth_res_sz = cdesc->digest_length;
3086 	crc_param->u2.aad_sz = 0;
3087 	crc_param->hash_state_sz = 0;
3088 
3089 	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
3090 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
3091 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
3092 
3093 	return 0;
3094 }
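
/*
 * Across generations the values programmed above describe the IEEE
 * 802.3 Ethernet CRC-32: polynomial 0x04C11DB7, initial value and final
 * XOR 0xFFFFFFFF, with reflected input/output (set explicitly in the
 * GEN4+ UCS config; fixed-function on earlier generations). As a sanity
 * check, this CRC over the ASCII string "123456789" is 0xCBF43926.
 */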
3095 
3096 static int
3097 qat_sym_session_configure_crc(struct rte_cryptodev *dev,
3098 		const struct rte_crypto_sym_xform *cipher_xform,
3099 		struct qat_sym_session *session)
3100 {
3101 	struct qat_cryptodev_private *internals = dev->data->dev_private;
3102 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
3103 	int ret;
3104 
3105 	session->is_auth = 1;
3106 	session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
3107 	session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
3108 	session->auth_op = cipher_xform->cipher.op ==
3109 				RTE_CRYPTO_CIPHER_OP_ENCRYPT ?
3110 					ICP_QAT_HW_AUTH_GENERATE :
3111 					ICP_QAT_HW_AUTH_VERIFY;
3112 	session->digest_length = RTE_ETHER_CRC_LEN;
3113 
3114 	ret = qat_sym_cd_crc_set(session, qat_dev_gen);
3115 	if (ret < 0)
3116 		return ret;
3117 
3118 	return 0;
3119 }
3120 
3121 static int
3122 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
3123 		struct rte_security_session_conf *conf, void *session_private,
3124 		rte_iova_t session_paddr)
3125 {
3126 	int ret;
3127 	int qat_cmd_id;
3128 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3129 	struct rte_crypto_sym_xform *xform = NULL;
3130 	struct qat_sym_session *session = session_private;
3131 	struct qat_cryptodev_private *internals = cdev->data->dev_private;
3132 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
3133 
3134 	/* Clear the session */
3135 	memset(session, 0, qat_sym_session_get_private_size(dev));
3136 
3137 	ret = qat_sec_session_check_docsis(conf);
3138 	if (ret) {
3139 		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
3140 		return ret;
3141 	}
3142 
3143 	xform = conf->crypto_xform;
3144 
3145 	/* Verify the session physical address is known */
3146 	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
3147 		QAT_LOG(ERR,
3148 			"Session physical address unknown. Bad memory pool.");
3149 		return -EINVAL;
3150 	}
3151 
3152 	/* Set context descriptor physical address */
3153 	session->cd_paddr = session_paddr +
3154 			offsetof(struct qat_sym_session, cd);
3155 	session->prefix_paddr = session_paddr +
3156 			offsetof(struct qat_sym_session, prefix_state);
3157 
3158 	/* Get requested QAT command id - should be cipher */
3159 	qat_cmd_id = qat_get_cmd_id(xform);
3160 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
3161 		QAT_LOG(ERR, "Unsupported xform chain requested");
3162 		return -ENOTSUP;
3163 	} else if (internals->internal_capabilities
3164 					& QAT_SYM_CAP_CIPHER_CRC) {
3165 		qat_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_CRC;
3166 	}
3167 	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
3168 
3169 	ret = qat_sym_session_configure_cipher(dev, xform, session);
3170 	if (ret < 0)
3171 		return ret;
3172 
3173 	if (qat_cmd_id == ICP_QAT_FW_LA_CMD_CIPHER_CRC) {
3174 		ret = qat_sym_session_configure_crc(dev, xform, session);
3175 		if (ret < 0)
3176 			return ret;
3177 	}
3178 	qat_sym_session_finalize(session);
3179 
3180 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
3181 			(void *)session);
3182 }
3183 
3184 int
3185 qat_security_session_create(void *dev,
3186 				struct rte_security_session_conf *conf,
3187 				struct rte_security_session *sess)
3188 {
3189 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3190 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3191 	int ret;
3192 
3193 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
3194 			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
3195 		QAT_LOG(ERR, "Invalid security protocol");
3196 		return -EINVAL;
3197 	}
3198 
3199 #ifdef RTE_QAT_OPENSSL
3200 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
3201 	if (ossl_legacy_provider_load())
3202 		return -EINVAL;
3203 #endif
3204 #endif
3205 	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
3206 			sess_private_data, SECURITY_GET_SESS_PRIV_IOVA(sess));
3207 	if (ret != 0) {
3208 		QAT_LOG(ERR, "Failed to configure session parameters");
3209 		return ret;
3210 	}
3211 
3212 #ifdef RTE_QAT_OPENSSL
3213 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
3214 	ossl_legacy_provider_unload();
3215 #endif
3216 #endif
3217 	return 0;
3218 }
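
/*
 * Illustrative application-side sketch (variable names and the exact
 * rte_security_session_create() prototype depend on the DPDK version in
 * use):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
 *		.docsis = { .direction = RTE_SECURITY_DOCSIS_DOWNLINK },
 *		.crypto_xform = &cipher_xform,
 *	};
 *
 *	sess = rte_security_session_create(sec_ctx, &conf, sess_mempool);
 */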
3219 
3220 int
3221 qat_security_session_destroy(void *dev,
3222 				 struct rte_security_session *sess)
3223 {
3224 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
3225 	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
3226 
3227 	if (sess_priv) {
3228 #ifdef RTE_QAT_OPENSSL
3229 		if (s->bpi_ctx)
3230 			bpi_cipher_ctx_free(s->bpi_ctx);
3231 #else
3232 		if (s->mb_mgr)
3233 			free_mb_mgr(s->mb_mgr);
3234 #endif
3235 		memset(s, 0, qat_sym_session_get_private_size(dev));
3236 	}
3237 
3238 	return 0;
3239 }
3240 
3241 unsigned int
3242 qat_security_session_get_size(void *device __rte_unused)
3243 {
3244 	return sizeof(struct qat_sym_session);
3245 }
3246