/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
#include <openssl/evp.h>	/* Needed for bpi runt block processing */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security.h>
#endif

#include "qat_logs.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
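/*
 * Note: the arrays above are the standard initial hash values (H0..Hn)
 * from FIPS 180-2, serialized as big-endian bytes. They are assumed to
 * seed the hash state1 for plain-digest (ICP_QAT_HW_AUTH_MODE0)
 * sessions, where no keyed precompute is needed.
 */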

/** Frees a context previously created.
 *  Depends on OpenSSL libcrypto.
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}

/** Creates a context for either AES or DES in ECB mode.
 *  Depends on OpenSSL libcrypto.
 */
static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		const uint8_t *key, uint16_t key_length, void **ctx)
{
	const EVP_CIPHER *algo = NULL;
	int ret;
	*ctx = EVP_CIPHER_CTX_new();

	if (*ctx == NULL) {
		ret = -ENOMEM;
		goto ctx_init_err;
	}

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
			algo = EVP_aes_128_ecb();
		else
			algo = EVP_aes_256_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
		ret = -EINVAL;
		goto ctx_init_err;
	}

	return 0;

ctx_init_err:
	if (*ctx != NULL)
		EVP_CIPHER_CTX_free(*ctx);
	return ret;
}
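/*
 * The two helpers below scan the device capability table, which is
 * terminated by an entry whose op field is RTE_CRYPTO_OP_TYPE_UNDEFINED,
 * looking for a matching symmetric cipher/auth algorithm.
 */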
static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_sym_dev_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}

static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_sym_dev_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;

		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}

void
qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		/* AES-GCM and AES-CCM work in different directions:
		 * GCM first encrypts and then generates the hash, whereas
		 * AES-CCM first generates the hash and then encrypts.
		 * A similar relation applies to decryption.
		 */
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			else
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
		else
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			else
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
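		/*
		 * An XTS key is two equal-length AES keys concatenated,
		 * hence each half is validated as a standard AES key below.
		 */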
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_sym_session_aead_create_cd_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}

int
qat_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		QAT_LOG(ERR,
		    "Crypto QAT PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}

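/*
 * "Mixed" sessions pair a wireless hash with a different cipher (or
 * vice versa), e.g. ZUC EIA3 auth combined with a non-ZUC cipher. As
 * the code below shows, these need the extended hash/protocol flags
 * and a newer device generation: GEN2 only when the device advertises
 * QAT_SYM_CAP_MIXED_CRYPTO, otherwise GEN3.
 */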
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}

int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	int ret;
	int qat_cmd_id;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			/* Special handling of mixed hash+cipher algorithms */
			qat_sym_session_handle_mixed(dev, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			/* Special handling of mixed hash+cipher algorithms */
			qat_sym_session_handle_mixed(dev, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		QAT_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	}

	return 0;
}

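/*
 * Single-pass (SPC) sessions let GEN3+ devices run an AEAD cipher and
 * its authentication in one pass through the cipher slice, instead of
 * chaining separate cipher and hash slices; the session is therefore
 * reprogrammed below as a plain CIPHER command with AEAD parameters.
 */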
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		struct rte_crypto_aead_xform *aead_xform)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *) &session->fw_req.serv_specif_rqpars;

	session->is_single_pass = 1;
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	} else {
		/* Chacha-Poly is a special case that uses QAT CTR mode */
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	if (qat_sym_session_aead_create_cd_cipher(session,
			aead_xform->key.data, aead_xform->key.length))
		return -EINVAL;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;
	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
		ICP_QAT_FW_LA_RET_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_RET_AUTH_RES);
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
		ICP_QAT_FW_LA_CMP_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_CMP_AUTH_RES);
	}
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
	session->fw_req.comn_hdr.service_cmd_id =
			ICP_QAT_FW_LA_CMD_CIPHER;
	session->cd.cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				session->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				session->qat_dir);
	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
			aead_xform->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	session->cd.cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
				aead_xform->aad_length);
	cipher_param->spc_aad_sz = aead_xform->aad_length;
	cipher_param->spc_auth_res_sz = aead_xform->digest_length;

	return 0;
}

int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;
	session->aes_cmac = 0;

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * The cipher descriptor content must be created
			 * first, then the authentication descriptor.
			 */

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * The authentication descriptor content must be
			 * created first, then the cipher descriptor.
			 */

			if (qat_sym_session_aead_create_cd_auth(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_session_aead_create_cd_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}

int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_sym_dev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
				QAT_AES_GCM_SPC_IV_SIZE) {
			return qat_sym_session_handle_single_pass(session,
					aead_xform);
		}
		if (session->cipher_iv.length == 0)
			session->cipher_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		return qat_sym_session_handle_single_pass(session,
						aead_xform);
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u",
				aead_xform->algo);
		return -EINVAL;
	}

	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * The cipher descriptor content must be created first,
		 * then the authentication descriptor.
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * The authentication descriptor content must be created
		 * first, then the cipher descriptor.
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}

unsigned int qat_sym_session_get_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
}

/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
{
	switch (qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_DES:
		return ICP_QAT_HW_DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_3DES:
		return ICP_QAT_HW_3DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_AES128:
	case ICP_QAT_HW_CIPHER_ALGO_AES192:
	case ICP_QAT_HW_CIPHER_ALGO_AES256:
		return ICP_QAT_HW_AES_BLK_SZ;
	default:
		QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_BLK_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

	return 0;
}

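/*
 * Computes the partial hash of a single block via the helpers above and
 * byte-swaps the resulting state words: OpenSSL keeps the intermediate
 * digest state in host order, while the state loaded into the QAT
 * descriptor is expected in big-endian word order.
 */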
1181 
1182 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1183 			uint8_t *data_in,
1184 			uint8_t *data_out)
1185 {
1186 	int digest_size;
1187 	uint8_t digest[qat_hash_get_digest_size(
1188 			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1189 	uint32_t *hash_state_out_be32;
1190 	uint64_t *hash_state_out_be64;
1191 	int i;
1192 
1193 	digest_size = qat_hash_get_digest_size(hash_alg);
1194 	if (digest_size <= 0)
1195 		return -EFAULT;
1196 
1197 	hash_state_out_be32 = (uint32_t *)data_out;
1198 	hash_state_out_be64 = (uint64_t *)data_out;
1199 
1200 	switch (hash_alg) {
1201 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1202 		if (partial_hash_sha1(data_in, digest))
1203 			return -EFAULT;
1204 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1205 			*hash_state_out_be32 =
1206 				rte_bswap32(*(((uint32_t *)digest)+i));
1207 		break;
1208 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1209 		if (partial_hash_sha224(data_in, digest))
1210 			return -EFAULT;
1211 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1212 			*hash_state_out_be32 =
1213 				rte_bswap32(*(((uint32_t *)digest)+i));
1214 		break;
1215 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1216 		if (partial_hash_sha256(data_in, digest))
1217 			return -EFAULT;
1218 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1219 			*hash_state_out_be32 =
1220 				rte_bswap32(*(((uint32_t *)digest)+i));
1221 		break;
1222 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1223 		if (partial_hash_sha384(data_in, digest))
1224 			return -EFAULT;
1225 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1226 			*hash_state_out_be64 =
1227 				rte_bswap64(*(((uint64_t *)digest)+i));
1228 		break;
1229 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1230 		if (partial_hash_sha512(data_in, digest))
1231 			return -EFAULT;
1232 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1233 			*hash_state_out_be64 =
1234 				rte_bswap64(*(((uint64_t *)digest)+i));
1235 		break;
1236 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1237 		if (partial_hash_md5(data_in, data_out))
1238 			return -EFAULT;
1239 		break;
1240 	default:
1241 		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1242 		return -EFAULT;
1243 	}
1244 
1245 	return 0;
1246 }
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];

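/*
 * Derives a CMAC subkey as per NIST SP 800-38B / RFC 4493: shift the
 * 128-bit base block left by one bit and, if the dropped MSB was set,
 * XOR the last byte with the constant Rb (QAT_AES_CMAC_CONST_RB).
 */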
static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
{
	int i;

	derived[0] = base[0] << 1;
	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ; i++) {
		derived[i] = base[i] << 1;
		derived[i - 1] |= base[i] >> 7;
	}

	if (base[0] & 0x80)
		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
}

static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len,
				uint8_t aes_cmac)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {

		/* CMAC */
		if (aes_cmac) {
			AES_KEY enc_key;
			uint8_t *in = NULL;
			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
			uint8_t *k1, *k2;

			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;

			in = rte_zmalloc("AES CMAC K1",
					 ICP_QAT_HW_AES_128_KEY_SZ, 16);

			if (in == NULL) {
				QAT_LOG(ERR, "Failed to alloc memory");
				return -ENOMEM;
			}

			rte_memcpy(in, AES_CMAC_SEED,
				   ICP_QAT_HW_AES_128_KEY_SZ);
			rte_memcpy(p_state_buf, auth_key, auth_keylen);

			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in);
				return -EFAULT;
			}

			AES_encrypt(in, k0, &enc_key);

			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

			aes_cmac_key_derive(k0, k1);
			aes_cmac_key_derive(k1, k2);

			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
			rte_free(in);
			return 0;
		} else {
			static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			};

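			/*
			 * AES-XCBC (RFC 3566) subkeys: K1 = E(K, 0x01^16),
			 * K2 = E(K, 0x02^16), K3 = E(K, 0x03^16). The seed
			 * above holds the three constant plaintext blocks
			 * encrypted in the loop below.
			 */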
			uint8_t *in = NULL;
			uint8_t *out = p_state_buf;
			int x;
			AES_KEY enc_key;

			in = rte_zmalloc("working mem for key",
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
			if (in == NULL) {
				QAT_LOG(ERR, "Failed to alloc memory");
				return -ENOMEM;
			}

			rte_memcpy(in, qat_aes_xcbc_key_seed,
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
				if (AES_set_encrypt_key(auth_key,
							auth_keylen << 3,
							&enc_key) != 0) {
					rte_free(in -
					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
					memset(out -
					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
					return -EFAULT;
				}
				AES_encrypt(in, out, &enc_key);
				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			}
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
			return 0;
		}

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

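		/*
		 * GHASH hash subkey H = E(K, 0^128): encrypt the all-zero
		 * block with the cipher key and place it at the start of
		 * state2; the trailing len(A) and counter-block fields are
		 * left zeroed here.
		 */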
		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL) {
			QAT_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);	/* don't leak the work buffer on error */
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size < 0)
		return block_size;
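	/*
	 * HMAC precompute (RFC 2104): HMAC(K, m) =
	 * H((K ^ opad) || H((K ^ ipad) || m)). state1 gets the partial hash
	 * of the ipad-padded key block and state2 the partial hash of the
	 * opad-padded key block, so the hardware can resume both hashes.
	 */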
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}

static void
qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
		enum qat_sym_proto_flag proto_flags)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	switch (proto_flags) {
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	}

	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}

/*
 * SNOW 3G and ZUC should never use this function; they set their
 * protocol flag directly in both the cipher and auth parts of the
 * content descriptor building functions.
 */
static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
	enum qat_sym_proto_flag qat_proto_flag =
			QAT_CRYPTO_PROTO_FLAG_NONE;

	switch (proto) {
	case ICP_QAT_FW_LA_GCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		break;
	case ICP_QAT_FW_LA_CCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		break;
	}

	return qat_proto_flag;
}

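/*
 * Builds the cipher half of the content descriptor: chains the firmware
 * slices for the requested command, writes the cipher config word and
 * the (possibly padded or transformed) key, and records the resulting
 * descriptor size.
 */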
int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

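	/*
	 * Select the key_convert mode. As a working assumption from the
	 * branches below: CTR-mode ciphers always run as encrypt with no
	 * conversion, the wireless algorithms always require the hardware
	 * key conversion, and block-cipher decrypt needs the converted
	 * (decryption) key schedule.
	 */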
1555 	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
1556 		/*
1557 		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
1558 		 * Overriding default values previously set
1559 		 */
1560 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
1561 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1562 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
1563 		|| cdesc->qat_cipher_alg ==
1564 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
1565 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
1566 	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
1567 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1568 	else
1569 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
1570 
1571 	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
1572 		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
1573 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1574 		cipher_cd_ctrl->cipher_state_sz =
1575 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1576 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1577 
1578 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1579 		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
1580 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
1581 		cipher_cd_ctrl->cipher_padding_sz =
1582 					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
1583 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
1584 		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
1585 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
1586 		qat_proto_flag =
1587 			qat_get_crypto_proto_flag(header->serv_specif_flags);
1588 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
1589 		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
1590 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
1591 		qat_proto_flag =
1592 			qat_get_crypto_proto_flag(header->serv_specif_flags);
1593 	} else if (cdesc->qat_cipher_alg ==
1594 		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1595 		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
1596 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1597 		cipher_cd_ctrl->cipher_state_sz =
1598 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1599 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1600 		cdesc->min_qat_dev_gen = QAT_GEN2;
1601 	} else {
1602 		total_key_size = cipherkeylen;
1603 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
1604 		qat_proto_flag =
1605 			qat_get_crypto_proto_flag(header->serv_specif_flags);
1606 	}
1607 	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
1608 	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1609 	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
1610 
1611 	header->service_cmd_id = cdesc->qat_cmd;
1612 	qat_sym_session_init_common_hdr(header, qat_proto_flag);
1613 
1614 	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
1615 	cipher->cipher_config.val =
1616 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
1617 					cdesc->qat_cipher_alg, key_convert,
1618 					cdesc->qat_dir);
1619 
1620 	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1621 		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
1622 					sizeof(struct icp_qat_hw_cipher_config)
1623 					+ cipherkeylen);
1624 		memcpy(cipher->key, cipherkey, cipherkeylen);
1625 		memcpy(temp_key, cipherkey, cipherkeylen);
1626 
1627 		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
1628 		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
1629 								wordIndex++)
1630 			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
1631 
1632 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1633 					cipherkeylen + cipherkeylen;
1634 	} else {
1635 		memcpy(cipher->key, cipherkey, cipherkeylen);
1636 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1637 					cipherkeylen;
1638 	}
1639 
1640 	if (total_key_size > cipherkeylen) {
1641 		uint32_t padding_size =  total_key_size-cipherkeylen;
1642 		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1643 			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
1644 			/* K3 not provided, so reuse K1 as K3 */
1645 			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
1646 		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1647 			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
1648 			/* K2 and K3 not provided, so reuse K1 as K2 and K3 */
1649 			memcpy(cdesc->cd_cur_ptr, cipherkey,
1650 				cipherkeylen);
1651 			memcpy(cdesc->cd_cur_ptr + cipherkeylen,
1652 				cipherkey, cipherkeylen);
1653 		} else
1654 			memset(cdesc->cd_cur_ptr, 0, padding_size);
1655 
1656 		cdesc->cd_cur_ptr += padding_size;
1657 	}
1658 	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
1659 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
1660 
1661 	return 0;
1662 }
1663 
1664 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
1665 						const uint8_t *authkey,
1666 						uint32_t authkeylen,
1667 						uint32_t aad_length,
1668 						uint32_t digestsize,
1669 						unsigned int operation)
1670 {
1671 	struct icp_qat_hw_auth_setup *hash;
1672 	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1673 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1674 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1675 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1676 	void *ptr = &req_tmpl->cd_ctrl;
1677 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1678 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1679 	struct icp_qat_fw_la_auth_req_params *auth_param =
1680 		(struct icp_qat_fw_la_auth_req_params *)
1681 		((char *)&req_tmpl->serv_specif_rqpars +
1682 		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1683 	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1684 	uint16_t hash_offset, cd_size;
1685 	uint32_t *aad_len = NULL;
1686 	uint32_t wordIndex = 0;
1687 	uint32_t *pTempKey;
1688 	enum qat_sym_proto_flag qat_proto_flag =
1689 		QAT_CRYPTO_PROTO_FLAG_NONE;
1690 
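	/*
	 * Chain the firmware slices for this command: auth-only ends in
	 * a DRAM write, hash-then-cipher feeds the auth slice into the
	 * cipher slice. For CIPHER_HASH the chain and cd_cur_ptr are
	 * expected to have been set up by the cipher-side CD code.
	 */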
1691 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1692 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1693 					ICP_QAT_FW_SLICE_AUTH);
1694 		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1695 					ICP_QAT_FW_SLICE_DRAM_WR);
1696 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1697 	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1698 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1699 				ICP_QAT_FW_SLICE_AUTH);
1700 		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1701 				ICP_QAT_FW_SLICE_CIPHER);
1702 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1703 				ICP_QAT_FW_SLICE_CIPHER);
1704 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1705 				ICP_QAT_FW_SLICE_DRAM_WR);
1706 		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1707 	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1708 		QAT_LOG(ERR, "Invalid param, must be a hash command.");
1709 		return -EFAULT;
1710 	}
1711 
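	/*
	 * For verify operations the firmware compares the digest in-line
	 * and returns no auth result; for generate it returns the digest
	 * and skips the comparison.
	 */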
1712 	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1713 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1714 				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1715 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1716 				ICP_QAT_FW_LA_CMP_AUTH_RES);
1717 		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1718 	} else {
1719 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1720 					   ICP_QAT_FW_LA_RET_AUTH_RES);
1721 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1722 					   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1723 		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1724 	}
1725 
1726 	/*
1727 	 * Set up the inner hash config
1728 	 */
1729 	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
1730 	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1731 	hash->auth_config.reserved = 0;
1732 	hash->auth_config.config =
1733 			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1734 				cdesc->qat_hash_alg, digestsize);
1735 
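	/*
	 * The auth counter is only meaningful for block-based (H)MAC
	 * modes; mode-0 hashes and the MAC algorithms listed below leave
	 * it at zero, otherwise it starts at the byte-swapped block size.
	 */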
1736 	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1737 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1738 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1739 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1740 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1741 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1742 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
1744 		hash->auth_counter.counter = 0;
1745 	else {
1746 		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1747 
1748 		if (block_size < 0)
1749 			return block_size;
1750 		hash->auth_counter.counter = rte_bswap32(block_size);
1751 	}
1752 
1753 	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1754 
1755 	/*
1756 	 * cd_cur_ptr now points at the state1 information.
1757 	 */
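	/*
	 * For HMAC modes state1/state2 receive the pre-computed inner and
	 * outer digests; other algorithms use them for keys and
	 * algorithm-specific state (e.g. the GHASH H value for GCM).
	 */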
1758 	switch (cdesc->qat_hash_alg) {
1759 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
1760 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1761 			/* Plain SHA-1 */
1762 			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1763 					sizeof(sha1InitialState));
1764 			state1_size = qat_hash_get_state1_size(
1765 					cdesc->qat_hash_alg);
1766 			break;
1767 		}
1768 		/* SHA-1 HMAC */
1769 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1770 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
1771 			cdesc->aes_cmac)) {
1772 			QAT_LOG(ERR, "(SHA)precompute failed");
1773 			return -EFAULT;
1774 		}
1775 		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1776 		break;
1777 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
1778 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1779 			/* Plain SHA-224 */
1780 			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1781 					sizeof(sha224InitialState));
1782 			state1_size = qat_hash_get_state1_size(
1783 					cdesc->qat_hash_alg);
1784 			break;
1785 		}
1786 		/* SHA-224 HMAC */
1787 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1788 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
1789 			cdesc->aes_cmac)) {
1790 			QAT_LOG(ERR, "(SHA)precompute failed");
1791 			return -EFAULT;
1792 		}
1793 		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1794 		break;
1795 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
1796 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1797 			/* Plain SHA-256 */
1798 			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1799 					sizeof(sha256InitialState));
1800 			state1_size = qat_hash_get_state1_size(
1801 					cdesc->qat_hash_alg);
1802 			break;
1803 		}
1804 		/* SHA-256 HMAC */
1805 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1806 			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
1807 			cdesc->aes_cmac)) {
1808 			QAT_LOG(ERR, "(SHA)precompute failed");
1809 			return -EFAULT;
1810 		}
1811 		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1812 		break;
1813 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
1814 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1815 			/* Plain SHA-384 */
1816 			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1817 					sizeof(sha384InitialState));
1818 			state1_size = qat_hash_get_state1_size(
1819 					cdesc->qat_hash_alg);
1820 			break;
1821 		}
1822 		/* SHA-384 HMAC */
1823 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1824 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
1825 			cdesc->aes_cmac)) {
1826 			QAT_LOG(ERR, "(SHA)precompute failed");
1827 			return -EFAULT;
1828 		}
1829 		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1830 		break;
1831 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
1832 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1833 			/* Plain SHA-512 */
1834 			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1835 					sizeof(sha512InitialState));
1836 			state1_size = qat_hash_get_state1_size(
1837 					cdesc->qat_hash_alg);
1838 			break;
1839 		}
1840 		/* SHA-512 HMAC */
1841 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1842 			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
1843 			cdesc->aes_cmac)) {
1844 			QAT_LOG(ERR, "(SHA)precompute failed");
1845 			return -EFAULT;
1846 		}
1847 		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1848 		break;
1849 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1850 		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1851 
1852 		if (cdesc->aes_cmac)
1853 			memset(cdesc->cd_cur_ptr, 0, state1_size);
1854 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1855 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1856 			&state2_size, cdesc->aes_cmac)) {
1857 			QAT_LOG(ERR, "(%s)precompute failed",
1858 				cdesc->aes_cmac ? "CMAC" : "XCBC");
1861 			return -EFAULT;
1862 		}
1863 		break;
1864 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1865 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1866 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1867 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1868 		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1869 			authkeylen, cdesc->cd_cur_ptr + state1_size,
1870 			&state2_size, cdesc->aes_cmac)) {
1871 			QAT_LOG(ERR, "(GCM)precompute failed");
1872 			return -EFAULT;
1873 		}
1874 		/*
1875 		 * Write the AAD length into bytes 16-19 of state2,
1876 		 * in big-endian format. The enclosing field is 8 bytes wide.
1877 		 */
1878 		auth_param->u2.aad_sz =
1879 				RTE_ALIGN_CEIL(aad_length, 16);
1880 		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1881 
1882 		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1883 					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1884 					ICP_QAT_HW_GALOIS_H_SZ);
1885 		*aad_len = rte_bswap32(aad_length);
1886 		cdesc->aad_len = aad_length;
1887 		break;
1888 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
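		/*
		 * UIA2 appends an embedded SNOW 3G cipher config and the
		 * auth key after state2; the IV-sized area following the
		 * key is zeroed.
		 */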
1889 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1890 		state1_size = qat_hash_get_state1_size(
1891 				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1892 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1893 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1894 
1895 		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1896 				(cdesc->cd_cur_ptr + state1_size + state2_size);
1897 		cipherconfig->cipher_config.val =
1898 		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1899 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1900 			ICP_QAT_HW_CIPHER_KEY_CONVERT,
1901 			ICP_QAT_HW_CIPHER_ENCRYPT);
1902 		memcpy(cipherconfig->key, authkey, authkeylen);
1903 		memset(cipherconfig->key + authkeylen,
1904 				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1905 		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1906 				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1907 		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1908 		break;
1909 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
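		/*
		 * EIA3 is run in auth mode 0 with the key in state2 and
		 * room reserved after it for the IV; ZUC needs a GEN2 or
		 * newer device.
		 */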
1910 		hash->auth_config.config =
1911 			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1912 				cdesc->qat_hash_alg, digestsize);
1913 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1914 		state1_size = qat_hash_get_state1_size(
1915 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1916 		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1917 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1918 			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1919 
1920 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1921 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1922 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1923 		cdesc->min_qat_dev_gen = QAT_GEN2;
1924 
1925 		break;
1926 	case ICP_QAT_HW_AUTH_ALGO_MD5:
1927 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1928 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
1929 			cdesc->aes_cmac)) {
1930 			QAT_LOG(ERR, "(MD5)precompute failed");
1931 			return -EFAULT;
1932 		}
1933 		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1934 		break;
1935 	case ICP_QAT_HW_AUTH_ALGO_NULL:
1936 		state1_size = qat_hash_get_state1_size(
1937 				ICP_QAT_HW_AUTH_ALGO_NULL);
1938 		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1939 		break;
1940 	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
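		/*
		 * CCM: state2 carries the key plus the encrypted counter
		 * block, and the AAD size is grown to cover the B0 block
		 * and the AAD length encoding defined by the CCM spec.
		 */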
1941 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1942 		state1_size = qat_hash_get_state1_size(
1943 				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1944 		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1945 				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1946 
1947 		if (aad_length > 0) {
1948 			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
1949 			ICP_QAT_HW_CCM_AAD_LEN_INFO;
1950 			auth_param->u2.aad_sz =
1951 			RTE_ALIGN_CEIL(aad_length,
1952 			ICP_QAT_HW_CCM_AAD_ALIGNMENT);
1953 		} else {
1954 			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
1955 		}
1956 		cdesc->aad_len = aad_length;
1957 		hash->auth_counter.counter = 0;
1958 
1959 		hash_cd_ctrl->outer_prefix_sz = digestsize;
1960 		auth_param->hash_state_sz = digestsize;
1961 
1962 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1963 		break;
1964 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1965 		state1_size = qat_hash_get_state1_size(
1966 				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
1967 		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
1968 		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1969 		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
1970 							+ authkeylen);
1971 		/*
1972 		 * The Inner Hash Initial State2 block must contain IK
1973 		 * (Initialisation Key), followed by IK XOR-ed with KM
1974 		 * (Key Modifier): IK||(IK^KM).
1975 		 */
1976 		/* write the auth key */
1977 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1978 		/* initialise temp key with auth key */
1979 		memcpy(pTempKey, authkey, authkeylen);
1980 		/* XOR the key with the KASUMI F9 key modifier, word by word */
1981 		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
1982 			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
1983 		break;
1984 	default:
1985 		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
1986 		return -EFAULT;
1987 	}
1988 
1989 	/* Request template setup */
1990 	qat_sym_session_init_common_hdr(header, qat_proto_flag);
1991 	header->service_cmd_id = cdesc->qat_cmd;
1992 
1993 	/* Auth CD config setup */
1994 	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
1995 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
1996 	hash_cd_ctrl->inner_res_sz = digestsize;
1997 	hash_cd_ctrl->final_sz = digestsize;
1998 	hash_cd_ctrl->inner_state1_sz = state1_size;
1999 	auth_param->auth_res_sz = digestsize;
2000 
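	/*
	 * state2 sits after the auth setup block and the 8-byte-aligned
	 * state1 region; the offset is in quadwords.
	 */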
2001 	hash_cd_ctrl->inner_state2_sz  = state2_size;
2002 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2003 			((sizeof(struct icp_qat_hw_auth_setup) +
2004 			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2005 					>> 3);
2006 
2007 	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2008 	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
2009 
2010 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2011 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2012 
2013 	return 0;
2014 }
2015 
2016 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2017 {
2018 	switch (key_len) {
2019 	case ICP_QAT_HW_AES_128_KEY_SZ:
2020 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2021 		break;
2022 	case ICP_QAT_HW_AES_192_KEY_SZ:
2023 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2024 		break;
2025 	case ICP_QAT_HW_AES_256_KEY_SZ:
2026 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2027 		break;
2028 	default:
2029 		return -EINVAL;
2030 	}
2031 	return 0;
2032 }
2033 
2034 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2035 		enum icp_qat_hw_cipher_algo *alg)
2036 {
2037 	switch (key_len) {
2038 	case ICP_QAT_HW_AES_128_KEY_SZ:
2039 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2040 		break;
2041 	case ICP_QAT_HW_AES_256_KEY_SZ:
2042 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2043 		break;
2044 	default:
2045 		return -EINVAL;
2046 	}
2047 	return 0;
2048 }
2049 
2050 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2051 {
2052 	switch (key_len) {
2053 	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2054 		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2055 		break;
2056 	default:
2057 		return -EINVAL;
2058 	}
2059 	return 0;
2060 }
2061 
2062 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2063 {
2064 	switch (key_len) {
2065 	case ICP_QAT_HW_KASUMI_KEY_SZ:
2066 		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2067 		break;
2068 	default:
2069 		return -EINVAL;
2070 	}
2071 	return 0;
2072 }
2073 
2074 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2075 {
2076 	switch (key_len) {
2077 	case ICP_QAT_HW_DES_KEY_SZ:
2078 		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2079 		break;
2080 	default:
2081 		return -EINVAL;
2082 	}
2083 	return 0;
2084 }
2085 
2086 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2087 {
2088 	switch (key_len) {
2089 	case QAT_3DES_KEY_SZ_OPT1:
2090 	case QAT_3DES_KEY_SZ_OPT2:
2091 	case QAT_3DES_KEY_SZ_OPT3:
2092 		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2093 		break;
2094 	default:
2095 		return -EINVAL;
2096 	}
2097 	return 0;
2098 }
2099 
2100 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2101 {
2102 	switch (key_len) {
2103 	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2104 		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2105 		break;
2106 	default:
2107 		return -EINVAL;
2108 	}
2109 	return 0;
2110 }
2111 
2112 #ifdef RTE_LIB_SECURITY
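/*
 * A DOCSIS security session accepts exactly one AES-DOCSISBPI cipher
 * xform with a 128- or 256-bit key and a block-sized IV: downlink
 * pairs CRC generation with encryption, uplink pairs decryption with
 * CRC verification.
 */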
2113 static int
2114 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2115 {
2116 	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2117 	struct rte_security_docsis_xform *docsis = &conf->docsis;
2118 
2119 	/* CRC generate -> Cipher encrypt */
2120 	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2121 
2122 		if (crypto_sym != NULL &&
2123 		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2124 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2125 		    crypto_sym->cipher.algo ==
2126 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2127 		    (crypto_sym->cipher.key.length ==
2128 					ICP_QAT_HW_AES_128_KEY_SZ ||
2129 		     crypto_sym->cipher.key.length ==
2130 					ICP_QAT_HW_AES_256_KEY_SZ) &&
2131 		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2132 		    crypto_sym->next == NULL) {
2133 			return 0;
2134 		}
2135 	/* Cipher decrypt -> CRC verify */
2136 	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2137 
2138 		if (crypto_sym != NULL &&
2139 		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2140 		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2141 		    crypto_sym->cipher.algo ==
2142 					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2143 		    (crypto_sym->cipher.key.length ==
2144 					ICP_QAT_HW_AES_128_KEY_SZ ||
2145 		     crypto_sym->cipher.key.length ==
2146 					ICP_QAT_HW_AES_256_KEY_SZ) &&
2147 		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2148 		    crypto_sym->next == NULL) {
2149 			return 0;
2150 		}
2151 	}
2152 
2153 	return -EINVAL;
2154 }
2155 
2156 static int
2157 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2158 		struct rte_security_session_conf *conf, void *session_private)
2159 {
2160 	int ret;
2161 	int qat_cmd_id;
2162 	struct rte_crypto_sym_xform *xform = NULL;
2163 	struct qat_sym_session *session = session_private;
2164 
2165 	/* Clear the session */
2166 	memset(session, 0, qat_sym_session_get_private_size(dev));
2167 
2168 	ret = qat_sec_session_check_docsis(conf);
2169 	if (ret) {
2170 		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2171 		return ret;
2172 	}
2173 
2174 	xform = conf->crypto_xform;
2175 
2176 	/* Verify the session physical address is known */
2177 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2178 	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2179 		QAT_LOG(ERR,
2180 			"Session physical address unknown. Bad memory pool.");
2181 		return -EINVAL;
2182 	}
2183 
2184 	/* Set context descriptor physical address */
2185 	session->cd_paddr = session_paddr +
2186 			offsetof(struct qat_sym_session, cd);
2187 
2188 	session->min_qat_dev_gen = QAT_GEN1;
2189 
2190 	/* Get requested QAT command id - should be cipher */
2191 	qat_cmd_id = qat_get_cmd_id(xform);
2192 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2193 		QAT_LOG(ERR, "Unsupported xform chain requested");
2194 		return -ENOTSUP;
2195 	}
2196 	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2197 
2198 	ret = qat_sym_session_configure_cipher(dev, xform, session);
2199 	if (ret < 0)
2200 		return ret;
2201 
2202 	return 0;
2203 }
2204 
2205 int
2206 qat_security_session_create(void *dev,
2207 				struct rte_security_session_conf *conf,
2208 				struct rte_security_session *sess,
2209 				struct rte_mempool *mempool)
2210 {
2211 	void *sess_private_data;
2212 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2213 	int ret;
2214 
2215 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2216 			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2217 		QAT_LOG(ERR, "Invalid security protocol");
2218 		return -EINVAL;
2219 	}
2220 
2221 	if (rte_mempool_get(mempool, &sess_private_data)) {
2222 		QAT_LOG(ERR, "Couldn't get object from session mempool");
2223 		return -ENOMEM;
2224 	}
2225 
2226 	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2227 			sess_private_data);
2228 	if (ret != 0) {
2229 		QAT_LOG(ERR, "Failed to configure session parameters");
2230 		/* Return session to mempool */
2231 		rte_mempool_put(mempool, sess_private_data);
2232 		return ret;
2233 	}
2234 
2235 	set_sec_session_private_data(sess, sess_private_data);
2236 
2237 	return ret;
2238 }
2239 
2240 int
2241 qat_security_session_destroy(void *dev,
2242 				 struct rte_security_session *sess)
2243 {
2244 	void *sess_priv = get_sec_session_private_data(sess);
2245 	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2246 
2247 	if (sess_priv) {
2248 		if (s->bpi_ctx)
2249 			bpi_cipher_ctx_free(s->bpi_ctx);
2250 		memset(s, 0, qat_sym_session_get_private_size(dev));
2251 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2252 
2253 		set_sec_session_private_data(sess, NULL);
2254 		rte_mempool_put(sess_mp, sess_priv);
2255 	}
2256 	return 0;
2257 }
2258 #endif
2259