xref: /dpdk/drivers/crypto/qat/qat_crypto.c (revision a3a2e2c8f7de433e10b1548df65b20bf10086d9c)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *	 * Redistributions of source code must retain the above copyright
12  *	   notice, this list of conditions and the following disclaimer.
13  *	 * Redistributions in binary form must reproduce the above copyright
14  *	   notice, this list of conditions and the following disclaimer in
15  *	   the documentation and/or other materials provided with the
16  *	   distribution.
17  *	 * Neither the name of Intel Corporation nor the names of its
18  *	   contributors may be used to endorse or promote products derived
19  *	   from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42 
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_mempool.h>
58 #include <rte_mbuf.h>
59 #include <rte_string_fns.h>
60 #include <rte_spinlock.h>
61 #include <rte_hexdump.h>
62 #include <rte_crypto_sym.h>
63 #include <rte_cryptodev_pci.h>
64 #include <openssl/evp.h>
65 
66 #include "qat_logs.h"
67 #include "qat_algs.h"
68 #include "qat_crypto.h"
69 #include "adf_transport_access_macros.h"
70 
71 #define BYTE_LENGTH    8	/* bits per byte, for bit <-> byte length checks */
72 
73 static int
74 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
75 		struct qat_pmd_private *internals) {
76 	int i = 0;
77 	const struct rte_cryptodev_capabilities *capability;
78 
79 	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
80 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
81 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
82 			continue;
83 
84 		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
85 			continue;
86 
87 		if (capability->sym.cipher.algo == algo)
88 			return 1;
89 	}
90 	return 0;
91 }
92 
93 static int
94 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
95 		struct qat_pmd_private *internals) {
96 	int i = 0;
97 	const struct rte_cryptodev_capabilities *capability;
98 
99 	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
100 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
101 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
102 			continue;
103 
104 		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
105 			continue;
106 
107 		if (capability->sym.auth.algo == algo)
108 			return 1;
109 	}
110 	return 0;
111 }
112 
113 /** Encrypt a single partial block
114  *  Depends on openssl libcrypto
115  *  Uses ECB+XOR to do CFB encryption: one partial block of CFB is just
116  *  ECB(IV) XORed with the data - same result, more performant */
117 static inline int
118 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
119 		uint8_t *iv, int ivlen, int srclen,
120 		void *bpi_ctx)
121 {
122 	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
123 	int encrypted_ivlen;
124 	uint8_t encrypted_iv[16];
125 	int i;
126 
127 	/* ECB method: encrypt the IV, then XOR this with plaintext */
128 	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
129 								<= 0)
130 		goto cipher_encrypt_err;
131 
132 	for (i = 0; i < srclen; i++)
133 		*(dst+i) = *(src+i)^(encrypted_iv[i]);
134 
135 	return 0;
136 
137 cipher_encrypt_err:
138 	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
139 	return -EINVAL;
140 }
141 
142 /** Decrypt a single partial block
143  *  Depends on openssl libcrypto
144  *  Uses ECB+XOR to do CFB decryption, same result, more performant
145  */
146 static inline int
147 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
148 		uint8_t *iv, int ivlen, int srclen,
149 		void *bpi_ctx)
150 {
151 	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
152 	int encrypted_ivlen;
153 	uint8_t encrypted_iv[16];
154 	int i;
155 
156 	/* ECB method: encrypt (not decrypt!) the IV, then XOR with the ciphertext */
157 	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
158 								<= 0)
159 		goto cipher_decrypt_err;
160 
161 	for (i = 0; i < srclen; i++)
162 		*(dst+i) = *(src+i)^(encrypted_iv[i]);
163 
164 	return 0;
165 
166 cipher_decrypt_err:
167 	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
168 	return -EINVAL;
169 }
170 
171 /** Creates an ECB mode cipher context, for either AES or DES
172  *  Depends on openssl libcrypto
173  */
174 static void *
175 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
176 		enum rte_crypto_cipher_operation direction __rte_unused,
177 					uint8_t *key)
178 {
179 	const EVP_CIPHER *algo = NULL;
180 	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
181 
182 	if (ctx == NULL)
183 		goto ctx_init_err;
184 
185 	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
186 		algo = EVP_des_ecb();
187 	else
188 		algo = EVP_aes_128_ecb();
189 
190 	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
191 	if (EVP_EncryptInit_ex(ctx, algo, NULL, key, NULL) != 1)
192 		goto ctx_init_err;
193 
194 	return ctx;
195 
196 ctx_init_err:
197 	if (ctx != NULL)
198 		EVP_CIPHER_CTX_free(ctx);
199 	return NULL;
200 }
201 
202 /** Frees a context previously created by bpi_cipher_ctx_init
203  *  Depends on openssl libcrypto
204  */
205 static void
206 bpi_cipher_ctx_free(void *bpi_ctx)
207 {
208 	if (bpi_ctx != NULL)
209 		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
210 }
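
/* The three BPI helpers above are used together for DOCSIS BPI partial-block
 * handling; a sketch of the flow in this driver (simplified, error handling
 * omitted):
 *
 *	void *ctx = bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
 *			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key);
 *	...
 *	bpi_cipher_decrypt(last_block, dst, iv, block_len,
 *			last_block_len, ctx);	// pre-process, decrypt dir
 *	bpi_cipher_encrypt(last_block, dst, iv, block_len,
 *			last_block_len, ctx);	// post-process, encrypt dir
 *	...
 *	bpi_cipher_ctx_free(ctx);
 *
 * The ctx is created per session in qat_crypto_sym_configure_session_cipher,
 * used by qat_bpicipher_preprocess/qat_bpicipher_postprocess on the trailing
 * partial block, and released in qat_crypto_sym_clear_session.
 */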
211 
212 static inline uint32_t
213 adf_modulo(uint32_t data, uint32_t shift);
214 
215 static inline int
216 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
217 		struct qat_crypto_op_cookie *qat_op_cookie);
218 
219 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
220 		void *session)
221 {
222 	struct qat_session *sess = session;
223 	phys_addr_t cd_paddr;
224 
225 	PMD_INIT_FUNC_TRACE();
226 	if (sess) {
227 		if (sess->bpi_ctx) {
228 			bpi_cipher_ctx_free(sess->bpi_ctx);
229 			sess->bpi_ctx = NULL;
230 		}
231 		cd_paddr = sess->cd_paddr;
232 		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
233 		sess->cd_paddr = cd_paddr;
234 	} else
235 		PMD_DRV_LOG(ERR, "NULL session");
236 }
237 
238 static int
239 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
240 {
241 	/* Cipher Only */
242 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
243 		return ICP_QAT_FW_LA_CMD_CIPHER;
244 
245 	/* Authentication Only */
246 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
247 		return ICP_QAT_FW_LA_CMD_AUTH;
248 
249 	if (xform->next == NULL)
250 		return -1;
251 
252 	/* Cipher then Authenticate */
253 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
254 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
255 		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
256 
257 	/* Authenticate then Cipher */
258 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
259 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
260 		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
261 
262 	return -1;
263 }
264 
265 static struct rte_crypto_auth_xform *
266 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
267 {
268 	do {
269 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
270 			return &xform->auth;
271 
272 		xform = xform->next;
273 	} while (xform);
274 
275 	return NULL;
276 }
277 
278 static struct rte_crypto_cipher_xform *
279 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
280 {
281 	do {
282 		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
283 			return &xform->cipher;
284 
285 		xform = xform->next;
286 	} while (xform);
287 
288 	return NULL;
289 }

290 void *
291 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
292 		struct rte_crypto_sym_xform *xform, void *session_private)
293 {
294 	struct qat_session *session = session_private;
295 	struct qat_pmd_private *internals = dev->data->dev_private;
296 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
297 
298 	/* Get cipher xform from crypto xform chain */
299 	cipher_xform = qat_get_cipher_xform(xform);
300 
301 	switch (cipher_xform->algo) {
302 	case RTE_CRYPTO_CIPHER_AES_CBC:
303 		if (qat_alg_validate_aes_key(cipher_xform->key.length,
304 				&session->qat_cipher_alg) != 0) {
305 			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
306 			goto error_out;
307 		}
308 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
309 		break;
310 	case RTE_CRYPTO_CIPHER_AES_GCM:
311 		if (qat_alg_validate_aes_key(cipher_xform->key.length,
312 				&session->qat_cipher_alg) != 0) {
313 			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
314 			goto error_out;
315 		}
316 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
317 		break;
318 	case RTE_CRYPTO_CIPHER_AES_CTR:
319 		if (qat_alg_validate_aes_key(cipher_xform->key.length,
320 				&session->qat_cipher_alg) != 0) {
321 			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
322 			goto error_out;
323 		}
324 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
325 		break;
326 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
327 		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
328 					&session->qat_cipher_alg) != 0) {
329 			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
330 			goto error_out;
331 		}
332 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
333 		break;
334 	case RTE_CRYPTO_CIPHER_NULL:
335 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
336 		break;
337 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
338 		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
339 					&session->qat_cipher_alg) != 0) {
340 			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
341 			goto error_out;
342 		}
343 		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
344 		break;
345 	case RTE_CRYPTO_CIPHER_3DES_CBC:
346 		if (qat_alg_validate_3des_key(cipher_xform->key.length,
347 				&session->qat_cipher_alg) != 0) {
348 			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
349 			goto error_out;
350 		}
351 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
352 		break;
353 	case RTE_CRYPTO_CIPHER_DES_CBC:
354 		if (qat_alg_validate_des_key(cipher_xform->key.length,
355 				&session->qat_cipher_alg) != 0) {
356 			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
357 			goto error_out;
358 		}
359 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
360 		break;
361 	case RTE_CRYPTO_CIPHER_3DES_CTR:
362 		if (qat_alg_validate_3des_key(cipher_xform->key.length,
363 				&session->qat_cipher_alg) != 0) {
364 			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
365 			goto error_out;
366 		}
367 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
368 		break;
369 	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
370 		session->bpi_ctx = bpi_cipher_ctx_init(
371 					cipher_xform->algo,
372 					cipher_xform->op,
373 					cipher_xform->key.data);
374 		if (session->bpi_ctx == NULL) {
375 			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
376 			goto error_out;
377 		}
378 		if (qat_alg_validate_des_key(cipher_xform->key.length,
379 				&session->qat_cipher_alg) != 0) {
380 			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
381 			goto error_out;
382 		}
383 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
384 		break;
385 	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
386 		session->bpi_ctx = bpi_cipher_ctx_init(
387 					cipher_xform->algo,
388 					cipher_xform->op,
389 					cipher_xform->key.data);
390 		if (session->bpi_ctx == NULL) {
391 			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
392 			goto error_out;
393 		}
394 		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
395 				&session->qat_cipher_alg) != 0) {
396 			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
397 			goto error_out;
398 		}
399 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
400 		break;
401 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
402 		if (!qat_is_cipher_alg_supported(
403 			cipher_xform->algo, internals)) {
404 			PMD_DRV_LOG(ERR, "%s not supported on this device",
405 				rte_crypto_cipher_algorithm_strings
406 					[cipher_xform->algo]);
407 			goto error_out;
408 		}
409 		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
410 				&session->qat_cipher_alg) != 0) {
411 			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
412 			goto error_out;
413 		}
414 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
415 		break;
416 	case RTE_CRYPTO_CIPHER_3DES_ECB:
417 	case RTE_CRYPTO_CIPHER_AES_ECB:
418 	case RTE_CRYPTO_CIPHER_AES_CCM:
419 	case RTE_CRYPTO_CIPHER_AES_F8:
420 	case RTE_CRYPTO_CIPHER_AES_XTS:
421 	case RTE_CRYPTO_CIPHER_ARC4:
422 		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
423 				cipher_xform->algo);
424 		goto error_out;
425 	default:
426 		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
427 				cipher_xform->algo);
428 		goto error_out;
429 	}
430 
431 	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
432 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
433 	else
434 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
435 
436 	if (qat_alg_aead_session_create_content_desc_cipher(session,
437 						cipher_xform->key.data,
438 						cipher_xform->key.length))
439 		goto error_out;
440 
441 	return session;
442 
443 error_out:
444 	if (session->bpi_ctx) {
445 		bpi_cipher_ctx_free(session->bpi_ctx);
446 		session->bpi_ctx = NULL;
447 	}
448 	return NULL;
449 }
450 
451 
452 void *
453 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
454 		struct rte_crypto_sym_xform *xform, void *session_private)
455 {
456 	struct qat_session *session = session_private;
457 
458 	int qat_cmd_id;
459 	PMD_INIT_FUNC_TRACE();
460 
461 	/* Get requested QAT command id */
462 	qat_cmd_id = qat_get_cmd_id(xform);
463 	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
464 		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
465 		goto error_out;
466 	}
467 	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
468 	switch (session->qat_cmd) {
469 	case ICP_QAT_FW_LA_CMD_CIPHER:
470 	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
471 		break;
472 	case ICP_QAT_FW_LA_CMD_AUTH:
473 	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
474 		break;
475 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
476 	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
477 	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
478 		break;
479 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
480 	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
481 	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
482 		break;
483 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
484 	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
485 	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
486 	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
487 	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
488 	case ICP_QAT_FW_LA_CMD_MGF1:
489 	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
490 	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
491 	case ICP_QAT_FW_LA_CMD_DELIMITER:
492 	PMD_DRV_LOG(ERR, "Unsupported Service %u",
493 		session->qat_cmd);
494 		goto error_out;
495 	default:
496 	PMD_DRV_LOG(ERR, "Unsupported Service %u",
497 		session->qat_cmd);
498 		goto error_out;
499 	}
500 
501 	return session;
502 
503 error_out:
504 	return NULL;
505 }
506 
507 struct qat_session *
508 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
509 				struct rte_crypto_sym_xform *xform,
510 				struct qat_session *session_private)
511 {
512 
513 	struct qat_session *session = session_private;
514 	struct rte_crypto_auth_xform *auth_xform = NULL;
515 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
516 	struct qat_pmd_private *internals = dev->data->dev_private;
517 	auth_xform = qat_get_auth_xform(xform);
518 
519 	switch (auth_xform->algo) {
520 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
521 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
522 		break;
523 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
524 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
525 		break;
526 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
527 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
528 		break;
529 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
530 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
531 		break;
532 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
533 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
534 		break;
535 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
536 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
537 		break;
538 	case RTE_CRYPTO_AUTH_AES_GCM:
539 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
540 		break;
541 	case RTE_CRYPTO_AUTH_AES_GMAC:
542 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
543 		break;
544 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
545 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
546 		break;
547 	case RTE_CRYPTO_AUTH_MD5_HMAC:
548 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
549 		break;
550 	case RTE_CRYPTO_AUTH_NULL:
551 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
552 		break;
553 	case RTE_CRYPTO_AUTH_KASUMI_F9:
554 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
555 		break;
556 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
557 		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
558 			PMD_DRV_LOG(ERR, "%s not supported on this device",
559 				rte_crypto_auth_algorithm_strings
560 				[auth_xform->algo]);
561 			goto error_out;
562 		}
563 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
564 		break;
565 	case RTE_CRYPTO_AUTH_SHA1:
566 	case RTE_CRYPTO_AUTH_SHA256:
567 	case RTE_CRYPTO_AUTH_SHA512:
568 	case RTE_CRYPTO_AUTH_SHA224:
569 	case RTE_CRYPTO_AUTH_SHA384:
570 	case RTE_CRYPTO_AUTH_MD5:
571 	case RTE_CRYPTO_AUTH_AES_CCM:
572 	case RTE_CRYPTO_AUTH_AES_CMAC:
573 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
574 		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
575 				auth_xform->algo);
576 		goto error_out;
577 	default:
578 		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
579 				auth_xform->algo);
580 		goto error_out;
581 	}
582 	cipher_xform = qat_get_cipher_xform(xform);
583 
584 	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
585 			(session->qat_hash_alg ==
586 				ICP_QAT_HW_AUTH_ALGO_GALOIS_64))  {
587 		if (qat_alg_aead_session_create_content_desc_auth(session,
588 				cipher_xform->key.data,
589 				cipher_xform->key.length,
590 				auth_xform->add_auth_data_length,
591 				auth_xform->digest_length,
592 				auth_xform->op))
593 			goto error_out;
594 	} else {
595 		if (qat_alg_aead_session_create_content_desc_auth(session,
596 				auth_xform->key.data,
597 				auth_xform->key.length,
598 				auth_xform->add_auth_data_length,
599 				auth_xform->digest_length,
600 				auth_xform->op))
601 			goto error_out;
602 	}
603 	return session;
604 
605 error_out:
606 	return NULL;
607 }
608 
609 unsigned qat_crypto_sym_get_session_private_size(
610 		struct rte_cryptodev *dev __rte_unused)
611 {
612 	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
613 }
614 
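/** DOCSIS BPI pre-process, run before a request is handed to the device.
 *  If the cipher length is not a multiple of the block size and the session
 *  direction is decrypt, the trailing partial block is decrypted here in
 *  software using CFB (IV = preceding ciphertext block, or the op IV for a
 *  runt buffer). Returns the number of full-block bytes left for the device.
 */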
615 static inline uint32_t
616 qat_bpicipher_preprocess(struct qat_session *ctx,
617 				struct rte_crypto_op *op)
618 {
619 	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
620 	struct rte_crypto_sym_op *sym_op = op->sym;
621 	uint8_t last_block_len = sym_op->cipher.data.length % block_len;
622 
623 	if (last_block_len &&
624 			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
625 
626 		/* Decrypt last block */
627 		uint8_t *last_block, *dst, *iv;
628 		uint32_t last_block_offset = sym_op->cipher.data.offset +
629 				sym_op->cipher.data.length - last_block_len;
630 		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
631 				uint8_t *, last_block_offset);
632 
633 		if (unlikely(sym_op->m_dst != NULL))
634 			/* out-of-place operation (OOP) */
635 			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
636 						uint8_t *, last_block_offset);
637 		else
638 			dst = last_block;
639 
640 		if (last_block_len < sym_op->cipher.data.length)
641 			/* use previous block ciphertext as IV */
642 			iv = last_block - block_len;
643 		else
644 			/* runt block, i.e. less than one full block */
645 			iv = sym_op->cipher.iv.data;
646 
647 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
648 		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
649 			last_block_len);
650 		if (sym_op->m_dst != NULL)
651 			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
652 				last_block_len);
653 #endif
654 		bpi_cipher_decrypt(last_block, dst, iv, block_len,
655 				last_block_len, ctx->bpi_ctx);
656 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
657 		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
658 			last_block_len);
659 		if (sym_op->m_dst != NULL)
660 			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
661 				last_block_len);
662 #endif
663 	}
664 
665 	return sym_op->cipher.data.length - last_block_len;
666 }
667 
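/** DOCSIS BPI post-process, run on dequeue after the device has completed.
 *  If the cipher length is not a multiple of the block size and the session
 *  direction is encrypt, the trailing partial block is encrypted here in
 *  software using CFB, with the preceding (already encrypted) block as the
 *  IV, or the op IV for a runt buffer. Returns the number of full-block
 *  bytes that were processed by the device.
 */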
668 static inline uint32_t
669 qat_bpicipher_postprocess(struct qat_session *ctx,
670 				struct rte_crypto_op *op)
671 {
672 	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
673 	struct rte_crypto_sym_op *sym_op = op->sym;
674 	uint8_t last_block_len = sym_op->cipher.data.length % block_len;
675 
676 	if (last_block_len > 0 &&
677 			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
678 
679 		/* Encrypt last block */
680 		uint8_t *last_block, *dst, *iv;
681 		uint32_t last_block_offset;
682 
683 		last_block_offset = sym_op->cipher.data.offset +
684 				sym_op->cipher.data.length - last_block_len;
685 		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
686 				uint8_t *, last_block_offset);
687 
688 		if (unlikely(sym_op->m_dst != NULL))
689 			/* out-of-place operation (OOP) */
690 			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
691 						uint8_t *, last_block_offset);
692 		else
693 			dst = last_block;
694 
695 		if (last_block_len < sym_op->cipher.data.length)
696 			/* use previous block ciphertext as IV */
697 			iv = dst - block_len;
698 		else
699 			/* runt block, i.e. less than one full block */
700 			iv = sym_op->cipher.iv.data;
701 
702 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
703 		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
704 			last_block_len);
705 		if (sym_op->m_dst != NULL)
706 			rte_hexdump(stdout, "BPI: dst before post-process:",
707 					dst, last_block_len);
708 #endif
709 		bpi_cipher_encrypt(last_block, dst, iv, block_len,
710 				last_block_len, ctx->bpi_ctx);
711 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
712 		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
713 			last_block_len);
714 		if (sym_op->m_dst != NULL)
715 			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
716 				last_block_len);
717 #endif
718 	}
719 	return sym_op->cipher.data.length - last_block_len;
720 }
721 
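/** Enqueue a burst of crypto ops on a QAT queue pair.
 *  Flow: atomically reserve in-flight slots (trimming the burst if the ring
 *  would overflow), build one firmware request per op at the ring tail via
 *  qat_write_hw_desc_entry(), then write the tail CSR once to kick the
 *  hardware.
 */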
722 uint16_t
723 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
724 		uint16_t nb_ops)
725 {
726 	register struct qat_queue *queue;
727 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
728 	register uint32_t nb_ops_sent = 0;
729 	register struct rte_crypto_op **cur_op = ops;
730 	register int ret;
731 	uint16_t nb_ops_possible = nb_ops;
732 	register uint8_t *base_addr;
733 	register uint32_t tail;
734 	int overflow;
735 
736 	if (unlikely(nb_ops == 0))
737 		return 0;
738 
739 	/* read params used a lot in main loop into registers */
740 	queue = &(tmp_qp->tx_q);
741 	base_addr = (uint8_t *)queue->base_addr;
742 	tail = queue->tail;
743 
744 	/* Find how many can actually fit on the ring */
745 	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
746 				- queue->max_inflights;
747 	if (overflow > 0) {
748 		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
749 		nb_ops_possible = nb_ops - overflow;
750 		if (nb_ops_possible == 0)
751 			return 0;
752 	}
753 
754 	while (nb_ops_sent != nb_ops_possible) {
755 		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
756 				tmp_qp->op_cookies[tail / queue->msg_size]);
757 		if (ret != 0) {
758 			tmp_qp->stats.enqueue_err_count++;
759 			/*
760 			 * This message cannot be enqueued,
761 			 * decrease the in-flight count by the ops not sent
762 			 */
763 			rte_atomic16_sub(&tmp_qp->inflights16,
764 					nb_ops_possible - nb_ops_sent);
765 			if (nb_ops_sent == 0)
766 				return 0;
767 			goto kick_tail;
768 		}
769 
770 		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
771 		nb_ops_sent++;
772 		cur_op++;
773 	}
774 kick_tail:
775 	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
776 			queue->hw_queue_number, tail);
777 	queue->tail = tail;
778 	tmp_qp->stats.enqueued_count += nb_ops_sent;
779 	return nb_ops_sent;
780 }
781 
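/** Dequeue completed crypto ops from a QAT queue pair.
 *  Walks the response ring until the empty signature is found or nb_ops is
 *  reached, translates each firmware status into an op status, runs DOCSIS
 *  BPI post-processing where the session requires it, then updates the head
 *  CSR and the in-flight counter once for the whole burst.
 */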
782 uint16_t
783 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
784 		uint16_t nb_ops)
785 {
786 	struct qat_queue *queue;
787 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
788 	uint32_t msg_counter = 0;
789 	struct rte_crypto_op *rx_op;
790 	struct icp_qat_fw_comn_resp *resp_msg;
791 
792 	queue = &(tmp_qp->rx_q);
793 	resp_msg = (struct icp_qat_fw_comn_resp *)
794 			((uint8_t *)queue->base_addr + queue->head);
795 
796 	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
797 			msg_counter != nb_ops) {
798 		rx_op = (struct rte_crypto_op *)(uintptr_t)
799 				(resp_msg->opaque_data);
800 
801 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
802 		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
803 			sizeof(struct icp_qat_fw_comn_resp));
804 
805 #endif
806 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
807 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
808 					resp_msg->comn_hdr.comn_status)) {
809 			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
810 		} else {
811 			struct qat_session *sess = (struct qat_session *)
812 						(rx_op->sym->session->_private);
813 			if (sess->bpi_ctx)
814 				qat_bpicipher_postprocess(sess, rx_op);
815 			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
816 		}
817 
818 		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
819 		queue->head = adf_modulo(queue->head +
820 				queue->msg_size,
821 				ADF_RING_SIZE_MODULO(queue->queue_size));
822 		resp_msg = (struct icp_qat_fw_comn_resp *)
823 					((uint8_t *)queue->base_addr +
824 							queue->head);
825 		*ops = rx_op;
826 		ops++;
827 		msg_counter++;
828 	}
829 	if (msg_counter > 0) {
830 		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
831 					queue->hw_bundle_number,
832 					queue->hw_queue_number, queue->head);
833 		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
834 		tmp_qp->stats.dequeued_count += msg_counter;
835 	}
836 	return msg_counter;
837 }
838 
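/** Build a QAT scatter-gather list from an mbuf chain.
 *  The first entry starts at buff_start (which may lie inside the first
 *  segment); further segments are appended until data_len bytes are covered,
 *  with the last entry trimmed so the list describes exactly data_len bytes.
 *  Fails if more than QAT_SGL_MAX_NUMBER entries would be needed.
 */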
839 static inline int
840 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
841 		struct qat_alg_buf_list *list, uint32_t data_len)
842 {
843 	int nr = 1;
844 
845 	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
846 			buff_start + rte_pktmbuf_data_len(buf);
847 
848 	list->bufers[0].addr = buff_start;
849 	list->bufers[0].resrvd = 0;
850 	list->bufers[0].len = buf_len;
851 
852 	if (data_len <= buf_len) {
853 		list->num_bufs = nr;
854 		list->bufers[0].len = data_len;
855 		return 0;
856 	}
857 
858 	buf = buf->next;
859 	while (buf) {
860 		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
861 			PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of QAT"
862 					" SGL entries (%u)",
863 					QAT_SGL_MAX_NUMBER);
864 			return -EINVAL;
865 		}
866 
867 		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
868 		list->bufers[nr].resrvd = 0;
869 		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
870 
871 		buf_len += list->bufers[nr].len;
872 		buf = buf->next;
873 
874 		if (buf_len > data_len) {
875 			list->bufers[nr].len -=
876 				buf_len - data_len;
877 			buf = NULL;
878 		}
879 		++nr;
880 	}
881 	list->num_bufs = nr;
882 
883 	return 0;
884 }
885 
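/** Build one QAT firmware LA bulk request for a crypto op.
 *  The session's pre-built request template (ctx->fw_req) is copied into the
 *  ring slot, then the per-op fields are filled in: cipher/auth offsets and
 *  lengths (converted from bits to bytes for SNOW3G/KASUMI/ZUC), the IV,
 *  digest and AAD addresses, the DMA source/destination addresses and, when
 *  either mbuf is chained, a scatter-gather descriptor.
 */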
886 static inline int
887 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
888 		struct qat_crypto_op_cookie *qat_op_cookie)
889 {
890 	int ret = 0;
891 	struct qat_session *ctx;
892 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
893 	struct icp_qat_fw_la_auth_req_params *auth_param;
894 	register struct icp_qat_fw_la_bulk_req *qat_req;
895 	uint8_t do_auth = 0, do_cipher = 0;
896 	uint32_t cipher_len = 0, cipher_ofs = 0;
897 	uint32_t auth_len = 0, auth_ofs = 0;
898 	uint32_t min_ofs = 0;
899 	uint64_t src_buf_start = 0, dst_buf_start = 0;
900 	uint8_t do_sgl = 0;
901 
902 
903 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
904 	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
905 		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
906 				"operation requests, op (%p) is not a "
907 				"symmetric operation.", op);
908 		return -EINVAL;
909 	}
910 #endif
911 	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
912 		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
913 				" requests, op (%p) is sessionless.", op);
914 		return -EINVAL;
915 	}
916 
917 	if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
918 		PMD_DRV_LOG(ERR, "Session was not created for this device");
919 		return -EINVAL;
920 	}
921 
922 	ctx = (struct qat_session *)op->sym->session->_private;
923 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
924 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
925 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
926 	cipher_param = (void *)&qat_req->serv_specif_rqpars;
927 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
928 
929 	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
930 		ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
931 		do_auth = 1;
932 		do_cipher = 1;
933 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
934 		do_auth = 1;
935 		do_cipher = 0;
936 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
937 		do_auth = 0;
938 		do_cipher = 1;
939 	}
940 
941 	if (do_cipher) {
942 
943 		if (ctx->qat_cipher_alg ==
944 					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
945 			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
946 			ctx->qat_cipher_alg ==
947 				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
948 
949 			if (unlikely(
950 				(op->sym->cipher.data.length % BYTE_LENGTH != 0)
951 				 || (op->sym->cipher.data.offset
952 							% BYTE_LENGTH != 0))) {
953 				PMD_DRV_LOG(ERR,
954 		  "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
955 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
956 				return -EINVAL;
957 			}
958 			cipher_len = op->sym->cipher.data.length >> 3;
959 			cipher_ofs = op->sym->cipher.data.offset >> 3;
960 
961 		} else if (ctx->bpi_ctx) {
962 			/* DOCSIS - only send complete blocks to device
963 			/* DOCSIS - only send complete blocks to the device.
964 			 * Any partial block is processed in software using CFB mode.
965 			 * Even if there are 0 complete blocks, still send the request
966 			 * to the device so it reaches the rx queue for post-process and dequeuing
967 			 */
968 			cipher_ofs = op->sym->cipher.data.offset;
969 		} else {
970 			cipher_len = op->sym->cipher.data.length;
971 			cipher_ofs = op->sym->cipher.data.offset;
972 		}
973 
974 		/* Copy the IV into the request if it fits.
975 		 * If the IV length is zero do not copy anything, but still
976 		 * use the IV embedded in the request descriptor.
977 		 */
980 		if (op->sym->cipher.iv.length) {
981 			if (op->sym->cipher.iv.length <=
982 					sizeof(cipher_param->u.cipher_IV_array)) {
983 				rte_memcpy(cipher_param->u.cipher_IV_array,
984 						op->sym->cipher.iv.data,
985 						op->sym->cipher.iv.length);
986 			} else {
987 				ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
988 						qat_req->comn_hdr.serv_specif_flags,
989 						ICP_QAT_FW_CIPH_IV_64BIT_PTR);
990 				cipher_param->u.s.cipher_IV_ptr =
991 						op->sym->cipher.iv.phys_addr;
992 			}
993 		}
994 		min_ofs = cipher_ofs;
995 	}
996 
997 	if (do_auth) {
998 
999 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1000 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1001 			ctx->qat_hash_alg ==
1002 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1003 			if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0)
1004 				|| (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
1005 				PMD_DRV_LOG(ERR,
1006 		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1007 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1008 				return -EINVAL;
1009 			}
1010 			auth_ofs = op->sym->auth.data.offset >> 3;
1011 			auth_len = op->sym->auth.data.length >> 3;
1012 
1013 			if (ctx->qat_hash_alg ==
1014 					ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
1015 				if (do_cipher) {
1016 					auth_len = auth_len + auth_ofs + 1 -
1017 						ICP_QAT_HW_KASUMI_BLK_SZ;
1018 					auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
1019 				} else {
1020 					auth_len = auth_len + auth_ofs + 1;
1021 					auth_ofs = 0;
1022 				}
1023 			}
1024 
1025 		} else if (ctx->qat_hash_alg ==
1026 					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1027 				ctx->qat_hash_alg ==
1028 					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1029 			auth_ofs = op->sym->cipher.data.offset;
1030 			auth_len = op->sym->cipher.data.length;
1031 		} else {
1032 			auth_ofs = op->sym->auth.data.offset;
1033 			auth_len = op->sym->auth.data.length;
1034 		}
1035 		min_ofs = auth_ofs;
1036 
1037 		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1038 
1039 		auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
1040 
1041 	}
1042 
1043 	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1044 		do_sgl = 1;
1045 
1046 	/* adjust for chain case */
1047 	if (do_cipher && do_auth)
1048 		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1049 
1050 	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1051 		min_ofs = 0;
1052 
1053 	if (unlikely(op->sym->m_dst != NULL)) {
1054 		/* Out-of-place operation (OOP)
1055 		 * Don't align DMA start. DMA the minimum data-set
1056 		 * so as not to overwrite data in dest buffer
1057 		 */
1058 		src_buf_start =
1059 			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
1060 		dst_buf_start =
1061 			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
1062 
1063 	} else {
1064 		/* In-place operation
1065 		 * Start DMA at nearest aligned address below min_ofs
1066 		 */
1067 		src_buf_start =
1068 			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
1069 						& QAT_64_BTYE_ALIGN_MASK;
1070 
1071 		if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
1072 					rte_pktmbuf_headroom(op->sym->m_src))
1073 							> src_buf_start)) {
1074 			/* alignment has pushed addr ahead of start of mbuf
1075 			 * so revert and take the performance hit
1076 			 */
1077 			src_buf_start =
1078 				rte_pktmbuf_mtophys_offset(op->sym->m_src,
1079 								min_ofs);
1080 		}
1081 		dst_buf_start = src_buf_start;
1082 	}
1083 
1084 	if (do_cipher) {
1085 		cipher_param->cipher_offset =
1086 				(uint32_t)rte_pktmbuf_mtophys_offset(
1087 				op->sym->m_src, cipher_ofs) - src_buf_start;
1088 		cipher_param->cipher_length = cipher_len;
1089 	} else {
1090 		cipher_param->cipher_offset = 0;
1091 		cipher_param->cipher_length = 0;
1092 	}
1093 	if (do_auth) {
1094 		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
1095 				op->sym->m_src, auth_ofs) - src_buf_start;
1096 		auth_param->auth_len = auth_len;
1097 	} else {
1098 		auth_param->auth_off = 0;
1099 		auth_param->auth_len = 0;
1100 	}
1101 	qat_req->comn_mid.dst_length =
1102 		qat_req->comn_mid.src_length =
1103 		(cipher_param->cipher_offset + cipher_param->cipher_length)
1104 		> (auth_param->auth_off + auth_param->auth_len) ?
1105 		(cipher_param->cipher_offset + cipher_param->cipher_length)
1106 		: (auth_param->auth_off + auth_param->auth_len);
1107 
1108 	if (do_sgl) {
1109 
1110 		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1111 				QAT_COMN_PTR_TYPE_SGL);
1112 		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1113 				&qat_op_cookie->qat_sgl_list_src,
1114 				qat_req->comn_mid.src_length);
1115 		if (ret) {
1116 			PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1117 			return ret;
1118 		}
1119 
1120 		if (likely(op->sym->m_dst == NULL))
1121 			qat_req->comn_mid.dest_data_addr =
1122 				qat_req->comn_mid.src_data_addr =
1123 				qat_op_cookie->qat_sgl_src_phys_addr;
1124 		else {
1125 			ret = qat_sgl_fill_array(op->sym->m_dst,
1126 					dst_buf_start,
1127 					&qat_op_cookie->qat_sgl_list_dst,
1128 						qat_req->comn_mid.dst_length);
1129 
1130 			if (ret) {
1131 				PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1132 						"fill sgl array");
1133 				return ret;
1134 			}
1135 
1136 			qat_req->comn_mid.src_data_addr =
1137 				qat_op_cookie->qat_sgl_src_phys_addr;
1138 			qat_req->comn_mid.dest_data_addr =
1139 					qat_op_cookie->qat_sgl_dst_phys_addr;
1140 		}
1141 	} else {
1142 		qat_req->comn_mid.src_data_addr = src_buf_start;
1143 		qat_req->comn_mid.dest_data_addr = dst_buf_start;
1144 	}
1145 
1146 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1147 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1148 		if (op->sym->cipher.iv.length == 12) {
1149 			/*
1150 			 * For GCM a 12 byte IV is allowed,
1151 			 * but we need to inform the f/w
1152 			 */
1153 			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1154 				qat_req->comn_hdr.serv_specif_flags,
1155 				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1156 		}
1157 		if (op->sym->cipher.data.length == 0) {
1158 			/*
1159 			 * GMAC: no cipher data, the data to authenticate is in the AAD buffer
1160 			 */
1161 			qat_req->comn_mid.dest_data_addr =
1162 				qat_req->comn_mid.src_data_addr =
1163 						op->sym->auth.aad.phys_addr;
1164 			qat_req->comn_mid.dst_length =
1165 				qat_req->comn_mid.src_length =
1166 					rte_pktmbuf_data_len(op->sym->m_src);
1167 			cipher_param->cipher_length = 0;
1168 			cipher_param->cipher_offset = 0;
1169 			auth_param->u1.aad_adr = 0;
1170 			auth_param->auth_len = op->sym->auth.aad.length;
1171 			auth_param->auth_off = op->sym->auth.data.offset;
1172 			auth_param->u2.aad_sz = 0;
1173 		}
1174 	}
1175 
1176 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1177 	rte_hexdump(stdout, "qat_req:", qat_req,
1178 			sizeof(struct icp_qat_fw_la_bulk_req));
1179 	rte_hexdump(stdout, "src_data:",
1180 			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1181 			rte_pktmbuf_data_len(op->sym->m_src));
1182 	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
1183 			op->sym->cipher.iv.length);
1184 	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1185 			op->sym->auth.digest.length);
1186 	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
1187 			op->sym->auth.aad.length);
1188 #endif
1189 	return 0;
1190 }
1191 
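/* adf_modulo(data, shift) returns data % (1 << shift) without a division;
 * the ring byte sizes used for head/tail wrap-around are powers of two.
 */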
1192 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1193 {
1194 	uint32_t div = data >> shift;
1195 	uint32_t mult = div << shift;
1196 
1197 	return data - mult;
1198 }
1199 
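/** Pre-compute cd_paddr for a session object allocated from mempool mp:
 *  the physical address of the content descriptor embedded in the
 *  qat_session, i.e. the element's physical address plus the offset of the
 *  private area within rte_cryptodev_sym_session plus the offset of the cd
 *  field within qat_session.
 */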
1200 void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
1201 {
1202 	struct rte_cryptodev_sym_session *sess = sym_sess;
1203 	struct qat_session *s = (void *)sess->_private;
1204 
1205 	PMD_INIT_FUNC_TRACE();
1206 	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
1207 		offsetof(struct qat_session, cd) +
1208 		offsetof(struct rte_cryptodev_sym_session, _private);
1209 }
1210 
1211 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1212 		__rte_unused struct rte_cryptodev_config *config)
1213 {
1214 	PMD_INIT_FUNC_TRACE();
1215 	return 0;
1216 }
1217 
1218 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1219 {
1220 	PMD_INIT_FUNC_TRACE();
1221 	return 0;
1222 }
1223 
1224 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1225 {
1226 	PMD_INIT_FUNC_TRACE();
1227 }
1228 
1229 int qat_dev_close(struct rte_cryptodev *dev)
1230 {
1231 	int i, ret;
1232 
1233 	PMD_INIT_FUNC_TRACE();
1234 
1235 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1236 		ret = qat_crypto_sym_qp_release(dev, i);
1237 		if (ret < 0)
1238 			return ret;
1239 	}
1240 
1241 	return 0;
1242 }
1243 
1244 void qat_dev_info_get(struct rte_cryptodev *dev,
1245 			struct rte_cryptodev_info *info)
1246 {
1247 	struct qat_pmd_private *internals = dev->data->dev_private;
1248 
1249 	PMD_INIT_FUNC_TRACE();
1250 	if (info != NULL) {
1251 		info->max_nb_queue_pairs =
1252 				ADF_NUM_SYM_QPS_PER_BUNDLE *
1253 				ADF_NUM_BUNDLES_PER_DEV;
1254 		info->feature_flags = dev->feature_flags;
1255 		info->capabilities = internals->qat_dev_capabilities;
1256 		info->sym.max_nb_sessions = internals->max_nb_sessions;
1257 		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
1258 		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1259 	}
1260 }
1261 
1262 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1263 		struct rte_cryptodev_stats *stats)
1264 {
1265 	int i;
1266 	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1267 
1268 	PMD_INIT_FUNC_TRACE();
1269 	if (stats == NULL) {
1270 		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1271 		return;
1272 	}
1273 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1274 		if (qp[i] == NULL) {
1275 			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1276 			continue;
1277 		}
1278 
1279 		stats->enqueued_count += qp[i]->stats.enqueued_count;
1280 		stats->dequeued_count += qp[i]->stats.dequeued_count;
1281 		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1282 		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1283 	}
1284 }
1285 
1286 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1287 {
1288 	int i;
1289 	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1290 
1291 	PMD_INIT_FUNC_TRACE();
1292 	for (i = 0; i < dev->data->nb_queue_pairs; i++)
1293 		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1294 	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1295 }
1296