xref: /dpdk/drivers/crypto/ccp/ccp_crypto.c (revision bdce2564dbf78e1fecc0db438b562ae19f0c057c)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  */
4 
5 #define OPENSSL_API_COMPAT 0x10100000L
6 
7 #include <dirent.h>
8 #include <fcntl.h>
9 #include <stdio.h>
10 #include <string.h>
11 #include <sys/mman.h>
12 #include <sys/queue.h>
13 #include <sys/types.h>
14 #include <unistd.h>
15 #include <openssl/sha.h>
16 #include <openssl/cmac.h> /*sub key apis*/
17 #include <openssl/evp.h> /*sub key apis*/
18 
19 #include <rte_hexdump.h>
20 #include <rte_memzone.h>
21 #include <rte_malloc.h>
22 #include <rte_memory.h>
23 #include <rte_spinlock.h>
24 #include <rte_string_fns.h>
25 #include <cryptodev_pmd.h>
26 
27 #include "ccp_dev.h"
28 #include "ccp_crypto.h"
29 #include "ccp_pci.h"
30 #include "ccp_pmd_private.h"
31 
32 #include <openssl/conf.h>
33 #include <openssl/err.h>
34 #include <openssl/hmac.h>
35 
36 extern int iommu_mode;
37 void *sha_ctx;
/* SHA initial context values.
 * SHA-1 H constants listed in reverse (H4..H0) and zero-padded to the
 * common digest size — the same reversed layout generate_partial_hash()
 * produces for the engine context.
 */
uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
	SHA1_H4, SHA1_H3,
	SHA1_H2, SHA1_H1,
	SHA1_H0, 0x0U,
	0x0U, 0x0U,
};
45 
/* SHA-224 initial digest words, reversed (H7..H0) */
uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
	SHA224_H7, SHA224_H6,
	SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2,
	SHA224_H1, SHA224_H0,
};
52 
/* SHA-256 initial digest words, reversed (H7..H0) */
uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
	SHA256_H7, SHA256_H6,
	SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2,
	SHA256_H1, SHA256_H0,
};
59 
/* SHA-384 initial digest words (64-bit), reversed (H7..H0) */
uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
	SHA384_H7, SHA384_H6,
	SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2,
	SHA384_H1, SHA384_H0,
};
66 
/* SHA-512 initial digest words (64-bit), reversed (H7..H0) */
uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
	SHA512_H7, SHA512_H6,
	SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2,
	SHA512_H1, SHA512_H0,
};
73 
/* Make the Keccak round constants 64-bit literals: MSVC takes them
 * as written; other compilers get an extra 'L' suffix appended.
 */
#if defined(_MSC_VER)
#define SHA3_CONST(x) x
#else
#define SHA3_CONST(x) x##L
#endif
79 
/** 'Words' here refers to uint64_t: 1600-bit Keccak state = 25 words */
#define SHA3_KECCAK_SPONGE_WORDS \
	(((1600) / 8) / sizeof(uint64_t))
/* SHA-3 (Keccak) streaming-hash context */
typedef struct sha3_context_ {
	/* Partially filled input word not yet absorbed into the state */
	uint64_t saved;
	union {
		/* Keccak state as 25 64-bit lanes */
		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
		/* Same 200-byte state viewed as raw bytes */
		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
	};
	/* 0..7 — next byte slot inside 'saved' (0: none buffered) */
	unsigned int byteIndex;
	/* 0..24 — next state word to absorb input into */
	unsigned int wordIndex;
	/* sponge capacity in words: double the hash output size
	 * (e.g. 16 for Keccak-512)
	 */
	unsigned int capacityWords;
} sha3_context;
111 
/* 64-bit rotate-left; callers pass y in 1..63 (y == 0 or 64 would be UB) */
#ifndef SHA3_ROTL64
#define SHA3_ROTL64(x, y) \
	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
#endif
116 
/* Keccak-f round constants, one per round (used by the Iota step) */
static const uint64_t keccakf_rndc[24] = {
	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
};
131 
/* Per-lane rotation offsets (Rho step) */
static const unsigned int keccakf_rotc[24] = {
	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
	18, 39, 61, 20, 44
};
136 
/* Lane traversal order for the Pi permutation step */
static const unsigned int keccakf_piln[24] = {
	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
	14, 22, 9, 6, 1
};
141 
142 static enum ccp_cmd_order
143 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
144 {
145 	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
146 
147 	if (xform == NULL)
148 		return res;
149 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
150 		if (xform->next == NULL)
151 			return CCP_CMD_AUTH;
152 		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
153 			return CCP_CMD_HASH_CIPHER;
154 	}
155 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
156 		if (xform->next == NULL)
157 			return CCP_CMD_CIPHER;
158 		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
159 			return CCP_CMD_CIPHER_HASH;
160 	}
161 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
162 		return CCP_CMD_COMBINED;
163 	return res;
164 }
165 
166 /* partial hash using openssl */
167 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
168 {
169 	SHA_CTX ctx;
170 
171 	if (!SHA1_Init(&ctx))
172 		return -EFAULT;
173 	SHA1_Transform(&ctx, data_in);
174 	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
175 	return 0;
176 }
177 
178 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
179 {
180 	SHA256_CTX ctx;
181 
182 	if (!SHA224_Init(&ctx))
183 		return -EFAULT;
184 	SHA256_Transform(&ctx, data_in);
185 	rte_memcpy(data_out, &ctx,
186 		   SHA256_DIGEST_LENGTH);
187 	return 0;
188 }
189 
190 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
191 {
192 	SHA256_CTX ctx;
193 
194 	if (!SHA256_Init(&ctx))
195 		return -EFAULT;
196 	SHA256_Transform(&ctx, data_in);
197 	rte_memcpy(data_out, &ctx,
198 		   SHA256_DIGEST_LENGTH);
199 	return 0;
200 }
201 
202 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
203 {
204 	SHA512_CTX ctx;
205 
206 	if (!SHA384_Init(&ctx))
207 		return -EFAULT;
208 	SHA512_Transform(&ctx, data_in);
209 	rte_memcpy(data_out, &ctx,
210 		   SHA512_DIGEST_LENGTH);
211 	return 0;
212 }
213 
214 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
215 {
216 	SHA512_CTX ctx;
217 
218 	if (!SHA512_Init(&ctx))
219 		return -EFAULT;
220 	SHA512_Transform(&ctx, data_in);
221 	rte_memcpy(data_out, &ctx,
222 		   SHA512_DIGEST_LENGTH);
223 	return 0;
224 }
225 
/* Keccak-f[1600] permutation: 24 rounds of Theta/Rho-Pi/Chi/Iota
 * over the 25-lane state, applied in place.
 */
static void
keccakf(uint64_t s[25])
{
	int i, j, round;
	uint64_t t, bc[5];
#define KECCAK_ROUNDS 24

	for (round = 0; round < KECCAK_ROUNDS; round++) {

		/* Theta: compute column parities ... */
		for (i = 0; i < 5; i++)
			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
				s[i + 20];

		/* ... and fold each parity pair into every row */
		for (i = 0; i < 5; i++) {
			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
			for (j = 0; j < 25; j += 5)
				s[j + i] ^= t;
		}

		/* Rho Pi: rotate each lane and move it to its pi position */
		t = s[1];
		for (i = 0; i < 24; i++) {
			j = keccakf_piln[i];
			bc[0] = s[j];
			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
			t = bc[0];
		}

		/* Chi: non-linear mixing within each row of 5 lanes */
		for (j = 0; j < 25; j += 5) {
			for (i = 0; i < 5; i++)
				bc[i] = s[j + i];
			for (i = 0; i < 5; i++)
				s[j + i] ^= (~bc[(i + 1) % 5]) &
					    bc[(i + 2) % 5];
		}

		/* Iota: inject the per-round constant */
		s[0] ^= keccakf_rndc[round];
	}
}
268 
269 static void
270 sha3_Init224(void *priv)
271 {
272 	sha3_context *ctx = (sha3_context *) priv;
273 
274 	memset(ctx, 0, sizeof(*ctx));
275 	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
276 }
277 
278 static void
279 sha3_Init256(void *priv)
280 {
281 	sha3_context *ctx = (sha3_context *) priv;
282 
283 	memset(ctx, 0, sizeof(*ctx));
284 	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
285 }
286 
287 static void
288 sha3_Init384(void *priv)
289 {
290 	sha3_context *ctx = (sha3_context *) priv;
291 
292 	memset(ctx, 0, sizeof(*ctx));
293 	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
294 }
295 
296 static void
297 sha3_Init512(void *priv)
298 {
299 	sha3_context *ctx = (sha3_context *) priv;
300 
301 	memset(ctx, 0, sizeof(*ctx));
302 	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
303 }
304 
305 
/* Absorb 'len' bytes of input into the sponge state. Bytes that do not
 * fill a whole 64-bit word are buffered in ctx->saved; each time a full
 * rate block has been absorbed the keccakf permutation is applied.
 * (The original comment here described the padding/finalize block,
 * which this function does not perform.)
 */
static void
sha3_Update(void *priv, void const *bufIn, size_t len)
{
	sha3_context *ctx = (sha3_context *) priv;
	/* bytes needed to complete the currently buffered word */
	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
	size_t words;
	unsigned int tail;
	size_t i;
	const uint8_t *buf = bufIn;

	/* too little input to even complete the buffered word: stash it */
	if (len < old_tail) {
		while (len--)
			ctx->saved |= (uint64_t) (*(buf++)) <<
				      ((ctx->byteIndex++) * 8);
		return;
	}

	/* finish the partially buffered word and absorb it */
	if (old_tail) {
		len -= old_tail;
		while (old_tail--)
			ctx->saved |= (uint64_t) (*(buf++)) <<
				      ((ctx->byteIndex++) * 8);

		ctx->s[ctx->wordIndex] ^= ctx->saved;
		ctx->byteIndex = 0;
		ctx->saved = 0;
		if (++ctx->wordIndex ==
		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
			keccakf(ctx->s);
			ctx->wordIndex = 0;
		}
	}

	/* absorb whole little-endian words directly */
	words = len / sizeof(uint64_t);
	tail = len - words * sizeof(uint64_t);

	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
		const uint64_t t = (uint64_t) (buf[0]) |
			((uint64_t) (buf[1]) << 8 * 1) |
			((uint64_t) (buf[2]) << 8 * 2) |
			((uint64_t) (buf[3]) << 8 * 3) |
			((uint64_t) (buf[4]) << 8 * 4) |
			((uint64_t) (buf[5]) << 8 * 5) |
			((uint64_t) (buf[6]) << 8 * 6) |
			((uint64_t) (buf[7]) << 8 * 7);
		ctx->s[ctx->wordIndex] ^= t;
		if (++ctx->wordIndex ==
		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
			keccakf(ctx->s);
			ctx->wordIndex = 0;
		}
	}

	/* buffer the remaining sub-word bytes for the next call */
	while (tail--)
		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
}
366 
367 int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
368 {
369 	sha3_context *ctx;
370 	int i;
371 
372 	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
373 	if (!ctx) {
374 		CCP_LOG_ERR("sha3-ctx creation failed");
375 		return -ENOMEM;
376 	}
377 	sha3_Init224(ctx);
378 	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
379 	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
380 		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
381 	rte_free(ctx);
382 
383 	return 0;
384 }
385 
386 int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
387 {
388 	sha3_context *ctx;
389 	int i;
390 
391 	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
392 	if (!ctx) {
393 		CCP_LOG_ERR("sha3-ctx creation failed");
394 		return -ENOMEM;
395 	}
396 	sha3_Init256(ctx);
397 	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
398 	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
399 		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
400 	rte_free(ctx);
401 
402 	return 0;
403 }
404 
405 int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
406 {
407 	sha3_context *ctx;
408 	int i;
409 
410 	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
411 	if (!ctx) {
412 		CCP_LOG_ERR("sha3-ctx creation failed");
413 		return -ENOMEM;
414 	}
415 	sha3_Init384(ctx);
416 	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
417 	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
418 		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
419 	rte_free(ctx);
420 
421 	return 0;
422 }
423 
424 int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
425 {
426 	sha3_context *ctx;
427 	int i;
428 
429 	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
430 	if (!ctx) {
431 		CCP_LOG_ERR("sha3-ctx creation failed");
432 		return -ENOMEM;
433 	}
434 	sha3_Init512(ctx);
435 	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
436 	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
437 		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
438 	rte_free(ctx);
439 
440 	return 0;
441 }
442 
/* Precompute the HMAC inner (ipad) and outer (opad) partial digests for
 * the session. The key (assumed key length == block length) is XORed with
 * the ipad/opad constants, one compression-function block is hashed, and
 * the intermediate states are stored in sess->auth.pre_compute: the ipad
 * state first, the opad state at offset ctx_len. SHA-1/2 states are stored
 * word-reversed; SHA-3 variants store the raw partial-hash context bytes.
 * Returns 0 on success, -1 on failure.
 */
static int generate_partial_hash(struct ccp_session *sess)
{

	uint8_t ipad[sess->auth.block_size];
	uint8_t	opad[sess->auth.block_size];
	uint8_t *ipad_t, *opad_t;
	uint32_t *hash_value_be32, hash_temp32[8];
	uint64_t *hash_value_be64, hash_temp64[8];
	int i, count;
	uint8_t *hash_value_sha3;

	opad_t = ipad_t = (uint8_t *)sess->auth.key;

	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);

	/* considering key size is always equal to block size of algorithm */
	for (i = 0; i < sess->auth.block_size; i++) {
		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
	}

	switch (sess->auth.algo) {
	case CCP_AUTH_ALGO_SHA1_HMAC:
		/* 32-bit word count of the digest */
		count = SHA1_DIGEST_SIZE >> 2;

		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
			return -1;
		/* store the ipad state with its words reversed */
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];

		/* opad state goes into the second ctx_len slot */
		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
			return -1;
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];
		return 0;
	case CCP_AUTH_ALGO_SHA224_HMAC:
		count = SHA256_DIGEST_SIZE >> 2;

		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
			return -1;
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];

		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
			return -1;
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];
		return 0;
	case CCP_AUTH_ALGO_SHA3_224_HMAC:
		/* SHA-3: the helpers already emit the reversed raw context */
		hash_value_sha3 = sess->auth.pre_compute;
		if (partial_hash_sha3_224(ipad, hash_value_sha3))
			return -1;

		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha3_224(opad, hash_value_sha3))
			return -1;
		return 0;
	case CCP_AUTH_ALGO_SHA256_HMAC:
		count = SHA256_DIGEST_SIZE >> 2;

		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
			return -1;
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];

		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
			return -1;
		for (i = 0; i < count; i++, hash_value_be32++)
			*hash_value_be32 = hash_temp32[count - 1 - i];
		return 0;
	case CCP_AUTH_ALGO_SHA3_256_HMAC:
		hash_value_sha3 = sess->auth.pre_compute;
		if (partial_hash_sha3_256(ipad, hash_value_sha3))
			return -1;

		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
					      + sess->auth.ctx_len);
		if (partial_hash_sha3_256(opad, hash_value_sha3))
			return -1;
		return 0;
	case CCP_AUTH_ALGO_SHA384_HMAC:
		/* 64-bit word count of the digest */
		count = SHA512_DIGEST_SIZE >> 3;

		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
			return -1;
		for (i = 0; i < count; i++, hash_value_be64++)
			*hash_value_be64 = hash_temp64[count - 1 - i];

		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
			return -1;
		for (i = 0; i < count; i++, hash_value_be64++)
			*hash_value_be64 = hash_temp64[count - 1 - i];
		return 0;
	case CCP_AUTH_ALGO_SHA3_384_HMAC:
		hash_value_sha3 = sess->auth.pre_compute;
		if (partial_hash_sha3_384(ipad, hash_value_sha3))
			return -1;

		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
					      + sess->auth.ctx_len);
		if (partial_hash_sha3_384(opad, hash_value_sha3))
			return -1;
		return 0;
	case CCP_AUTH_ALGO_SHA512_HMAC:
		count = SHA512_DIGEST_SIZE >> 3;

		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
			return -1;
		for (i = 0; i < count; i++, hash_value_be64++)
			*hash_value_be64 = hash_temp64[count - 1 - i];

		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
					       + sess->auth.ctx_len);
		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
			return -1;
		for (i = 0; i < count; i++, hash_value_be64++)
			*hash_value_be64 = hash_temp64[count - 1 - i];
		return 0;
	case CCP_AUTH_ALGO_SHA3_512_HMAC:
		hash_value_sha3 = sess->auth.pre_compute;
		if (partial_hash_sha3_512(ipad, hash_value_sha3))
			return -1;

		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
					      + sess->auth.ctx_len);
		if (partial_hash_sha3_512(opad, hash_value_sha3))
			return -1;
		return 0;
	default:
		CCP_LOG_ERR("Invalid auth algo");
		return -1;
	}
}
586 
/* prepare temporary keys K1 and K2: GF(2^128)/GF(2^64) doubling used by
 * CMAC subkey derivation — left-shift the block one bit and, if the MSB
 * was set, XOR the reduction constant Rb into the last byte.
 */
static void prepare_key(unsigned char *k, unsigned char *l, int bl)
{
	int i;

	/* one-bit left shift across the whole block, propagating carries */
	for (i = 0; i < bl; i++) {
		unsigned int carry = (i + 1 < bl && (l[i + 1] & 0x80)) ? 1 : 0;

		k[i] = (unsigned char)((l[i] << 1) | carry);
	}
	/* reduce: Rb is 0x87 for 128-bit blocks, 0x1b for 64-bit blocks */
	if (l[0] & 0x80)
		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
}
601 
602 /* subkeys K1 and K2 generation for CMAC */
603 static int
604 generate_cmac_subkeys(struct ccp_session *sess)
605 {
606 	const EVP_CIPHER *algo;
607 	EVP_CIPHER_CTX *ctx;
608 	unsigned char *ccp_ctx;
609 	size_t i;
610 	int dstlen, totlen;
611 	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
612 	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
613 	unsigned char k1[AES_BLOCK_SIZE] = {0};
614 	unsigned char k2[AES_BLOCK_SIZE] = {0};
615 
616 	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
617 		algo =  EVP_aes_128_cbc();
618 	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
619 		algo =  EVP_aes_192_cbc();
620 	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
621 		algo =  EVP_aes_256_cbc();
622 	else {
623 		CCP_LOG_ERR("Invalid CMAC type length");
624 		return -1;
625 	}
626 
627 	ctx = EVP_CIPHER_CTX_new();
628 	if (!ctx) {
629 		CCP_LOG_ERR("ctx creation failed");
630 		return -1;
631 	}
632 	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
633 			    (unsigned char *)zero_iv) <= 0)
634 		goto key_generate_err;
635 	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
636 		goto key_generate_err;
637 	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
638 			      AES_BLOCK_SIZE) <= 0)
639 		goto key_generate_err;
640 	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
641 		goto key_generate_err;
642 
643 	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
644 
645 	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
646 	prepare_key(k1, dst, AES_BLOCK_SIZE);
647 	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
648 		*ccp_ctx = k1[i];
649 
650 	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
651 				   (2 * CCP_SB_BYTES) - 1);
652 	prepare_key(k2, k1, AES_BLOCK_SIZE);
653 	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
654 		*ccp_ctx = k2[i];
655 
656 	EVP_CIPHER_CTX_free(ctx);
657 
658 	return 0;
659 
660 key_generate_err:
661 	CCP_LOG_ERR("CMAC Init failed");
662 		return -1;
663 }
664 
/* configure session: fill the cipher half of a CCP session from a cipher
 * xform — direction, key (byte-reversed for the engine), IV offsets,
 * algo/mode/engine selection, and the IOVA/phys addresses of the key and
 * nonce buffers. Returns 0 on success, negative on error.
 */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i, j, x;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		/* NOTE(review): algo is set to the CBC enum with mode ECB —
		 * presumably the algo enum has no ECB entry and the mode
		 * field is what selects ECB; confirm against ccp_crypto.h.
		 */
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_3DES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}


	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		/* engine expects the AES key byte-reversed */
		for (i = 0; i < sess->cipher.key_length ; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	case CCP_ENGINE_3DES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		/* 3DES: reverse bytes within each 8-byte DES subkey */
		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
			for (i = 0; i < 8; i++)
				sess->cipher.key_ccp[(8 + x) - i - 1] =
					sess->cipher.key[i + x];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	/* translate buffer addresses for the device:
	 * iommu_mode == 2 uses IOVA translation, otherwise physical addresses
	 */
	if (iommu_mode == 2) {
		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
	} else {
		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	}
	return 0;
}
760 
761 static int
762 ccp_configure_session_auth(struct ccp_session *sess,
763 			   const struct rte_crypto_sym_xform *xform)
764 {
765 	const struct rte_crypto_auth_xform *auth_xform = NULL;
766 	size_t i;
767 
768 	auth_xform = &xform->auth;
769 
770 	sess->auth.digest_length = auth_xform->digest_length;
771 	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
772 		sess->auth.op = CCP_AUTH_OP_GENERATE;
773 	else
774 		sess->auth.op = CCP_AUTH_OP_VERIFY;
775 	switch (auth_xform->algo) {
776 	case RTE_CRYPTO_AUTH_MD5_HMAC:
777 		if (sess->auth_opt) {
778 			sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
779 			sess->auth.offset = ((CCP_SB_BYTES << 1) -
780 					     MD5_DIGEST_SIZE);
781 			sess->auth.key_length = auth_xform->key.length;
782 			sess->auth.block_size = MD5_BLOCK_SIZE;
783 			memset(sess->auth.key, 0, sess->auth.block_size);
784 			rte_memcpy(sess->auth.key, auth_xform->key.data,
785 				   auth_xform->key.length);
786 		} else
787 			return -1; /* HMAC MD5 not supported on CCP */
788 		break;
789 	case RTE_CRYPTO_AUTH_SHA1:
790 		sess->auth.engine = CCP_ENGINE_SHA;
791 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
792 		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
793 		sess->auth.ctx = (void *)ccp_sha1_init;
794 		sess->auth.ctx_len = CCP_SB_BYTES;
795 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
796 		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
797 		break;
798 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
799 		if (sess->auth_opt) {
800 			if (auth_xform->key.length > SHA1_BLOCK_SIZE)
801 				return -1;
802 			sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
803 			sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
804 			sess->auth.block_size = SHA1_BLOCK_SIZE;
805 			sess->auth.key_length = auth_xform->key.length;
806 			memset(sess->auth.key, 0, sess->auth.block_size);
807 			rte_memcpy(sess->auth.key, auth_xform->key.data,
808 				   auth_xform->key.length);
809 		} else {
810 			if (auth_xform->key.length > SHA1_BLOCK_SIZE)
811 				return -1;
812 			sess->auth.engine = CCP_ENGINE_SHA;
813 			sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
814 			sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
815 			sess->auth.ctx_len = CCP_SB_BYTES;
816 			sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
817 			sess->auth.block_size = SHA1_BLOCK_SIZE;
818 			sess->auth.key_length = auth_xform->key.length;
819 			memset(sess->auth.key, 0, sess->auth.block_size);
820 			memset(sess->auth.pre_compute, 0,
821 			       sess->auth.ctx_len << 1);
822 			rte_memcpy(sess->auth.key, auth_xform->key.data,
823 				   auth_xform->key.length);
824 			if (generate_partial_hash(sess))
825 				return -1;
826 		}
827 		break;
828 	case RTE_CRYPTO_AUTH_SHA224:
829 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
830 		sess->auth.engine = CCP_ENGINE_SHA;
831 		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
832 		sess->auth.ctx = (void *)ccp_sha224_init;
833 		sess->auth.ctx_len = CCP_SB_BYTES;
834 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
835 		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
836 		break;
837 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
838 		if (sess->auth_opt) {
839 			if (auth_xform->key.length > SHA224_BLOCK_SIZE)
840 				return -1;
841 			sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
842 			sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
843 			sess->auth.block_size = SHA224_BLOCK_SIZE;
844 			sess->auth.key_length = auth_xform->key.length;
845 			memset(sess->auth.key, 0, sess->auth.block_size);
846 			rte_memcpy(sess->auth.key, auth_xform->key.data,
847 				   auth_xform->key.length);
848 		} else {
849 			if (auth_xform->key.length > SHA224_BLOCK_SIZE)
850 				return -1;
851 			sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
852 			sess->auth.engine = CCP_ENGINE_SHA;
853 			sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
854 			sess->auth.ctx_len = CCP_SB_BYTES;
855 			sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
856 			sess->auth.block_size = SHA224_BLOCK_SIZE;
857 			sess->auth.key_length = auth_xform->key.length;
858 			memset(sess->auth.key, 0, sess->auth.block_size);
859 			memset(sess->auth.pre_compute, 0,
860 			       sess->auth.ctx_len << 1);
861 			rte_memcpy(sess->auth.key, auth_xform->key.data,
862 				   auth_xform->key.length);
863 			if (generate_partial_hash(sess))
864 				return -1;
865 		}
866 		break;
867 	case RTE_CRYPTO_AUTH_SHA3_224:
868 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
869 		sess->auth.engine = CCP_ENGINE_SHA;
870 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
871 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
872 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
873 		break;
874 	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
875 		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
876 			return -1;
877 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
878 		sess->auth.engine = CCP_ENGINE_SHA;
879 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
880 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
881 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
882 		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
883 		sess->auth.key_length = auth_xform->key.length;
884 		memset(sess->auth.key, 0, sess->auth.block_size);
885 		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
886 		rte_memcpy(sess->auth.key, auth_xform->key.data,
887 			   auth_xform->key.length);
888 		if (generate_partial_hash(sess))
889 			return -1;
890 		break;
891 	case RTE_CRYPTO_AUTH_SHA256:
892 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
893 		sess->auth.engine = CCP_ENGINE_SHA;
894 		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
895 		sess->auth.ctx = (void *)ccp_sha256_init;
896 		sess->auth.ctx_len = CCP_SB_BYTES;
897 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
898 		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
899 		break;
900 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
901 		if (sess->auth_opt) {
902 			if (auth_xform->key.length > SHA256_BLOCK_SIZE)
903 				return -1;
904 			sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
905 			sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
906 			sess->auth.block_size = SHA256_BLOCK_SIZE;
907 			sess->auth.key_length = auth_xform->key.length;
908 			memset(sess->auth.key, 0, sess->auth.block_size);
909 			rte_memcpy(sess->auth.key, auth_xform->key.data,
910 				   auth_xform->key.length);
911 		} else {
912 			if (auth_xform->key.length > SHA256_BLOCK_SIZE)
913 				return -1;
914 			sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
915 			sess->auth.engine = CCP_ENGINE_SHA;
916 			sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
917 			sess->auth.ctx_len = CCP_SB_BYTES;
918 			sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
919 			sess->auth.block_size = SHA256_BLOCK_SIZE;
920 			sess->auth.key_length = auth_xform->key.length;
921 			memset(sess->auth.key, 0, sess->auth.block_size);
922 			memset(sess->auth.pre_compute, 0,
923 			       sess->auth.ctx_len << 1);
924 			rte_memcpy(sess->auth.key, auth_xform->key.data,
925 				   auth_xform->key.length);
926 			if (generate_partial_hash(sess))
927 				return -1;
928 		}
929 		break;
930 	case RTE_CRYPTO_AUTH_SHA3_256:
931 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
932 		sess->auth.engine = CCP_ENGINE_SHA;
933 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
934 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
935 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
936 		break;
937 	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
938 		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
939 			return -1;
940 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
941 		sess->auth.engine = CCP_ENGINE_SHA;
942 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
943 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
944 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
945 		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
946 		sess->auth.key_length = auth_xform->key.length;
947 		memset(sess->auth.key, 0, sess->auth.block_size);
948 		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
949 		rte_memcpy(sess->auth.key, auth_xform->key.data,
950 			   auth_xform->key.length);
951 		if (generate_partial_hash(sess))
952 			return -1;
953 		break;
954 	case RTE_CRYPTO_AUTH_SHA384:
955 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
956 		sess->auth.engine = CCP_ENGINE_SHA;
957 		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
958 		sess->auth.ctx = (void *)ccp_sha384_init;
959 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
960 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
961 		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
962 		break;
963 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
964 		if (sess->auth_opt) {
965 			if (auth_xform->key.length > SHA384_BLOCK_SIZE)
966 				return -1;
967 			sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
968 			sess->auth.offset = ((CCP_SB_BYTES << 1) -
969 					     SHA384_DIGEST_SIZE);
970 			sess->auth.block_size = SHA384_BLOCK_SIZE;
971 			sess->auth.key_length = auth_xform->key.length;
972 			memset(sess->auth.key, 0, sess->auth.block_size);
973 			rte_memcpy(sess->auth.key, auth_xform->key.data,
974 				   auth_xform->key.length);
975 		} else {
976 			if (auth_xform->key.length > SHA384_BLOCK_SIZE)
977 				return -1;
978 			sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
979 			sess->auth.engine = CCP_ENGINE_SHA;
980 			sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
981 			sess->auth.ctx_len = CCP_SB_BYTES << 1;
982 			sess->auth.offset = ((CCP_SB_BYTES << 1) -
983 					     SHA384_DIGEST_SIZE);
984 			sess->auth.block_size = SHA384_BLOCK_SIZE;
985 			sess->auth.key_length = auth_xform->key.length;
986 			memset(sess->auth.key, 0, sess->auth.block_size);
987 			memset(sess->auth.pre_compute, 0,
988 			       sess->auth.ctx_len << 1);
989 			rte_memcpy(sess->auth.key, auth_xform->key.data,
990 				   auth_xform->key.length);
991 			if (generate_partial_hash(sess))
992 				return -1;
993 		}
994 		break;
995 	case RTE_CRYPTO_AUTH_SHA3_384:
996 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
997 		sess->auth.engine = CCP_ENGINE_SHA;
998 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
999 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
1000 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
1001 		break;
1002 	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
1003 		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
1004 			return -1;
1005 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
1006 		sess->auth.engine = CCP_ENGINE_SHA;
1007 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
1008 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
1009 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
1010 		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
1011 		sess->auth.key_length = auth_xform->key.length;
1012 		memset(sess->auth.key, 0, sess->auth.block_size);
1013 		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
1014 		rte_memcpy(sess->auth.key, auth_xform->key.data,
1015 			   auth_xform->key.length);
1016 		if (generate_partial_hash(sess))
1017 			return -1;
1018 		break;
1019 	case RTE_CRYPTO_AUTH_SHA512:
1020 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
1021 		sess->auth.engine = CCP_ENGINE_SHA;
1022 		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
1023 		sess->auth.ctx = (void *)ccp_sha512_init;
1024 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
1025 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
1026 		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
1027 		break;
1028 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1029 		if (sess->auth_opt) {
1030 			if (auth_xform->key.length > SHA512_BLOCK_SIZE)
1031 				return -1;
1032 			sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
1033 			sess->auth.offset = ((CCP_SB_BYTES << 1) -
1034 					     SHA512_DIGEST_SIZE);
1035 			sess->auth.block_size = SHA512_BLOCK_SIZE;
1036 			sess->auth.key_length = auth_xform->key.length;
1037 			memset(sess->auth.key, 0, sess->auth.block_size);
1038 			rte_memcpy(sess->auth.key, auth_xform->key.data,
1039 				   auth_xform->key.length);
1040 		} else {
1041 			if (auth_xform->key.length > SHA512_BLOCK_SIZE)
1042 				return -1;
1043 			sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
1044 			sess->auth.engine = CCP_ENGINE_SHA;
1045 			sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
1046 			sess->auth.ctx_len = CCP_SB_BYTES << 1;
1047 			sess->auth.offset = ((CCP_SB_BYTES << 1) -
1048 					     SHA512_DIGEST_SIZE);
1049 			sess->auth.block_size = SHA512_BLOCK_SIZE;
1050 			sess->auth.key_length = auth_xform->key.length;
1051 			memset(sess->auth.key, 0, sess->auth.block_size);
1052 			memset(sess->auth.pre_compute, 0,
1053 			       sess->auth.ctx_len << 1);
1054 			rte_memcpy(sess->auth.key, auth_xform->key.data,
1055 				   auth_xform->key.length);
1056 			if (generate_partial_hash(sess))
1057 				return -1;
1058 		}
1059 		break;
1060 	case RTE_CRYPTO_AUTH_SHA3_512:
1061 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
1062 		sess->auth.engine = CCP_ENGINE_SHA;
1063 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
1064 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
1065 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
1066 		break;
1067 	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
1068 		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
1069 			return -1;
1070 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
1071 		sess->auth.engine = CCP_ENGINE_SHA;
1072 		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
1073 		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
1074 		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
1075 		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
1076 		sess->auth.key_length = auth_xform->key.length;
1077 		memset(sess->auth.key, 0, sess->auth.block_size);
1078 		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
1079 		rte_memcpy(sess->auth.key, auth_xform->key.data,
1080 			   auth_xform->key.length);
1081 		if (generate_partial_hash(sess))
1082 			return -1;
1083 		break;
1084 	case RTE_CRYPTO_AUTH_AES_CMAC:
1085 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
1086 		sess->auth.engine = CCP_ENGINE_AES;
1087 		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
1088 		sess->auth.key_length = auth_xform->key.length;
1089 		/* padding and hash result */
1090 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
1091 		sess->auth.offset = AES_BLOCK_SIZE;
1092 		sess->auth.block_size = AES_BLOCK_SIZE;
1093 		if (sess->auth.key_length == 16)
1094 			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
1095 		else if (sess->auth.key_length == 24)
1096 			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
1097 		else if (sess->auth.key_length == 32)
1098 			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
1099 		else {
1100 			CCP_LOG_ERR("Invalid CMAC key length");
1101 			return -1;
1102 		}
1103 		rte_memcpy(sess->auth.key, auth_xform->key.data,
1104 			   sess->auth.key_length);
1105 		for (i = 0; i < sess->auth.key_length; i++)
1106 			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
1107 				sess->auth.key[i];
1108 		if (generate_cmac_subkeys(sess))
1109 			return -1;
1110 		break;
1111 	default:
1112 		CCP_LOG_ERR("Unsupported hash algo");
1113 		return -ENOTSUP;
1114 	}
1115 	return 0;
1116 }
1117 
1118 static int
1119 ccp_configure_session_aead(struct ccp_session *sess,
1120 			   const struct rte_crypto_sym_xform *xform)
1121 {
1122 	const struct rte_crypto_aead_xform *aead_xform = NULL;
1123 	size_t i;
1124 
1125 	aead_xform = &xform->aead;
1126 
1127 	sess->cipher.key_length = aead_xform->key.length;
1128 	rte_memcpy(sess->cipher.key, aead_xform->key.data,
1129 		   aead_xform->key.length);
1130 
1131 	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1132 		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
1133 		sess->auth.op = CCP_AUTH_OP_GENERATE;
1134 	} else {
1135 		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
1136 		sess->auth.op = CCP_AUTH_OP_VERIFY;
1137 	}
1138 	sess->aead_algo = aead_xform->algo;
1139 	sess->auth.aad_length = aead_xform->aad_length;
1140 	sess->auth.digest_length = aead_xform->digest_length;
1141 
1142 	/* set iv parameters */
1143 	sess->iv.offset = aead_xform->iv.offset;
1144 	sess->iv.length = aead_xform->iv.length;
1145 
1146 	switch (aead_xform->algo) {
1147 	case RTE_CRYPTO_AEAD_AES_GCM:
1148 		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
1149 		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
1150 		sess->cipher.engine = CCP_ENGINE_AES;
1151 		if (sess->cipher.key_length == 16)
1152 			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
1153 		else if (sess->cipher.key_length == 24)
1154 			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
1155 		else if (sess->cipher.key_length == 32)
1156 			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
1157 		else {
1158 			CCP_LOG_ERR("Invalid aead key length");
1159 			return -1;
1160 		}
1161 		for (i = 0; i < sess->cipher.key_length; i++)
1162 			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
1163 				sess->cipher.key[i];
1164 		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
1165 		sess->auth.engine = CCP_ENGINE_AES;
1166 		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
1167 		sess->auth.ctx_len = CCP_SB_BYTES;
1168 		sess->auth.offset = 0;
1169 		sess->auth.block_size = AES_BLOCK_SIZE;
1170 		sess->cmd_id = CCP_CMD_COMBINED;
1171 		break;
1172 	default:
1173 		CCP_LOG_ERR("Unsupported aead algo");
1174 		return -ENOTSUP;
1175 	}
1176 	if (iommu_mode == 2) {
1177 		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
1178 		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
1179 	} else {
1180 		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
1181 		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
1182 	}
1183 	return 0;
1184 }
1185 
1186 int
1187 ccp_set_session_parameters(struct ccp_session *sess,
1188 			   const struct rte_crypto_sym_xform *xform,
1189 			   struct ccp_private *internals)
1190 {
1191 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
1192 	const struct rte_crypto_sym_xform *auth_xform = NULL;
1193 	const struct rte_crypto_sym_xform *aead_xform = NULL;
1194 	int ret = 0;
1195 
1196 	sess->auth_opt = internals->auth_opt;
1197 	sess->cmd_id = ccp_get_cmd_id(xform);
1198 
1199 	switch (sess->cmd_id) {
1200 	case CCP_CMD_CIPHER:
1201 		cipher_xform = xform;
1202 		break;
1203 	case CCP_CMD_AUTH:
1204 		auth_xform = xform;
1205 		break;
1206 	case CCP_CMD_CIPHER_HASH:
1207 		cipher_xform = xform;
1208 		auth_xform = xform->next;
1209 		break;
1210 	case CCP_CMD_HASH_CIPHER:
1211 		auth_xform = xform;
1212 		cipher_xform = xform->next;
1213 		break;
1214 	case CCP_CMD_COMBINED:
1215 		aead_xform = xform;
1216 		break;
1217 	default:
1218 		CCP_LOG_ERR("Unsupported cmd_id");
1219 		return -1;
1220 	}
1221 
1222 	/* Default IV length = 0 */
1223 	sess->iv.length = 0;
1224 	if (cipher_xform) {
1225 		ret = ccp_configure_session_cipher(sess, cipher_xform);
1226 		if (ret != 0) {
1227 			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
1228 			return ret;
1229 		}
1230 	}
1231 	if (auth_xform) {
1232 		ret = ccp_configure_session_auth(sess, auth_xform);
1233 		if (ret != 0) {
1234 			CCP_LOG_ERR("Invalid/unsupported auth parameters");
1235 			return ret;
1236 		}
1237 	}
1238 	if (aead_xform) {
1239 		ret = ccp_configure_session_aead(sess, aead_xform);
1240 		if (ret != 0) {
1241 			CCP_LOG_ERR("Invalid/unsupported aead parameters");
1242 			return ret;
1243 		}
1244 	}
1245 	return ret;
1246 }
1247 
1248 /* calculate CCP descriptors requirement */
1249 static inline int
1250 ccp_cipher_slot(struct ccp_session *session)
1251 {
1252 	int count = 0;
1253 
1254 	switch (session->cipher.algo) {
1255 	case CCP_CIPHER_ALGO_AES_CBC:
1256 		count = 2;
1257 		/**< op + passthrough for iv */
1258 		break;
1259 	case CCP_CIPHER_ALGO_AES_ECB:
1260 		count = 1;
1261 		/**<only op*/
1262 		break;
1263 	case CCP_CIPHER_ALGO_AES_CTR:
1264 		count = 2;
1265 		/**< op + passthrough for iv */
1266 		break;
1267 	case CCP_CIPHER_ALGO_3DES_CBC:
1268 		count = 2;
1269 		/**< op + passthrough for iv */
1270 		break;
1271 	default:
1272 		CCP_LOG_ERR("Unsupported cipher algo %d",
1273 			    session->cipher.algo);
1274 	}
1275 	return count;
1276 }
1277 
1278 static inline int
1279 ccp_auth_slot(struct ccp_session *session)
1280 {
1281 	int count = 0;
1282 
1283 	switch (session->auth.algo) {
1284 	case CCP_AUTH_ALGO_SHA1:
1285 	case CCP_AUTH_ALGO_SHA224:
1286 	case CCP_AUTH_ALGO_SHA256:
1287 	case CCP_AUTH_ALGO_SHA384:
1288 	case CCP_AUTH_ALGO_SHA512:
1289 		count = 3;
1290 		/**< op + lsb passthrough cpy to/from*/
1291 		break;
1292 	case CCP_AUTH_ALGO_MD5_HMAC:
1293 		break;
1294 	case CCP_AUTH_ALGO_SHA1_HMAC:
1295 	case CCP_AUTH_ALGO_SHA224_HMAC:
1296 	case CCP_AUTH_ALGO_SHA256_HMAC:
1297 		if (session->auth_opt == 0)
1298 			count = 6;
1299 		break;
1300 	case CCP_AUTH_ALGO_SHA384_HMAC:
1301 	case CCP_AUTH_ALGO_SHA512_HMAC:
1302 		/**
1303 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
1304 		 * 2. generate IHash = H(hash on message with PHash1
1305 		 * as init values);
1306 		 * 3. Retrieve IHash 2 slots for 384/512
1307 		 * 4. Load Phash2 = H(k ^ opad); to LSB
1308 		 * 5. generate FHash = H(hash on Ihash with Phash2
1309 		 * as init value);
1310 		 * 6. Retrieve HMAC output from LSB to host memory
1311 		 */
1312 		if (session->auth_opt == 0)
1313 			count = 7;
1314 		break;
1315 	case CCP_AUTH_ALGO_SHA3_224:
1316 	case CCP_AUTH_ALGO_SHA3_256:
1317 	case CCP_AUTH_ALGO_SHA3_384:
1318 	case CCP_AUTH_ALGO_SHA3_512:
1319 		count = 1;
1320 		/**< only op ctx and dst in host memory*/
1321 		break;
1322 	case CCP_AUTH_ALGO_SHA3_224_HMAC:
1323 	case CCP_AUTH_ALGO_SHA3_256_HMAC:
1324 		count = 3;
1325 		break;
1326 	case CCP_AUTH_ALGO_SHA3_384_HMAC:
1327 	case CCP_AUTH_ALGO_SHA3_512_HMAC:
1328 		count = 4;
1329 		/**
1330 		 * 1. Op to Perform Ihash
1331 		 * 2. Retrieve result from LSB to host memory
1332 		 * 3. Perform final hash
1333 		 */
1334 		break;
1335 	case CCP_AUTH_ALGO_AES_CMAC:
1336 		count = 4;
1337 		/**
1338 		 * op
1339 		 * extra descriptor in padding case
1340 		 * (k1/k2(255:128) with iv(127:0))
1341 		 * Retrieve result
1342 		 */
1343 		break;
1344 	default:
1345 		CCP_LOG_ERR("Unsupported auth algo %d",
1346 			    session->auth.algo);
1347 	}
1348 
1349 	return count;
1350 }
1351 
1352 static int
1353 ccp_aead_slot(struct ccp_session *session)
1354 {
1355 	int count = 0;
1356 
1357 	switch (session->aead_algo) {
1358 	case RTE_CRYPTO_AEAD_AES_GCM:
1359 		break;
1360 	default:
1361 		CCP_LOG_ERR("Unsupported aead algo %d",
1362 			    session->aead_algo);
1363 	}
1364 	switch (session->auth.algo) {
1365 	case CCP_AUTH_ALGO_AES_GCM:
1366 		count = 5;
1367 		/**
1368 		 * 1. Passthru iv
1369 		 * 2. Hash AAD
1370 		 * 3. GCTR
1371 		 * 4. Reload passthru
1372 		 * 5. Hash Final tag
1373 		 */
1374 		break;
1375 	default:
1376 		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
1377 			    session->auth.algo);
1378 	}
1379 	return count;
1380 }
1381 
1382 int
1383 ccp_compute_slot_count(struct ccp_session *session)
1384 {
1385 	int count = 0;
1386 
1387 	switch (session->cmd_id) {
1388 	case CCP_CMD_CIPHER:
1389 		count = ccp_cipher_slot(session);
1390 		break;
1391 	case CCP_CMD_AUTH:
1392 		count = ccp_auth_slot(session);
1393 		break;
1394 	case CCP_CMD_CIPHER_HASH:
1395 	case CCP_CMD_HASH_CIPHER:
1396 		count = ccp_cipher_slot(session);
1397 		count += ccp_auth_slot(session);
1398 		break;
1399 	case CCP_CMD_COMBINED:
1400 		count = ccp_aead_slot(session);
1401 		break;
1402 	default:
1403 		CCP_LOG_ERR("Unsupported cmd_id");
1404 
1405 	}
1406 
1407 	return count;
1408 }
1409 
1410 static uint8_t
1411 algo_select(int sessalgo,
1412 	    const EVP_MD **algo)
1413 {
1414 	int res = 0;
1415 
1416 	switch (sessalgo) {
1417 	case CCP_AUTH_ALGO_MD5_HMAC:
1418 		*algo = EVP_md5();
1419 		break;
1420 	case CCP_AUTH_ALGO_SHA1_HMAC:
1421 		*algo = EVP_sha1();
1422 		break;
1423 	case CCP_AUTH_ALGO_SHA224_HMAC:
1424 		*algo = EVP_sha224();
1425 		break;
1426 	case CCP_AUTH_ALGO_SHA256_HMAC:
1427 		*algo = EVP_sha256();
1428 		break;
1429 	case CCP_AUTH_ALGO_SHA384_HMAC:
1430 		*algo = EVP_sha384();
1431 		break;
1432 	case CCP_AUTH_ALGO_SHA512_HMAC:
1433 		*algo = EVP_sha512();
1434 		break;
1435 	default:
1436 		res = -EINVAL;
1437 		break;
1438 	}
1439 	return res;
1440 }
1441 
1442 static int
1443 process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
1444 		      __rte_unused uint8_t *iv,
1445 		      EVP_PKEY *pkey,
1446 		      int srclen,
1447 		      EVP_MD_CTX *ctx,
1448 		      const EVP_MD *algo,
1449 		      uint16_t d_len)
1450 {
1451 	size_t dstlen;
1452 	unsigned char temp_dst[64];
1453 
1454 	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
1455 		goto process_auth_err;
1456 
1457 	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
1458 		goto process_auth_err;
1459 
1460 	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
1461 		goto process_auth_err;
1462 
1463 	memcpy(dst, temp_dst, d_len);
1464 	return 0;
1465 process_auth_err:
1466 	CCP_LOG_ERR("Process cpu auth failed");
1467 	return -EINVAL;
1468 }
1469 
1470 static int cpu_crypto_auth(struct ccp_qp *qp,
1471 			   struct rte_crypto_op *op,
1472 			   struct ccp_session *sess,
1473 			   EVP_MD_CTX *ctx)
1474 {
1475 	uint8_t *src, *dst;
1476 	int srclen, status;
1477 	struct rte_mbuf *mbuf_src, *mbuf_dst;
1478 	const EVP_MD *algo = NULL;
1479 	EVP_PKEY *pkey;
1480 
1481 	algo_select(sess->auth.algo, &algo);
1482 	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
1483 				    sess->auth.key_length);
1484 	mbuf_src = op->sym->m_src;
1485 	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
1486 	srclen = op->sym->auth.data.length;
1487 	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
1488 				      op->sym->auth.data.offset);
1489 
1490 	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
1491 		dst = qp->temp_digest;
1492 	} else {
1493 		dst = op->sym->auth.digest.data;
1494 		if (dst == NULL) {
1495 			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
1496 						     op->sym->auth.data.offset +
1497 						     sess->auth.digest_length);
1498 		}
1499 	}
1500 	status = process_cpu_auth_hmac(src, dst, NULL,
1501 				       pkey, srclen,
1502 				       ctx,
1503 				       algo,
1504 				       sess->auth.digest_length);
1505 	if (status) {
1506 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1507 		return status;
1508 	}
1509 
1510 	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
1511 		if (memcmp(dst, op->sym->auth.digest.data,
1512 			   sess->auth.digest_length) != 0) {
1513 			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1514 		} else {
1515 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1516 		}
1517 	} else {
1518 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1519 	}
1520 	EVP_PKEY_free(pkey);
1521 	return 0;
1522 }
1523 
1524 static void
1525 ccp_perform_passthru(struct ccp_passthru *pst,
1526 		     struct ccp_queue *cmd_q)
1527 {
1528 	struct ccp_desc *desc;
1529 	union ccp_function function;
1530 
1531 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
1532 
1533 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
1534 
1535 	CCP_CMD_SOC(desc) = 0;
1536 	CCP_CMD_IOC(desc) = 0;
1537 	CCP_CMD_INIT(desc) = 0;
1538 	CCP_CMD_EOM(desc) = 0;
1539 	CCP_CMD_PROT(desc) = 0;
1540 
1541 	function.raw = 0;
1542 	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
1543 	CCP_PT_BITWISE(&function) = pst->bit_mod;
1544 	CCP_CMD_FUNCTION(desc) = function.raw;
1545 
1546 	CCP_CMD_LEN(desc) = pst->len;
1547 
1548 	if (pst->dir) {
1549 		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
1550 		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
1551 		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1552 
1553 		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
1554 		CCP_CMD_DST_HI(desc) = 0;
1555 		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
1556 
1557 		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
1558 			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
1559 	} else {
1560 
1561 		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
1562 		CCP_CMD_SRC_HI(desc) = 0;
1563 		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
1564 
1565 		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
1566 		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
1567 		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1568 	}
1569 
1570 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1571 }
1572 
1573 static int
1574 ccp_perform_hmac(struct rte_crypto_op *op,
1575 		 struct ccp_queue *cmd_q)
1576 {
1577 
1578 	struct ccp_session *session;
1579 	union ccp_function function;
1580 	struct ccp_desc *desc;
1581 	uint32_t tail;
1582 	phys_addr_t src_addr, dest_addr, dest_addr_t;
1583 	struct ccp_passthru pst;
1584 	uint64_t auth_msg_bits;
1585 	void *append_ptr;
1586 	uint8_t *addr;
1587 
1588 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
1589 	addr = session->auth.pre_compute;
1590 
1591 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
1592 					      op->sym->auth.data.offset);
1593 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
1594 						session->auth.ctx_len);
1595 	if (iommu_mode == 2) {
1596 		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
1597 		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
1598 	} else {
1599 		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
1600 		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
1601 	}
1602 	dest_addr_t = dest_addr;
1603 
1604 	/** Load PHash1 to LSB*/
1605 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1606 	pst.len = session->auth.ctx_len;
1607 	pst.dir = 1;
1608 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1609 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1610 	ccp_perform_passthru(&pst, cmd_q);
1611 
1612 	/**sha engine command descriptor for IntermediateHash*/
1613 
1614 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
1615 	memset(desc, 0, Q_DESC_SIZE);
1616 
1617 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
1618 
1619 	CCP_CMD_SOC(desc) = 0;
1620 	CCP_CMD_IOC(desc) = 0;
1621 	CCP_CMD_INIT(desc) = 1;
1622 	CCP_CMD_EOM(desc) = 1;
1623 	CCP_CMD_PROT(desc) = 0;
1624 
1625 	function.raw = 0;
1626 	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
1627 	CCP_CMD_FUNCTION(desc) = function.raw;
1628 
1629 	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
1630 	auth_msg_bits = (op->sym->auth.data.length +
1631 			 session->auth.block_size)  * 8;
1632 
1633 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1634 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1635 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1636 
1637 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
1638 	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
1639 	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
1640 
1641 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1642 
1643 	rte_wmb();
1644 
1645 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1646 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1647 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1648 		      cmd_q->qcontrol | CMD_Q_RUN);
1649 
1650 	/* Intermediate Hash value retrieve */
1651 	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
1652 	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
1653 
1654 		pst.src_addr =
1655 			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
1656 		pst.dest_addr = dest_addr_t;
1657 		pst.len = CCP_SB_BYTES;
1658 		pst.dir = 0;
1659 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1660 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1661 		ccp_perform_passthru(&pst, cmd_q);
1662 
1663 		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1664 		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
1665 		pst.len = CCP_SB_BYTES;
1666 		pst.dir = 0;
1667 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1668 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1669 		ccp_perform_passthru(&pst, cmd_q);
1670 
1671 	} else {
1672 		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1673 		pst.dest_addr = dest_addr_t;
1674 		pst.len = session->auth.ctx_len;
1675 		pst.dir = 0;
1676 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1677 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1678 		ccp_perform_passthru(&pst, cmd_q);
1679 
1680 	}
1681 
1682 	/** Load PHash2 to LSB*/
1683 	addr += session->auth.ctx_len;
1684 	if (iommu_mode == 2)
1685 		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
1686 	else
1687 		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
1688 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1689 	pst.len = session->auth.ctx_len;
1690 	pst.dir = 1;
1691 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1692 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1693 	ccp_perform_passthru(&pst, cmd_q);
1694 
1695 	/**sha engine command descriptor for FinalHash*/
1696 	dest_addr_t += session->auth.offset;
1697 
1698 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
1699 	memset(desc, 0, Q_DESC_SIZE);
1700 
1701 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
1702 
1703 	CCP_CMD_SOC(desc) = 0;
1704 	CCP_CMD_IOC(desc) = 0;
1705 	CCP_CMD_INIT(desc) = 1;
1706 	CCP_CMD_EOM(desc) = 1;
1707 	CCP_CMD_PROT(desc) = 0;
1708 
1709 	function.raw = 0;
1710 	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
1711 	CCP_CMD_FUNCTION(desc) = function.raw;
1712 
1713 	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
1714 			     session->auth.offset);
1715 	auth_msg_bits = (session->auth.block_size +
1716 			 session->auth.ctx_len -
1717 			 session->auth.offset) * 8;
1718 
1719 	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
1720 	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
1721 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1722 
1723 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
1724 	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
1725 	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
1726 
1727 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1728 
1729 	rte_wmb();
1730 
1731 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1732 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1733 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1734 		      cmd_q->qcontrol | CMD_Q_RUN);
1735 
1736 	/* Retrieve hmac output */
1737 	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1738 	pst.dest_addr = dest_addr;
1739 	pst.len = session->auth.ctx_len;
1740 	pst.dir = 0;
1741 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1742 	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
1743 	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
1744 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1745 	else
1746 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1747 	ccp_perform_passthru(&pst, cmd_q);
1748 
1749 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1750 	return 0;
1751 
1752 }
1753 
1754 static int
1755 ccp_perform_sha(struct rte_crypto_op *op,
1756 		struct ccp_queue *cmd_q)
1757 {
1758 	struct ccp_session *session;
1759 	union ccp_function function;
1760 	struct ccp_desc *desc;
1761 	uint32_t tail;
1762 	phys_addr_t src_addr, dest_addr;
1763 	struct ccp_passthru pst;
1764 	void *append_ptr;
1765 	uint64_t auth_msg_bits;
1766 
1767 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
1768 
1769 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
1770 					      op->sym->auth.data.offset);
1771 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
1772 						session->auth.ctx_len);
1773 	if (iommu_mode == 2) {
1774 		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
1775 		pst.src_addr = (phys_addr_t)sha_ctx;
1776 	} else {
1777 		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
1778 		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
1779 						     session->auth.ctx);
1780 	}
1781 
1782 	/** Passthru sha context*/
1783 
1784 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1785 	pst.len = session->auth.ctx_len;
1786 	pst.dir = 1;
1787 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1788 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1789 	ccp_perform_passthru(&pst, cmd_q);
1790 
1791 	/**prepare sha command descriptor*/
1792 
1793 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
1794 	memset(desc, 0, Q_DESC_SIZE);
1795 
1796 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
1797 
1798 	CCP_CMD_SOC(desc) = 0;
1799 	CCP_CMD_IOC(desc) = 0;
1800 	CCP_CMD_INIT(desc) = 1;
1801 	CCP_CMD_EOM(desc) = 1;
1802 	CCP_CMD_PROT(desc) = 0;
1803 
1804 	function.raw = 0;
1805 	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
1806 	CCP_CMD_FUNCTION(desc) = function.raw;
1807 
1808 	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
1809 	auth_msg_bits = op->sym->auth.data.length * 8;
1810 
1811 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1812 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1813 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1814 
1815 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
1816 	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
1817 	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
1818 
1819 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1820 
1821 	rte_wmb();
1822 
1823 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1824 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1825 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1826 		      cmd_q->qcontrol | CMD_Q_RUN);
1827 
1828 	/* Hash value retrieve */
1829 	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
1830 	pst.dest_addr = dest_addr;
1831 	pst.len = session->auth.ctx_len;
1832 	pst.dir = 0;
1833 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1834 	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
1835 	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
1836 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1837 	else
1838 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1839 	ccp_perform_passthru(&pst, cmd_q);
1840 
1841 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1842 	return 0;
1843 
1844 }
1845 
/* Queue the SHA3 HMAC sequence for one op: hash the message keyed
 * with the first precompute block, retrieve the intermediate hash
 * from the LSB, then run the final keyed hash with the second
 * precompute block, writing the result into space appended to the
 * source mbuf.
 * Returns 0 on success, -1 on mbuf append failure.
 */
static int
ccp_perform_sha3_hmac(struct rte_crypto_op *op,
		      struct ccp_queue *cmd_q)
{
	struct ccp_session *session;
	struct ccp_passthru pst;
	union ccp_function function;
	struct ccp_desc *desc;
	uint8_t *append_ptr;
	uint32_t tail;
	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;

	session = (struct ccp_session *)op->sym->session->driver_priv_data;

	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
					      op->sym->auth.data.offset);
	/* reserve tailroom for the digest / scratch area */
	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
						session->auth.ctx_len);
	if (!append_ptr) {
		CCP_LOG_ERR("CCP MBUF append failed\n");
		return -1;
	}
	/* translate the scratch area and the precompute buffer to
	 * device-visible addresses
	 */
	if (iommu_mode == 2) {
		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
					session->auth.pre_compute);
	} else {
		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
					session->auth.pre_compute);
	}
	/* second half of the scratch area holds the intermediate hash */
	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);

	/*desc1 for SHA3-Ihash operation */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	function.raw = 0;
	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
	CCP_CMD_FUNCTION(desc) = function.raw;
	CCP_CMD_LEN(desc) = op->sym->auth.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	/* intermediate hash lands in the queue's SHA LSB slot */
	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
	CCP_CMD_DST_HI(desc) = 0;
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

	/* key field carries the first precompute block */
	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	/* ensure descriptor writes land before ringing the doorbell */
	rte_wmb();
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	/* Intermediate Hash value retrieve */
	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
		/* 384/512 digests span two LSB slots; copy both halves */
		pst.src_addr =
			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
		pst.dest_addr = dest_addr_t;
		pst.len = CCP_SB_BYTES;
		pst.dir = 0;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);

		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
		pst.len = CCP_SB_BYTES;
		pst.dir = 0;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);

	} else {
		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
		pst.dest_addr = dest_addr_t;
		pst.len = CCP_SB_BYTES;
		pst.dir = 0;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	/**sha engine command descriptor for FinalHash*/
	/* advance to the second precompute block for the outer hash */
	ctx_paddr += CCP_SHA3_CTX_SIZE;
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	function.raw = 0;
	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	/* skip leading pad so only the digest bytes feed the final hash */
	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
	} else {
		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
	}

	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	/* final HMAC is written to the start of the appended area */
	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	rte_wmb();
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
1990 
1991 static int
1992 ccp_perform_sha3(struct rte_crypto_op *op,
1993 		 struct ccp_queue *cmd_q)
1994 {
1995 	struct ccp_session *session;
1996 	union ccp_function function;
1997 	struct ccp_desc *desc;
1998 	uint8_t *ctx_addr = NULL, *append_ptr = NULL;
1999 	uint32_t tail;
2000 	phys_addr_t src_addr, dest_addr, ctx_paddr;
2001 
2002 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2003 
2004 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
2005 					      op->sym->auth.data.offset);
2006 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
2007 						session->auth.ctx_len);
2008 	if (!append_ptr) {
2009 		CCP_LOG_ERR("CCP MBUF append failed\n");
2010 		return -1;
2011 	}
2012 	if (iommu_mode == 2) {
2013 		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
2014 		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
2015 	} else {
2016 		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
2017 		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
2018 	}
2019 
2020 	ctx_addr = session->auth.sha3_ctx;
2021 
2022 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2023 	memset(desc, 0, Q_DESC_SIZE);
2024 
2025 	/* prepare desc for SHA3 operation */
2026 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
2027 	CCP_CMD_INIT(desc) = 1;
2028 	CCP_CMD_EOM(desc) = 1;
2029 
2030 	function.raw = 0;
2031 	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
2032 	CCP_CMD_FUNCTION(desc) = function.raw;
2033 
2034 	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
2035 
2036 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2037 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2038 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2039 
2040 	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
2041 	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
2042 	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2043 
2044 	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
2045 	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
2046 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2047 
2048 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2049 
2050 	rte_wmb();
2051 
2052 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2053 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2054 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2055 		      cmd_q->qcontrol | CMD_Q_RUN);
2056 
2057 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
2058 	return 0;
2059 }
2060 
2061 static int
2062 ccp_perform_aes_cmac(struct rte_crypto_op *op,
2063 		     struct ccp_queue *cmd_q)
2064 {
2065 	struct ccp_session *session;
2066 	union ccp_function function;
2067 	struct ccp_passthru pst;
2068 	struct ccp_desc *desc;
2069 	uint32_t tail;
2070 	uint8_t *src_tb, *append_ptr, *ctx_addr;
2071 	phys_addr_t src_addr, dest_addr, key_addr;
2072 	int length, non_align_len;
2073 
2074 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2075 	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
2076 
2077 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
2078 					      op->sym->auth.data.offset);
2079 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
2080 						session->auth.ctx_len);
2081 	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
2082 
2083 	function.raw = 0;
2084 	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
2085 	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
2086 	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
2087 
2088 	if (op->sym->auth.data.length % session->auth.block_size == 0) {
2089 
2090 		ctx_addr = session->auth.pre_compute;
2091 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
2092 		if (iommu_mode == 2)
2093 			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
2094 							(void *)ctx_addr);
2095 		else
2096 			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
2097 							(void *)ctx_addr);
2098 
2099 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2100 		pst.len = CCP_SB_BYTES;
2101 		pst.dir = 1;
2102 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2103 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
2104 		ccp_perform_passthru(&pst, cmd_q);
2105 
2106 		desc = &cmd_q->qbase_desc[cmd_q->qidx];
2107 		memset(desc, 0, Q_DESC_SIZE);
2108 
2109 		/* prepare desc for aes-cmac command */
2110 		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2111 		CCP_CMD_EOM(desc) = 1;
2112 		CCP_CMD_FUNCTION(desc) = function.raw;
2113 
2114 		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
2115 		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2116 		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2117 		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2118 
2119 		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2120 		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2121 		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2122 		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2123 
2124 		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2125 
2126 		rte_wmb();
2127 
2128 		tail =
2129 		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2130 		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2131 		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2132 			      cmd_q->qcontrol | CMD_Q_RUN);
2133 	} else {
2134 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
2135 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
2136 		if (iommu_mode == 2)
2137 			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
2138 							(void *)ctx_addr);
2139 		else
2140 			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
2141 							(void *)ctx_addr);
2142 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2143 		pst.len = CCP_SB_BYTES;
2144 		pst.dir = 1;
2145 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2146 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
2147 		ccp_perform_passthru(&pst, cmd_q);
2148 
2149 		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
2150 		length *= AES_BLOCK_SIZE;
2151 		non_align_len = op->sym->auth.data.length - length;
2152 		/* prepare desc for aes-cmac command */
2153 		/*Command 1*/
2154 		desc = &cmd_q->qbase_desc[cmd_q->qidx];
2155 		memset(desc, 0, Q_DESC_SIZE);
2156 
2157 		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2158 		CCP_CMD_INIT(desc) = 1;
2159 		CCP_CMD_FUNCTION(desc) = function.raw;
2160 
2161 		CCP_CMD_LEN(desc) = length;
2162 		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2163 		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2164 		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2165 
2166 		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2167 		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2168 		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2169 		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2170 
2171 		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2172 
2173 		/*Command 2*/
2174 		append_ptr = append_ptr + CCP_SB_BYTES;
2175 		memset(append_ptr, 0, AES_BLOCK_SIZE);
2176 		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
2177 						 uint8_t *,
2178 						 op->sym->auth.data.offset +
2179 						 length);
2180 		rte_memcpy(append_ptr, src_tb, non_align_len);
2181 		append_ptr[non_align_len] = CMAC_PAD_VALUE;
2182 
2183 		desc = &cmd_q->qbase_desc[cmd_q->qidx];
2184 		memset(desc, 0, Q_DESC_SIZE);
2185 
2186 		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2187 		CCP_CMD_EOM(desc) = 1;
2188 		CCP_CMD_FUNCTION(desc) = function.raw;
2189 		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
2190 
2191 		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
2192 		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
2193 		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2194 
2195 		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2196 		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2197 		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2198 		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2199 
2200 		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2201 
2202 		rte_wmb();
2203 		tail =
2204 		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2205 		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2206 		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2207 			      cmd_q->qcontrol | CMD_Q_RUN);
2208 	}
2209 	/* Retrieve result */
2210 	pst.dest_addr = dest_addr;
2211 	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2212 	pst.len = CCP_SB_BYTES;
2213 	pst.dir = 0;
2214 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2215 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
2216 	ccp_perform_passthru(&pst, cmd_q);
2217 
2218 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
2219 	return 0;
2220 }
2221 
2222 static int
2223 ccp_perform_aes(struct rte_crypto_op *op,
2224 		struct ccp_queue *cmd_q,
2225 		struct ccp_batch_info *b_info)
2226 {
2227 	struct ccp_session *session;
2228 	union ccp_function function;
2229 	uint8_t *lsb_buf;
2230 	struct ccp_passthru pst = {0};
2231 	struct ccp_desc *desc;
2232 	phys_addr_t src_addr, dest_addr, key_addr;
2233 	uint8_t *iv;
2234 
2235 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2236 	function.raw = 0;
2237 
2238 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
2239 	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
2240 		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
2241 			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
2242 				   iv, session->iv.length);
2243 			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
2244 			CCP_AES_SIZE(&function) = 0x1F;
2245 		} else {
2246 			lsb_buf =
2247 			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
2248 			rte_memcpy(lsb_buf +
2249 				   (CCP_SB_BYTES - session->iv.length),
2250 				   iv, session->iv.length);
2251 			pst.src_addr = b_info->lsb_buf_phys +
2252 				(b_info->lsb_buf_idx * CCP_SB_BYTES);
2253 			b_info->lsb_buf_idx++;
2254 		}
2255 
2256 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2257 		pst.len = CCP_SB_BYTES;
2258 		pst.dir = 1;
2259 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2260 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
2261 		ccp_perform_passthru(&pst, cmd_q);
2262 	}
2263 
2264 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2265 
2266 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
2267 					      op->sym->cipher.data.offset);
2268 	if (likely(op->sym->m_dst != NULL))
2269 		dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
2270 						op->sym->cipher.data.offset);
2271 	else
2272 		dest_addr = src_addr;
2273 	key_addr = session->cipher.key_phys;
2274 
2275 	/* prepare desc for aes command */
2276 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2277 	CCP_CMD_INIT(desc) = 1;
2278 	CCP_CMD_EOM(desc) = 1;
2279 
2280 	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
2281 	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
2282 	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
2283 	CCP_CMD_FUNCTION(desc) = function.raw;
2284 
2285 	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
2286 
2287 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2288 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2289 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2290 
2291 	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
2292 	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
2293 	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2294 
2295 	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2296 	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2297 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2298 
2299 	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
2300 		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2301 
2302 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2303 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
2304 	return 0;
2305 }
2306 
2307 static int
2308 ccp_perform_3des(struct rte_crypto_op *op,
2309 		struct ccp_queue *cmd_q,
2310 		struct ccp_batch_info *b_info)
2311 {
2312 	struct ccp_session *session;
2313 	union ccp_function function;
2314 	unsigned char *lsb_buf;
2315 	struct ccp_passthru pst;
2316 	struct ccp_desc *desc;
2317 	uint32_t tail;
2318 	uint8_t *iv;
2319 	phys_addr_t src_addr, dest_addr, key_addr;
2320 
2321 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2322 
2323 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
2324 	switch (session->cipher.um.des_mode) {
2325 	case CCP_DES_MODE_CBC:
2326 		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
2327 		b_info->lsb_buf_idx++;
2328 
2329 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
2330 			   iv, session->iv.length);
2331 		if (iommu_mode == 2)
2332 			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
2333 							(void *) lsb_buf);
2334 		else
2335 			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
2336 							(void *) lsb_buf);
2337 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2338 		pst.len = CCP_SB_BYTES;
2339 		pst.dir = 1;
2340 		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2341 		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
2342 		ccp_perform_passthru(&pst, cmd_q);
2343 		break;
2344 	case CCP_DES_MODE_CFB:
2345 	case CCP_DES_MODE_ECB:
2346 		CCP_LOG_ERR("Unsupported DES cipher mode");
2347 		return -ENOTSUP;
2348 	}
2349 
2350 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
2351 					      op->sym->cipher.data.offset);
2352 	if (unlikely(op->sym->m_dst != NULL))
2353 		dest_addr =
2354 			rte_pktmbuf_iova_offset(op->sym->m_dst,
2355 						   op->sym->cipher.data.offset);
2356 	else
2357 		dest_addr = src_addr;
2358 
2359 	if (iommu_mode == 2)
2360 		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
2361 	else
2362 		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
2363 
2364 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2365 
2366 	memset(desc, 0, Q_DESC_SIZE);
2367 
2368 	/* prepare desc for des command */
2369 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
2370 
2371 	CCP_CMD_SOC(desc) = 0;
2372 	CCP_CMD_IOC(desc) = 0;
2373 	CCP_CMD_INIT(desc) = 1;
2374 	CCP_CMD_EOM(desc) = 1;
2375 	CCP_CMD_PROT(desc) = 0;
2376 
2377 	function.raw = 0;
2378 	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
2379 	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
2380 	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
2381 	CCP_CMD_FUNCTION(desc) = function.raw;
2382 
2383 	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
2384 
2385 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2386 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2387 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2388 
2389 	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
2390 	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
2391 	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2392 
2393 	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2394 	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2395 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2396 
2397 	if (session->cipher.um.des_mode)
2398 		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2399 
2400 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2401 
2402 	rte_wmb();
2403 
2404 	/* Write the new tail address back to the queue register */
2405 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2406 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2407 	/* Turn the queue back on using our cached control register */
2408 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2409 		      cmd_q->qcontrol | CMD_Q_RUN);
2410 
2411 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
2412 	return 0;
2413 }
2414 
2415 static int
2416 ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
2417 {
2418 	struct ccp_session *session;
2419 	union ccp_function function;
2420 	uint8_t *iv;
2421 	struct ccp_passthru pst;
2422 	struct ccp_desc *desc;
2423 	uint32_t tail;
2424 	uint64_t *temp;
2425 	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
2426 	phys_addr_t digest_dest_addr;
2427 	int length, non_align_len;
2428 
2429 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2430 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
2431 	key_addr = session->cipher.key_phys;
2432 
2433 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
2434 					      op->sym->aead.data.offset);
2435 	if (unlikely(op->sym->m_dst != NULL))
2436 		dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
2437 						op->sym->aead.data.offset);
2438 	else
2439 		dest_addr = src_addr;
2440 	rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
2441 	digest_dest_addr = op->sym->aead.digest.phys_addr;
2442 	temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
2443 	*temp++ = rte_bswap64(session->auth.aad_length << 3);
2444 	*temp = rte_bswap64(op->sym->aead.data.length << 3);
2445 
2446 	non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
2447 	length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
2448 
2449 	aad_addr = op->sym->aead.aad.phys_addr;
2450 
2451 	/* CMD1 IV Passthru */
2452 	rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
2453 		   session->iv.length);
2454 	pst.src_addr = session->cipher.nonce_phys;
2455 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2456 	pst.len = CCP_SB_BYTES;
2457 	pst.dir = 1;
2458 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2459 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
2460 	ccp_perform_passthru(&pst, cmd_q);
2461 
2462 	/* CMD2 GHASH-AAD */
2463 	function.raw = 0;
2464 	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
2465 	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
2466 	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
2467 
2468 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2469 	memset(desc, 0, Q_DESC_SIZE);
2470 
2471 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2472 	CCP_CMD_INIT(desc) = 1;
2473 	CCP_CMD_FUNCTION(desc) = function.raw;
2474 
2475 	CCP_CMD_LEN(desc) = session->auth.aad_length;
2476 
2477 	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
2478 	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
2479 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2480 
2481 	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2482 	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2483 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2484 
2485 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2486 
2487 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2488 	rte_wmb();
2489 
2490 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2491 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2492 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2493 		      cmd_q->qcontrol | CMD_Q_RUN);
2494 
2495 	/* CMD3 : GCTR Plain text */
2496 	function.raw = 0;
2497 	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
2498 	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
2499 	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
2500 	if (non_align_len == 0)
2501 		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
2502 	else
2503 		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
2504 
2505 
2506 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2507 	memset(desc, 0, Q_DESC_SIZE);
2508 
2509 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2510 	CCP_CMD_EOM(desc) = 1;
2511 	CCP_CMD_FUNCTION(desc) = function.raw;
2512 
2513 	CCP_CMD_LEN(desc) = length;
2514 
2515 	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
2516 	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
2517 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2518 
2519 	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
2520 	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
2521 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2522 
2523 	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2524 	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2525 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2526 
2527 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2528 
2529 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2530 	rte_wmb();
2531 
2532 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2533 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2534 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2535 		      cmd_q->qcontrol | CMD_Q_RUN);
2536 
2537 	/* CMD4 : PT to copy IV */
2538 	pst.src_addr = session->cipher.nonce_phys;
2539 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
2540 	pst.len = AES_BLOCK_SIZE;
2541 	pst.dir = 1;
2542 	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
2543 	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
2544 	ccp_perform_passthru(&pst, cmd_q);
2545 
2546 	/* CMD5 : GHASH-Final */
2547 	function.raw = 0;
2548 	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
2549 	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
2550 	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
2551 
2552 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
2553 	memset(desc, 0, Q_DESC_SIZE);
2554 
2555 	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
2556 	CCP_CMD_FUNCTION(desc) = function.raw;
2557 	/* Last block (AAD_len || PT_len)*/
2558 	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
2559 
2560 	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
2561 	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
2562 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2563 
2564 	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
2565 	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
2566 	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2567 
2568 	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
2569 	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
2570 	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
2571 
2572 	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
2573 
2574 	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
2575 	rte_wmb();
2576 
2577 	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
2578 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
2579 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2580 		      cmd_q->qcontrol | CMD_Q_RUN);
2581 
2582 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
2583 	return 0;
2584 }
2585 
2586 static inline int
2587 ccp_crypto_cipher(struct rte_crypto_op *op,
2588 		  struct ccp_queue *cmd_q,
2589 		  struct ccp_batch_info *b_info)
2590 {
2591 	int result = 0;
2592 	struct ccp_session *session;
2593 
2594 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2595 
2596 	switch (session->cipher.algo) {
2597 	case CCP_CIPHER_ALGO_AES_CBC:
2598 		result = ccp_perform_aes(op, cmd_q, b_info);
2599 		b_info->desccnt += 2;
2600 		break;
2601 	case CCP_CIPHER_ALGO_AES_CTR:
2602 		result = ccp_perform_aes(op, cmd_q, b_info);
2603 		b_info->desccnt += 2;
2604 		break;
2605 	case CCP_CIPHER_ALGO_AES_ECB:
2606 		result = ccp_perform_aes(op, cmd_q, b_info);
2607 		b_info->desccnt += 1;
2608 		break;
2609 	case CCP_CIPHER_ALGO_3DES_CBC:
2610 		result = ccp_perform_3des(op, cmd_q, b_info);
2611 		b_info->desccnt += 2;
2612 		break;
2613 	default:
2614 		CCP_LOG_ERR("Unsupported cipher algo %d",
2615 			    session->cipher.algo);
2616 		return -ENOTSUP;
2617 	}
2618 	return result;
2619 }
2620 
2621 static inline int
2622 ccp_crypto_auth(struct rte_crypto_op *op,
2623 		struct ccp_queue *cmd_q,
2624 		struct ccp_batch_info *b_info)
2625 {
2626 
2627 	int result = 0;
2628 	struct ccp_session *session;
2629 
2630 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2631 
2632 	switch (session->auth.algo) {
2633 	case CCP_AUTH_ALGO_SHA1:
2634 	case CCP_AUTH_ALGO_SHA224:
2635 	case CCP_AUTH_ALGO_SHA256:
2636 	case CCP_AUTH_ALGO_SHA384:
2637 	case CCP_AUTH_ALGO_SHA512:
2638 		result = ccp_perform_sha(op, cmd_q);
2639 		b_info->desccnt += 3;
2640 		break;
2641 	case CCP_AUTH_ALGO_MD5_HMAC:
2642 		if (session->auth_opt == 0)
2643 			result = -1;
2644 		break;
2645 	case CCP_AUTH_ALGO_SHA1_HMAC:
2646 	case CCP_AUTH_ALGO_SHA224_HMAC:
2647 	case CCP_AUTH_ALGO_SHA256_HMAC:
2648 		if (session->auth_opt == 0) {
2649 			result = ccp_perform_hmac(op, cmd_q);
2650 			b_info->desccnt += 6;
2651 		}
2652 		break;
2653 	case CCP_AUTH_ALGO_SHA384_HMAC:
2654 	case CCP_AUTH_ALGO_SHA512_HMAC:
2655 		if (session->auth_opt == 0) {
2656 			result = ccp_perform_hmac(op, cmd_q);
2657 			b_info->desccnt += 7;
2658 		}
2659 		break;
2660 	case CCP_AUTH_ALGO_SHA3_224:
2661 	case CCP_AUTH_ALGO_SHA3_256:
2662 	case CCP_AUTH_ALGO_SHA3_384:
2663 	case CCP_AUTH_ALGO_SHA3_512:
2664 		result = ccp_perform_sha3(op, cmd_q);
2665 		b_info->desccnt += 1;
2666 		break;
2667 	case CCP_AUTH_ALGO_SHA3_224_HMAC:
2668 	case CCP_AUTH_ALGO_SHA3_256_HMAC:
2669 		result = ccp_perform_sha3_hmac(op, cmd_q);
2670 		b_info->desccnt += 3;
2671 		break;
2672 	case CCP_AUTH_ALGO_SHA3_384_HMAC:
2673 	case CCP_AUTH_ALGO_SHA3_512_HMAC:
2674 		result = ccp_perform_sha3_hmac(op, cmd_q);
2675 		b_info->desccnt += 4;
2676 		break;
2677 	case CCP_AUTH_ALGO_AES_CMAC:
2678 		result = ccp_perform_aes_cmac(op, cmd_q);
2679 		b_info->desccnt += 4;
2680 		break;
2681 	default:
2682 		CCP_LOG_ERR("Unsupported auth algo %d",
2683 			    session->auth.algo);
2684 		return -ENOTSUP;
2685 	}
2686 
2687 	return result;
2688 }
2689 
2690 static inline int
2691 ccp_crypto_aead(struct rte_crypto_op *op,
2692 		struct ccp_queue *cmd_q,
2693 		struct ccp_batch_info *b_info)
2694 {
2695 	int result = 0;
2696 	struct ccp_session *session;
2697 
2698 	session = (struct ccp_session *)op->sym->session->driver_priv_data;
2699 
2700 	switch (session->auth.algo) {
2701 	case CCP_AUTH_ALGO_AES_GCM:
2702 		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
2703 			CCP_LOG_ERR("Incorrect chain order");
2704 			return -1;
2705 		}
2706 		result = ccp_perform_aes_gcm(op, cmd_q);
2707 		b_info->desccnt += 5;
2708 		break;
2709 	default:
2710 		CCP_LOG_ERR("Unsupported aead algo %d",
2711 			    session->aead_algo);
2712 		return -ENOTSUP;
2713 	}
2714 	return result;
2715 }
2716 
2717 int
2718 process_ops_to_enqueue(struct ccp_qp *qp,
2719 		       struct rte_crypto_op **op,
2720 		       struct ccp_queue *cmd_q,
2721 		       uint16_t nb_ops,
2722 		       uint16_t total_nb_ops,
2723 		       int slots_req,
2724 		       uint16_t b_idx)
2725 {
2726 	int i, result = 0;
2727 	struct ccp_batch_info *b_info;
2728 	struct ccp_session *session;
2729 	EVP_MD_CTX *auth_ctx = NULL;
2730 
2731 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
2732 		CCP_LOG_ERR("batch info allocation failed");
2733 		return 0;
2734 	}
2735 
2736 	auth_ctx = EVP_MD_CTX_create();
2737 	if (unlikely(!auth_ctx)) {
2738 		CCP_LOG_ERR("Unable to create auth ctx");
2739 		return 0;
2740 	}
2741 	b_info->auth_ctr = 0;
2742 
2743 	/* populate batch info necessary for dequeue */
2744 	b_info->op_idx = 0;
2745 	b_info->b_idx = 0;
2746 	b_info->lsb_buf_idx = 0;
2747 	b_info->desccnt = 0;
2748 	b_info->cmd_q = cmd_q;
2749 	if (iommu_mode == 2)
2750 		b_info->lsb_buf_phys =
2751 			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
2752 	else
2753 		b_info->lsb_buf_phys =
2754 			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
2755 
2756 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
2757 
2758 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
2759 					 Q_DESC_SIZE);
2760 	for (i = b_idx; i < (nb_ops+b_idx); i++) {
2761 		session = (struct ccp_session *)
2762 			op[i]->sym->session->driver_priv_data;
2763 		switch (session->cmd_id) {
2764 		case CCP_CMD_CIPHER:
2765 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
2766 			break;
2767 		case CCP_CMD_AUTH:
2768 			if (session->auth_opt) {
2769 				b_info->auth_ctr++;
2770 				result = cpu_crypto_auth(qp, op[i],
2771 							 session, auth_ctx);
2772 			} else
2773 				result = ccp_crypto_auth(op[i], cmd_q, b_info);
2774 			break;
2775 		case CCP_CMD_CIPHER_HASH:
2776 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
2777 			if (result)
2778 				break;
2779 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
2780 			break;
2781 		case CCP_CMD_HASH_CIPHER:
2782 			if (session->auth_opt) {
2783 				result = cpu_crypto_auth(qp, op[i],
2784 							 session, auth_ctx);
2785 				if (op[i]->status !=
2786 				    RTE_CRYPTO_OP_STATUS_SUCCESS)
2787 					CCP_LOG_ERR("RTE_CRYPTO_OP_STATUS_AUTH_FAILED");
2788 			} else
2789 				result = ccp_crypto_auth(op[i], cmd_q, b_info);
2790 
2791 			if (result)
2792 				break;
2793 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
2794 			break;
2795 		case CCP_CMD_COMBINED:
2796 			result = ccp_crypto_aead(op[i], cmd_q, b_info);
2797 			break;
2798 		default:
2799 			CCP_LOG_ERR("Unsupported cmd_id");
2800 			result = -1;
2801 		}
2802 		if (unlikely(result < 0)) {
2803 			rte_atomic64_add(&b_info->cmd_q->free_slots,
2804 					 (slots_req - b_info->desccnt));
2805 			break;
2806 		}
2807 		b_info->op[i] = op[i];
2808 	}
2809 
2810 	b_info->opcnt = i;
2811 	b_info->b_idx = b_idx;
2812 	b_info->total_nb_ops = total_nb_ops;
2813 	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
2814 					 Q_DESC_SIZE);
2815 
2816 	rte_wmb();
2817 	/* Write the new tail address back to the queue register */
2818 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
2819 			      b_info->tail_offset);
2820 	/* Turn the queue back on using our cached control register */
2821 	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
2822 			      cmd_q->qcontrol | CMD_Q_RUN);
2823 
2824 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
2825 
2826 	EVP_MD_CTX_destroy(auth_ctx);
2827 	return i-b_idx;
2828 }
2829 
/* Post-process a completed auth/AEAD op: locate the digest the HW wrote
 * into the mbuf tailroom, byte-swap it on the host where required, then
 * either verify it against the caller's digest or copy it out, and trim
 * the scratch area from the mbuf.
 */
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)op->sym->session->driver_priv_data;

	/* AEAD ops carry the digest in sym->aead, plain auth in sym->auth */
	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	/* The HW result lives in the ctx_len bytes appended to the tail of
	 * the last mbuf segment at enqueue time.
	 */
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			/* Reverse the digest bytes into digest_le and
			 * read it from offset 0 below.
			 */
			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		/* VERIFY: compare computed digest with the provided one */
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	} else {
		/* GENERATE: copy the digest out; fall back to an offset
		 * into m_dst when no digest pointer was supplied.
		 */
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}
2891 
2892 static int
2893 ccp_prepare_ops(struct ccp_qp *qp,
2894 		struct rte_crypto_op **op_d,
2895 		struct ccp_batch_info *b_info,
2896 		uint16_t nb_ops)
2897 {
2898 	int i, min_ops;
2899 	struct ccp_session *session;
2900 
2901 	EVP_MD_CTX *auth_ctx = NULL;
2902 
2903 	auth_ctx = EVP_MD_CTX_create();
2904 	if (unlikely(!auth_ctx)) {
2905 		CCP_LOG_ERR("Unable to create auth ctx");
2906 		return 0;
2907 	}
2908 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
2909 
2910 	for (i =  b_info->b_idx; i < min_ops; i++) {
2911 		op_d[i] = b_info->op[b_info->b_idx + b_info->op_idx++];
2912 		session = (struct ccp_session *)
2913 			op_d[i]->sym->session->driver_priv_data;
2914 		switch (session->cmd_id) {
2915 		case CCP_CMD_CIPHER:
2916 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2917 			break;
2918 		case CCP_CMD_AUTH:
2919 			if (session->auth_opt == 0)
2920 				ccp_auth_dq_prepare(op_d[i]);
2921 			break;
2922 		case CCP_CMD_CIPHER_HASH:
2923 			if (session->auth_opt)
2924 				cpu_crypto_auth(qp, op_d[i],
2925 						session, auth_ctx);
2926 			else
2927 				ccp_auth_dq_prepare(op_d[i]);
2928 			break;
2929 		case CCP_CMD_HASH_CIPHER:
2930 			if (session->auth_opt)
2931 				op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2932 			else
2933 				ccp_auth_dq_prepare(op_d[i]);
2934 			break;
2935 		case CCP_CMD_COMBINED:
2936 			ccp_auth_dq_prepare(op_d[i]);
2937 			break;
2938 		default:
2939 			CCP_LOG_ERR("Unsupported cmd_id");
2940 		}
2941 	}
2942 
2943 	EVP_MD_CTX_destroy(auth_ctx);
2944 	b_info->opcnt -= min_ops;
2945 	return min_ops;
2946 }
2947 
/**
 * Dequeue completed crypto operations from a CCP queue pair.
 *
 * Picks up the current batch — either the partially-drained one cached
 * in qp->b_info or the next one from the processed_pkts ring — checks
 * the hardware queue-head register to decide whether the batch's
 * descriptors have been fully consumed, and if so delivers the finished
 * ops to the caller via ccp_prepare_ops().
 *
 * @param qp		queue pair to drain
 * @param op		caller array to receive completed ops
 * @param nb_ops	capacity of @op
 * @param total_nb_ops	out: total number of ops in the batch
 * @return number of ops written to @op (0 if the batch is still running
 *	   or there is nothing to dequeue)
 */
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops,
		       uint16_t *total_nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	/* Resume a batch we already started draining, if any. */
	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;

	/* If every op was completed by CPU-side auth there are no
	 * hardware descriptors to wait for.
	 */
	if (b_info->auth_ctr == b_info->opcnt)
		goto success;
	*total_nb_ops = b_info->total_nb_ops;
	/* Read the engine's current head pointer to learn how far it
	 * has progressed through the descriptor queue.
	 */
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	/* The batch is still in flight while the head lies inside
	 * [head_offset, tail_offset); the two branches cover the
	 * non-wrapped and wrapped layouts of the circular queue.
	 */
	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			/* Cache the batch and retry on the next poll. */
			qp->b_info = b_info;
			return 0;
		}
	} else if (b_info->tail_offset != b_info->head_offset) {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}


success:
	*total_nb_ops = b_info->total_nb_ops;
	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
	/* Descriptors are consumed: return their slots to the queue. */
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	/* Keep the batch cached if ops remain undelivered, else recycle
	 * it to the batch mempool.
	 */
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}
3000