/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

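/*
 * Classify an rte_crypto_sym_xform chain into the CCP command order used by
 * the rest of this driver. A lone AUTH or CIPHER xform maps to CCP_CMD_AUTH
 * or CCP_CMD_CIPHER, an AUTH->CIPHER chain to CCP_CMD_HASH_CIPHER, a
 * CIPHER->AUTH chain to CCP_CMD_CIPHER_HASH, and an AEAD xform to
 * CCP_CMD_COMBINED; anything else is reported as CCP_CMD_NOT_SUPPORTED.
 */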
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}

/* configure session */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}

	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		/* store the key bytes in reverse order for the CCP engine */
		for (i = 0; i < sess->cipher.key_length; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	return 0;
}

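/*
 * Configure the authentication part of a session: record the digest length
 * and the generate/verify direction. The algorithm switch below only has a
 * default case, so any hash algorithm currently returns -ENOTSUP.
 */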
static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	switch (auth_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -ENOTSUP;
	}
	return 0;
}

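/*
 * Configure an AEAD session: record the key, cipher/auth direction, AAD and
 * digest lengths and IV parameters. As with auth, the algorithm switch only
 * has a default case, so every AEAD algorithm currently returns -ENOTSUP.
 */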
static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -ENOTSUP;
	}
	return 0;
}

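/*
 * Parse a transform chain and populate the CCP session: classify the chain
 * with ccp_get_cmd_id(), then configure the cipher, auth and/or AEAD parts
 * as required by the resulting command. Returns 0 on success or a negative
 * value when any transform is invalid or unsupported.
 */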
int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}

/* calculate CCP descriptors requirement */
static inline int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		count = 1;
		/**< only op */
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		count = 2;
		/**< op + passthrough for iv */
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}

static inline int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}

	return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}

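/*
 * Total number of CCP descriptors an operation on this session will consume;
 * cipher and auth requirements are summed for chained commands.
 */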
int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}

	return count;
}

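/*
 * Build a PASSTHRU descriptor in the queue ring. When pst->dir is non-zero
 * the data is copied from system memory into the queue's storage block (SB)
 * slot, otherwise from the SB slot back to system memory. The requested
 * byte-swap and bitwise options are encoded in the function field and the
 * queue write index is advanced; the tail register (doorbell) is written
 * later by the enqueue path.
 */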
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}

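/*
 * Build the descriptors for an AES cipher operation. For CBC and CTR the IV
 * is first staged in DMA-able memory (the session nonce area for CTR, the
 * per-batch LSB bounce buffer otherwise) and loaded into the queue's IV slot
 * with a passthrough descriptor; ECB skips this step. The AES descriptor
 * itself carries the source, destination and key addresses together with
 * the direction, mode and key-type function bits.
 */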
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	function.raw = 0;

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx * CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}

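/*
 * Dispatch a cipher operation to the descriptor builder for its algorithm
 * and account for the descriptors it consumes: two for CBC and CTR (the
 * extra IV passthrough), one for ECB.
 */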
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q,
		  struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 1;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -ENOTSUP;
	}

	return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -ENOTSUP;
	}
	return result;
}

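/*
 * Enqueue a burst of operations on a CCP queue. A batch_info object taken
 * from the queue pair's mempool tracks the burst: the descriptor ring head
 * and tail offsets, the number of descriptors consumed and the operations
 * themselves. Once all descriptors are built, the new tail offset is written
 * to the queue register and the queue is re-enabled with the cached control
 * word; the batch is then placed on the processed_pkts ring for the dequeue
 * side to poll. Returns the number of operations accepted.
 */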
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	/* reserve the descriptor slots needed for this burst */
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			/* return the slots not consumed by this burst */
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}

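/*
 * Post-process an authentication (or AEAD) operation at dequeue time. The
 * hardware leaves its hash context at the end of the source mbuf; depending
 * on the SHA type the digest may need a host-side byte swap. For verify
 * operations the computed digest is compared with the one supplied in the
 * op, for generate operations it is copied out to the digest location, and
 * the scratch context area is then trimmed off the mbuf.
 */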
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	} else {
		if (unlikely(digest_data == NULL))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}

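/*
 * Hand back up to nb_ops completed operations from a batch to the caller's
 * array, marking plain cipher operations successful and running the digest
 * post-processing for anything that involved authentication.
 */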
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}

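/*
 * Dequeue completed operations. The current hardware head offset is read
 * from the queue register and compared against the batch's head and tail
 * offsets; the two branches below handle a batch that wraps around the end
 * of the descriptor ring. If the head still lies inside the batch, nothing
 * is returned and the batch is remembered on the queue pair for the next
 * poll. Once complete, the descriptors are credited back to free_slots and
 * the batch_info object is released when all of its operations have been
 * delivered.
 */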
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}

success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}
757