/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

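/*
 * Map a symmetric crypto xform chain onto a CCP command order. A lone
 * auth or cipher xform maps to CCP_CMD_AUTH/CCP_CMD_CIPHER, a two-element
 * chain maps to CCP_CMD_HASH_CIPHER or CCP_CMD_CIPHER_HASH depending on
 * which element comes first, and an AEAD xform maps to CCP_CMD_COMBINED.
 */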
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}

/* configure session */
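/*
 * Fill the cipher half of a CCP session from the xform: direction, key,
 * IV offset/length, and the engine/mode/type selectors. The key is copied
 * into key_ccp with its bytes reversed (per 8-byte block for 3DES), which
 * appears to be the byte order the CCP engines expect; e.g. a 16-byte AES
 * key k[0]..k[15] is stored as key_ccp[15]..key_ccp[0].
 */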
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i, j, x;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_3DES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}

	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (i = 0; i < sess->cipher.key_length; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	case CCP_ENGINE_3DES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
			for (i = 0; i < 8; i++)
				sess->cipher.key_ccp[(8 + x) - i - 1] =
					sess->cipher.key[i + x];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	return 0;
}

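/*
 * Fill the auth half of a CCP session. At this revision no hash algorithm
 * is wired up yet: the switch below has only a default case, so every auth
 * xform is rejected with -ENOTSUP.
 */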
static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	switch (auth_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -ENOTSUP;
	}
	return 0;
}

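/*
 * Fill both halves of a CCP session from an AEAD xform (key, direction,
 * AAD/digest lengths, IV). As with auth, no AEAD algorithm is enabled at
 * this revision; the switch below rejects everything with -ENOTSUP.
 */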
static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -ENOTSUP;
	}
	return 0;
}

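/*
 * Entry point for session setup: classify the xform chain, then configure
 * the cipher, auth, and/or AEAD portions of the session as required. For a
 * chained cipher-then-auth session, for example, xform describes the cipher
 * and xform->next the auth, and both configure helpers run.
 */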
int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}

/* calculate CCP descriptors requirement */
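/*
 * The counts returned here must match the number of descriptors the
 * corresponding ccp_crypto_*()/ccp_perform_*() paths actually post, since
 * the enqueue side reserves queue slots up front from these totals (the
 * slots_req argument to process_ops_to_enqueue()).
 */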
static inline int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		count = 1;
		/**< only op */
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}

static inline int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}

	return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}

int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}

	return count;
}

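/*
 * Post a PASSTHRU descriptor that copies pst->len bytes between system
 * memory and the queue's SB (what appears to be the engine-local storage
 * block): dir != 0 moves system memory into the SB, dir == 0 moves SB
 * contents back out. Used below to stage IVs ahead of cipher descriptors.
 */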
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}

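/*
 * Build the descriptor(s) for one AES op. For CBC the IV, and for CTR the
 * session nonce plus IV, is first staged into the queue's SB IV slot with a
 * byte-swapping passthrough; ECB needs no IV and skips that step. The AES
 * descriptor itself then references source, destination, and key by
 * physical address. The tail doorbell is rung later, once per batch, by
 * process_ops_to_enqueue().
 */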
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	function.raw = 0;

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}

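/*
 * Build the descriptor(s) for one 3DES op. Only CBC is supported: the IV is
 * staged into the SB via passthrough, then the 3DES descriptor is posted.
 * Unlike the AES path, this routine writes the queue tail register and
 * re-arms the queue itself instead of leaving the doorbell to the batch
 * enqueue loop.
 */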
static int
ccp_perform_3des(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *iv;
	phys_addr_t src_addr, dest_addr, key_addr;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;

		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);

		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
		break;
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
		return -ENOTSUP;
	}

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						   op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;

	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	memset(desc, 0, Q_DESC_SIZE);

	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	rte_wmb();

	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}

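/*
 * Dispatch one cipher op to the right ccp_perform_*() helper and account
 * for the descriptors it consumes in b_info->desccnt (two for the modes
 * that need an IV passthrough, one for ECB).
 */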
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q,
		  struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 1;
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		result = ccp_perform_3des(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -ENOTSUP;
	}

	return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -ENOTSUP;
	}
	return result;
}

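/*
 * Enqueue path: grab a batch_info from the mempool, post descriptors for up
 * to nb_ops ops, then ring the tail doorbell once and re-enable the queue.
 * head_offset/tail_offset record the descriptor window so the dequeue side
 * can tell from the hardware head pointer whether the batch has completed.
 * On a per-op failure the loop stops and the unused reserved slots are
 * returned to free_slots.
 */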
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}

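/*
 * Post-process an auth/AEAD op after the hardware has run: locate the
 * digest the engine wrote in the scratch context area at the end of the
 * last mbuf segment, byte-swap it on the host for the SHA variants the
 * engine apparently emits in reverse order (everything beyond
 * SHA1/224/256), then either compare it against the supplied digest
 * (verify) or copy it out (generate), and trim the scratch area off the
 * mbuf.
 */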
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (unlikely(digest_data == NULL))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}

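/*
 * Hand back up to nb_ops completed ops from a batch, setting each op's
 * status (plain cipher ops succeed outright; anything with an auth
 * component goes through ccp_auth_dq_prepare()).
 */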
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}

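/*
 * Dequeue path: take the oldest batch (or resume a partially drained one),
 * read the hardware head pointer, and check whether it still lies inside
 * the batch's [head_offset, tail_offset) window, allowing for wrap-around.
 * If it does, the batch is still in flight and nothing is returned;
 * otherwise the ops are prepared for the application and the descriptor
 * slots are released back to free_slots.
 */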
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}

success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}