1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium, Inc
3  */
4 
5 #ifndef _CPT_UCODE_H_
6 #define _CPT_UCODE_H_
7 #include <stdbool.h>
8 
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
12 
13 /*
14  * This file defines the functions that interface with the
15  * microcode spec.
16  */
17 
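/*
 * Byte pairs of the 15-bit ZUC key-loading constants D0..D15
 * (high byte first), copied into the ZUC/SNOW3G context as the
 * zuc_const block consumed by the microcode.
 */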
18 static uint8_t zuc_d[32] = {
19 	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
23 };
24 
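/*
 * Convert the 16B SNOW 3G cipher key into four 32-bit words in the
 * reversed, byte-swapped layout expected in the context's ci_key field.
 */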
25 static __rte_always_inline void
26 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
27 {
28 	int i, base;
29 
30 	for (i = 0; i < 4; i++) {
31 		base = 4 * i;
32 		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
33 			(ck[base + 2] << 8) | (ck[base + 3]);
34 		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
35 	}
36 }
37 
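/*
 * Check that the requested digest length does not exceed the maximum
 * digest size of the selected auth algorithm.
 */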
38 static __rte_always_inline int
39 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
40 {
41 	uint16_t mac_len = auth->digest_length;
42 	int ret;
43 
44 	if ((auth->algo != RTE_CRYPTO_AUTH_NULL) && (mac_len == 0))
45 		return -1;
46 
47 	switch (auth->algo) {
48 	case RTE_CRYPTO_AUTH_MD5:
49 	case RTE_CRYPTO_AUTH_MD5_HMAC:
50 		ret = (mac_len <= 16) ? 0 : -1;
51 		break;
52 	case RTE_CRYPTO_AUTH_SHA1:
53 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
54 		ret = (mac_len <= 20) ? 0 : -1;
55 		break;
56 	case RTE_CRYPTO_AUTH_SHA224:
57 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
58 		ret = (mac_len <= 28) ? 0 : -1;
59 		break;
60 	case RTE_CRYPTO_AUTH_SHA256:
61 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
62 		ret = (mac_len <= 32) ? 0 : -1;
63 		break;
64 	case RTE_CRYPTO_AUTH_SHA384:
65 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
66 		ret = (mac_len <= 48) ? 0 : -1;
67 		break;
68 	case RTE_CRYPTO_AUTH_SHA512:
69 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
70 		ret = (mac_len <= 64) ? 0 : -1;
71 		break;
72 	case RTE_CRYPTO_AUTH_NULL:
73 		ret = 0;
74 		break;
75 	default:
76 		ret = -1;
77 	}
78 
79 	return ret;
80 }
81 
82 static __rte_always_inline void
83 cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
84 		   uint8_t *salt)
85 {
86 	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
87 	memcpy(fctx->enc.encr_iv, salt, 4);
88 }
89 
90 static __rte_always_inline int
91 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
92 {
93 	switch (key_len) {
94 	case 16:
95 	case 24:
96 	case 32:
97 		return 0;
98 	default:
99 		return -1;
100 	}
101 }
102 
103 static __rte_always_inline int
104 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
105 {
106 	int fc_type = 0;
107 	switch (type) {
108 	case PASSTHROUGH:
109 		fc_type = FC_GEN;
110 		break;
111 	case DES3_CBC:
112 	case DES3_ECB:
113 		fc_type = FC_GEN;
114 		break;
115 	case AES_CBC:
116 	case AES_ECB:
117 	case AES_CFB:
118 	case AES_CTR:
119 	case AES_GCM:
120 		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
121 			return -1;
122 		fc_type = FC_GEN;
123 		break;
124 	case CHACHA20:
125 		fc_type = FC_GEN;
126 		break;
127 	case AES_XTS:
128 		key_len = key_len / 2;
129 		if (unlikely(key_len == 24)) {
130 			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
131 			return -1;
132 		}
133 		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
134 			return -1;
135 		fc_type = FC_GEN;
136 		break;
137 	case ZUC_EEA3:
138 	case SNOW3G_UEA2:
139 		if (unlikely(key_len != 16))
140 			return -1;
141 		/* No support for AEAD yet */
142 		if (unlikely(ctx->hash_type))
143 			return -1;
144 		fc_type = ZUC_SNOW3G;
145 		break;
146 	case KASUMI_F8_CBC:
147 	case KASUMI_F8_ECB:
148 		if (unlikely(key_len != 16))
149 			return -1;
150 		/* No support for AEAD yet */
151 		if (unlikely(ctx->hash_type))
152 			return -1;
153 		fc_type = KASUMI;
154 		break;
155 	default:
156 		return -1;
157 	}
158 
159 	ctx->fc_type = fc_type;
160 	return 0;
161 }
162 
163 static __rte_always_inline void
164 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
165 {
166 	cpt_ctx->enc_cipher = 0;
167 	fctx->enc.enc_cipher = 0;
168 }
169 
170 static __rte_always_inline void
171 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
172 {
173 	mc_aes_type_t aes_key_type = 0;
174 	switch (key_len) {
175 	case 16:
176 		aes_key_type = AES_128_BIT;
177 		break;
178 	case 24:
179 		aes_key_type = AES_192_BIT;
180 		break;
181 	case 32:
182 		aes_key_type = AES_256_BIT;
183 		break;
184 	default:
185 		/* This should not happen */
186 		CPT_LOG_DP_ERR("Invalid AES key len");
187 		return;
188 	}
189 	fctx->enc.aes_key = aes_key_type;
190 }
191 
192 static __rte_always_inline void
193 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
194 		uint16_t key_len)
195 {
196 	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
197 	uint32_t keyx[4];
198 
199 	cpt_ctx->snow3g = 1;
200 	gen_key_snow3g(key, keyx);
201 	memcpy(zs_ctx->ci_key, keyx, key_len);
202 	cpt_ctx->zsk_flags = 0;
203 }
204 
205 static __rte_always_inline void
206 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
207 		uint16_t key_len)
208 {
209 	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
210 
211 	cpt_ctx->snow3g = 0;
212 	memcpy(zs_ctx->ci_key, key, key_len);
213 	memcpy(zs_ctx->zuc_const, zuc_d, 32);
214 	cpt_ctx->zsk_flags = 0;
215 }
216 
217 static __rte_always_inline void
218 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
219 		uint16_t key_len)
220 {
221 	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
222 
223 	cpt_ctx->k_ecb = 1;
224 	memcpy(k_ctx->ci_key, key, key_len);
225 	cpt_ctx->zsk_flags = 0;
226 }
227 
228 static __rte_always_inline void
229 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
230 		uint16_t key_len)
231 {
232 	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
233 
234 	memcpy(k_ctx->ci_key, key, key_len);
235 	cpt_ctx->zsk_flags = 0;
236 }
237 
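/*
 * Program the cipher key into the context in the layout required by the
 * selected algorithm group (FC_GEN, ZUC/SNOW3G or KASUMI).
 */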
238 static __rte_always_inline int
239 cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
240 		    const uint8_t *key, uint16_t key_len, uint8_t *salt)
241 {
242 	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
243 	int ret;
244 
245 	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
246 	if (unlikely(ret))
247 		return -1;
248 
249 	if (cpt_ctx->fc_type == FC_GEN) {
250 		/*
251 		 * We need to always say IV is from DPTR as user can
252 		 * sometimes override IV per operation.
253 		 */
254 		fctx->enc.iv_source = CPT_FROM_DPTR;
255 
256 		if (cpt_ctx->auth_key_len > 64)
257 			return -1;
258 	}
259 
260 	switch (type) {
261 	case PASSTHROUGH:
262 		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
263 		goto success;
264 	case DES3_CBC:
265 		/* CPT performs DES using 3DES with the 8B DES-key
266 		 * replicated 2 more times to match the 24B 3DES-key.
267 		 * E.g. if the original key is "0x0a 0x0b", the new key is
268 		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
269 		 */
270 		if (key_len == 8) {
271 			/* Skipping the first 8B as it will be copied
272 			 * in the regular code flow
273 			 */
274 			memcpy(fctx->enc.encr_key+key_len, key, key_len);
275 			memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
276 		}
277 		break;
278 	case DES3_ECB:
279 		/* For DES3_ECB, IV needs to be from CTX. */
280 		fctx->enc.iv_source = CPT_FROM_CTX;
281 		break;
282 	case AES_CBC:
283 	case AES_ECB:
284 	case AES_CFB:
285 	case AES_CTR:
286 	case CHACHA20:
287 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
288 		break;
289 	case AES_GCM:
290 		/* Even though iv source is from dptr,
291 		 * aes_gcm salt is taken from ctx
292 		 */
293 		if (salt) {
294 			memcpy(fctx->enc.encr_iv, salt, 4);
295 			/* Assuming it was just salt update
296 			 * and nothing else
297 			 */
298 			if (!key)
299 				goto success;
300 		}
301 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
302 		break;
303 	case AES_XTS:
304 		key_len = key_len / 2;
305 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
306 
307 		/* Copy key2 for XTS into ipad */
308 		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
309 		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
310 		break;
311 	case SNOW3G_UEA2:
312 		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
313 		goto success;
314 	case ZUC_EEA3:
315 		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
316 		goto success;
317 	case KASUMI_F8_ECB:
318 		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
319 		goto success;
320 	case KASUMI_F8_CBC:
321 		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
322 		goto success;
323 	default:
324 		return -1;
325 	}
326 
327 	/* Only for FC_GEN case */
328 
329 	/* For GMAC auth, cipher must be NULL */
330 	if (cpt_ctx->hash_type != GMAC_TYPE)
331 		fctx->enc.enc_cipher = type;
332 
333 	memcpy(fctx->enc.encr_key, key, key_len);
334 
335 success:
336 	cpt_ctx->enc_cipher = type;
337 
338 	return 0;
339 }
340 
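/*
 * Add one entry to the SG component list. Each sg_comp_t packs four
 * (length, pointer) pairs, stored big endian; 'i' is the running entry
 * index and the incremented value is returned.
 */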
341 static __rte_always_inline uint32_t
342 fill_sg_comp(sg_comp_t *list,
343 	     uint32_t i,
344 	     phys_addr_t dma_addr,
345 	     uint32_t size)
346 {
347 	sg_comp_t *to = &list[i>>2];
348 
349 	to->u.s.len[i%4] = rte_cpu_to_be_16(size);
350 	to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
351 	i++;
352 	return i;
353 }
354 
355 static __rte_always_inline uint32_t
356 fill_sg_comp_from_buf(sg_comp_t *list,
357 		      uint32_t i,
358 		      buf_ptr_t *from)
359 {
360 	sg_comp_t *to = &list[i>>2];
361 
362 	to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
363 	to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
364 	i++;
365 	return i;
366 }
367 
368 static __rte_always_inline uint32_t
369 fill_sg_comp_from_buf_min(sg_comp_t *list,
370 			  uint32_t i,
371 			  buf_ptr_t *from,
372 			  uint32_t *psize)
373 {
374 	sg_comp_t *to = &list[i >> 2];
375 	uint32_t size = *psize;
376 	uint32_t e_len;
377 
378 	e_len = (size > from->size) ? from->size : size;
379 	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
380 	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
381 	*psize -= e_len;
382 	i++;
383 	return i;
384 }
385 
386 /*
387  * Fill the SG I/O list expected by the microcode
388  * from the IOV given by the user.
389  */
390 static __rte_always_inline uint32_t
391 fill_sg_comp_from_iov(sg_comp_t *list,
392 		      uint32_t i,
393 		      iov_ptr_t *from, uint32_t from_offset,
394 		      uint32_t *psize, buf_ptr_t *extra_buf,
395 		      uint32_t extra_offset)
396 {
397 	int32_t j;
398 	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
399 	uint32_t size = *psize;
400 
401 	for (j = 0; (j < from->buf_cnt) && size; j++) {
402 		phys_addr_t dma_addr = from->bufs[j].dma_addr;
403 		uint32_t buf_sz = from->bufs[j].size;
404 		sg_comp_t *to = &list[i >> 2];
405 		phys_addr_t e_dma_addr;
406 		uint32_t e_len;
407 
408 		if (unlikely(from_offset)) {
409 			if (from_offset >= buf_sz) {
410 				from_offset -= buf_sz;
411 				continue;
412 			}
413 			e_dma_addr = dma_addr + from_offset;
414 			e_len = (size > (buf_sz - from_offset)) ?
415 				(buf_sz - from_offset) : size;
416 			from_offset = 0;
417 		} else {
418 			e_dma_addr = dma_addr;
419 			e_len = (size > buf_sz) ? buf_sz : size;
420 		}
421 
422 		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
423 		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
424 
425 		if (extra_len && (e_len >= extra_offset)) {
426 			/* Break the data at given offset */
427 			uint32_t next_len = e_len - extra_offset;
428 			phys_addr_t next_dma = e_dma_addr + extra_offset;
429 
430 			if (!extra_offset) {
431 				i--;
432 			} else {
433 				e_len = extra_offset;
434 				size -= e_len;
435 				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
436 			}
437 
438 			extra_len = RTE_MIN(extra_len, size);
439 			/* Insert extra data ptr */
440 			if (extra_len) {
441 				i++;
442 				to = &list[i >> 2];
443 				to->u.s.len[i % 4] =
444 					rte_cpu_to_be_16(extra_len);
445 				to->ptr[i % 4] =
446 					rte_cpu_to_be_64(extra_buf->dma_addr);
447 				size -= extra_len;
448 			}
449 
450 			next_len = RTE_MIN(next_len, size);
451 			/* insert the rest of the data */
452 			if (next_len) {
453 				i++;
454 				to = &list[i >> 2];
455 				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
456 				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
457 				size -= next_len;
458 			}
459 			extra_len = 0;
460 
461 		} else {
462 			size -= e_len;
463 		}
464 		if (extra_offset)
465 			extra_offset -= size;
466 		i++;
467 	}
468 
469 	*psize = size;
470 	return (uint32_t)i;
471 }
472 
473 static __rte_always_inline void
474 cpt_digest_gen_prep(uint32_t flags,
475 		    uint64_t d_lens,
476 		    digest_params_t *params,
477 		    void *op,
478 		    void **prep_req)
479 {
480 	struct cpt_request_info *req;
481 	uint32_t size, i;
482 	uint16_t data_len, mac_len, key_len;
483 	auth_type_t hash_type;
484 	buf_ptr_t *meta_p;
485 	struct cpt_ctx *ctx;
486 	sg_comp_t *gather_comp;
487 	sg_comp_t *scatter_comp;
488 	uint8_t *in_buffer;
489 	uint32_t g_size_bytes, s_size_bytes;
490 	uint64_t dptr_dma, rptr_dma;
491 	vq_cmd_word0_t vq_cmd_w0;
492 	void *c_vaddr, *m_vaddr;
493 	uint64_t c_dma, m_dma;
494 
495 	ctx = params->ctx_buf.vaddr;
496 	meta_p = &params->meta_buf;
497 
498 	m_vaddr = meta_p->vaddr;
499 	m_dma = meta_p->dma_addr;
500 
501 	/*
502 	 * Save initial space that followed app data for completion code &
503 	 * alternate completion code to fall in same cache line as app data
504 	 */
505 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
506 	m_dma += COMPLETION_CODE_SIZE;
507 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
508 		(uint8_t *)m_vaddr;
509 	c_vaddr = (uint8_t *)m_vaddr + size;
510 	c_dma = m_dma + size;
511 	size += sizeof(cpt_res_s_t);
512 
513 	m_vaddr = (uint8_t *)m_vaddr + size;
514 	m_dma += size;
515 
516 	req = m_vaddr;
517 
518 	size = sizeof(struct cpt_request_info);
519 	m_vaddr = (uint8_t *)m_vaddr + size;
520 	m_dma += size;
521 
522 	hash_type = ctx->hash_type;
523 	mac_len = ctx->mac_len;
524 	key_len = ctx->auth_key_len;
525 	data_len = AUTH_DLEN(d_lens);
526 
527 	/*GP op header */
528 	vq_cmd_w0.s.opcode.minor = 0;
529 	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8) | mac_len;
530 	if (ctx->hmac) {
531 		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
532 		vq_cmd_w0.s.param1 = key_len;
533 		vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
534 	} else {
535 		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
536 		vq_cmd_w0.s.param1 = 0;
537 		vq_cmd_w0.s.dlen = data_len;
538 	}
539 
540 	/* Only the NULL auth (no cipher) case enters this if */
541 	if (unlikely(!hash_type && !ctx->enc_cipher)) {
542 		vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
543 		/* Minor op is passthrough */
544 		vq_cmd_w0.s.opcode.minor = 0x03;
545 		/* Send out completion code only */
546 		vq_cmd_w0.s.param2 = 0x1;
547 	}
548 
549 	/* DPTR has SG list */
550 	in_buffer = m_vaddr;
551 	dptr_dma = m_dma;
552 
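	/*
	 * The first 8B of DPTR form the SG list header: words [0]/[1] are
	 * cleared, [2]/[3] later hold the gather/scatter component counts
	 * (big endian).
	 */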
553 	((uint16_t *)in_buffer)[0] = 0;
554 	((uint16_t *)in_buffer)[1] = 0;
555 
556 	/* TODO Add error check if space will be sufficient */
557 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
558 
559 	/*
560 	 * Input gather list
561 	 */
562 
563 	i = 0;
564 
565 	if (ctx->hmac) {
566 		uint64_t k_dma = ctx->auth_key_iova;
567 		/* Key */
568 		i = fill_sg_comp(gather_comp, i, k_dma,
569 				 RTE_ALIGN_CEIL(key_len, 8));
570 	}
571 
572 	/* input data */
573 	size = data_len;
574 	if (size) {
575 		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
576 					  0, &size, NULL, 0);
577 		if (unlikely(size)) {
578 			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
579 					 " by %dB", size);
580 			return;
581 		}
582 	} else {
583 		/*
584 		 * A zero-length gather entry still needs to be
585 		 * accounted for in case of hash & hmac
586 		 */
587 		i++;
588 	}
589 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
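	/* Round the entry count up to a multiple of 4 (entries per sg_comp_t) */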
590 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
591 
592 	/*
593 	 * Output Gather list
594 	 */
595 
596 	i = 0;
597 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
598 
599 	if (flags & VALID_MAC_BUF) {
600 		if (unlikely(params->mac_buf.size < mac_len)) {
601 			CPT_LOG_DP_ERR("Insufficient MAC size");
602 			return;
603 		}
604 
605 		size = mac_len;
606 		i = fill_sg_comp_from_buf_min(scatter_comp, i,
607 					      &params->mac_buf, &size);
608 	} else {
609 		size = mac_len;
610 		i = fill_sg_comp_from_iov(scatter_comp, i,
611 					  params->src_iov, data_len,
612 					  &size, NULL, 0);
613 		if (unlikely(size)) {
614 			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
615 				       " %dB", size);
616 			return;
617 		}
618 	}
619 
620 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
621 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
622 
623 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
624 
625 	/* This is DPTR len in case of SG mode */
626 	vq_cmd_w0.s.dlen = size;
627 
628 	m_vaddr = (uint8_t *)m_vaddr + size;
629 	m_dma += size;
630 
631 	/* cpt alternate completion address saved earlier */
632 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
633 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
634 	rptr_dma = c_dma - 8;
635 
636 	req->ist.ei1 = dptr_dma;
637 	req->ist.ei2 = rptr_dma;
638 
639 	/* 16 byte aligned cpt res address */
640 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
641 	*req->completion_addr = COMPLETION_CODE_INIT;
642 	req->comp_baddr  = c_dma;
643 
644 	/* Fill microcode part of instruction */
645 	req->ist.ei0 = vq_cmd_w0.u64;
646 
647 	req->op = op;
648 
649 	*prep_req = req;
650 	return;
651 }
652 
653 static __rte_always_inline void
654 cpt_enc_hmac_prep(uint32_t flags,
655 		  uint64_t d_offs,
656 		  uint64_t d_lens,
657 		  fc_params_t *fc_params,
658 		  void *op,
659 		  void **prep_req)
660 {
661 	uint32_t iv_offset = 0;
662 	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
663 	struct cpt_ctx *cpt_ctx;
664 	uint32_t cipher_type, hash_type;
665 	uint32_t mac_len, size;
666 	uint8_t iv_len = 16;
667 	struct cpt_request_info *req;
668 	buf_ptr_t *meta_p, *aad_buf = NULL;
669 	uint32_t encr_offset, auth_offset;
670 	uint32_t encr_data_len, auth_data_len, aad_len = 0;
671 	uint32_t passthrough_len = 0;
672 	void *m_vaddr, *offset_vaddr;
673 	uint64_t m_dma, offset_dma;
674 	vq_cmd_word0_t vq_cmd_w0;
675 	void *c_vaddr;
676 	uint64_t c_dma;
677 
678 	meta_p = &fc_params->meta_buf;
679 	m_vaddr = meta_p->vaddr;
680 	m_dma = meta_p->dma_addr;
681 
682 	encr_offset = ENCR_OFFSET(d_offs);
683 	auth_offset = AUTH_OFFSET(d_offs);
684 	encr_data_len = ENCR_DLEN(d_lens);
685 	auth_data_len = AUTH_DLEN(d_lens);
686 	if (unlikely(flags & VALID_AAD_BUF)) {
687 		/*
688 		 * We don't support both AAD
689 		 * and auth data separately
690 		 */
691 		auth_data_len = 0;
692 		auth_offset = 0;
693 		aad_len = fc_params->aad_buf.size;
694 		aad_buf = &fc_params->aad_buf;
695 	}
696 	cpt_ctx = fc_params->ctx_buf.vaddr;
697 	cipher_type = cpt_ctx->enc_cipher;
698 	hash_type = cpt_ctx->hash_type;
699 	mac_len = cpt_ctx->mac_len;
700 
701 	/*
702 	 * Save initial space that followed app data for completion code &
703 	 * alternate completion code to fall in same cache line as app data
704 	 */
705 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
706 	m_dma += COMPLETION_CODE_SIZE;
707 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
708 		(uint8_t *)m_vaddr;
709 
710 	c_vaddr = (uint8_t *)m_vaddr + size;
711 	c_dma = m_dma + size;
712 	size += sizeof(cpt_res_s_t);
713 
714 	m_vaddr = (uint8_t *)m_vaddr + size;
715 	m_dma += size;
716 
717 	/* start cpt request info struct at 8 byte boundary */
718 	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
719 		(uint8_t *)m_vaddr;
720 
721 	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
722 
723 	size += sizeof(struct cpt_request_info);
724 	m_vaddr = (uint8_t *)m_vaddr + size;
725 	m_dma += size;
726 
727 	if (unlikely(!(flags & VALID_IV_BUF))) {
728 		iv_len = 0;
729 		iv_offset = ENCR_IV_OFFSET(d_offs);
730 	}
731 
732 	if (unlikely(flags & VALID_AAD_BUF)) {
733 		/*
734 		 * When AAD is given, data above encr_offset is passed through
735 		 * Since AAD is given as separate pointer and not as offset,
736 		 * this is a special case as we need to fragment input data
737 		 * into passthrough + encr_data and then insert AAD in between.
738 		 */
739 		if (hash_type != GMAC_TYPE) {
740 			passthrough_len = encr_offset;
741 			auth_offset = passthrough_len + iv_len;
742 			encr_offset = passthrough_len + aad_len + iv_len;
743 			auth_data_len = aad_len + encr_data_len;
744 		} else {
745 			passthrough_len = 16 + aad_len;
746 			auth_offset = passthrough_len + iv_len;
747 			auth_data_len = aad_len;
748 		}
749 	} else {
750 		encr_offset += iv_len;
751 		auth_offset += iv_len;
752 	}
753 
754 	/* Encryption */
755 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
756 	vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_ENCRYPT;
757 	vq_cmd_w0.s.opcode.minor |= (cpt_ctx->auth_enc <<
758 					CPT_HMAC_FIRST_BIT_POS);
759 
760 	if (hash_type == GMAC_TYPE) {
761 		encr_offset = 0;
762 		encr_data_len = 0;
763 	}
764 
765 	auth_dlen = auth_offset + auth_data_len;
766 	enc_dlen = encr_data_len + encr_offset;
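	/*
	 * Round partial-block cipher lengths up to the cipher block size
	 * (8B for 3DES, 16B for AES), since these block ciphers operate
	 * on whole blocks.
	 */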
767 	if (unlikely(encr_data_len & 0xf)) {
768 		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
769 			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
770 					encr_offset;
771 		else if (likely((cipher_type == AES_CBC) ||
772 				(cipher_type == AES_ECB)))
773 			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 16) +
774 					encr_offset;
775 	}
776 
777 	if (unlikely(auth_dlen > enc_dlen)) {
778 		inputlen = auth_dlen;
779 		outputlen = auth_dlen + mac_len;
780 	} else {
781 		inputlen = enc_dlen;
782 		outputlen = enc_dlen + mac_len;
783 	}
784 
785 	if (cpt_ctx->auth_enc != 0)
786 		outputlen = enc_dlen;
787 
788 	/* GP op header */
789 	vq_cmd_w0.s.param1 = encr_data_len;
790 	vq_cmd_w0.s.param2 = auth_data_len;
791 	/*
792 	 * On 83XX, the IV & offset control word cannot be part of the
793 	 * instruction and must be placed in the data buffer, so we check
794 	 * whether head room is available and only then do the Direct mode
795 	 * processing
796 	 */
797 	if (likely((flags & SINGLE_BUF_INPLACE) &&
798 		   (flags & SINGLE_BUF_HEADTAILROOM))) {
799 		void *dm_vaddr = fc_params->bufs[0].vaddr;
800 		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
801 		/*
802 		 * This flag indicates that there are 24 bytes of head room and
803 		 * 8 bytes of tail room available, so that we get to do
804 		 * DIRECT MODE within this limitation
805 		 */
806 
807 		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
808 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
809 
810 		/* DPTR */
811 		req->ist.ei1 = offset_dma;
812 		/* RPTR should just exclude offset control word */
813 		req->ist.ei2 = dm_dma_addr - iv_len;
814 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
815 						    + outputlen - iv_len);
816 
817 		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
818 
819 		if (likely(iv_len)) {
820 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
821 						      + OFF_CTRL_LEN);
822 			uint64_t *src = fc_params->iv_buf;
823 			dest[0] = src[0];
824 			dest[1] = src[1];
825 		}
826 
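		/*
		 * Offset control word: encr_offset in bits 16..31, iv_offset
		 * in bits 8..15, auth_offset in bits 0..7, stored big endian.
		 */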
827 		*(uint64_t *)offset_vaddr =
828 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
829 				((uint64_t)iv_offset << 8) |
830 				((uint64_t)auth_offset));
831 
832 	} else {
833 		uint32_t i, g_size_bytes, s_size_bytes;
834 		uint64_t dptr_dma, rptr_dma;
835 		sg_comp_t *gather_comp;
836 		sg_comp_t *scatter_comp;
837 		uint8_t *in_buffer;
838 
839 		/* This falls under strict SG mode */
840 		offset_vaddr = m_vaddr;
841 		offset_dma = m_dma;
842 		size = OFF_CTRL_LEN + iv_len;
843 
844 		m_vaddr = (uint8_t *)m_vaddr + size;
845 		m_dma += size;
846 
847 		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
848 
849 		if (likely(iv_len)) {
850 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
851 						      + OFF_CTRL_LEN);
852 			uint64_t *src = fc_params->iv_buf;
853 			dest[0] = src[0];
854 			dest[1] = src[1];
855 		}
856 
857 		*(uint64_t *)offset_vaddr =
858 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
859 				((uint64_t)iv_offset << 8) |
860 				((uint64_t)auth_offset));
861 
862 		/* DPTR has SG list */
863 		in_buffer = m_vaddr;
864 		dptr_dma = m_dma;
865 
866 		((uint16_t *)in_buffer)[0] = 0;
867 		((uint16_t *)in_buffer)[1] = 0;
868 
869 		/* TODO Add error check if space will be sufficient */
870 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
871 
872 		/*
873 		 * Input Gather List
874 		 */
875 
876 		i = 0;
877 
878 		/* Offset control word that includes iv */
879 		i = fill_sg_comp(gather_comp, i, offset_dma,
880 				 OFF_CTRL_LEN + iv_len);
881 
882 		/* Add input data */
883 		size = inputlen - iv_len;
884 		if (likely(size)) {
885 			uint32_t aad_offset = aad_len ? passthrough_len : 0;
886 
887 			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
888 				i = fill_sg_comp_from_buf_min(gather_comp, i,
889 							      fc_params->bufs,
890 							      &size);
891 			} else {
892 				i = fill_sg_comp_from_iov(gather_comp, i,
893 							  fc_params->src_iov,
894 							  0, &size,
895 							  aad_buf, aad_offset);
896 			}
897 
898 			if (unlikely(size)) {
899 				CPT_LOG_DP_ERR("Insufficient buffer space,"
900 					       " size %d needed", size);
901 				return;
902 			}
903 		}
904 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
905 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
906 
907 		/*
908 		 * Output Scatter list
909 		 */
910 		i = 0;
911 		scatter_comp =
912 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
913 
914 		/* Add IV */
915 		if (likely(iv_len)) {
916 			i = fill_sg_comp(scatter_comp, i,
917 					 offset_dma + OFF_CTRL_LEN,
918 					 iv_len);
919 		}
920 
921 		/* output data or output data + digest*/
922 		if (unlikely(flags & VALID_MAC_BUF)) {
923 			size = outputlen - iv_len - mac_len;
924 			if (size) {
925 				uint32_t aad_offset =
926 					aad_len ? passthrough_len : 0;
927 
928 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
929 					i = fill_sg_comp_from_buf_min(
930 							scatter_comp,
931 							i,
932 							fc_params->bufs,
933 							&size);
934 				} else {
935 					i = fill_sg_comp_from_iov(scatter_comp,
936 							i,
937 							fc_params->dst_iov,
938 							0,
939 							&size,
940 							aad_buf,
941 							aad_offset);
942 				}
943 				if (unlikely(size)) {
944 					CPT_LOG_DP_ERR("Insufficient buffer"
945 						       " space, size %d needed",
946 						       size);
947 					return;
948 				}
949 			}
950 			/* mac_data */
951 			if (mac_len) {
952 				i = fill_sg_comp_from_buf(scatter_comp, i,
953 							  &fc_params->mac_buf);
954 			}
955 		} else {
956 			/* Output including mac */
957 			size = outputlen - iv_len;
958 			if (likely(size)) {
959 				uint32_t aad_offset =
960 					aad_len ? passthrough_len : 0;
961 
962 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
963 					i = fill_sg_comp_from_buf_min(
964 							scatter_comp,
965 							i,
966 							fc_params->bufs,
967 							&size);
968 				} else {
969 					i = fill_sg_comp_from_iov(scatter_comp,
970 							i,
971 							fc_params->dst_iov,
972 							0,
973 							&size,
974 							aad_buf,
975 							aad_offset);
976 				}
977 				if (unlikely(size)) {
978 					CPT_LOG_DP_ERR("Insufficient buffer"
979 						       " space, size %d needed",
980 						       size);
981 					return;
982 				}
983 			}
984 		}
985 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
986 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
987 
988 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
989 
990 		/* This is DPTR len in case of SG mode */
991 		vq_cmd_w0.s.dlen = size;
992 
993 		m_vaddr = (uint8_t *)m_vaddr + size;
994 		m_dma += size;
995 
996 		/* cpt alternate completion address saved earlier */
997 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
998 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
999 		rptr_dma = c_dma - 8;
1000 
1001 		req->ist.ei1 = dptr_dma;
1002 		req->ist.ei2 = rptr_dma;
1003 	}
1004 
1005 	if (unlikely((encr_offset >> 16) ||
1006 		     (iv_offset >> 8) ||
1007 		     (auth_offset >> 8))) {
1008 		CPT_LOG_DP_ERR("Offset not supported");
1009 		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1010 		CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1011 		CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1012 		return;
1013 	}
1014 
1015 	/* 16 byte aligned cpt res address */
1016 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1017 	*req->completion_addr = COMPLETION_CODE_INIT;
1018 	req->comp_baddr  = c_dma;
1019 
1020 	/* Fill microcode part of instruction */
1021 	req->ist.ei0 = vq_cmd_w0.u64;
1022 
1023 	req->op  = op;
1024 
1025 	*prep_req = req;
1026 	return;
1027 }
1028 
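/*
 * Decrypt (+ optional MAC verify) counterpart of cpt_enc_hmac_prep; the
 * MAC is consumed from the input instead of being produced, so the
 * input/output lengths are swapped accordingly.
 */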
1029 static __rte_always_inline void
1030 cpt_dec_hmac_prep(uint32_t flags,
1031 		  uint64_t d_offs,
1032 		  uint64_t d_lens,
1033 		  fc_params_t *fc_params,
1034 		  void *op,
1035 		  void **prep_req)
1036 {
1037 	uint32_t iv_offset = 0, size;
1038 	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1039 	struct cpt_ctx *cpt_ctx;
1040 	int32_t hash_type, mac_len;
1041 	uint8_t iv_len = 16;
1042 	struct cpt_request_info *req;
1043 	buf_ptr_t *meta_p, *aad_buf = NULL;
1044 	uint32_t encr_offset, auth_offset;
1045 	uint32_t encr_data_len, auth_data_len, aad_len = 0;
1046 	uint32_t passthrough_len = 0;
1047 	void *m_vaddr, *offset_vaddr;
1048 	uint64_t m_dma, offset_dma;
1049 	vq_cmd_word0_t vq_cmd_w0;
1050 	void *c_vaddr;
1051 	uint64_t c_dma;
1052 
1053 	meta_p = &fc_params->meta_buf;
1054 	m_vaddr = meta_p->vaddr;
1055 	m_dma = meta_p->dma_addr;
1056 
1057 	encr_offset = ENCR_OFFSET(d_offs);
1058 	auth_offset = AUTH_OFFSET(d_offs);
1059 	encr_data_len = ENCR_DLEN(d_lens);
1060 	auth_data_len = AUTH_DLEN(d_lens);
1061 
1062 	if (unlikely(flags & VALID_AAD_BUF)) {
1063 		/*
1064 		 * We don't support both AAD
1065 		 * and auth data separately
1066 		 */
1067 		auth_data_len = 0;
1068 		auth_offset = 0;
1069 		aad_len = fc_params->aad_buf.size;
1070 		aad_buf = &fc_params->aad_buf;
1071 	}
1072 
1073 	cpt_ctx = fc_params->ctx_buf.vaddr;
1074 	hash_type = cpt_ctx->hash_type;
1075 	mac_len = cpt_ctx->mac_len;
1076 
1077 	if (unlikely(!(flags & VALID_IV_BUF))) {
1078 		iv_len = 0;
1079 		iv_offset = ENCR_IV_OFFSET(d_offs);
1080 	}
1081 
1082 	if (unlikely(flags & VALID_AAD_BUF)) {
1083 		/*
1084 		 * When AAD is given, data above encr_offset is passed through
1085 		 * Since AAD is given as separate pointer and not as offset,
1086 		 * this is a special case as we need to fragment input data
1087 		 * into passthrough + encr_data and then insert AAD in between.
1088 		 */
1089 		if (hash_type != GMAC_TYPE) {
1090 			passthrough_len = encr_offset;
1091 			auth_offset = passthrough_len + iv_len;
1092 			encr_offset = passthrough_len + aad_len + iv_len;
1093 			auth_data_len = aad_len + encr_data_len;
1094 		} else {
1095 			passthrough_len = 16 + aad_len;
1096 			auth_offset = passthrough_len + iv_len;
1097 			auth_data_len = aad_len;
1098 		}
1099 	} else {
1100 		encr_offset += iv_len;
1101 		auth_offset += iv_len;
1102 	}
1103 
1104 	/*
1105 	 * Save initial space that followed app data for completion code &
1106 	 * alternate completion code to fall in same cache line as app data
1107 	 */
1108 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1109 	m_dma += COMPLETION_CODE_SIZE;
1110 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1111 	       (uint8_t *)m_vaddr;
1112 	c_vaddr = (uint8_t *)m_vaddr + size;
1113 	c_dma = m_dma + size;
1114 	size += sizeof(cpt_res_s_t);
1115 
1116 	m_vaddr = (uint8_t *)m_vaddr + size;
1117 	m_dma += size;
1118 
1119 	/* start cpt request info structure at 8 byte alignment */
1120 	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1121 		(uint8_t *)m_vaddr;
1122 
1123 	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1124 
1125 	size += sizeof(struct cpt_request_info);
1126 	m_vaddr = (uint8_t *)m_vaddr + size;
1127 	m_dma += size;
1128 
1129 	/* Decryption */
1130 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
1131 	vq_cmd_w0.s.opcode.minor = CPT_FC_MINOR_OP_DECRYPT;
1132 	vq_cmd_w0.s.opcode.minor |= (cpt_ctx->dec_auth <<
1133 					CPT_HMAC_FIRST_BIT_POS);
1134 
1135 	if (hash_type == GMAC_TYPE) {
1136 		encr_offset = 0;
1137 		encr_data_len = 0;
1138 	}
1139 
1140 	enc_dlen = encr_offset + encr_data_len;
1141 	auth_dlen = auth_offset + auth_data_len;
1142 
1143 	if (auth_dlen > enc_dlen) {
1144 		inputlen = auth_dlen + mac_len;
1145 		outputlen = auth_dlen;
1146 	} else {
1147 		inputlen = enc_dlen + mac_len;
1148 		outputlen = enc_dlen;
1149 	}
1150 
1151 	if (cpt_ctx->dec_auth != 0)
1152 		outputlen = inputlen = enc_dlen;
1153 
1154 	vq_cmd_w0.s.param1 = encr_data_len;
1155 	vq_cmd_w0.s.param2 = auth_data_len;
1156 
1157 	/*
1158 	 * On 83XX, the IV & offset control word cannot be part of the
1159 	 * instruction and must be placed in the data buffer, so we check
1160 	 * whether head room is available and only then do the Direct mode
1161 	 * processing
1162 	 */
1163 	if (likely((flags & SINGLE_BUF_INPLACE) &&
1164 		   (flags & SINGLE_BUF_HEADTAILROOM))) {
1165 		void *dm_vaddr = fc_params->bufs[0].vaddr;
1166 		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1167 		/*
1168 		 * This flag indicates that there are 24 bytes of head room and
1169 		 * 8 bytes of tail room available, so that we get to do
1170 		 * DIRECT MODE within this limitation
1171 		 */
1172 
1173 		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1174 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1175 		req->ist.ei1 = offset_dma;
1176 
1177 		/* RPTR should just exclude offset control word */
1178 		req->ist.ei2 = dm_dma_addr - iv_len;
1179 
1180 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1181 					outputlen - iv_len);
1182 		/* since this is decryption,
1183 		 * don't touch the content of
1184 		 * alternate ccode space as it contains
1185 		 * hmac.
1186 		 */
1187 
1188 		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1189 
1190 		if (likely(iv_len)) {
1191 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1192 						      OFF_CTRL_LEN);
1193 			uint64_t *src = fc_params->iv_buf;
1194 			dest[0] = src[0];
1195 			dest[1] = src[1];
1196 		}
1197 
1198 		if (unlikely((encr_offset >> 16) ||
1199 			     (iv_offset >> 8) ||
1200 			     (auth_offset >> 8))) {
1201 			CPT_LOG_DP_ERR("Offset not supported");
1202 			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1203 			CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1204 			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1205 			return;
1206 		}
1207 
1208 		*(uint64_t *)offset_vaddr =
1209 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1210 				((uint64_t)iv_offset << 8) |
1211 				((uint64_t)auth_offset));
1212 
1213 	} else {
1214 		uint64_t dptr_dma, rptr_dma;
1215 		uint32_t g_size_bytes, s_size_bytes;
1216 		sg_comp_t *gather_comp;
1217 		sg_comp_t *scatter_comp;
1218 		uint8_t *in_buffer;
1219 		uint8_t i = 0;
1220 
1221 		/* This falls under strict SG mode */
1222 		offset_vaddr = m_vaddr;
1223 		offset_dma = m_dma;
1224 		size = OFF_CTRL_LEN + iv_len;
1225 
1226 		m_vaddr = (uint8_t *)m_vaddr + size;
1227 		m_dma += size;
1228 
1229 		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1230 
1231 		if (likely(iv_len)) {
1232 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1233 						      OFF_CTRL_LEN);
1234 			uint64_t *src = fc_params->iv_buf;
1235 			dest[0] = src[0];
1236 			dest[1] = src[1];
1237 		}
1238 
1239 		if (unlikely((encr_offset >> 16) ||
1240 			     (iv_offset >> 8) ||
1241 			     (auth_offset >> 8))) {
1242 			CPT_LOG_DP_ERR("Offset not supported");
1243 			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1244 			CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
1245 			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1246 			return;
1247 		}
1248 
1249 		*(uint64_t *)offset_vaddr =
1250 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1251 				((uint64_t)iv_offset << 8) |
1252 				((uint64_t)auth_offset));
1253 
1254 		/* DPTR has SG list */
1255 		in_buffer = m_vaddr;
1256 		dptr_dma = m_dma;
1257 
1258 		((uint16_t *)in_buffer)[0] = 0;
1259 		((uint16_t *)in_buffer)[1] = 0;
1260 
1261 		/* TODO Add error check if space will be sufficient */
1262 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1263 
1264 		/*
1265 		 * Input Gather List
1266 		 */
1267 		i = 0;
1268 
1269 		/* Offset control word that includes iv */
1270 		i = fill_sg_comp(gather_comp, i, offset_dma,
1271 				 OFF_CTRL_LEN + iv_len);
1272 
1273 		/* Add input data */
1274 		if (flags & VALID_MAC_BUF) {
1275 			size = inputlen - iv_len - mac_len;
1276 			if (size) {
1277 				/* input data only */
1278 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1279 					i = fill_sg_comp_from_buf_min(
1280 							gather_comp, i,
1281 							fc_params->bufs,
1282 							&size);
1283 				} else {
1284 					uint32_t aad_offset = aad_len ?
1285 						passthrough_len : 0;
1286 
1287 					i = fill_sg_comp_from_iov(gather_comp,
1288 							i,
1289 							fc_params->src_iov,
1290 							0, &size,
1291 							aad_buf,
1292 							aad_offset);
1293 				}
1294 				if (unlikely(size)) {
1295 					CPT_LOG_DP_ERR("Insufficient buffer"
1296 						       " space, size %d needed",
1297 						       size);
1298 					return;
1299 				}
1300 			}
1301 
1302 			/* mac data */
1303 			if (mac_len) {
1304 				i = fill_sg_comp_from_buf(gather_comp, i,
1305 							  &fc_params->mac_buf);
1306 			}
1307 		} else {
1308 			/* input data + mac */
1309 			size = inputlen - iv_len;
1310 			if (size) {
1311 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1312 					i = fill_sg_comp_from_buf_min(
1313 							gather_comp, i,
1314 							fc_params->bufs,
1315 							&size);
1316 				} else {
1317 					uint32_t aad_offset = aad_len ?
1318 						passthrough_len : 0;
1319 
1320 					if (unlikely(!fc_params->src_iov)) {
1321 						CPT_LOG_DP_ERR("Bad input args");
1322 						return;
1323 					}
1324 
1325 					i = fill_sg_comp_from_iov(
1326 							gather_comp, i,
1327 							fc_params->src_iov,
1328 							0, &size,
1329 							aad_buf,
1330 							aad_offset);
1331 				}
1332 
1333 				if (unlikely(size)) {
1334 					CPT_LOG_DP_ERR("Insufficient buffer"
1335 						       " space, size %d needed",
1336 						       size);
1337 					return;
1338 				}
1339 			}
1340 		}
1341 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1342 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1343 
1344 		/*
1345 		 * Output Scatter List
1346 		 */
1347 
1348 		i = 0;
1349 		scatter_comp =
1350 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1351 
1352 		/* Add iv */
1353 		if (iv_len) {
1354 			i = fill_sg_comp(scatter_comp, i,
1355 					 offset_dma + OFF_CTRL_LEN,
1356 					 iv_len);
1357 		}
1358 
1359 		/* Add output data */
1360 		size = outputlen - iv_len;
1361 		if (size) {
1362 			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1363 				/* handle single buffer here */
1364 				i = fill_sg_comp_from_buf_min(scatter_comp, i,
1365 							      fc_params->bufs,
1366 							      &size);
1367 			} else {
1368 				uint32_t aad_offset = aad_len ?
1369 					passthrough_len : 0;
1370 
1371 				if (unlikely(!fc_params->dst_iov)) {
1372 					CPT_LOG_DP_ERR("Bad input args");
1373 					return;
1374 				}
1375 
1376 				i = fill_sg_comp_from_iov(scatter_comp, i,
1377 							  fc_params->dst_iov, 0,
1378 							  &size, aad_buf,
1379 							  aad_offset);
1380 			}
1381 
1382 			if (unlikely(size)) {
1383 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1384 					       " size %d needed", size);
1385 				return;
1386 			}
1387 		}
1388 
1389 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1390 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1391 
1392 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1393 
1394 		/* This is DPTR len in case of SG mode */
1395 		vq_cmd_w0.s.dlen = size;
1396 
1397 		m_vaddr = (uint8_t *)m_vaddr + size;
1398 		m_dma += size;
1399 
1400 		/* cpt alternate completion address saved earlier */
1401 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1402 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1403 		rptr_dma = c_dma - 8;
1404 		size += COMPLETION_CODE_SIZE;
1405 
1406 		req->ist.ei1 = dptr_dma;
1407 		req->ist.ei2 = rptr_dma;
1408 	}
1409 
1410 	/* 16 byte aligned cpt res address */
1411 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1412 	*req->completion_addr = COMPLETION_CODE_INIT;
1413 	req->comp_baddr  = c_dma;
1414 
1415 	/* Fill microcode part of instruction */
1416 	req->ist.ei0 = vq_cmd_w0.u64;
1417 
1418 	req->op = op;
1419 
1420 	*prep_req = req;
1421 	return;
1422 }
1423 
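/*
 * Prepare a ZUC/SNOW 3G cipher (EEA3/UEA2) or auth (EIA3/UIA2) request;
 * zsk_flags selects the mode and data lengths are given in bits.
 */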
1424 static __rte_always_inline void
1425 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1426 			uint64_t d_offs,
1427 			uint64_t d_lens,
1428 			fc_params_t *params,
1429 			void *op,
1430 			void **prep_req)
1431 {
1432 	uint32_t size;
1433 	int32_t inputlen, outputlen;
1434 	struct cpt_ctx *cpt_ctx;
1435 	uint32_t mac_len = 0;
1436 	uint8_t snow3g, j;
1437 	struct cpt_request_info *req;
1438 	buf_ptr_t *buf_p;
1439 	uint32_t encr_offset = 0, auth_offset = 0;
1440 	uint32_t encr_data_len = 0, auth_data_len = 0;
1441 	int flags, iv_len = 16;
1442 	void *m_vaddr, *c_vaddr;
1443 	uint64_t m_dma, c_dma, offset_ctrl;
1444 	uint64_t *offset_vaddr, offset_dma;
1445 	uint32_t *iv_s, iv[4];
1446 	vq_cmd_word0_t vq_cmd_w0;
1447 
1448 	buf_p = &params->meta_buf;
1449 	m_vaddr = buf_p->vaddr;
1450 	m_dma = buf_p->dma_addr;
1451 
1452 	cpt_ctx = params->ctx_buf.vaddr;
1453 	flags = cpt_ctx->zsk_flags;
1454 	mac_len = cpt_ctx->mac_len;
1455 	snow3g = cpt_ctx->snow3g;
1456 
1457 	/*
1458 	 * Save initial space that followed app data for completion code &
1459 	 * alternate completion code to fall in same cache line as app data
1460 	 */
1461 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1462 	m_dma += COMPLETION_CODE_SIZE;
1463 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1464 		(uint8_t *)m_vaddr;
1465 
1466 	c_vaddr = (uint8_t *)m_vaddr + size;
1467 	c_dma = m_dma + size;
1468 	size += sizeof(cpt_res_s_t);
1469 
1470 	m_vaddr = (uint8_t *)m_vaddr + size;
1471 	m_dma += size;
1472 
1473 	/* Reserve memory for cpt request info */
1474 	req = m_vaddr;
1475 
1476 	size = sizeof(struct cpt_request_info);
1477 	m_vaddr = (uint8_t *)m_vaddr + size;
1478 	m_dma += size;
1479 
1480 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1481 
1482 	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1483 
1484 	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1485 			  (0 << 3) | (flags & 0x7));
1486 
1487 	if (flags == 0x1) {
1488 		/*
1489 		 * Microcode expects offsets in bytes
1490 		 * TODO: Rounding off
1491 		 */
1492 		auth_data_len = AUTH_DLEN(d_lens);
1493 
1494 		/* EIA3 or UIA2 */
1495 		auth_offset = AUTH_OFFSET(d_offs);
1496 		auth_offset = auth_offset / 8;
1497 
1498 		/* consider iv len */
1499 		auth_offset += iv_len;
1500 
1501 		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1502 		outputlen = mac_len;
1503 
1504 		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1505 
1506 	} else {
1507 		/* EEA3 or UEA2 */
1508 		/*
1509 		 * Microcode expects offsets in bytes
1510 		 * TODO: Rounding off
1511 		 */
1512 		encr_data_len = ENCR_DLEN(d_lens);
1513 
1514 		encr_offset = ENCR_OFFSET(d_offs);
1515 		encr_offset = encr_offset / 8;
1516 		/* consider iv len */
1517 		encr_offset += iv_len;
1518 
1519 		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1520 		outputlen = inputlen;
1521 
1522 		/* iv offset is 0 */
1523 		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1524 	}
1525 
1526 	if (unlikely((encr_offset >> 16) ||
1527 		     (auth_offset >> 8))) {
1528 		CPT_LOG_DP_ERR("Offset not supported");
1529 		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1530 		CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
1531 		return;
1532 	}
1533 
1534 	/* IV */
1535 	iv_s = (flags == 0x1) ? params->auth_iv_buf :
1536 		params->iv_buf;
1537 
1538 	if (snow3g) {
1539 		/*
1540 		 * DPDK seems to provide it in the form IV3 IV2 IV1 IV0
1541 		 * and big endian; MC needs it as IV0 IV1 IV2 IV3
1542 		 */
1543 
1544 		for (j = 0; j < 4; j++)
1545 			iv[j] = iv_s[3 - j];
1546 	} else {
1547 		/* ZUC doesn't need a swap */
1548 		for (j = 0; j < 4; j++)
1549 			iv[j] = iv_s[j];
1550 	}
1551 
1552 	/*
1553 	 * GP op header, lengths are expected in bits.
1554 	 */
1555 	vq_cmd_w0.s.param1 = encr_data_len;
1556 	vq_cmd_w0.s.param2 = auth_data_len;
1557 
1558 	/*
1559 	 * On 83XX, the IV & offset control word cannot be part of the
1560 	 * instruction and must be placed in the data buffer, so we check
1561 	 * whether head room is available and only then do the Direct mode
1562 	 * processing
1563 	 */
1564 	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1565 		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1566 		void *dm_vaddr = params->bufs[0].vaddr;
1567 		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1568 		/*
1569 		 * This flag indicates that there are 24 bytes of head room and
1570 		 * 8 bytes of tail room available, so that we get to do
1571 		 * DIRECT MODE within this limitation
1572 		 */
1573 
1574 		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1575 					    OFF_CTRL_LEN - iv_len);
1576 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1577 
1578 		/* DPTR */
1579 		req->ist.ei1 = offset_dma;
1580 		/* RPTR should just exclude offset control word */
1581 		req->ist.ei2 = dm_dma_addr - iv_len;
1582 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1583 						    + outputlen - iv_len);
1584 
1585 		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1586 
1587 		if (likely(iv_len)) {
1588 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1589 						      + OFF_CTRL_LEN);
1590 			memcpy(iv_d, iv, 16);
1591 		}
1592 
1593 		*offset_vaddr = offset_ctrl;
1594 	} else {
1595 		uint32_t i, g_size_bytes, s_size_bytes;
1596 		uint64_t dptr_dma, rptr_dma;
1597 		sg_comp_t *gather_comp;
1598 		sg_comp_t *scatter_comp;
1599 		uint8_t *in_buffer;
1600 		uint32_t *iv_d;
1601 
1602 		/* save space for iv */
1603 		offset_vaddr = m_vaddr;
1604 		offset_dma = m_dma;
1605 
1606 		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1607 		m_dma += OFF_CTRL_LEN + iv_len;
1608 
1609 		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1610 
1611 		/* DPTR has SG list */
1612 		in_buffer = m_vaddr;
1613 		dptr_dma = m_dma;
1614 
1615 		((uint16_t *)in_buffer)[0] = 0;
1616 		((uint16_t *)in_buffer)[1] = 0;
1617 
1618 		/* TODO Add error check if space will be sufficient */
1619 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1620 
1621 		/*
1622 		 * Input Gather List
1623 		 */
1624 		i = 0;
1625 
1626 		/* Offset control word followed by iv */
1627 
1628 		i = fill_sg_comp(gather_comp, i, offset_dma,
1629 				 OFF_CTRL_LEN + iv_len);
1630 
1631 		/* iv offset is 0 */
1632 		*offset_vaddr = offset_ctrl;
1633 
1634 		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1635 		memcpy(iv_d, iv, 16);
1636 
1637 		/* input data */
1638 		size = inputlen - iv_len;
1639 		if (size) {
1640 			i = fill_sg_comp_from_iov(gather_comp, i,
1641 						  params->src_iov,
1642 						  0, &size, NULL, 0);
1643 			if (unlikely(size)) {
1644 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1645 					       " size %d needed", size);
1646 				return;
1647 			}
1648 		}
1649 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1650 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1651 
1652 		/*
1653 		 * Output Scatter List
1654 		 */
1655 
1656 		i = 0;
1657 		scatter_comp =
1658 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1659 
1660 		if (flags == 0x1) {
1661 			/* IV in SLIST only for EEA3 & UEA2 */
1662 			iv_len = 0;
1663 		}
1664 
1665 		if (iv_len) {
1666 			i = fill_sg_comp(scatter_comp, i,
1667 					 offset_dma + OFF_CTRL_LEN, iv_len);
1668 		}
1669 
1670 		/* Add output data */
1671 		if (req_flags & VALID_MAC_BUF) {
1672 			size = outputlen - iv_len - mac_len;
1673 			if (size) {
1674 				i = fill_sg_comp_from_iov(scatter_comp, i,
1675 							  params->dst_iov, 0,
1676 							  &size, NULL, 0);
1677 
1678 				if (unlikely(size)) {
1679 					CPT_LOG_DP_ERR("Insufficient buffer space,"
1680 						       " size %d needed", size);
1681 					return;
1682 				}
1683 			}
1684 
1685 			/* mac data */
1686 			if (mac_len) {
1687 				i = fill_sg_comp_from_buf(scatter_comp, i,
1688 							  &params->mac_buf);
1689 			}
1690 		} else {
1691 			/* Output including mac */
1692 			size = outputlen - iv_len;
1693 			if (size) {
1694 				i = fill_sg_comp_from_iov(scatter_comp, i,
1695 							  params->dst_iov, 0,
1696 							  &size, NULL, 0);
1697 
1698 				if (unlikely(size)) {
1699 					CPT_LOG_DP_ERR("Insufficient buffer space,"
1700 						       " size %d needed", size);
1701 					return;
1702 				}
1703 			}
1704 		}
1705 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1706 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1707 
1708 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1709 
1710 		/* This is DPTR len in case of SG mode */
1711 		vq_cmd_w0.s.dlen = size;
1712 
1713 		m_vaddr = (uint8_t *)m_vaddr + size;
1714 		m_dma += size;
1715 
1716 		/* cpt alternate completion address saved earlier */
1717 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1718 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1719 		rptr_dma = c_dma - 8;
1720 
1721 		req->ist.ei1 = dptr_dma;
1722 		req->ist.ei2 = rptr_dma;
1723 	}
1724 
1725 	/* 16 byte aligned cpt res address */
1726 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1727 	*req->completion_addr = COMPLETION_CODE_INIT;
1728 	req->comp_baddr  = c_dma;
1729 
1730 	/* Fill microcode part of instruction */
1731 	req->ist.ei0 = vq_cmd_w0.u64;
1732 
1733 	req->op = op;
1734 
1735 	*prep_req = req;
1736 	return;
1737 }
1738 
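/*
 * Prepare a ZUC/SNOW 3G decipher request; only the cipher (EEA3/UEA2)
 * path is handled here, mirroring cpt_zuc_snow3g_enc_prep without the
 * auth branches.
 */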
1739 static __rte_always_inline void
1740 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1741 			uint64_t d_offs,
1742 			uint64_t d_lens,
1743 			fc_params_t *params,
1744 			void *op,
1745 			void **prep_req)
1746 {
1747 	uint32_t size;
1748 	int32_t inputlen = 0, outputlen;
1749 	struct cpt_ctx *cpt_ctx;
1750 	uint8_t snow3g, iv_len = 16;
1751 	struct cpt_request_info *req;
1752 	buf_ptr_t *buf_p;
1753 	uint32_t encr_offset;
1754 	uint32_t encr_data_len;
1755 	int flags;
1756 	void *m_vaddr, *c_vaddr;
1757 	uint64_t m_dma, c_dma;
1758 	uint64_t *offset_vaddr, offset_dma;
1759 	uint32_t *iv_s, iv[4], j;
1760 	vq_cmd_word0_t vq_cmd_w0;
1761 
1762 	buf_p = &params->meta_buf;
1763 	m_vaddr = buf_p->vaddr;
1764 	m_dma = buf_p->dma_addr;
1765 
1766 	/*
1767 	 * Microcode expects offsets in bytes
1768 	 * TODO: Rounding off
1769 	 */
1770 	encr_offset = ENCR_OFFSET(d_offs) / 8;
1771 	encr_data_len = ENCR_DLEN(d_lens);
1772 
1773 	cpt_ctx = params->ctx_buf.vaddr;
1774 	flags = cpt_ctx->zsk_flags;
1775 	snow3g = cpt_ctx->snow3g;
1776 	/*
1777 	 * Save initial space that followed app data for completion code &
1778 	 * alternate completion code to fall in same cache line as app data
1779 	 */
1780 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1781 	m_dma += COMPLETION_CODE_SIZE;
1782 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1783 		(uint8_t *)m_vaddr;
1784 
1785 	c_vaddr = (uint8_t *)m_vaddr + size;
1786 	c_dma = m_dma + size;
1787 	size += sizeof(cpt_res_s_t);
1788 
1789 	m_vaddr = (uint8_t *)m_vaddr + size;
1790 	m_dma += size;
1791 
1792 	/* Reserve memory for cpt request info */
1793 	req = m_vaddr;
1794 
1795 	size = sizeof(struct cpt_request_info);
1796 	m_vaddr = (uint8_t *)m_vaddr + size;
1797 	m_dma += size;
1798 
1799 	vq_cmd_w0.u64 = 0;
1800 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1801 
1802 	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1803 
1804 	vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1805 			  (0 << 3) | (flags & 0x7));
1806 
1807 	/* consider iv len */
1808 	encr_offset += iv_len;
1809 
1810 	inputlen = encr_offset +
1811 		(RTE_ALIGN(encr_data_len, 8) / 8);
1812 	outputlen = inputlen;
1813 
1814 	/* IV */
1815 	iv_s = params->iv_buf;
1816 	if (snow3g) {
1817 		/*
1818 		 * DPDK seems to provide it in the form IV3 IV2 IV1 IV0
1819 		 * and big endian; MC needs it as IV0 IV1 IV2 IV3
1820 		 */
1821 
1822 		for (j = 0; j < 4; j++)
1823 			iv[j] = iv_s[3 - j];
1824 	} else {
1825 		/* ZUC doesn't need a swap */
1826 		for (j = 0; j < 4; j++)
1827 			iv[j] = iv_s[j];
1828 	}
1829 
1830 	/*
1831 	 * GP op header, lengths are expected in bits.
1832 	 */
1833 	vq_cmd_w0.s.param1 = encr_data_len;
1834 
1835 	/*
1836 	 * On 83XX, the IV & offset control word cannot be part of the
1837 	 * instruction and must be placed in the data buffer, so we check
1838 	 * whether head room is available and only then do the Direct mode
1839 	 * processing
1840 	 */
1841 	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1842 		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1843 		void *dm_vaddr = params->bufs[0].vaddr;
1844 		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1845 		/*
1846 		 * This flag indicates that there are 24 bytes of head room and
1847 		 * 8 bytes of tail room available, so that we get to do
1848 		 * DIRECT MODE within this limitation
1849 		 */
1850 
1851 		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1852 					    OFF_CTRL_LEN - iv_len);
1853 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1854 
1855 		/* DPTR */
1856 		req->ist.ei1 = offset_dma;
1857 		/* RPTR should just exclude offset control word */
1858 		req->ist.ei2 = dm_dma_addr - iv_len;
1859 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1860 						    + outputlen - iv_len);
1861 
1862 		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1863 
1864 		if (likely(iv_len)) {
1865 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1866 						      + OFF_CTRL_LEN);
1867 			memcpy(iv_d, iv, 16);
1868 		}
1869 
1870 		/* iv offset is 0 */
1871 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1872 	} else {
1873 		uint32_t i, g_size_bytes, s_size_bytes;
1874 		uint64_t dptr_dma, rptr_dma;
1875 		sg_comp_t *gather_comp;
1876 		sg_comp_t *scatter_comp;
1877 		uint8_t *in_buffer;
1878 		uint32_t *iv_d;
1879 
1880 		/* save space for offset and iv... */
1881 		offset_vaddr = m_vaddr;
1882 		offset_dma = m_dma;
1883 
1884 		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1885 		m_dma += OFF_CTRL_LEN + iv_len;
1886 
1887 		vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
1888 
1889 		/* DPTR has SG list */
1890 		in_buffer = m_vaddr;
1891 		dptr_dma = m_dma;
1892 
1893 		((uint16_t *)in_buffer)[0] = 0;
1894 		((uint16_t *)in_buffer)[1] = 0;
1895 
1896 		/* TODO Add error check if space will be sufficient */
1897 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1898 
1899 		/*
1900 		 * Input Gather List
1901 		 */
1902 		i = 0;
1903 
1904 		/* Offset control word */
1905 
1906 		/* iv offset is 0 */
1907 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1908 
1909 		i = fill_sg_comp(gather_comp, i, offset_dma,
1910 				 OFF_CTRL_LEN + iv_len);
1911 
1912 		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1913 		memcpy(iv_d, iv, 16);
1914 
1915 		/* Add input data */
1916 		size = inputlen - iv_len;
1917 		if (size) {
1918 			i = fill_sg_comp_from_iov(gather_comp, i,
1919 						  params->src_iov,
1920 						  0, &size, NULL, 0);
1921 			if (unlikely(size)) {
1922 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1923 					       " size %d needed", size);
1924 				return;
1925 			}
1926 		}
1927 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1928 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1929 
1930 		/*
1931 		 * Output Scatter List
1932 		 */
1933 
1934 		i = 0;
1935 		scatter_comp =
1936 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1937 
1938 		/* IV */
1939 		i = fill_sg_comp(scatter_comp, i,
1940 				 offset_dma + OFF_CTRL_LEN,
1941 				 iv_len);
1942 
1943 		/* Add output data */
1944 		size = outputlen - iv_len;
1945 		if (size) {
1946 			i = fill_sg_comp_from_iov(scatter_comp, i,
1947 						  params->dst_iov, 0,
1948 						  &size, NULL, 0);
1949 
1950 			if (unlikely(size)) {
1951 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1952 					       " size %d needed", size);
1953 				return;
1954 			}
1955 		}
1956 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1957 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1958 
1959 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1960 
1961 		/* This is DPTR len in case of SG mode */
1962 		vq_cmd_w0.s.dlen = size;
1963 
1964 		m_vaddr = (uint8_t *)m_vaddr + size;
1965 		m_dma += size;
1966 
1967 		/* cpt alternate completion address saved earlier */
1968 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1969 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1970 		rptr_dma = c_dma - 8;
1971 
1972 		req->ist.ei1 = dptr_dma;
1973 		req->ist.ei2 = rptr_dma;
1974 	}
1975 
1976 	if (unlikely((encr_offset >> 16))) {
1977 		CPT_LOG_DP_ERR("Offset not supported");
1978 		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
1979 		return;
1980 	}
1981 
1982 	/* 16 byte aligned cpt res address */
1983 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1984 	*req->completion_addr = COMPLETION_CODE_INIT;
1985 	req->comp_baddr  = c_dma;
1986 
1987 	/* Fill microcode part of instruction */
1988 	req->ist.ei0 = vq_cmd_w0.u64;
1989 
1990 	req->op = op;
1991 
1992 	*prep_req = req;
1993 	return;
1994 }
1995 
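/*
 * cpt_kasumi_enc_prep() builds a KASUMI F8/F9 instruction.  The packed
 * d_offs/d_lens words carry the cipher offset/length in their upper halves
 * and the auth offset/length in their lower halves, with offsets and lengths
 * given in bits for the wireless algorithms (see fill_fc_params() later in
 * this file).  Minimal sketch of the packing a caller is assumed to do; the
 * cip_* and auth_* names are illustrative only:
 *
 *	uint64_t d_offs = ((uint64_t)cip_off_bits << 16) | auth_off_bits;
 *	uint64_t d_lens = ((uint64_t)cip_len_bits << 32) | auth_len_bits;
 *
 * ENCR_OFFSET()/AUTH_OFFSET() and ENCR_DLEN()/AUTH_DLEN() below undo this
 * packing; the offsets are then converted from bits to bytes (/ 8).
 */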
1996 static __rte_always_inline void
1997 cpt_kasumi_enc_prep(uint32_t req_flags,
1998 		    uint64_t d_offs,
1999 		    uint64_t d_lens,
2000 		    fc_params_t *params,
2001 		    void *op,
2002 		    void **prep_req)
2003 {
2004 	uint32_t size;
2005 	int32_t inputlen = 0, outputlen = 0;
2006 	struct cpt_ctx *cpt_ctx;
2007 	uint32_t mac_len = 0;
2008 	uint8_t i = 0;
2009 	struct cpt_request_info *req;
2010 	buf_ptr_t *buf_p;
2011 	uint32_t encr_offset, auth_offset;
2012 	uint32_t encr_data_len, auth_data_len;
2013 	int flags;
2014 	uint8_t *iv_s, *iv_d, iv_len = 8;
2015 	uint8_t dir = 0;
2016 	void *m_vaddr, *c_vaddr;
2017 	uint64_t m_dma, c_dma;
2018 	uint64_t *offset_vaddr, offset_dma;
2019 	vq_cmd_word0_t vq_cmd_w0;
2020 	uint8_t *in_buffer;
2021 	uint32_t g_size_bytes, s_size_bytes;
2022 	uint64_t dptr_dma, rptr_dma;
2023 	sg_comp_t *gather_comp;
2024 	sg_comp_t *scatter_comp;
2025 
2026 	buf_p = &params->meta_buf;
2027 	m_vaddr = buf_p->vaddr;
2028 	m_dma = buf_p->dma_addr;
2029 
2030 	encr_offset = ENCR_OFFSET(d_offs) / 8;
2031 	auth_offset = AUTH_OFFSET(d_offs) / 8;
2032 	encr_data_len = ENCR_DLEN(d_lens);
2033 	auth_data_len = AUTH_DLEN(d_lens);
2034 
2035 	cpt_ctx = params->ctx_buf.vaddr;
2036 	flags = cpt_ctx->zsk_flags;
2037 	mac_len = cpt_ctx->mac_len;
2038 
2039 	if (flags == 0x0)
2040 		iv_s = params->iv_buf;
2041 	else
2042 		iv_s = params->auth_iv_buf;
2043 
2044 	dir = iv_s[8] & 0x1;
2045 
2046 	/*
2047 	 * Save initial space that followed app data for completion code &
2048 	 * alternate completion code to fall in same cache line as app data
2049 	 */
2050 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2051 	m_dma += COMPLETION_CODE_SIZE;
2052 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2053 		(uint8_t *)m_vaddr;
2054 
2055 	c_vaddr = (uint8_t *)m_vaddr + size;
2056 	c_dma = m_dma + size;
2057 	size += sizeof(cpt_res_s_t);
2058 
2059 	m_vaddr = (uint8_t *)m_vaddr + size;
2060 	m_dma += size;
2061 
2062 	/* Reserve memory for cpt request info */
2063 	req = m_vaddr;
2064 
2065 	size = sizeof(struct cpt_request_info);
2066 	m_vaddr = (uint8_t *)m_vaddr + size;
2067 	m_dma += size;
2068 
2069 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2070 
2071 	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2072 	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2073 			  (dir << 4) | (0 << 3) | (flags & 0x7));
2074 
2075 	/*
2076 	 * GP op header, lengths are expected in bits.
2077 	 */
2078 	vq_cmd_w0.s.param1 = encr_data_len;
2079 	vq_cmd_w0.s.param2 = auth_data_len;
2080 
2081 	/* consider iv len */
2082 	if (flags == 0x0) {
2083 		encr_offset += iv_len;
2084 		auth_offset += iv_len;
2085 	}
2086 
2087 	/* save space for offset ctrl and iv */
2088 	offset_vaddr = m_vaddr;
2089 	offset_dma = m_dma;
2090 
2091 	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2092 	m_dma += OFF_CTRL_LEN + iv_len;
2093 
2094 	/* DPTR has SG list */
2095 	in_buffer = m_vaddr;
2096 	dptr_dma = m_dma;
2097 
2098 	((uint16_t *)in_buffer)[0] = 0;
2099 	((uint16_t *)in_buffer)[1] = 0;
2100 
2101 	/* TODO Add error check if space will be sufficient */
2102 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2103 
2104 	/*
2105 	 * Input Gather List
2106 	 */
2107 	i = 0;
2108 
2109 	/* Offset control word followed by iv */
2110 
2111 	if (flags == 0x0) {
2112 		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2113 		outputlen = inputlen;
2114 		/* iv offset is 0 */
2115 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2116 		if (unlikely((encr_offset >> 16))) {
2117 			CPT_LOG_DP_ERR("Offset not supported");
2118 			CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
2119 			return;
2120 		}
2121 	} else {
2122 		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2123 		outputlen = mac_len;
2124 		/* iv offset is 0 */
2125 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2126 		if (unlikely((auth_offset >> 8))) {
2127 			CPT_LOG_DP_ERR("Offset not supported");
2128 			CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
2129 			return;
2130 		}
2131 	}
2132 
2133 	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2134 
2135 	/* IV */
2136 	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2137 	memcpy(iv_d, iv_s, iv_len);
2138 
2139 	/* input data */
2140 	size = inputlen - iv_len;
2141 	if (size) {
2142 		i = fill_sg_comp_from_iov(gather_comp, i,
2143 					  params->src_iov, 0,
2144 					  &size, NULL, 0);
2145 
2146 		if (unlikely(size)) {
2147 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2148 				       " size %d needed", size);
2149 			return;
2150 		}
2151 	}
2152 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2153 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2154 
2155 	/*
2156 	 * Output Scatter List
2157 	 */
2158 
2159 	i = 0;
2160 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2161 
2162 	if (flags == 0x1) {
2163 		/* IV in SLIST only for F8 */
2164 		iv_len = 0;
2165 	}
2166 
2167 	/* IV */
2168 	if (iv_len) {
2169 		i = fill_sg_comp(scatter_comp, i,
2170 				 offset_dma + OFF_CTRL_LEN,
2171 				 iv_len);
2172 	}
2173 
2174 	/* Add output data */
2175 	if (req_flags & VALID_MAC_BUF) {
2176 		size = outputlen - iv_len - mac_len;
2177 		if (size) {
2178 			i = fill_sg_comp_from_iov(scatter_comp, i,
2179 						  params->dst_iov, 0,
2180 						  &size, NULL, 0);
2181 
2182 			if (unlikely(size)) {
2183 				CPT_LOG_DP_ERR("Insufficient buffer space,"
2184 					       " size %d needed", size);
2185 				return;
2186 			}
2187 		}
2188 
2189 		/* mac data */
2190 		if (mac_len) {
2191 			i = fill_sg_comp_from_buf(scatter_comp, i,
2192 						  &params->mac_buf);
2193 		}
2194 	} else {
2195 		/* Output including mac */
2196 		size = outputlen - iv_len;
2197 		if (size) {
2198 			i = fill_sg_comp_from_iov(scatter_comp, i,
2199 						  params->dst_iov, 0,
2200 						  &size, NULL, 0);
2201 
2202 			if (unlikely(size)) {
2203 				CPT_LOG_DP_ERR("Insufficient buffer space,"
2204 					       " size %d needed", size);
2205 				return;
2206 			}
2207 		}
2208 	}
2209 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2210 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2211 
2212 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2213 
2214 	/* This is DPTR len in case of SG mode */
2215 	vq_cmd_w0.s.dlen = size;
2216 
2217 	m_vaddr = (uint8_t *)m_vaddr + size;
2218 	m_dma += size;
2219 
2220 	/* cpt alternate completion address saved earlier */
2221 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2222 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2223 	rptr_dma = c_dma - 8;
2224 
2225 	req->ist.ei1 = dptr_dma;
2226 	req->ist.ei2 = rptr_dma;
2227 
2228 	/* 16 byte aligned cpt res address */
2229 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2230 	*req->completion_addr = COMPLETION_CODE_INIT;
2231 	req->comp_baddr  = c_dma;
2232 
2233 	/* Fill microcode part of instruction */
2234 	req->ist.ei0 = vq_cmd_w0.u64;
2235 
2236 	req->op = op;
2237 
2238 	*prep_req = req;
2239 	return;
2240 }
2241 
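/*
 * cpt_kasumi_dec_prep() is the decrypt-side counterpart: only F8 (cipher)
 * is handled, so the IV always comes from params->iv_buf and no MAC is
 * produced.  The 8-byte offset control word written to *offset_vaddr uses
 * the same layout as in the encrypt path; an illustrative view, assuming
 * only the encrypt-offset field is populated here:
 *
 *	uint64_t ctrl = (uint64_t)encr_offset << 16;	// bits [31:16]
 *	*offset_vaddr = rte_cpu_to_be_64(ctrl);
 */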
2242 static __rte_always_inline void
2243 cpt_kasumi_dec_prep(uint64_t d_offs,
2244 		    uint64_t d_lens,
2245 		    fc_params_t *params,
2246 		    void *op,
2247 		    void **prep_req)
2248 {
2249 	uint32_t size;
2250 	int32_t inputlen = 0, outputlen;
2251 	struct cpt_ctx *cpt_ctx;
2252 	uint8_t i = 0, iv_len = 8;
2253 	struct cpt_request_info *req;
2254 	buf_ptr_t *buf_p;
2255 	uint32_t encr_offset;
2256 	uint32_t encr_data_len;
2257 	int flags;
2258 	uint8_t dir = 0;
2259 	void *m_vaddr, *c_vaddr;
2260 	uint64_t m_dma, c_dma;
2261 	uint64_t *offset_vaddr, offset_dma;
2262 	vq_cmd_word0_t vq_cmd_w0;
2263 	uint8_t *in_buffer;
2264 	uint32_t g_size_bytes, s_size_bytes;
2265 	uint64_t dptr_dma, rptr_dma;
2266 	sg_comp_t *gather_comp;
2267 	sg_comp_t *scatter_comp;
2268 
2269 	buf_p = &params->meta_buf;
2270 	m_vaddr = buf_p->vaddr;
2271 	m_dma = buf_p->dma_addr;
2272 
2273 	encr_offset = ENCR_OFFSET(d_offs) / 8;
2274 	encr_data_len = ENCR_DLEN(d_lens);
2275 
2276 	cpt_ctx = params->ctx_buf.vaddr;
2277 	flags = cpt_ctx->zsk_flags;
2278 	/*
2279 	 * Save initial space that followed app data for completion code &
2280 	 * alternate completion code to fall in same cache line as app data
2281 	 */
2282 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2283 	m_dma += COMPLETION_CODE_SIZE;
2284 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2285 		(uint8_t *)m_vaddr;
2286 
2287 	c_vaddr = (uint8_t *)m_vaddr + size;
2288 	c_dma = m_dma + size;
2289 	size += sizeof(cpt_res_s_t);
2290 
2291 	m_vaddr = (uint8_t *)m_vaddr + size;
2292 	m_dma += size;
2293 
2294 	/* Reserve memory for cpt request info */
2295 	req = m_vaddr;
2296 
2297 	size = sizeof(struct cpt_request_info);
2298 	m_vaddr = (uint8_t *)m_vaddr + size;
2299 	m_dma += size;
2300 
2301 	vq_cmd_w0.u64 = 0;
2302 	vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2303 
2304 	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2305 	vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2306 			  (dir << 4) | (0 << 3) | (flags & 0x7));
2307 
2308 	/*
2309 	 * GP op header, lengths are expected in bits.
2310 	 */
2311 	vq_cmd_w0.s.param1 = encr_data_len;
2312 
2313 	/* consider iv len */
2314 	encr_offset += iv_len;
2315 
2316 	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2317 	outputlen = inputlen;
2318 
2319 	/* save space for offset ctrl & iv */
2320 	offset_vaddr = m_vaddr;
2321 	offset_dma = m_dma;
2322 
2323 	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2324 	m_dma += OFF_CTRL_LEN + iv_len;
2325 
2326 	/* DPTR has SG list */
2327 	in_buffer = m_vaddr;
2328 	dptr_dma = m_dma;
2329 
2330 	((uint16_t *)in_buffer)[0] = 0;
2331 	((uint16_t *)in_buffer)[1] = 0;
2332 
2333 	/* TODO Add error check if space will be sufficient */
2334 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2335 
2336 	/*
2337 	 * Input Gather List
2338 	 */
2339 	i = 0;
2340 
2341 	/* Offset control word followed by iv */
2342 	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2343 	if (unlikely((encr_offset >> 16))) {
2344 		CPT_LOG_DP_ERR("Offset not supported");
2345 		CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
2346 		return;
2347 	}
2348 
2349 	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2350 
2351 	/* IV */
2352 	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2353 	       params->iv_buf, iv_len);
2354 
2355 	/* Add input data */
2356 	size = inputlen - iv_len;
2357 	if (size) {
2358 		i = fill_sg_comp_from_iov(gather_comp, i,
2359 					  params->src_iov,
2360 					  0, &size, NULL, 0);
2361 		if (unlikely(size)) {
2362 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2363 				       " size %d needed", size);
2364 			return;
2365 		}
2366 	}
2367 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2368 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2369 
2370 	/*
2371 	 * Output Scatter List
2372 	 */
2373 
2374 	i = 0;
2375 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2376 
2377 	/* IV */
2378 	i = fill_sg_comp(scatter_comp, i,
2379 			 offset_dma + OFF_CTRL_LEN,
2380 			 iv_len);
2381 
2382 	/* Add output data */
2383 	size = outputlen - iv_len;
2384 	if (size) {
2385 		i = fill_sg_comp_from_iov(scatter_comp, i,
2386 					  params->dst_iov, 0,
2387 					  &size, NULL, 0);
2388 		if (unlikely(size)) {
2389 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2390 				       " size %d needed", size);
2391 			return;
2392 		}
2393 	}
2394 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2395 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2396 
2397 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2398 
2399 	/* This is DPTR len in case of SG mode */
2400 	vq_cmd_w0.s.dlen = size;
2401 
2402 	m_vaddr = (uint8_t *)m_vaddr + size;
2403 	m_dma += size;
2404 
2405 	/* cpt alternate completion address saved earlier */
2406 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2407 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2408 	rptr_dma = c_dma - 8;
2409 
2410 	req->ist.ei1 = dptr_dma;
2411 	req->ist.ei2 = rptr_dma;
2412 
2413 	/* 16 byte aligned cpt res address */
2414 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2415 	*req->completion_addr = COMPLETION_CODE_INIT;
2416 	req->comp_baddr  = c_dma;
2417 
2418 	/* Fill microcode part of instruction */
2419 	req->ist.ei0 = vq_cmd_w0.u64;
2420 
2421 	req->op = op;
2422 
2423 	*prep_req = req;
2424 	return;
2425 }
2426 
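/*
 * The two dispatchers below select the prep routine from ctx->fc_type and
 * return the prepared instruction, or NULL when the request could not be
 * prepared.  Sketch of the caller-side check (mirroring fill_fc_params()
 * later in this file):
 *
 *	void *req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, op);
 *	if (unlikely(req == NULL))
 *		return -EINVAL;
 */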
2427 static __rte_always_inline void *
2428 cpt_fc_dec_hmac_prep(uint32_t flags,
2429 		     uint64_t d_offs,
2430 		     uint64_t d_lens,
2431 		     fc_params_t *fc_params,
2432 		     void *op)
2433 {
2434 	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2435 	uint8_t fc_type;
2436 	void *prep_req = NULL;
2437 
2438 	fc_type = ctx->fc_type;
2439 
2440 	if (likely(fc_type == FC_GEN)) {
2441 		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2442 				  &prep_req);
2443 	} else if (fc_type == ZUC_SNOW3G) {
2444 		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2445 					&prep_req);
2446 	} else if (fc_type == KASUMI) {
2447 		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2448 	}
2449 
2450 	/*
2451 	 * For the AUTH_ONLY case, the microcode only supports digest
2452 	 * generation; verification has to be done in software with
2453 	 * memcmp()
2454 	 */
2455 
2456 	return prep_req;
2457 }
2458 
2459 static __rte_always_inline void *__rte_hot
2460 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2461 		     fc_params_t *fc_params, void *op)
2462 {
2463 	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2464 	uint8_t fc_type;
2465 	void *prep_req = NULL;
2466 
2467 	fc_type = ctx->fc_type;
2468 
2469 	/* Common api for rest of the ops */
2470 	if (likely(fc_type == FC_GEN)) {
2471 		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2472 				  &prep_req);
2473 	} else if (fc_type == ZUC_SNOW3G) {
2474 		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2475 					&prep_req);
2476 	} else if (fc_type == KASUMI) {
2477 		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2478 				    &prep_req);
2479 	} else if (fc_type == HASH_HMAC) {
2480 		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2481 	}
2482 
2483 	return prep_req;
2484 }
2485 
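/*
 * cpt_fc_auth_set_key() programs the hash half of the session context.
 * Sketch of a call as made from fill_sess_auth() further below; key,
 * key_len and digest_len stand for the xform fields and are illustrative
 * names, not new API:
 *
 *	if (cpt_fc_auth_set_key(SESS_PRIV(sess), SHA1_TYPE,
 *				key, key_len, digest_len))
 *		return -1;
 *
 * For the wireless MACs (ZUC_EIA3 through KASUMI_F9_ECB) the key must be
 * 16 bytes and the MAC length is fixed to 4 bytes by this function.
 */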
2486 static __rte_always_inline int
2487 cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
2488 		    const uint8_t *key, uint16_t key_len, uint16_t mac_len)
2489 {
2490 	mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
2491 	mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
2492 	mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
2493 
2494 	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2495 		uint32_t keyx[4];
2496 
2497 		if (key_len != 16)
2498 			return -1;
2499 		/* No support for AEAD yet */
2500 		if (cpt_ctx->enc_cipher)
2501 			return -1;
2502 		/* For ZUC/SNOW3G/Kasumi */
2503 		switch (type) {
2504 		case SNOW3G_UIA2:
2505 			cpt_ctx->snow3g = 1;
2506 			gen_key_snow3g(key, keyx);
2507 			memcpy(zs_ctx->ci_key, keyx, key_len);
2508 			cpt_ctx->fc_type = ZUC_SNOW3G;
2509 			cpt_ctx->zsk_flags = 0x1;
2510 			break;
2511 		case ZUC_EIA3:
2512 			cpt_ctx->snow3g = 0;
2513 			memcpy(zs_ctx->ci_key, key, key_len);
2514 			memcpy(zs_ctx->zuc_const, zuc_d, 32);
2515 			cpt_ctx->fc_type = ZUC_SNOW3G;
2516 			cpt_ctx->zsk_flags = 0x1;
2517 			break;
2518 		case KASUMI_F9_ECB:
2519 			/* Kasumi ECB mode */
2520 			cpt_ctx->k_ecb = 1;
2521 			memcpy(k_ctx->ci_key, key, key_len);
2522 			cpt_ctx->fc_type = KASUMI;
2523 			cpt_ctx->zsk_flags = 0x1;
2524 			break;
2525 		case KASUMI_F9_CBC:
2526 			memcpy(k_ctx->ci_key, key, key_len);
2527 			cpt_ctx->fc_type = KASUMI;
2528 			cpt_ctx->zsk_flags = 0x1;
2529 			break;
2530 		default:
2531 			return -1;
2532 		}
2533 		cpt_ctx->mac_len = 4;
2534 		cpt_ctx->hash_type = type;
2535 		return 0;
2536 	}
2537 
2538 	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2539 		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2540 			cpt_ctx->fc_type = HASH_HMAC;
2541 	}
2542 
2543 	if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
2544 		return -1;
2545 
2546 	/* For GMAC auth, cipher must be NULL */
2547 	if (type == GMAC_TYPE)
2548 		fctx->enc.enc_cipher = 0;
2549 
2550 	fctx->enc.hash_type = cpt_ctx->hash_type = type;
2551 	fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
2552 
2553 	if (key_len) {
2554 		cpt_ctx->hmac = 1;
2555 
2556 		cpt_ctx->auth_key = rte_zmalloc(NULL, key_len, 8);
2557 		if (cpt_ctx->auth_key == NULL)
2558 			return -1;
2559 
2560 		cpt_ctx->auth_key_iova = rte_mem_virt2iova(cpt_ctx->auth_key);
2561 		memcpy(cpt_ctx->auth_key, key, key_len);
2562 		cpt_ctx->auth_key_len = key_len;
2563 		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2564 		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2565 
2566 		if (key_len <= 64)
2567 			memcpy(fctx->hmac.opad, key, key_len);
2568 		fctx->enc.auth_input_type = 1;
2569 	}
2570 	return 0;
2571 }
2572 
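/*
 * The fill_sess_*() helpers below translate one rte_crypto_sym_xform into
 * the CPT session context.  Selecting the right helper is the caller's job
 * (outside this file); a purely illustrative sketch of that selection:
 *
 *	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
 *		ret = fill_sess_aead(xform, sess);
 *	else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 *		ret = fill_sess_cipher(xform, sess);
 *	else if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
 *		ret = fill_sess_gmac(xform, sess);
 *	else
 *		ret = fill_sess_auth(xform, sess);
 */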
2573 static __rte_always_inline int
2574 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2575 		 struct cpt_sess_misc *sess)
2576 {
2577 	struct rte_crypto_aead_xform *aead_form;
2578 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2579 	auth_type_t auth_type = 0; /* NULL Auth type */
2580 	uint32_t cipher_key_len = 0;
2581 	uint8_t aes_gcm = 0;
2582 	aead_form = &xform->aead;
2583 	void *ctx = SESS_PRIV(sess);
2584 
2585 	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
2586 		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2587 		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2588 	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
2589 		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2590 		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2591 	} else {
2592 		CPT_LOG_DP_ERR("Unknown aead operation");
2593 		return -1;
2594 	}
2595 	switch (aead_form->algo) {
2596 	case RTE_CRYPTO_AEAD_AES_GCM:
2597 		enc_type = AES_GCM;
2598 		cipher_key_len = 16;
2599 		aes_gcm = 1;
2600 		break;
2601 	case RTE_CRYPTO_AEAD_AES_CCM:
2602 		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2603 			       aead_form->algo);
2604 		return -1;
2605 	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
2606 		enc_type = CHACHA20;
2607 		auth_type = POLY1305;
2608 		cipher_key_len = 32;
2609 		sess->chacha_poly = 1;
2610 		break;
2611 	default:
2612 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2613 			       aead_form->algo);
2614 		return -1;
2615 	}
2616 	if (aead_form->key.length < cipher_key_len) {
2617 		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2618 			       (unsigned long)aead_form->key.length);
2619 		return -1;
2620 	}
2621 	sess->zsk_flag = 0;
2622 	sess->aes_gcm = aes_gcm;
2623 	sess->mac_len = aead_form->digest_length;
2624 	sess->iv_offset = aead_form->iv.offset;
2625 	sess->iv_length = aead_form->iv.length;
2626 	sess->aad_length = aead_form->aad_length;
2627 
2628 	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2629 			aead_form->key.length, NULL)))
2630 		return -1;
2631 
2632 	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
2633 			aead_form->digest_length)))
2634 		return -1;
2635 
2636 	return 0;
2637 }
2638 
2639 static __rte_always_inline int
2640 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2641 		 struct cpt_sess_misc *sess)
2642 {
2643 	struct rte_crypto_cipher_xform *c_form;
2644 	struct cpt_ctx *ctx = SESS_PRIV(sess);
2645 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2646 	uint32_t cipher_key_len = 0;
2647 	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
2648 
2649 	c_form = &xform->cipher;
2650 
2651 	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2652 		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2653 	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2654 		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2655 		if (xform->next != NULL &&
2656 		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2657 			/* Perform decryption followed by auth verify */
2658 			ctx->dec_auth = 1;
2659 		}
2660 	} else {
2661 		CPT_LOG_DP_ERR("Unknown cipher operation");
2662 		return -1;
2663 	}
2664 
2665 	switch (c_form->algo) {
2666 	case RTE_CRYPTO_CIPHER_AES_CBC:
2667 		enc_type = AES_CBC;
2668 		cipher_key_len = 16;
2669 		break;
2670 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2671 		enc_type = DES3_CBC;
2672 		cipher_key_len = 24;
2673 		break;
2674 	case RTE_CRYPTO_CIPHER_DES_CBC:
2675 		/* DES is implemented using 3DES in hardware */
2676 		enc_type = DES3_CBC;
2677 		cipher_key_len = 8;
2678 		break;
2679 	case RTE_CRYPTO_CIPHER_AES_CTR:
2680 		enc_type = AES_CTR;
2681 		cipher_key_len = 16;
2682 		aes_ctr = 1;
2683 		break;
2684 	case RTE_CRYPTO_CIPHER_NULL:
2685 		enc_type = 0;
2686 		is_null = 1;
2687 		break;
2688 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2689 		enc_type = KASUMI_F8_ECB;
2690 		cipher_key_len = 16;
2691 		zsk_flag = K_F8;
2692 		break;
2693 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2694 		enc_type = SNOW3G_UEA2;
2695 		cipher_key_len = 16;
2696 		zsk_flag = ZS_EA;
2697 		break;
2698 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2699 		enc_type = ZUC_EEA3;
2700 		cipher_key_len = 16;
2701 		zsk_flag = ZS_EA;
2702 		break;
2703 	case RTE_CRYPTO_CIPHER_AES_XTS:
2704 		enc_type = AES_XTS;
2705 		cipher_key_len = 16;
2706 		break;
2707 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2708 		enc_type = DES3_ECB;
2709 		cipher_key_len = 24;
2710 		break;
2711 	case RTE_CRYPTO_CIPHER_AES_ECB:
2712 		enc_type = AES_ECB;
2713 		cipher_key_len = 16;
2714 		break;
2715 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2716 	case RTE_CRYPTO_CIPHER_AES_F8:
2717 	case RTE_CRYPTO_CIPHER_ARC4:
2718 		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2719 			       c_form->algo);
2720 		return -1;
2721 	default:
2722 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2723 			       c_form->algo);
2724 		return -1;
2725 	}
2726 
2727 	if (c_form->key.length < cipher_key_len) {
2728 		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2729 			       (unsigned long) c_form->key.length);
2730 		return -1;
2731 	}
2732 
2733 	sess->zsk_flag = zsk_flag;
2734 	sess->aes_gcm = 0;
2735 	sess->aes_ctr = aes_ctr;
2736 	sess->iv_offset = c_form->iv.offset;
2737 	sess->iv_length = c_form->iv.length;
2738 	sess->is_null = is_null;
2739 
2740 	if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
2741 			c_form->key.data, c_form->key.length, NULL)))
2742 		return -1;
2743 
2744 	return 0;
2745 }
2746 
2747 static __rte_always_inline int
2748 fill_sess_auth(struct rte_crypto_sym_xform *xform,
2749 	       struct cpt_sess_misc *sess)
2750 {
2751 	struct cpt_ctx *ctx = SESS_PRIV(sess);
2752 	struct rte_crypto_auth_xform *a_form;
2753 	auth_type_t auth_type = 0; /* NULL Auth type */
2754 	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
2755 
2756 	if (xform->next != NULL &&
2757 	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2758 	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2759 		/* Perform auth followed by encryption */
2760 		ctx->auth_enc = 1;
2761 	}
2762 
2763 	a_form = &xform->auth;
2764 
2765 	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2766 		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2767 	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2768 		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2769 	else {
2770 		CPT_LOG_DP_ERR("Unknown auth operation");
2771 		return -1;
2772 	}
2773 
2774 	switch (a_form->algo) {
2775 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2776 		/* Fall through */
2777 	case RTE_CRYPTO_AUTH_SHA1:
2778 		auth_type = SHA1_TYPE;
2779 		break;
2780 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2781 	case RTE_CRYPTO_AUTH_SHA256:
2782 		auth_type = SHA2_SHA256;
2783 		break;
2784 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2785 	case RTE_CRYPTO_AUTH_SHA512:
2786 		auth_type = SHA2_SHA512;
2787 		break;
2788 	case RTE_CRYPTO_AUTH_AES_GMAC:
2789 		auth_type = GMAC_TYPE;
2790 		aes_gcm = 1;
2791 		break;
2792 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2793 	case RTE_CRYPTO_AUTH_SHA224:
2794 		auth_type = SHA2_SHA224;
2795 		break;
2796 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2797 	case RTE_CRYPTO_AUTH_SHA384:
2798 		auth_type = SHA2_SHA384;
2799 		break;
2800 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2801 	case RTE_CRYPTO_AUTH_MD5:
2802 		auth_type = MD5_TYPE;
2803 		break;
2804 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2805 		auth_type = KASUMI_F9_ECB;
2806 		/*
2807 		 * Indicate that the direction bit has to be extracted
2808 		 * from the end of the source buffer
2809 		 */
2810 		zsk_flag = K_F9;
2811 		break;
2812 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2813 		auth_type = SNOW3G_UIA2;
2814 		zsk_flag = ZS_IA;
2815 		break;
2816 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2817 		auth_type = ZUC_EIA3;
2818 		zsk_flag = ZS_IA;
2819 		break;
2820 	case RTE_CRYPTO_AUTH_NULL:
2821 		auth_type = 0;
2822 		is_null = 1;
2823 		break;
2824 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2825 	case RTE_CRYPTO_AUTH_AES_CMAC:
2826 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2827 		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
2828 			       a_form->algo);
2829 		return -1;
2830 	default:
2831 		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
2832 			       a_form->algo);
2833 		return -1;
2834 	}
2835 
2836 	sess->zsk_flag = zsk_flag;
2837 	sess->aes_gcm = aes_gcm;
2838 	sess->mac_len = a_form->digest_length;
2839 	sess->is_null = is_null;
2840 	if (zsk_flag) {
2841 		sess->auth_iv_offset = a_form->iv.offset;
2842 		sess->auth_iv_length = a_form->iv.length;
2843 	}
2844 	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
2845 			a_form->key.data, a_form->key.length,
2846 			a_form->digest_length)))
2847 		return -1;
2848 
2849 	return 0;
2850 }
2851 
2852 static __rte_always_inline int
2853 fill_sess_gmac(struct rte_crypto_sym_xform *xform,
2854 		 struct cpt_sess_misc *sess)
2855 {
2856 	struct rte_crypto_auth_xform *a_form;
2857 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2858 	auth_type_t auth_type = 0; /* NULL Auth type */
2859 	void *ctx = SESS_PRIV(sess);
2860 
2861 	a_form = &xform->auth;
2862 
2863 	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2864 		sess->cpt_op |= CPT_OP_ENCODE;
2865 	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2866 		sess->cpt_op |= CPT_OP_DECODE;
2867 	else {
2868 		CPT_LOG_DP_ERR("Unknown auth operation");
2869 		return -1;
2870 	}
2871 
2872 	switch (a_form->algo) {
2873 	case RTE_CRYPTO_AUTH_AES_GMAC:
2874 		enc_type = AES_GCM;
2875 		auth_type = GMAC_TYPE;
2876 		break;
2877 	default:
2878 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2879 			       a_form->algo);
2880 		return -1;
2881 	}
2882 
2883 	sess->zsk_flag = 0;
2884 	sess->aes_gcm = 0;
2885 	sess->is_gmac = 1;
2886 	sess->iv_offset = a_form->iv.offset;
2887 	sess->iv_length = a_form->iv.length;
2888 	sess->mac_len = a_form->digest_length;
2889 
2890 	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
2891 			a_form->key.length, NULL)))
2892 		return -1;
2893 
2894 	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
2895 			a_form->digest_length)))
2896 		return -1;
2897 
2898 	return 0;
2899 }
2900 
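/*
 * alloc_op_meta() places per-op metadata either in the tailroom of a
 * single-segment source mbuf (fast path, unless CPT_ALWAYS_USE_SEPARATE_BUF
 * is defined) or in an object taken from the meta mempool.  Tailroom-backed
 * metadata is tagged by setting bit 0 of the returned handle so that
 * free_op_meta() can skip the mempool put; users of the handle mask the tag
 * off before dereferencing (see the "& ~1ull" in fill_fc_params() below).
 */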
2901 static __rte_always_inline void *
2902 alloc_op_meta(struct rte_mbuf *m_src,
2903 	      buf_ptr_t *buf,
2904 	      int32_t len,
2905 	      struct rte_mempool *cpt_meta_pool)
2906 {
2907 	uint8_t *mdata;
2908 
2909 #ifndef CPT_ALWAYS_USE_SEPARATE_BUF
2910 	if (likely(m_src && (m_src->nb_segs == 1))) {
2911 		int32_t tailroom;
2912 		phys_addr_t mphys;
2913 
2914 		/* Check if tailroom is sufficient to hold meta data */
2915 		tailroom = rte_pktmbuf_tailroom(m_src);
2916 		if (likely(tailroom > len + 8)) {
2917 			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
2918 			mphys = m_src->buf_iova + m_src->buf_len;
2919 			mdata -= len;
2920 			mphys -= len;
2921 			buf->vaddr = mdata;
2922 			buf->dma_addr = mphys;
2923 			buf->size = len;
2924 			/* Indicate that this is an mbuf-allocated mdata */
2925 			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
2926 			return mdata;
2927 		}
2928 	}
2929 #else
2930 	RTE_SET_USED(m_src);
2931 #endif
2932 
2933 	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2934 		return NULL;
2935 
2936 	buf->vaddr = mdata;
2937 	buf->dma_addr = rte_mempool_virt2iova(mdata);
2938 	buf->size = len;
2939 
2940 	return mdata;
2941 }
2942 
2943 /**
2944  * free_op_meta - free metabuf to mempool.
2945  * @param mdata: pointer to the metabuf.
2946  * @param cpt_meta_pool: mempool from which the metabuf was allocated.
2947  */
2948 static __rte_always_inline void
2949 free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
2950 {
2951 	bool nofree = ((uintptr_t)mdata & 1ull);
2952 
2953 	if (likely(nofree))
2954 		return;
2955 	rte_mempool_put(cpt_meta_pool, mdata);
2956 }
2957 
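/*
 * prepare_iov_from_pkt() flattens an mbuf chain into an iov_ptr_t starting
 * start_offset bytes into the chain.  Typical use in this file keeps the
 * iovec in a stack buffer of SRC_IOV_SIZE bytes; a minimal sketch using the
 * same pattern as fill_fc_params() below:
 *
 *	char src[SRC_IOV_SIZE];
 *	iov_ptr_t *iov = (void *)src;
 *
 *	if (prepare_iov_from_pkt(m_src, iov, 0))
 *		return -EINVAL;
 *
 * A non-zero return means the requested offset landed on an empty segment;
 * 0 means iov->buf_cnt entries were filled in.
 */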
2958 static __rte_always_inline uint32_t
2959 prepare_iov_from_pkt(struct rte_mbuf *pkt,
2960 		     iov_ptr_t *iovec, uint32_t start_offset)
2961 {
2962 	uint16_t index = 0;
2963 	void *seg_data = NULL;
2964 	phys_addr_t seg_phys;
2965 	int32_t seg_size = 0;
2966 
2967 	if (!pkt) {
2968 		iovec->buf_cnt = 0;
2969 		return 0;
2970 	}
2971 
2972 	if (!start_offset) {
2973 		seg_data = rte_pktmbuf_mtod(pkt, void *);
2974 		seg_phys = rte_pktmbuf_iova(pkt);
2975 		seg_size = pkt->data_len;
2976 	} else {
2977 		while (start_offset >= pkt->data_len) {
2978 			start_offset -= pkt->data_len;
2979 			pkt = pkt->next;
2980 		}
2981 
2982 		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2983 		seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
2984 		seg_size = pkt->data_len - start_offset;
2985 		if (!seg_size)
2986 			return 1;
2987 	}
2988 
2989 	/* first seg */
2990 	iovec->bufs[index].vaddr = seg_data;
2991 	iovec->bufs[index].dma_addr = seg_phys;
2992 	iovec->bufs[index].size = seg_size;
2993 	index++;
2994 	pkt = pkt->next;
2995 
2996 	while (unlikely(pkt != NULL)) {
2997 		seg_data = rte_pktmbuf_mtod(pkt, void *);
2998 		seg_phys = rte_pktmbuf_iova(pkt);
2999 		seg_size = pkt->data_len;
3000 		if (!seg_size)
3001 			break;
3002 
3003 		iovec->bufs[index].vaddr = seg_data;
3004 		iovec->bufs[index].dma_addr = seg_phys;
3005 		iovec->bufs[index].size = seg_size;
3006 
3007 		index++;
3008 
3009 		pkt = pkt->next;
3010 	}
3011 
3012 	iovec->buf_cnt = index;
3013 	return 0;
3014 }
3015 
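/*
 * prepare_iov_from_pkt_inplace() is the in-place variant: for a
 * single-segment mbuf it records the buffer directly in param->bufs[0] and
 * sets SINGLE_BUF_INPLACE, additionally setting SINGLE_BUF_HEADTAILROOM
 * when at least 24 bytes of headroom (presumably the 8-byte offset control
 * word plus the 16-byte IV used by the direct mode earlier in this file)
 * and 8 bytes of tailroom are available.  Multi-segment mbufs fall back to
 * filling param->src_iov, as prepare_iov_from_pkt() does.
 */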
3016 static __rte_always_inline uint32_t
3017 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
3018 			     fc_params_t *param,
3019 			     uint32_t *flags)
3020 {
3021 	uint16_t index = 0;
3022 	void *seg_data = NULL;
3023 	phys_addr_t seg_phys;
3024 	uint32_t seg_size = 0;
3025 	iov_ptr_t *iovec;
3026 
3027 	seg_data = rte_pktmbuf_mtod(pkt, void *);
3028 	seg_phys = rte_pktmbuf_iova(pkt);
3029 	seg_size = pkt->data_len;
3030 
3031 	/* first seg */
3032 	if (likely(!pkt->next)) {
3033 		uint32_t headroom, tailroom;
3034 
3035 		*flags |= SINGLE_BUF_INPLACE;
3036 		headroom = rte_pktmbuf_headroom(pkt);
3037 		tailroom = rte_pktmbuf_tailroom(pkt);
3038 		if (likely((headroom >= 24) &&
3039 		    (tailroom >= 8))) {
3040 			/* In 83XX this is prerequisite for Direct mode */
3041 			*flags |= SINGLE_BUF_HEADTAILROOM;
3042 		}
3043 		param->bufs[0].vaddr = seg_data;
3044 		param->bufs[0].dma_addr = seg_phys;
3045 		param->bufs[0].size = seg_size;
3046 		return 0;
3047 	}
3048 	iovec = param->src_iov;
3049 	iovec->bufs[index].vaddr = seg_data;
3050 	iovec->bufs[index].dma_addr = seg_phys;
3051 	iovec->bufs[index].size = seg_size;
3052 	index++;
3053 	pkt = pkt->next;
3054 
3055 	while (unlikely(pkt != NULL)) {
3056 		seg_data = rte_pktmbuf_mtod(pkt, void *);
3057 		seg_phys = rte_pktmbuf_iova(pkt);
3058 		seg_size = pkt->data_len;
3059 
3060 		if (!seg_size)
3061 			break;
3062 
3063 		iovec->bufs[index].vaddr = seg_data;
3064 		iovec->bufs[index].dma_addr = seg_phys;
3065 		iovec->bufs[index].size = seg_size;
3066 
3067 		index++;
3068 
3069 		pkt = pkt->next;
3070 	}
3071 
3072 	iovec->buf_cnt = index;
3073 	return 0;
3074 }
3075 
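/*
 * fill_fc_params() converts one rte_crypto_op into the fc_params_t and the
 * packed d_offs/d_lens words consumed by the prep routines above.  Worked
 * example of the cipher+auth packing done below (illustrative values):
 * cipher offset 16, cipher length 64, auth offset 0, auth length 80 give
 *
 *	d_offs = (16 << 16) | 0   = 0x00100000
 *	d_lens = (64ULL << 32) | 80
 *
 * For AEAD, when the AAD immediately precedes the data in the mbuf, it is
 * covered by widening the auth region (auth offset moved back and auth
 * length grown by aad_len) instead of passing a separate AAD buffer.
 */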
3076 static __rte_always_inline int
3077 fill_fc_params(struct rte_crypto_op *cop,
3078 	       struct cpt_sess_misc *sess_misc,
3079 	       struct cpt_qp_meta_info *m_info,
3080 	       void **mdata_ptr,
3081 	       void **prep_req)
3082 {
3083 	uint32_t space = 0;
3084 	struct rte_crypto_sym_op *sym_op = cop->sym;
3085 	struct cpt_ctx *ctx = SESS_PRIV(sess_misc);
3086 	void *mdata = NULL;
3087 	uintptr_t *op;
3088 	uint32_t mc_hash_off;
3089 	uint32_t flags = 0;
3090 	uint64_t d_offs, d_lens;
3091 	struct rte_mbuf *m_src, *m_dst;
3092 	uint8_t cpt_op = sess_misc->cpt_op;
3093 #ifdef CPT_ALWAYS_USE_SG_MODE
3094 	uint8_t inplace = 0;
3095 #else
3096 	uint8_t inplace = 1;
3097 #endif
3098 	fc_params_t fc_params;
3099 	char src[SRC_IOV_SIZE];
3100 	char dst[SRC_IOV_SIZE];
3101 	uint32_t iv_buf[4];
3102 	int ret;
3103 
3104 	if (likely(sess_misc->iv_length)) {
3105 		flags |= VALID_IV_BUF;
3106 		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
3107 				   uint8_t *, sess_misc->iv_offset);
3108 		if (sess_misc->aes_ctr &&
3109 		    unlikely(sess_misc->iv_length != 16)) {
3110 			memcpy((uint8_t *)iv_buf,
3111 				rte_crypto_op_ctod_offset(cop,
3112 				uint8_t *, sess_misc->iv_offset), 12);
3113 			iv_buf[3] = rte_cpu_to_be_32(0x1);
3114 			fc_params.iv_buf = iv_buf;
3115 		}
3116 	}
3117 
3118 	if (sess_misc->zsk_flag) {
3119 		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3120 					uint8_t *,
3121 					sess_misc->auth_iv_offset);
3122 		if (sess_misc->zsk_flag != ZS_EA)
3123 			inplace = 0;
3124 	}
3125 	m_src = sym_op->m_src;
3126 	m_dst = sym_op->m_dst;
3127 
3128 	if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
3129 		uint8_t *salt;
3130 		uint8_t *aad_data;
3131 		uint16_t aad_len;
3132 
3133 		d_offs = sym_op->aead.data.offset;
3134 		d_lens = sym_op->aead.data.length;
3135 		mc_hash_off = sym_op->aead.data.offset +
3136 			      sym_op->aead.data.length;
3137 
3138 		aad_data = sym_op->aead.aad.data;
3139 		aad_len = sess_misc->aad_length;
3140 		if (likely((aad_data + aad_len) ==
3141 			   rte_pktmbuf_mtod_offset(m_src,
3142 				uint8_t *,
3143 				sym_op->aead.data.offset))) {
3144 			d_offs = (d_offs - aad_len) | (d_offs << 16);
3145 			d_lens = (d_lens + aad_len) | (d_lens << 32);
3146 		} else {
3147 			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
3148 			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
3149 			fc_params.aad_buf.size = aad_len;
3150 			flags |= VALID_AAD_BUF;
3151 			inplace = 0;
3152 			d_offs = d_offs << 16;
3153 			d_lens = d_lens << 32;
3154 		}
3155 
3156 		salt = fc_params.iv_buf;
3157 		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3158 			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3159 			sess_misc->salt = *(uint32_t *)salt;
3160 		}
3161 		fc_params.iv_buf = salt + 4;
3162 		if (likely(sess_misc->mac_len)) {
3163 			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
3164 					     m_src;
3165 
3166 			if (!m)
3167 				m = m_src;
3168 
3169 			/* hmac immediately following data is best case */
3170 			if (unlikely(rte_pktmbuf_mtod_offset(m, uint8_t *, mc_hash_off) !=
3171 				     (uint8_t *)sym_op->aead.digest.data)) {
3172 				flags |= VALID_MAC_BUF;
3173 				fc_params.mac_buf.size = sess_misc->mac_len;
3174 				fc_params.mac_buf.vaddr =
3175 				  sym_op->aead.digest.data;
3176 				fc_params.mac_buf.dma_addr =
3177 				 sym_op->aead.digest.phys_addr;
3178 				inplace = 0;
3179 			}
3180 		}
3181 	} else {
3182 		d_offs = sym_op->cipher.data.offset;
3183 		d_lens = sym_op->cipher.data.length;
3184 		mc_hash_off = sym_op->cipher.data.offset +
3185 			      sym_op->cipher.data.length;
3186 		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
3187 		d_lens = (d_lens << 32) | sym_op->auth.data.length;
3188 
3189 		if (mc_hash_off < (sym_op->auth.data.offset +
3190 				   sym_op->auth.data.length)){
3191 			mc_hash_off = (sym_op->auth.data.offset +
3192 				       sym_op->auth.data.length);
3193 		}
3194 		/* for gmac, salt should be updated like in gcm */
3195 		if (unlikely(sess_misc->is_gmac)) {
3196 			uint8_t *salt;
3197 			salt = fc_params.iv_buf;
3198 			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3199 				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3200 				sess_misc->salt = *(uint32_t *)salt;
3201 			}
3202 			fc_params.iv_buf = salt + 4;
3203 		}
3204 		if (likely(sess_misc->mac_len)) {
3205 			struct rte_mbuf *m;
3206 
3207 			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
3208 			if (!m)
3209 				m = m_src;
3210 
3211 			/* hmac immediately following data is best case */
3212 			if (!ctx->dec_auth && !ctx->auth_enc &&
3213 				 (unlikely(rte_pktmbuf_mtod_offset(m, uint8_t *, mc_hash_off) !=
3214 					   (uint8_t *)sym_op->auth.digest.data))) {
3215 				flags |= VALID_MAC_BUF;
3216 				fc_params.mac_buf.size =
3217 					sess_misc->mac_len;
3218 				fc_params.mac_buf.vaddr =
3219 					sym_op->auth.digest.data;
3220 				fc_params.mac_buf.dma_addr =
3221 				sym_op->auth.digest.phys_addr;
3222 				inplace = 0;
3223 			}
3224 		}
3225 	}
3226 	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
3227 	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
3228 
3229 	if (!ctx->dec_auth &&
3230 		  unlikely(sess_misc->is_null ||
3231 		  sess_misc->cpt_op == CPT_OP_DECODE))
3232 		inplace = 0;
3233 
3234 	if (likely(!m_dst && inplace)) {
3235 		/* Case of a single in-place buffer with no separate
3236 		 * AAD or MAC buffer and not an air-interface (wireless)
3237 		 * algorithm
3238 		 */
3239 		fc_params.dst_iov = fc_params.src_iov = (void *)src;
3240 
3241 		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
3242 							  &fc_params,
3243 							  &flags))) {
3244 			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
3245 			ret = -EINVAL;
3246 			goto err_exit;
3247 		}
3248 
3249 	} else {
3250 		/* Out of place processing */
3251 		fc_params.src_iov = (void *)src;
3252 		fc_params.dst_iov = (void *)dst;
3253 
3254 		/* Store SG I/O in the api for reuse */
3255 		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
3256 			CPT_LOG_DP_ERR("Prepare src iov failed");
3257 			ret = -EINVAL;
3258 			goto err_exit;
3259 		}
3260 
3261 		if (unlikely(m_dst != NULL)) {
3262 			uint32_t pkt_len;
3263 
3264 			/* Try to make as much room as src has */
3265 			pkt_len = rte_pktmbuf_pkt_len(m_dst);
3266 
3267 			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
3268 				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
3269 				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
3270 					CPT_LOG_DP_ERR("Not enough space in "
3271 						       "m_dst %p, need %u"
3272 						       " more",
3273 						       m_dst, pkt_len);
3274 					ret = -EINVAL;
3275 					goto err_exit;
3276 				}
3277 			}
3278 
3279 			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
3280 				CPT_LOG_DP_ERR("Prepare dst iov failed for "
3281 					       "m_dst %p", m_dst);
3282 				ret = -EINVAL;
3283 				goto err_exit;
3284 			}
3285 		} else {
3286 			fc_params.dst_iov = (void *)src;
3287 		}
3288 	}
3289 
3290 	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
3291 		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
3292 				      m_info->lb_mlen, m_info->pool);
3293 	else
3294 		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
3295 				      m_info->sg_mlen, m_info->pool);
3296 
3297 	if (unlikely(mdata == NULL)) {
3298 		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
3299 		ret = -ENOMEM;
3300 		goto err_exit;
3301 	}
3302 
3303 	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
3304 	op[0] = (uintptr_t)mdata;
3305 	op[1] = (uintptr_t)cop;
3306 	op[2] = op[3] = 0; /* Used to indicate auth verify */
3307 	space += 4 * sizeof(uint64_t);
3308 
3309 	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
3310 	fc_params.meta_buf.dma_addr += space;
3311 	fc_params.meta_buf.size -= space;
3312 
3313 	/* Finally prepare the instruction */
3314 	if (cpt_op & CPT_OP_ENCODE)
3315 		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
3316 						 &fc_params, op);
3317 	else
3318 		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
3319 						 &fc_params, op);
3320 
3321 	if (unlikely(*prep_req == NULL)) {
3322 		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
3323 		ret = -EINVAL;
3324 		goto free_mdata_and_exit;
3325 	}
3326 
3327 	*mdata_ptr = mdata;
3328 
3329 	return 0;
3330 
3331 free_mdata_and_exit:
3332 	free_op_meta(mdata, m_info->pool);
3333 err_exit:
3334 	return ret;
3335 }
3336 
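/*
 * compl_auth_verify() is intended for the dequeue side: when a request was
 * prepared for auth verification, op[2]/op[3] of the op metadata (filled in
 * by fill_digest_params() below) hold the address and length of the MAC
 * generated by hardware.  A hedged sketch of the expected call, assuming
 * "op" is the metadata pointer recovered at completion time:
 *
 *	if (op[3])
 *		compl_auth_verify(cop, (uint8_t *)op[2], op[3]);
 */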
3337 static __rte_always_inline void
3338 compl_auth_verify(struct rte_crypto_op *op,
3339 		      uint8_t *gen_mac,
3340 		      uint64_t mac_len)
3341 {
3342 	uint8_t *mac;
3343 	struct rte_crypto_sym_op *sym_op = op->sym;
3344 
3345 	if (sym_op->auth.digest.data)
3346 		mac = sym_op->auth.digest.data;
3347 	else
3348 		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
3349 					      uint8_t *,
3350 					      sym_op->auth.data.length +
3351 					      sym_op->auth.data.offset);
3352 	if (!mac) {
3353 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3354 		return;
3355 	}
3356 
3357 	if (memcmp(mac, gen_mac, mac_len))
3358 		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
3359 	else
3360 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3361 }
3362 
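/*
 * Per the 3GPP F9 spec the authenticated bit-string ends with the DIRECTION
 * bit, a single '1' marker bit and zero padding (bits packed MSB first).
 * find_kasumif9_direction_and_length() scans backwards for the last
 * non-zero byte, takes its least-significant set bit as the marker, and
 * recovers the direction bit and the length in bits (still including the
 * 64-bit COUNT/FRESH header, which the caller strips off).  Worked example:
 * if the last non-zero byte is 0xC0 at byte index k, rte_bsf32(0xC0) = 6,
 * so the marker is bit 6, the direction bit is bit 7 (value 1) and the
 * recovered length is k * 8 bits.
 */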
3363 static __rte_always_inline void
3364 find_kasumif9_direction_and_length(uint8_t *src,
3365 				   uint32_t counter_num_bytes,
3366 				   uint32_t *addr_length_in_bits,
3367 				   uint8_t *addr_direction)
3368 {
3369 	uint8_t found = 0;
3370 	uint32_t pos;
3371 	uint8_t last_byte;
3372 	while (!found && counter_num_bytes > 0) {
3373 		counter_num_bytes--;
3374 		if (src[counter_num_bytes] == 0x00)
3375 			continue;
3376 		pos = rte_bsf32(src[counter_num_bytes]);
3377 		if (pos == 7) {
3378 			if (likely(counter_num_bytes > 0)) {
3379 				last_byte = src[counter_num_bytes - 1];
3380 				*addr_direction  =  last_byte & 0x1;
3381 				*addr_length_in_bits = counter_num_bytes * 8
3382 							- 1;
3383 			}
3384 		} else {
3385 			last_byte = src[counter_num_bytes];
3386 			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
3387 			*addr_length_in_bits = counter_num_bytes * 8
3388 						+ (8 - (pos + 2));
3389 		}
3390 		found = 1;
3391 	}
3392 }
3393 
3394 /*
3395  * This handles all auth only except AES_GMAC
3396  */
3397 static __rte_always_inline int
3398 fill_digest_params(struct rte_crypto_op *cop,
3399 		   struct cpt_sess_misc *sess,
3400 		   struct cpt_qp_meta_info *m_info,
3401 		   void **mdata_ptr,
3402 		   void **prep_req)
3403 {
3404 	uint32_t space = 0;
3405 	struct rte_crypto_sym_op *sym_op = cop->sym;
3406 	void *mdata;
3407 	phys_addr_t mphys;
3408 	uint64_t *op;
3409 	uint32_t auth_range_off;
3410 	uint32_t flags = 0;
3411 	uint64_t d_offs = 0, d_lens;
3412 	struct rte_mbuf *m_src, *m_dst;
3413 	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
3414 	uint16_t mac_len = sess->mac_len;
3415 	fc_params_t params;
3416 	char src[SRC_IOV_SIZE];
3417 	uint8_t iv_buf[16];
3418 	int ret;
3419 
3420 	memset(&params, 0, sizeof(fc_params_t));
3421 
3422 	m_src = sym_op->m_src;
3423 
3424 	/* For just digest, let's force mempool alloc */
3425 	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
3426 			      m_info->pool);
3427 	if (mdata == NULL) {
3428 		ret = -ENOMEM;
3429 		goto err_exit;
3430 	}
3431 
3432 	mphys = params.meta_buf.dma_addr;
3433 
3434 	op = mdata;
3435 	op[0] = (uintptr_t)mdata;
3436 	op[1] = (uintptr_t)cop;
3437 	op[2] = op[3] = 0; /* Used to indicate auth verify */
3438 	space += 4 * sizeof(uint64_t);
3439 
3440 	auth_range_off = sym_op->auth.data.offset;
3441 
3442 	flags = VALID_MAC_BUF;
3443 	params.src_iov = (void *)src;
3444 	if (unlikely(sess->zsk_flag)) {
3445 		/*
3446 		 * Since offsets for ZUC, KASUMI and SNOW3G are in bits,
3447 		 * we pass the offset through even for the auth-only case
3448 		 * and let the microcode handle it
3449 		 */
3450 		d_offs = auth_range_off;
3451 		auth_range_off = 0;
3452 		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3453 					uint8_t *, sess->auth_iv_offset);
3454 		if (sess->zsk_flag == K_F9) {
3455 			uint32_t length_in_bits, num_bytes;
3456 			uint8_t *src, direction = 0;
3457 
3458 			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
3459 							uint8_t *), 8);
3460 			/*
3461 			 * This is kasumi f9, take direction from
3462 			 * source buffer
3463 			 */
3464 			length_in_bits = cop->sym->auth.data.length;
3465 			num_bytes = (length_in_bits >> 3);
3466 			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
3467 			find_kasumif9_direction_and_length(src,
3468 						num_bytes,
3469 						&length_in_bits,
3470 						&direction);
3471 			length_in_bits -= 64;
3472 			cop->sym->auth.data.offset += 64;
3473 			d_offs = cop->sym->auth.data.offset;
3474 			auth_range_off = d_offs / 8;
3475 			cop->sym->auth.data.length = length_in_bits;
3476 
3477 			/* Store it at end of auth iv */
3478 			iv_buf[8] = direction;
3479 			params.auth_iv_buf = iv_buf;
3480 		}
3481 	}
3482 
3483 	d_lens = sym_op->auth.data.length;
3484 
3485 	params.ctx_buf.vaddr = SESS_PRIV(sess);
3486 	params.ctx_buf.dma_addr = sess->ctx_dma_addr;
3487 
3488 	if (auth_op == CPT_OP_AUTH_GENERATE) {
3489 		if (sym_op->auth.digest.data) {
3490 			/*
3491 			 * Digest to be generated
3492 			 * in separate buffer
3493 			 */
3494 			params.mac_buf.size =
3495 				sess->mac_len;
3496 			params.mac_buf.vaddr =
3497 				sym_op->auth.digest.data;
3498 			params.mac_buf.dma_addr =
3499 				sym_op->auth.digest.phys_addr;
3500 		} else {
3501 			uint32_t off = sym_op->auth.data.offset +
3502 				sym_op->auth.data.length;
3503 			int32_t dlen, space;
3504 
3505 			m_dst = sym_op->m_dst ?
3506 				sym_op->m_dst : sym_op->m_src;
3507 			dlen = rte_pktmbuf_pkt_len(m_dst);
3508 
3509 			space = off + mac_len - dlen;
3510 			if (space > 0)
3511 				if (!rte_pktmbuf_append(m_dst, space)) {
3512 					CPT_LOG_DP_ERR("Failed to extend "
3513 						       "mbuf by %uB", space);
3514 					ret = -EINVAL;
3515 					goto free_mdata_and_exit;
3516 				}
3517 
3518 			params.mac_buf.vaddr =
3519 				rte_pktmbuf_mtod_offset(m_dst, void *, off);
3520 			params.mac_buf.dma_addr =
3521 				rte_pktmbuf_iova_offset(m_dst, off);
3522 			params.mac_buf.size = mac_len;
3523 		}
3524 	} else {
3525 		/* Need space for storing generated mac */
3526 		params.mac_buf.vaddr = (uint8_t *)mdata + space;
3527 		params.mac_buf.dma_addr = mphys + space;
3528 		params.mac_buf.size = mac_len;
3529 		space += RTE_ALIGN_CEIL(mac_len, 8);
3530 		op[2] = (uintptr_t)params.mac_buf.vaddr;
3531 		op[3] = mac_len;
3532 	}
3533 
3534 	params.meta_buf.vaddr = (uint8_t *)mdata + space;
3535 	params.meta_buf.dma_addr = mphys + space;
3536 	params.meta_buf.size -= space;
3537 
3538 	/* Out of place processing */
3539 	params.src_iov = (void *)src;
3540 
3541 	/* Store SG I/O in the api for reuse */
3542 	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
3543 		CPT_LOG_DP_ERR("Prepare src iov failed");
3544 		ret = -EINVAL;
3545 		goto free_mdata_and_exit;
3546 	}
3547 
3548 	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
3549 	if (unlikely(*prep_req == NULL)) {
3550 		ret = -EINVAL;
3551 		goto free_mdata_and_exit;
3552 	}
3553 
3554 	*mdata_ptr = mdata;
3555 
3556 	return 0;
3557 
3558 free_mdata_and_exit:
3559 	free_op_meta(mdata, m_info->pool);
3560 err_exit:
3561 	return ret;
3562 }
3563 
3564 #endif /*_CPT_UCODE_H_ */
3565