xref: /dpdk/drivers/common/cpt/cpt_ucode.h (revision 200bc52e5aa0d72e70464c9cd22b55cf536ed13c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium, Inc
3  */
4 
5 #ifndef _CPT_UCODE_H_
6 #define _CPT_UCODE_H_
7 #include <stdbool.h>
8 
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
12 
13 /*
14  * This file defines functions that are interfaces to microcode spec.
15  *
16  */
17 
18 static uint8_t zuc_d[32] = {
19 	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
23 };
24 
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
27 {
28 	/*
29 	 * Microcode only supports the following combination.
30 	 * Encryption followed by authentication
31 	 * Authentication followed by decryption
32 	 */
33 	if (xform->next) {
34 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 		    (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 		    (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 			/* Unsupported as of now by microcode */
38 			CPT_LOG_DP_ERR("Unsupported combination");
39 			return -1;
40 		}
41 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 		    (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 		    (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 			/* For GMAC auth there is no cipher operation */
45 			if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
46 			    xform->next->auth.algo !=
47 			    RTE_CRYPTO_AUTH_AES_GMAC) {
48 				/* Unsupported as of now by microcode */
49 				CPT_LOG_DP_ERR("Unsupported combination");
50 				return -1;
51 			}
52 		}
53 	}
54 	return 0;
55 }
56 
57 static __rte_always_inline void
58 gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
59 {
60 	int i, base;
61 
62 	for (i = 0; i < 4; i++) {
63 		base = 4 * i;
64 		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 			(ck[base + 2] << 8) | (ck[base + 3]);
66 		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
67 	}
68 }
69 
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
72 		   uint8_t *salt)
73 {
74 	struct cpt_ctx *cpt_ctx = ctx;
75 	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
76 }
77 
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
80 {
81 	switch (key_len) {
82 	case CPT_BYTE_16:
83 	case CPT_BYTE_24:
84 	case CPT_BYTE_32:
85 		return 0;
86 	default:
87 		return -1;
88 	}
89 }
90 
91 static __rte_always_inline int
92 cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
93 		uint16_t key_len)
94 {
95 	int fc_type = 0;
96 	switch (type) {
97 	case PASSTHROUGH:
98 		fc_type = FC_GEN;
99 		break;
100 	case DES3_CBC:
101 	case DES3_ECB:
102 		fc_type = FC_GEN;
103 		break;
104 	case AES_CBC:
105 	case AES_ECB:
106 	case AES_CFB:
107 	case AES_CTR:
108 	case AES_GCM:
109 		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
110 			return -1;
111 		fc_type = FC_GEN;
112 		break;
113 	case AES_XTS:
114 		key_len = key_len / 2;
115 		if (unlikely(key_len == CPT_BYTE_24)) {
116 			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
117 			return -1;
118 		}
119 		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
120 			return -1;
121 		fc_type = FC_GEN;
122 		break;
123 	case ZUC_EEA3:
124 	case SNOW3G_UEA2:
125 		if (unlikely(key_len != 16))
126 			return -1;
127 		/* No support for AEAD yet */
128 		if (unlikely(cpt_ctx->hash_type))
129 			return -1;
130 		fc_type = ZUC_SNOW3G;
131 		break;
132 	case KASUMI_F8_CBC:
133 	case KASUMI_F8_ECB:
134 		if (unlikely(key_len != 16))
135 			return -1;
136 		/* No support for AEAD yet */
137 		if (unlikely(cpt_ctx->hash_type))
138 			return -1;
139 		fc_type = KASUMI;
140 		break;
141 	default:
142 		return -1;
143 	}
144 	return fc_type;
145 }
146 
147 static __rte_always_inline void
148 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
149 {
150 	cpt_ctx->enc_cipher = 0;
151 	CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
152 }
153 
154 static __rte_always_inline void
155 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
156 {
157 	mc_aes_type_t aes_key_type = 0;
158 	switch (key_len) {
159 	case CPT_BYTE_16:
160 		aes_key_type = AES_128_BIT;
161 		break;
162 	case CPT_BYTE_24:
163 		aes_key_type = AES_192_BIT;
164 		break;
165 	case CPT_BYTE_32:
166 		aes_key_type = AES_256_BIT;
167 		break;
168 	default:
169 		/* This should not happen */
170 		CPT_LOG_DP_ERR("Invalid AES key len");
171 		return;
172 	}
173 	CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
174 }
175 
176 static __rte_always_inline void
177 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
178 		uint16_t key_len)
179 {
180 	uint32_t keyx[4];
181 	cpt_ctx->snow3g = 1;
182 	gen_key_snow3g(key, keyx);
183 	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
184 	cpt_ctx->fc_type = ZUC_SNOW3G;
185 	cpt_ctx->zsk_flags = 0;
186 }
187 
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
190 		uint16_t key_len)
191 {
192 	cpt_ctx->snow3g = 0;
193 	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 	cpt_ctx->fc_type = ZUC_SNOW3G;
196 	cpt_ctx->zsk_flags = 0;
197 }
198 
199 static __rte_always_inline void
200 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
201 		uint16_t key_len)
202 {
203 	cpt_ctx->k_ecb = 1;
204 	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
205 	cpt_ctx->zsk_flags = 0;
206 	cpt_ctx->fc_type = KASUMI;
207 }
208 
209 static __rte_always_inline void
210 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
211 		uint16_t key_len)
212 {
213 	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
214 	cpt_ctx->zsk_flags = 0;
215 	cpt_ctx->fc_type = KASUMI;
216 }
217 
218 static __rte_always_inline int
219 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
220 		    uint16_t key_len, uint8_t *salt)
221 {
222 	struct cpt_ctx *cpt_ctx = ctx;
223 	mc_fc_context_t *fctx = &cpt_ctx->fctx;
224 	uint64_t *ctrl_flags = NULL;
225 	int fc_type;
226 
227 	/* Validate key before proceeding */
228 	fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
229 	if (unlikely(fc_type == -1))
230 		return -1;
231 
232 	if (fc_type == FC_GEN) {
233 		cpt_ctx->fc_type = FC_GEN;
234 		ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
235 		*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
236 		/*
237 		 * We need to always say IV is from DPTR as user can
238 		 * sometimes iverride IV per operation.
239 		 */
240 		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
241 	}
242 
243 	switch (type) {
244 	case PASSTHROUGH:
245 		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
246 		goto fc_success;
247 	case DES3_CBC:
248 		/* CPT performs DES using 3DES with the 8B DES-key
249 		 * replicated 2 more times to match the 24B 3DES-key.
250 		 * Eg. If org. key is "0x0a 0x0b", then new key is
251 		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
252 		 */
253 		if (key_len == 8) {
254 			/* Skipping the first 8B as it will be copied
255 			 * in the regular code flow
256 			 */
257 			memcpy(fctx->enc.encr_key+key_len, key, key_len);
258 			memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
259 		}
260 		break;
261 	case DES3_ECB:
262 		/* For DES3_ECB IV need to be from CTX. */
263 		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
264 		break;
265 	case AES_CBC:
266 	case AES_ECB:
267 	case AES_CFB:
268 	case AES_CTR:
269 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
270 		break;
271 	case AES_GCM:
272 		/* Even though iv source is from dptr,
273 		 * aes_gcm salt is taken from ctx
274 		 */
275 		if (salt) {
276 			memcpy(fctx->enc.encr_iv, salt, 4);
277 			/* Assuming it was just salt update
278 			 * and nothing else
279 			 */
280 			if (!key)
281 				goto fc_success;
282 		}
283 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
284 		break;
285 	case AES_XTS:
286 		key_len = key_len / 2;
287 		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
288 
289 		/* Copy key2 for XTS into ipad */
290 		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
291 		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
292 		break;
293 	case SNOW3G_UEA2:
294 		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
295 		goto success;
296 	case ZUC_EEA3:
297 		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
298 		goto success;
299 	case KASUMI_F8_ECB:
300 		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
301 		goto success;
302 	case KASUMI_F8_CBC:
303 		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
304 		goto success;
305 	default:
306 		break;
307 	}
308 
309 	/* Only for FC_GEN case */
310 
311 	/* For GMAC auth, cipher must be NULL */
312 	if (cpt_ctx->hash_type != GMAC_TYPE)
313 		CPT_P_ENC_CTRL(fctx).enc_cipher = type;
314 
315 	memcpy(fctx->enc.encr_key, key, key_len);
316 
317 fc_success:
318 	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
319 
320 success:
321 	cpt_ctx->enc_cipher = type;
322 
323 	return 0;
324 }
325 
326 static __rte_always_inline uint32_t
327 fill_sg_comp(sg_comp_t *list,
328 	     uint32_t i,
329 	     phys_addr_t dma_addr,
330 	     uint32_t size)
331 {
332 	sg_comp_t *to = &list[i>>2];
333 
334 	to->u.s.len[i%4] = rte_cpu_to_be_16(size);
335 	to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
336 	i++;
337 	return i;
338 }
339 
340 static __rte_always_inline uint32_t
341 fill_sg_comp_from_buf(sg_comp_t *list,
342 		      uint32_t i,
343 		      buf_ptr_t *from)
344 {
345 	sg_comp_t *to = &list[i>>2];
346 
347 	to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
348 	to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
349 	i++;
350 	return i;
351 }
352 
353 static __rte_always_inline uint32_t
354 fill_sg_comp_from_buf_min(sg_comp_t *list,
355 			  uint32_t i,
356 			  buf_ptr_t *from,
357 			  uint32_t *psize)
358 {
359 	sg_comp_t *to = &list[i >> 2];
360 	uint32_t size = *psize;
361 	uint32_t e_len;
362 
363 	e_len = (size > from->size) ? from->size : size;
364 	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
365 	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
366 	*psize -= e_len;
367 	i++;
368 	return i;
369 }
370 
371 /*
372  * This fills the MC expected SGIO list
373  * from IOV given by user.
374  */
375 static __rte_always_inline uint32_t
376 fill_sg_comp_from_iov(sg_comp_t *list,
377 		      uint32_t i,
378 		      iov_ptr_t *from, uint32_t from_offset,
379 		      uint32_t *psize, buf_ptr_t *extra_buf,
380 		      uint32_t extra_offset)
381 {
382 	int32_t j;
383 	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
384 	uint32_t size = *psize - extra_len;
385 	buf_ptr_t *bufs;
386 
387 	bufs = from->bufs;
388 	for (j = 0; (j < from->buf_cnt) && size; j++) {
389 		phys_addr_t e_dma_addr;
390 		uint32_t e_len;
391 		sg_comp_t *to = &list[i >> 2];
392 
393 		if (!bufs[j].size)
394 			continue;
395 
396 		if (unlikely(from_offset)) {
397 			if (from_offset >= bufs[j].size) {
398 				from_offset -= bufs[j].size;
399 				continue;
400 			}
401 			e_dma_addr = bufs[j].dma_addr + from_offset;
402 			e_len = (size > (bufs[j].size - from_offset)) ?
403 				(bufs[j].size - from_offset) : size;
404 			from_offset = 0;
405 		} else {
406 			e_dma_addr = bufs[j].dma_addr;
407 			e_len = (size > bufs[j].size) ?
408 				bufs[j].size : size;
409 		}
410 
411 		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
412 		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
413 
414 		if (extra_len && (e_len >= extra_offset)) {
415 			/* Break the data at given offset */
416 			uint32_t next_len = e_len - extra_offset;
417 			phys_addr_t next_dma = e_dma_addr + extra_offset;
418 
419 			if (!extra_offset) {
420 				i--;
421 			} else {
422 				e_len = extra_offset;
423 				size -= e_len;
424 				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
425 			}
426 
427 			/* Insert extra data ptr */
428 			if (extra_len) {
429 				i++;
430 				to = &list[i >> 2];
431 				to->u.s.len[i % 4] =
432 					rte_cpu_to_be_16(extra_buf->size);
433 				to->ptr[i % 4] =
434 					rte_cpu_to_be_64(extra_buf->dma_addr);
435 
436 				/* size already decremented by extra len */
437 			}
438 
439 			/* insert the rest of the data */
440 			if (next_len) {
441 				i++;
442 				to = &list[i >> 2];
443 				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
444 				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
445 				size -= next_len;
446 			}
447 			extra_len = 0;
448 
449 		} else {
450 			size -= e_len;
451 		}
452 		if (extra_offset)
453 			extra_offset -= size;
454 		i++;
455 	}
456 
457 	*psize = size;
458 	return (uint32_t)i;
459 }
460 
461 static __rte_always_inline void
462 cpt_digest_gen_prep(uint32_t flags,
463 		    uint64_t d_lens,
464 		    digest_params_t *params,
465 		    void *op,
466 		    void **prep_req)
467 {
468 	struct cpt_request_info *req;
469 	uint32_t size, i;
470 	int32_t m_size;
471 	uint16_t data_len, mac_len, key_len;
472 	auth_type_t hash_type;
473 	buf_ptr_t *meta_p;
474 	struct cpt_ctx *ctx;
475 	sg_comp_t *gather_comp;
476 	sg_comp_t *scatter_comp;
477 	uint8_t *in_buffer;
478 	uint32_t g_size_bytes, s_size_bytes;
479 	uint64_t dptr_dma, rptr_dma;
480 	vq_cmd_word0_t vq_cmd_w0;
481 	vq_cmd_word3_t vq_cmd_w3;
482 	void *c_vaddr, *m_vaddr;
483 	uint64_t c_dma, m_dma;
484 	opcode_info_t opcode;
485 
486 	ctx = params->ctx_buf.vaddr;
487 	meta_p = &params->meta_buf;
488 
489 	m_vaddr = meta_p->vaddr;
490 	m_dma = meta_p->dma_addr;
491 	m_size = meta_p->size;
492 
493 	/*
494 	 * Save initial space that followed app data for completion code &
495 	 * alternate completion code to fall in same cache line as app data
496 	 */
497 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
498 	m_dma += COMPLETION_CODE_SIZE;
499 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
500 		(uint8_t *)m_vaddr;
501 	c_vaddr = (uint8_t *)m_vaddr + size;
502 	c_dma = m_dma + size;
503 	size += sizeof(cpt_res_s_t);
504 
505 	m_vaddr = (uint8_t *)m_vaddr + size;
506 	m_dma += size;
507 	m_size -= size;
508 
509 	req = m_vaddr;
510 
511 	size = sizeof(struct cpt_request_info);
512 	m_vaddr = (uint8_t *)m_vaddr + size;
513 	m_dma += size;
514 	m_size -= size;
515 
516 	hash_type = ctx->hash_type;
517 	mac_len = ctx->mac_len;
518 	key_len = ctx->auth_key_len;
519 	data_len = AUTH_DLEN(d_lens);
520 
521 	/*GP op header */
522 	vq_cmd_w0.u64 = 0;
523 	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
524 	if (ctx->hmac) {
525 		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
526 		vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
527 		vq_cmd_w0.s.dlen =
528 			rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
529 	} else {
530 		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
531 		vq_cmd_w0.s.param1 = 0;
532 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
533 	}
534 
535 	opcode.s.minor = 0;
536 
537 	/* Null auth only case enters the if */
538 	if (unlikely(!hash_type && !ctx->enc_cipher)) {
539 		opcode.s.major = CPT_MAJOR_OP_MISC;
540 		/* Minor op is passthrough */
541 		opcode.s.minor = 0x03;
542 		/* Send out completion code only */
543 		vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
544 	}
545 
546 	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
547 
548 	/* DPTR has SG list */
549 	in_buffer = m_vaddr;
550 	dptr_dma = m_dma;
551 
552 	((uint16_t *)in_buffer)[0] = 0;
553 	((uint16_t *)in_buffer)[1] = 0;
554 
555 	/* TODO Add error check if space will be sufficient */
556 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
557 
558 	/*
559 	 * Input gather list
560 	 */
561 
562 	i = 0;
563 
564 	if (ctx->hmac) {
565 		uint64_t k_dma = params->ctx_buf.dma_addr +
566 			offsetof(struct cpt_ctx, auth_key);
567 		/* Key */
568 		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
569 	}
570 
571 	/* input data */
572 	size = data_len;
573 	if (size) {
574 		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
575 					  0, &size, NULL, 0);
576 		if (unlikely(size)) {
577 			CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
578 					 " by %dB", size);
579 			return;
580 		}
581 	} else {
582 		/*
583 		 * Looks like we need to support zero data
584 		 * gather ptr in case of hash & hmac
585 		 */
586 		i++;
587 	}
588 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
589 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
590 
591 	/*
592 	 * Output Gather list
593 	 */
594 
595 	i = 0;
596 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
597 
598 	if (flags & VALID_MAC_BUF) {
599 		if (unlikely(params->mac_buf.size < mac_len)) {
600 			CPT_LOG_DP_ERR("Insufficient MAC size");
601 			return;
602 		}
603 
604 		size = mac_len;
605 		i = fill_sg_comp_from_buf_min(scatter_comp, i,
606 					      &params->mac_buf, &size);
607 	} else {
608 		size = mac_len;
609 		i = fill_sg_comp_from_iov(scatter_comp, i,
610 					  params->src_iov, data_len,
611 					  &size, NULL, 0);
612 		if (unlikely(size)) {
613 			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
614 				       " %dB", size);
615 			return;
616 		}
617 	}
618 
619 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
620 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
621 
622 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
623 
624 	/* This is DPTR len incase of SG mode */
625 	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
626 
627 	m_vaddr = (uint8_t *)m_vaddr + size;
628 	m_dma += size;
629 	m_size -= size;
630 
631 	/* cpt alternate completion address saved earlier */
632 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
633 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
634 	rptr_dma = c_dma - 8;
635 
636 	req->ist.ei1 = dptr_dma;
637 	req->ist.ei2 = rptr_dma;
638 	/* First 16-bit swap then 64-bit swap */
639 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
640 	 * to eliminate all the swapping
641 	 */
642 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
643 
644 	/* vq command w3 */
645 	vq_cmd_w3.u64 = 0;
646 
647 	/* 16 byte aligned cpt res address */
648 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
649 	*req->completion_addr = COMPLETION_CODE_INIT;
650 	req->comp_baddr  = c_dma;
651 
652 	/* Fill microcode part of instruction */
653 	req->ist.ei0 = vq_cmd_w0.u64;
654 	req->ist.ei3 = vq_cmd_w3.u64;
655 
656 	req->op = op;
657 
658 	*prep_req = req;
659 	return;
660 }
661 
662 static __rte_always_inline void
663 cpt_enc_hmac_prep(uint32_t flags,
664 		  uint64_t d_offs,
665 		  uint64_t d_lens,
666 		  fc_params_t *fc_params,
667 		  void *op,
668 		  void **prep_req)
669 {
670 	uint32_t iv_offset = 0;
671 	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
672 	struct cpt_ctx *cpt_ctx;
673 	uint32_t cipher_type, hash_type;
674 	uint32_t mac_len, size;
675 	uint8_t iv_len = 16;
676 	struct cpt_request_info *req;
677 	buf_ptr_t *meta_p, *aad_buf = NULL;
678 	uint32_t encr_offset, auth_offset;
679 	uint32_t encr_data_len, auth_data_len, aad_len = 0;
680 	uint32_t passthrough_len = 0;
681 	void *m_vaddr, *offset_vaddr;
682 	uint64_t m_dma, offset_dma, ctx_dma;
683 	vq_cmd_word0_t vq_cmd_w0;
684 	vq_cmd_word3_t vq_cmd_w3;
685 	void *c_vaddr;
686 	uint64_t c_dma;
687 	int32_t m_size;
688 	opcode_info_t opcode;
689 
690 	meta_p = &fc_params->meta_buf;
691 	m_vaddr = meta_p->vaddr;
692 	m_dma = meta_p->dma_addr;
693 	m_size = meta_p->size;
694 
695 	encr_offset = ENCR_OFFSET(d_offs);
696 	auth_offset = AUTH_OFFSET(d_offs);
697 	encr_data_len = ENCR_DLEN(d_lens);
698 	auth_data_len = AUTH_DLEN(d_lens);
699 	if (unlikely(flags & VALID_AAD_BUF)) {
700 		/*
701 		 * We dont support both aad
702 		 * and auth data separately
703 		 */
704 		auth_data_len = 0;
705 		auth_offset = 0;
706 		aad_len = fc_params->aad_buf.size;
707 		aad_buf = &fc_params->aad_buf;
708 	}
709 	cpt_ctx = fc_params->ctx_buf.vaddr;
710 	cipher_type = cpt_ctx->enc_cipher;
711 	hash_type = cpt_ctx->hash_type;
712 	mac_len = cpt_ctx->mac_len;
713 
714 	/*
715 	 * Save initial space that followed app data for completion code &
716 	 * alternate completion code to fall in same cache line as app data
717 	 */
718 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
719 	m_dma += COMPLETION_CODE_SIZE;
720 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
721 		(uint8_t *)m_vaddr;
722 
723 	c_vaddr = (uint8_t *)m_vaddr + size;
724 	c_dma = m_dma + size;
725 	size += sizeof(cpt_res_s_t);
726 
727 	m_vaddr = (uint8_t *)m_vaddr + size;
728 	m_dma += size;
729 	m_size -= size;
730 
731 	/* start cpt request info struct at 8 byte boundary */
732 	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
733 		(uint8_t *)m_vaddr;
734 
735 	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
736 
737 	size += sizeof(struct cpt_request_info);
738 	m_vaddr = (uint8_t *)m_vaddr + size;
739 	m_dma += size;
740 	m_size -= size;
741 
742 	if (hash_type == GMAC_TYPE)
743 		encr_data_len = 0;
744 
745 	if (unlikely(!(flags & VALID_IV_BUF))) {
746 		iv_len = 0;
747 		iv_offset = ENCR_IV_OFFSET(d_offs);
748 	}
749 
750 	if (unlikely(flags & VALID_AAD_BUF)) {
751 		/*
752 		 * When AAD is given, data above encr_offset is pass through
753 		 * Since AAD is given as separate pointer and not as offset,
754 		 * this is a special case as we need to fragment input data
755 		 * into passthrough + encr_data and then insert AAD in between.
756 		 */
757 		if (hash_type != GMAC_TYPE) {
758 			passthrough_len = encr_offset;
759 			auth_offset = passthrough_len + iv_len;
760 			encr_offset = passthrough_len + aad_len + iv_len;
761 			auth_data_len = aad_len + encr_data_len;
762 		} else {
763 			passthrough_len = 16 + aad_len;
764 			auth_offset = passthrough_len + iv_len;
765 			auth_data_len = aad_len;
766 		}
767 	} else {
768 		encr_offset += iv_len;
769 		auth_offset += iv_len;
770 	}
771 
772 	/* Encryption */
773 	opcode.s.major = CPT_MAJOR_OP_FC;
774 	opcode.s.minor = 0;
775 
776 	auth_dlen = auth_offset + auth_data_len;
777 	enc_dlen = encr_data_len + encr_offset;
778 	if (unlikely(encr_data_len & 0xf)) {
779 		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
780 			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
781 		else if (likely((cipher_type == AES_CBC) ||
782 				(cipher_type == AES_ECB)))
783 			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
784 	}
785 
786 	if (unlikely(hash_type == GMAC_TYPE)) {
787 		encr_offset = auth_dlen;
788 		enc_dlen = 0;
789 	}
790 
791 	if (unlikely(auth_dlen > enc_dlen)) {
792 		inputlen = auth_dlen;
793 		outputlen = auth_dlen + mac_len;
794 	} else {
795 		inputlen = enc_dlen;
796 		outputlen = enc_dlen + mac_len;
797 	}
798 
799 	/* GP op header */
800 	vq_cmd_w0.u64 = 0;
801 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
802 	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
803 	/*
804 	 * In 83XX since we have a limitation of
805 	 * IV & Offset control word not part of instruction
806 	 * and need to be part of Data Buffer, we check if
807 	 * head room is there and then only do the Direct mode processing
808 	 */
809 	if (likely((flags & SINGLE_BUF_INPLACE) &&
810 		   (flags & SINGLE_BUF_HEADTAILROOM))) {
811 		void *dm_vaddr = fc_params->bufs[0].vaddr;
812 		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
813 		/*
814 		 * This flag indicates that there is 24 bytes head room and
815 		 * 8 bytes tail room available, so that we get to do
816 		 * DIRECT MODE with limitation
817 		 */
818 
819 		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
820 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
821 
822 		/* DPTR */
823 		req->ist.ei1 = offset_dma;
824 		/* RPTR should just exclude offset control word */
825 		req->ist.ei2 = dm_dma_addr - iv_len;
826 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
827 						    + outputlen - iv_len);
828 
829 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
830 
831 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
832 
833 		if (likely(iv_len)) {
834 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
835 						      + OFF_CTRL_LEN);
836 			uint64_t *src = fc_params->iv_buf;
837 			dest[0] = src[0];
838 			dest[1] = src[1];
839 		}
840 
841 		*(uint64_t *)offset_vaddr =
842 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
843 				((uint64_t)iv_offset << 8) |
844 				((uint64_t)auth_offset));
845 
846 	} else {
847 		uint32_t i, g_size_bytes, s_size_bytes;
848 		uint64_t dptr_dma, rptr_dma;
849 		sg_comp_t *gather_comp;
850 		sg_comp_t *scatter_comp;
851 		uint8_t *in_buffer;
852 
853 		/* This falls under strict SG mode */
854 		offset_vaddr = m_vaddr;
855 		offset_dma = m_dma;
856 		size = OFF_CTRL_LEN + iv_len;
857 
858 		m_vaddr = (uint8_t *)m_vaddr + size;
859 		m_dma += size;
860 		m_size -= size;
861 
862 		opcode.s.major |= CPT_DMA_MODE;
863 
864 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
865 
866 		if (likely(iv_len)) {
867 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
868 						      + OFF_CTRL_LEN);
869 			uint64_t *src = fc_params->iv_buf;
870 			dest[0] = src[0];
871 			dest[1] = src[1];
872 		}
873 
874 		*(uint64_t *)offset_vaddr =
875 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
876 				((uint64_t)iv_offset << 8) |
877 				((uint64_t)auth_offset));
878 
879 		/* DPTR has SG list */
880 		in_buffer = m_vaddr;
881 		dptr_dma = m_dma;
882 
883 		((uint16_t *)in_buffer)[0] = 0;
884 		((uint16_t *)in_buffer)[1] = 0;
885 
886 		/* TODO Add error check if space will be sufficient */
887 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
888 
889 		/*
890 		 * Input Gather List
891 		 */
892 
893 		i = 0;
894 
895 		/* Offset control word that includes iv */
896 		i = fill_sg_comp(gather_comp, i, offset_dma,
897 				 OFF_CTRL_LEN + iv_len);
898 
899 		/* Add input data */
900 		size = inputlen - iv_len;
901 		if (likely(size)) {
902 			uint32_t aad_offset = aad_len ? passthrough_len : 0;
903 
904 			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
905 				i = fill_sg_comp_from_buf_min(gather_comp, i,
906 							      fc_params->bufs,
907 							      &size);
908 			} else {
909 				i = fill_sg_comp_from_iov(gather_comp, i,
910 							  fc_params->src_iov,
911 							  0, &size,
912 							  aad_buf, aad_offset);
913 			}
914 
915 			if (unlikely(size)) {
916 				CPT_LOG_DP_ERR("Insufficient buffer space,"
917 					       " size %d needed", size);
918 				return;
919 			}
920 		}
921 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
922 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
923 
924 		/*
925 		 * Output Scatter list
926 		 */
927 		i = 0;
928 		scatter_comp =
929 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
930 
931 		/* Add IV */
932 		if (likely(iv_len)) {
933 			i = fill_sg_comp(scatter_comp, i,
934 					 offset_dma + OFF_CTRL_LEN,
935 					 iv_len);
936 		}
937 
938 		/* output data or output data + digest*/
939 		if (unlikely(flags & VALID_MAC_BUF)) {
940 			size = outputlen - iv_len - mac_len;
941 			if (size) {
942 				uint32_t aad_offset =
943 					aad_len ? passthrough_len : 0;
944 
945 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
946 					i = fill_sg_comp_from_buf_min(
947 							scatter_comp,
948 							i,
949 							fc_params->bufs,
950 							&size);
951 				} else {
952 					i = fill_sg_comp_from_iov(scatter_comp,
953 							i,
954 							fc_params->dst_iov,
955 							0,
956 							&size,
957 							aad_buf,
958 							aad_offset);
959 				}
960 				if (unlikely(size)) {
961 					CPT_LOG_DP_ERR("Insufficient buffer"
962 						       " space, size %d needed",
963 						       size);
964 					return;
965 				}
966 			}
967 			/* mac_data */
968 			if (mac_len) {
969 				i = fill_sg_comp_from_buf(scatter_comp, i,
970 							  &fc_params->mac_buf);
971 			}
972 		} else {
973 			/* Output including mac */
974 			size = outputlen - iv_len;
975 			if (likely(size)) {
976 				uint32_t aad_offset =
977 					aad_len ? passthrough_len : 0;
978 
979 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
980 					i = fill_sg_comp_from_buf_min(
981 							scatter_comp,
982 							i,
983 							fc_params->bufs,
984 							&size);
985 				} else {
986 					i = fill_sg_comp_from_iov(scatter_comp,
987 							i,
988 							fc_params->dst_iov,
989 							0,
990 							&size,
991 							aad_buf,
992 							aad_offset);
993 				}
994 				if (unlikely(size)) {
995 					CPT_LOG_DP_ERR("Insufficient buffer"
996 						       " space, size %d needed",
997 						       size);
998 					return;
999 				}
1000 			}
1001 		}
1002 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1003 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1004 
1005 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1006 
1007 		/* This is DPTR len incase of SG mode */
1008 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1009 
1010 		m_vaddr = (uint8_t *)m_vaddr + size;
1011 		m_dma += size;
1012 		m_size -= size;
1013 
1014 		/* cpt alternate completion address saved earlier */
1015 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1016 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1017 		rptr_dma = c_dma - 8;
1018 
1019 		req->ist.ei1 = dptr_dma;
1020 		req->ist.ei2 = rptr_dma;
1021 	}
1022 
1023 	/* First 16-bit swap then 64-bit swap */
1024 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1025 	 * to eliminate all the swapping
1026 	 */
1027 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1028 
1029 	ctx_dma = fc_params->ctx_buf.dma_addr +
1030 		offsetof(struct cpt_ctx, fctx);
1031 	/* vq command w3 */
1032 	vq_cmd_w3.u64 = 0;
1033 	vq_cmd_w3.s.grp = 0;
1034 	vq_cmd_w3.s.cptr = ctx_dma;
1035 
1036 	/* 16 byte aligned cpt res address */
1037 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1038 	*req->completion_addr = COMPLETION_CODE_INIT;
1039 	req->comp_baddr  = c_dma;
1040 
1041 	/* Fill microcode part of instruction */
1042 	req->ist.ei0 = vq_cmd_w0.u64;
1043 	req->ist.ei3 = vq_cmd_w3.u64;
1044 
1045 	req->op  = op;
1046 
1047 	*prep_req = req;
1048 	return;
1049 }
1050 
1051 static __rte_always_inline void
1052 cpt_dec_hmac_prep(uint32_t flags,
1053 		  uint64_t d_offs,
1054 		  uint64_t d_lens,
1055 		  fc_params_t *fc_params,
1056 		  void *op,
1057 		  void **prep_req)
1058 {
1059 	uint32_t iv_offset = 0, size;
1060 	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1061 	struct cpt_ctx *cpt_ctx;
1062 	int32_t hash_type, mac_len, m_size;
1063 	uint8_t iv_len = 16;
1064 	struct cpt_request_info *req;
1065 	buf_ptr_t *meta_p, *aad_buf = NULL;
1066 	uint32_t encr_offset, auth_offset;
1067 	uint32_t encr_data_len, auth_data_len, aad_len = 0;
1068 	uint32_t passthrough_len = 0;
1069 	void *m_vaddr, *offset_vaddr;
1070 	uint64_t m_dma, offset_dma, ctx_dma;
1071 	opcode_info_t opcode;
1072 	vq_cmd_word0_t vq_cmd_w0;
1073 	vq_cmd_word3_t vq_cmd_w3;
1074 	void *c_vaddr;
1075 	uint64_t c_dma;
1076 
1077 	meta_p = &fc_params->meta_buf;
1078 	m_vaddr = meta_p->vaddr;
1079 	m_dma = meta_p->dma_addr;
1080 	m_size = meta_p->size;
1081 
1082 	encr_offset = ENCR_OFFSET(d_offs);
1083 	auth_offset = AUTH_OFFSET(d_offs);
1084 	encr_data_len = ENCR_DLEN(d_lens);
1085 	auth_data_len = AUTH_DLEN(d_lens);
1086 
1087 	if (unlikely(flags & VALID_AAD_BUF)) {
1088 		/*
1089 		 * We dont support both aad
1090 		 * and auth data separately
1091 		 */
1092 		auth_data_len = 0;
1093 		auth_offset = 0;
1094 		aad_len = fc_params->aad_buf.size;
1095 		aad_buf = &fc_params->aad_buf;
1096 	}
1097 
1098 	cpt_ctx = fc_params->ctx_buf.vaddr;
1099 	hash_type = cpt_ctx->hash_type;
1100 	mac_len = cpt_ctx->mac_len;
1101 
1102 	if (hash_type == GMAC_TYPE)
1103 		encr_data_len = 0;
1104 
1105 	if (unlikely(!(flags & VALID_IV_BUF))) {
1106 		iv_len = 0;
1107 		iv_offset = ENCR_IV_OFFSET(d_offs);
1108 	}
1109 
1110 	if (unlikely(flags & VALID_AAD_BUF)) {
1111 		/*
1112 		 * When AAD is given, data above encr_offset is pass through
1113 		 * Since AAD is given as separate pointer and not as offset,
1114 		 * this is a special case as we need to fragment input data
1115 		 * into passthrough + encr_data and then insert AAD in between.
1116 		 */
1117 		if (hash_type != GMAC_TYPE) {
1118 			passthrough_len = encr_offset;
1119 			auth_offset = passthrough_len + iv_len;
1120 			encr_offset = passthrough_len + aad_len + iv_len;
1121 			auth_data_len = aad_len + encr_data_len;
1122 		} else {
1123 			passthrough_len = 16 + aad_len;
1124 			auth_offset = passthrough_len + iv_len;
1125 			auth_data_len = aad_len;
1126 		}
1127 	} else {
1128 		encr_offset += iv_len;
1129 		auth_offset += iv_len;
1130 	}
1131 
1132 	/*
1133 	 * Save initial space that followed app data for completion code &
1134 	 * alternate completion code to fall in same cache line as app data
1135 	 */
1136 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1137 	m_dma += COMPLETION_CODE_SIZE;
1138 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1139 	       (uint8_t *)m_vaddr;
1140 	c_vaddr = (uint8_t *)m_vaddr + size;
1141 	c_dma = m_dma + size;
1142 	size += sizeof(cpt_res_s_t);
1143 
1144 	m_vaddr = (uint8_t *)m_vaddr + size;
1145 	m_dma += size;
1146 	m_size -= size;
1147 
1148 	/* start cpt request info structure at 8 byte alignment */
1149 	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1150 		(uint8_t *)m_vaddr;
1151 
1152 	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1153 
1154 	size += sizeof(struct cpt_request_info);
1155 	m_vaddr = (uint8_t *)m_vaddr + size;
1156 	m_dma += size;
1157 	m_size -= size;
1158 
1159 	/* Decryption */
1160 	opcode.s.major = CPT_MAJOR_OP_FC;
1161 	opcode.s.minor = 1;
1162 
1163 	enc_dlen = encr_offset + encr_data_len;
1164 	auth_dlen = auth_offset + auth_data_len;
1165 
1166 	if (auth_dlen > enc_dlen) {
1167 		inputlen = auth_dlen + mac_len;
1168 		outputlen = auth_dlen;
1169 	} else {
1170 		inputlen = enc_dlen + mac_len;
1171 		outputlen = enc_dlen;
1172 	}
1173 
1174 	if (hash_type == GMAC_TYPE)
1175 		encr_offset = inputlen;
1176 
1177 	vq_cmd_w0.u64 = 0;
1178 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1179 	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
1180 
1181 	/*
1182 	 * In 83XX since we have a limitation of
1183 	 * IV & Offset control word not part of instruction
1184 	 * and need to be part of Data Buffer, we check if
1185 	 * head room is there and then only do the Direct mode processing
1186 	 */
1187 	if (likely((flags & SINGLE_BUF_INPLACE) &&
1188 		   (flags & SINGLE_BUF_HEADTAILROOM))) {
1189 		void *dm_vaddr = fc_params->bufs[0].vaddr;
1190 		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1191 		/*
1192 		 * This flag indicates that there is 24 bytes head room and
1193 		 * 8 bytes tail room available, so that we get to do
1194 		 * DIRECT MODE with limitation
1195 		 */
1196 
1197 		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1198 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1199 		req->ist.ei1 = offset_dma;
1200 
1201 		/* RPTR should just exclude offset control word */
1202 		req->ist.ei2 = dm_dma_addr - iv_len;
1203 
1204 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1205 					outputlen - iv_len);
1206 		/* since this is decryption,
1207 		 * don't touch the content of
1208 		 * alternate ccode space as it contains
1209 		 * hmac.
1210 		 */
1211 
1212 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1213 
1214 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1215 
1216 		if (likely(iv_len)) {
1217 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1218 						      OFF_CTRL_LEN);
1219 			uint64_t *src = fc_params->iv_buf;
1220 			dest[0] = src[0];
1221 			dest[1] = src[1];
1222 		}
1223 
1224 		*(uint64_t *)offset_vaddr =
1225 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1226 				((uint64_t)iv_offset << 8) |
1227 				((uint64_t)auth_offset));
1228 
1229 	} else {
1230 		uint64_t dptr_dma, rptr_dma;
1231 		uint32_t g_size_bytes, s_size_bytes;
1232 		sg_comp_t *gather_comp;
1233 		sg_comp_t *scatter_comp;
1234 		uint8_t *in_buffer;
1235 		uint8_t i = 0;
1236 
1237 		/* This falls under strict SG mode */
1238 		offset_vaddr = m_vaddr;
1239 		offset_dma = m_dma;
1240 		size = OFF_CTRL_LEN + iv_len;
1241 
1242 		m_vaddr = (uint8_t *)m_vaddr + size;
1243 		m_dma += size;
1244 		m_size -= size;
1245 
1246 		opcode.s.major |= CPT_DMA_MODE;
1247 
1248 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1249 
1250 		if (likely(iv_len)) {
1251 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1252 						      OFF_CTRL_LEN);
1253 			uint64_t *src = fc_params->iv_buf;
1254 			dest[0] = src[0];
1255 			dest[1] = src[1];
1256 		}
1257 
1258 		*(uint64_t *)offset_vaddr =
1259 			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1260 				((uint64_t)iv_offset << 8) |
1261 				((uint64_t)auth_offset));
1262 
1263 		/* DPTR has SG list */
1264 		in_buffer = m_vaddr;
1265 		dptr_dma = m_dma;
1266 
1267 		((uint16_t *)in_buffer)[0] = 0;
1268 		((uint16_t *)in_buffer)[1] = 0;
1269 
1270 		/* TODO Add error check if space will be sufficient */
1271 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1272 
1273 		/*
1274 		 * Input Gather List
1275 		 */
1276 		i = 0;
1277 
1278 		/* Offset control word that includes iv */
1279 		i = fill_sg_comp(gather_comp, i, offset_dma,
1280 				 OFF_CTRL_LEN + iv_len);
1281 
1282 		/* Add input data */
1283 		if (flags & VALID_MAC_BUF) {
1284 			size = inputlen - iv_len - mac_len;
1285 			if (size) {
1286 				/* input data only */
1287 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1288 					i = fill_sg_comp_from_buf_min(
1289 							gather_comp, i,
1290 							fc_params->bufs,
1291 							&size);
1292 				} else {
1293 					uint32_t aad_offset = aad_len ?
1294 						passthrough_len : 0;
1295 
1296 					i = fill_sg_comp_from_iov(gather_comp,
1297 							i,
1298 							fc_params->src_iov,
1299 							0, &size,
1300 							aad_buf,
1301 							aad_offset);
1302 				}
1303 				if (unlikely(size)) {
1304 					CPT_LOG_DP_ERR("Insufficient buffer"
1305 						       " space, size %d needed",
1306 						       size);
1307 					return;
1308 				}
1309 			}
1310 
1311 			/* mac data */
1312 			if (mac_len) {
1313 				i = fill_sg_comp_from_buf(gather_comp, i,
1314 							  &fc_params->mac_buf);
1315 			}
1316 		} else {
1317 			/* input data + mac */
1318 			size = inputlen - iv_len;
1319 			if (size) {
1320 				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1321 					i = fill_sg_comp_from_buf_min(
1322 							gather_comp, i,
1323 							fc_params->bufs,
1324 							&size);
1325 				} else {
1326 					uint32_t aad_offset = aad_len ?
1327 						passthrough_len : 0;
1328 
1329 					if (unlikely(!fc_params->src_iov)) {
1330 						CPT_LOG_DP_ERR("Bad input args");
1331 						return;
1332 					}
1333 
1334 					i = fill_sg_comp_from_iov(
1335 							gather_comp, i,
1336 							fc_params->src_iov,
1337 							0, &size,
1338 							aad_buf,
1339 							aad_offset);
1340 				}
1341 
1342 				if (unlikely(size)) {
1343 					CPT_LOG_DP_ERR("Insufficient buffer"
1344 						       " space, size %d needed",
1345 						       size);
1346 					return;
1347 				}
1348 			}
1349 		}
1350 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1351 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1352 
1353 		/*
1354 		 * Output Scatter List
1355 		 */
1356 
1357 		i = 0;
1358 		scatter_comp =
1359 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1360 
1361 		/* Add iv */
1362 		if (iv_len) {
1363 			i = fill_sg_comp(scatter_comp, i,
1364 					 offset_dma + OFF_CTRL_LEN,
1365 					 iv_len);
1366 		}
1367 
1368 		/* Add output data */
1369 		size = outputlen - iv_len;
1370 		if (size) {
1371 			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1372 				/* handle single buffer here */
1373 				i = fill_sg_comp_from_buf_min(scatter_comp, i,
1374 							      fc_params->bufs,
1375 							      &size);
1376 			} else {
1377 				uint32_t aad_offset = aad_len ?
1378 					passthrough_len : 0;
1379 
1380 				if (unlikely(!fc_params->dst_iov)) {
1381 					CPT_LOG_DP_ERR("Bad input args");
1382 					return;
1383 				}
1384 
1385 				i = fill_sg_comp_from_iov(scatter_comp, i,
1386 							  fc_params->dst_iov, 0,
1387 							  &size, aad_buf,
1388 							  aad_offset);
1389 			}
1390 
1391 			if (unlikely(size)) {
1392 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1393 					       " size %d needed", size);
1394 				return;
1395 			}
1396 		}
1397 
1398 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1399 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1400 
1401 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1402 
1403 		/* This is DPTR len incase of SG mode */
1404 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1405 
1406 		m_vaddr = (uint8_t *)m_vaddr + size;
1407 		m_dma += size;
1408 		m_size -= size;
1409 
1410 		/* cpt alternate completion address saved earlier */
1411 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1412 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1413 		rptr_dma = c_dma - 8;
1414 		size += COMPLETION_CODE_SIZE;
1415 
1416 		req->ist.ei1 = dptr_dma;
1417 		req->ist.ei2 = rptr_dma;
1418 	}
1419 
1420 	/* First 16-bit swap then 64-bit swap */
1421 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1422 	 * to eliminate all the swapping
1423 	 */
1424 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1425 
1426 	ctx_dma = fc_params->ctx_buf.dma_addr +
1427 		offsetof(struct cpt_ctx, fctx);
1428 	/* vq command w3 */
1429 	vq_cmd_w3.u64 = 0;
1430 	vq_cmd_w3.s.grp = 0;
1431 	vq_cmd_w3.s.cptr = ctx_dma;
1432 
1433 	/* 16 byte aligned cpt res address */
1434 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1435 	*req->completion_addr = COMPLETION_CODE_INIT;
1436 	req->comp_baddr  = c_dma;
1437 
1438 	/* Fill microcode part of instruction */
1439 	req->ist.ei0 = vq_cmd_w0.u64;
1440 	req->ist.ei3 = vq_cmd_w3.u64;
1441 
1442 	req->op = op;
1443 
1444 	*prep_req = req;
1445 	return;
1446 }
1447 
1448 static __rte_always_inline void
1449 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1450 			uint64_t d_offs,
1451 			uint64_t d_lens,
1452 			fc_params_t *params,
1453 			void *op,
1454 			void **prep_req)
1455 {
1456 	uint32_t size;
1457 	int32_t inputlen, outputlen;
1458 	struct cpt_ctx *cpt_ctx;
1459 	uint32_t mac_len = 0;
1460 	uint8_t snow3g, j;
1461 	struct cpt_request_info *req;
1462 	buf_ptr_t *buf_p;
1463 	uint32_t encr_offset = 0, auth_offset = 0;
1464 	uint32_t encr_data_len = 0, auth_data_len = 0;
1465 	int flags, iv_len = 16, m_size;
1466 	void *m_vaddr, *c_vaddr;
1467 	uint64_t m_dma, c_dma, offset_ctrl;
1468 	uint64_t *offset_vaddr, offset_dma;
1469 	uint32_t *iv_s, iv[4];
1470 	vq_cmd_word0_t vq_cmd_w0;
1471 	vq_cmd_word3_t vq_cmd_w3;
1472 	opcode_info_t opcode;
1473 
1474 	buf_p = &params->meta_buf;
1475 	m_vaddr = buf_p->vaddr;
1476 	m_dma = buf_p->dma_addr;
1477 	m_size = buf_p->size;
1478 
1479 	cpt_ctx = params->ctx_buf.vaddr;
1480 	flags = cpt_ctx->zsk_flags;
1481 	mac_len = cpt_ctx->mac_len;
1482 	snow3g = cpt_ctx->snow3g;
1483 
1484 	/*
1485 	 * Save initial space that followed app data for completion code &
1486 	 * alternate completion code to fall in same cache line as app data
1487 	 */
1488 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1489 	m_dma += COMPLETION_CODE_SIZE;
1490 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1491 		(uint8_t *)m_vaddr;
1492 
1493 	c_vaddr = (uint8_t *)m_vaddr + size;
1494 	c_dma = m_dma + size;
1495 	size += sizeof(cpt_res_s_t);
1496 
1497 	m_vaddr = (uint8_t *)m_vaddr + size;
1498 	m_dma += size;
1499 	m_size -= size;
1500 
1501 	/* Reserve memory for cpt request info */
1502 	req = m_vaddr;
1503 
1504 	size = sizeof(struct cpt_request_info);
1505 	m_vaddr = (uint8_t *)m_vaddr + size;
1506 	m_dma += size;
1507 	m_size -= size;
1508 
1509 	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1510 
1511 	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1512 	opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1513 			  (0 << 3) | (flags & 0x7));
1514 
1515 	if (flags == 0x1) {
1516 		/*
1517 		 * Microcode expects offsets in bytes
1518 		 * TODO: Rounding off
1519 		 */
1520 		auth_data_len = AUTH_DLEN(d_lens);
1521 
1522 		/* EIA3 or UIA2 */
1523 		auth_offset = AUTH_OFFSET(d_offs);
1524 		auth_offset = auth_offset / 8;
1525 
1526 		/* consider iv len */
1527 		auth_offset += iv_len;
1528 
1529 		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1530 		outputlen = mac_len;
1531 
1532 		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1533 
1534 	} else {
1535 		/* EEA3 or UEA2 */
1536 		/*
1537 		 * Microcode expects offsets in bytes
1538 		 * TODO: Rounding off
1539 		 */
1540 		encr_data_len = ENCR_DLEN(d_lens);
1541 
1542 		encr_offset = ENCR_OFFSET(d_offs);
1543 		encr_offset = encr_offset / 8;
1544 		/* consider iv len */
1545 		encr_offset += iv_len;
1546 
1547 		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1548 		outputlen = inputlen;
1549 
1550 		/* iv offset is 0 */
1551 		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1552 	}
1553 
1554 	/* IV */
1555 	iv_s = (flags == 0x1) ? params->auth_iv_buf :
1556 		params->iv_buf;
1557 
1558 	if (snow3g) {
1559 		/*
1560 		 * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
1561 		 * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
1562 		 */
1563 
1564 		for (j = 0; j < 4; j++)
1565 			iv[j] = iv_s[3 - j];
1566 	} else {
1567 		/* ZUC doesn't need a swap */
1568 		for (j = 0; j < 4; j++)
1569 			iv[j] = iv_s[j];
1570 	}
1571 
1572 	/*
1573 	 * GP op header, lengths are expected in bits.
1574 	 */
1575 	vq_cmd_w0.u64 = 0;
1576 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1577 	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
1578 
1579 	/*
1580 	 * In 83XX since we have a limitation of
1581 	 * IV & Offset control word not part of instruction
1582 	 * and need to be part of Data Buffer, we check if
1583 	 * head room is there and then only do the Direct mode processing
1584 	 */
1585 	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1586 		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1587 		void *dm_vaddr = params->bufs[0].vaddr;
1588 		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1589 		/*
1590 		 * This flag indicates that there is 24 bytes head room and
1591 		 * 8 bytes tail room available, so that we get to do
1592 		 * DIRECT MODE with limitation
1593 		 */
1594 
1595 		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1596 					    OFF_CTRL_LEN - iv_len);
1597 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1598 
1599 		/* DPTR */
1600 		req->ist.ei1 = offset_dma;
1601 		/* RPTR should just exclude offset control word */
1602 		req->ist.ei2 = dm_dma_addr - iv_len;
1603 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1604 						    + outputlen - iv_len);
1605 
1606 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1607 
1608 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1609 
1610 		if (likely(iv_len)) {
1611 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1612 						      + OFF_CTRL_LEN);
1613 			memcpy(iv_d, iv, 16);
1614 		}
1615 
1616 		*offset_vaddr = offset_ctrl;
1617 	} else {
1618 		uint32_t i, g_size_bytes, s_size_bytes;
1619 		uint64_t dptr_dma, rptr_dma;
1620 		sg_comp_t *gather_comp;
1621 		sg_comp_t *scatter_comp;
1622 		uint8_t *in_buffer;
1623 		uint32_t *iv_d;
1624 
1625 		/* save space for iv */
1626 		offset_vaddr = m_vaddr;
1627 		offset_dma = m_dma;
1628 
1629 		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1630 		m_dma += OFF_CTRL_LEN + iv_len;
1631 		m_size -= OFF_CTRL_LEN + iv_len;
1632 
1633 		opcode.s.major |= CPT_DMA_MODE;
1634 
1635 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1636 
1637 		/* DPTR has SG list */
1638 		in_buffer = m_vaddr;
1639 		dptr_dma = m_dma;
1640 
1641 		((uint16_t *)in_buffer)[0] = 0;
1642 		((uint16_t *)in_buffer)[1] = 0;
1643 
1644 		/* TODO Add error check if space will be sufficient */
1645 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1646 
1647 		/*
1648 		 * Input Gather List
1649 		 */
1650 		i = 0;
1651 
1652 		/* Offset control word followed by iv */
1653 
1654 		i = fill_sg_comp(gather_comp, i, offset_dma,
1655 				 OFF_CTRL_LEN + iv_len);
1656 
1657 		/* iv offset is 0 */
1658 		*offset_vaddr = offset_ctrl;
1659 
1660 		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1661 		memcpy(iv_d, iv, 16);
1662 
1663 		/* input data */
1664 		size = inputlen - iv_len;
1665 		if (size) {
1666 			i = fill_sg_comp_from_iov(gather_comp, i,
1667 						  params->src_iov,
1668 						  0, &size, NULL, 0);
1669 			if (unlikely(size)) {
1670 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1671 					       " size %d needed", size);
1672 				return;
1673 			}
1674 		}
1675 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1676 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1677 
1678 		/*
1679 		 * Output Scatter List
1680 		 */
1681 
1682 		i = 0;
1683 		scatter_comp =
1684 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1685 
1686 		if (flags == 0x1) {
1687 			/* IV in SLIST only for EEA3 & UEA2 */
1688 			iv_len = 0;
1689 		}
1690 
1691 		if (iv_len) {
1692 			i = fill_sg_comp(scatter_comp, i,
1693 					 offset_dma + OFF_CTRL_LEN, iv_len);
1694 		}
1695 
1696 		/* Add output data */
1697 		if (req_flags & VALID_MAC_BUF) {
1698 			size = outputlen - iv_len - mac_len;
1699 			if (size) {
1700 				i = fill_sg_comp_from_iov(scatter_comp, i,
1701 							  params->dst_iov, 0,
1702 							  &size, NULL, 0);
1703 
1704 				if (unlikely(size)) {
1705 					CPT_LOG_DP_ERR("Insufficient buffer space,"
1706 						       " size %d needed", size);
1707 					return;
1708 				}
1709 			}
1710 
1711 			/* mac data */
1712 			if (mac_len) {
1713 				i = fill_sg_comp_from_buf(scatter_comp, i,
1714 							  &params->mac_buf);
1715 			}
1716 		} else {
1717 			/* Output including mac */
1718 			size = outputlen - iv_len;
1719 			if (size) {
1720 				i = fill_sg_comp_from_iov(scatter_comp, i,
1721 							  params->dst_iov, 0,
1722 							  &size, NULL, 0);
1723 
1724 				if (unlikely(size)) {
1725 					CPT_LOG_DP_ERR("Insufficient buffer space,"
1726 						       " size %d needed", size);
1727 					return;
1728 				}
1729 			}
1730 		}
1731 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1732 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1733 
1734 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1735 
1736 		/* This is DPTR len incase of SG mode */
1737 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1738 
1739 		m_vaddr = (uint8_t *)m_vaddr + size;
1740 		m_dma += size;
1741 		m_size -= size;
1742 
1743 		/* cpt alternate completion address saved earlier */
1744 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1745 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1746 		rptr_dma = c_dma - 8;
1747 
1748 		req->ist.ei1 = dptr_dma;
1749 		req->ist.ei2 = rptr_dma;
1750 	}
1751 
1752 	/* First 16-bit swap then 64-bit swap */
1753 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1754 	 * to eliminate all the swapping
1755 	 */
1756 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1757 
1758 	/* vq command w3 */
1759 	vq_cmd_w3.u64 = 0;
1760 	vq_cmd_w3.s.grp = 0;
1761 	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1762 		offsetof(struct cpt_ctx, zs_ctx);
1763 
1764 	/* 16 byte aligned cpt res address */
1765 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1766 	*req->completion_addr = COMPLETION_CODE_INIT;
1767 	req->comp_baddr  = c_dma;
1768 
1769 	/* Fill microcode part of instruction */
1770 	req->ist.ei0 = vq_cmd_w0.u64;
1771 	req->ist.ei3 = vq_cmd_w3.u64;
1772 
1773 	req->op = op;
1774 
1775 	*prep_req = req;
1776 	return;
1777 }
1778 
1779 static __rte_always_inline void
1780 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1781 			uint64_t d_offs,
1782 			uint64_t d_lens,
1783 			fc_params_t *params,
1784 			void *op,
1785 			void **prep_req)
1786 {
1787 	uint32_t size;
1788 	int32_t inputlen = 0, outputlen;
1789 	struct cpt_ctx *cpt_ctx;
1790 	uint8_t snow3g, iv_len = 16;
1791 	struct cpt_request_info *req;
1792 	buf_ptr_t *buf_p;
1793 	uint32_t encr_offset;
1794 	uint32_t encr_data_len;
1795 	int flags, m_size;
1796 	void *m_vaddr, *c_vaddr;
1797 	uint64_t m_dma, c_dma;
1798 	uint64_t *offset_vaddr, offset_dma;
1799 	uint32_t *iv_s, iv[4], j;
1800 	vq_cmd_word0_t vq_cmd_w0;
1801 	vq_cmd_word3_t vq_cmd_w3;
1802 	opcode_info_t opcode;
1803 
1804 	buf_p = &params->meta_buf;
1805 	m_vaddr = buf_p->vaddr;
1806 	m_dma = buf_p->dma_addr;
1807 	m_size = buf_p->size;
1808 
1809 	/*
1810 	 * Microcode expects offsets in bytes
1811 	 * TODO: Rounding off
1812 	 */
1813 	encr_offset = ENCR_OFFSET(d_offs) / 8;
1814 	encr_data_len = ENCR_DLEN(d_lens);
1815 
1816 	cpt_ctx = params->ctx_buf.vaddr;
1817 	flags = cpt_ctx->zsk_flags;
1818 	snow3g = cpt_ctx->snow3g;
1819 	/*
1820 	 * Save initial space that followed app data for completion code &
1821 	 * alternate completion code to fall in same cache line as app data
1822 	 */
1823 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1824 	m_dma += COMPLETION_CODE_SIZE;
1825 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1826 		(uint8_t *)m_vaddr;
1827 
1828 	c_vaddr = (uint8_t *)m_vaddr + size;
1829 	c_dma = m_dma + size;
1830 	size += sizeof(cpt_res_s_t);
1831 
1832 	m_vaddr = (uint8_t *)m_vaddr + size;
1833 	m_dma += size;
1834 	m_size -= size;
1835 
1836 	/* Reserve memory for cpt request info */
1837 	req = m_vaddr;
1838 
1839 	size = sizeof(struct cpt_request_info);
1840 	m_vaddr = (uint8_t *)m_vaddr + size;
1841 	m_dma += size;
1842 	m_size -= size;
1843 
1844 	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1845 
1846 	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1847 	opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
1848 			  (0 << 3) | (flags & 0x7));
1849 
1850 	/* consider iv len */
1851 	encr_offset += iv_len;
1852 
1853 	inputlen = encr_offset +
1854 		(RTE_ALIGN(encr_data_len, 8) / 8);
1855 	outputlen = inputlen;
1856 
1857 	/* IV */
1858 	iv_s = params->iv_buf;
1859 	if (snow3g) {
1860 		/*
1861 		 * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
1862 		 * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
1863 		 */
1864 
1865 		for (j = 0; j < 4; j++)
1866 			iv[j] = iv_s[3 - j];
1867 	} else {
1868 		/* ZUC doesn't need a swap */
1869 		for (j = 0; j < 4; j++)
1870 			iv[j] = iv_s[j];
1871 	}
1872 
1873 	/*
1874 	 * GP op header, lengths are expected in bits.
1875 	 */
1876 	vq_cmd_w0.u64 = 0;
1877 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1878 
1879 	/*
1880 	 * In 83XX since we have a limitation of
1881 	 * IV & Offset control word not part of instruction
1882 	 * and need to be part of Data Buffer, we check if
1883 	 * head room is there and then only do the Direct mode processing
1884 	 */
1885 	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1886 		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1887 		void *dm_vaddr = params->bufs[0].vaddr;
1888 		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1889 		/*
1890 		 * This flag indicates that there is 24 bytes head room and
1891 		 * 8 bytes tail room available, so that we get to do
1892 		 * DIRECT MODE with limitation
1893 		 */
1894 
1895 		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1896 					    OFF_CTRL_LEN - iv_len);
1897 		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1898 
1899 		/* DPTR */
1900 		req->ist.ei1 = offset_dma;
1901 		/* RPTR should just exclude offset control word */
1902 		req->ist.ei2 = dm_dma_addr - iv_len;
1903 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1904 						    + outputlen - iv_len);
1905 
1906 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1907 
1908 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1909 
1910 		if (likely(iv_len)) {
1911 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1912 						      + OFF_CTRL_LEN);
1913 			memcpy(iv_d, iv, 16);
1914 		}
1915 
1916 		/* iv offset is 0 */
1917 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1918 	} else {
1919 		uint32_t i, g_size_bytes, s_size_bytes;
1920 		uint64_t dptr_dma, rptr_dma;
1921 		sg_comp_t *gather_comp;
1922 		sg_comp_t *scatter_comp;
1923 		uint8_t *in_buffer;
1924 		uint32_t *iv_d;
1925 
1926 		/* save space for offset and iv... */
1927 		offset_vaddr = m_vaddr;
1928 		offset_dma = m_dma;
1929 
1930 		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1931 		m_dma += OFF_CTRL_LEN + iv_len;
1932 		m_size -= OFF_CTRL_LEN + iv_len;
1933 
1934 		opcode.s.major |= CPT_DMA_MODE;
1935 
1936 		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1937 
1938 		/* DPTR has SG list */
1939 		in_buffer = m_vaddr;
1940 		dptr_dma = m_dma;
1941 
1942 		((uint16_t *)in_buffer)[0] = 0;
1943 		((uint16_t *)in_buffer)[1] = 0;
1944 
1945 		/* TODO Add error check if space will be sufficient */
1946 		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1947 
1948 		/*
1949 		 * Input Gather List
1950 		 */
1951 		i = 0;
1952 
1953 		/* Offset control word */
1954 
1955 		/* iv offset is 0 */
1956 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1957 
1958 		i = fill_sg_comp(gather_comp, i, offset_dma,
1959 				 OFF_CTRL_LEN + iv_len);
1960 
1961 		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1962 		memcpy(iv_d, iv, 16);
1963 
1964 		/* Add input data */
1965 		size = inputlen - iv_len;
1966 		if (size) {
1967 			i = fill_sg_comp_from_iov(gather_comp, i,
1968 						  params->src_iov,
1969 						  0, &size, NULL, 0);
1970 			if (unlikely(size)) {
1971 				CPT_LOG_DP_ERR("Insufficient buffer space,"
1972 					       " size %d needed", size);
1973 				return;
1974 			}
1975 		}
1976 		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1977 		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1978 
1979 		/*
1980 		 * Output Scatter List
1981 		 */
1982 
1983 		i = 0;
1984 		scatter_comp =
1985 			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1986 
1987 		/* IV */
1988 		i = fill_sg_comp(scatter_comp, i,
1989 				 offset_dma + OFF_CTRL_LEN,
1990 				 iv_len);
1991 
1992 		/* Add output data */
1993 		size = outputlen - iv_len;
1994 		if (size) {
1995 			i = fill_sg_comp_from_iov(scatter_comp, i,
1996 						  params->dst_iov, 0,
1997 						  &size, NULL, 0);
1998 
1999 			if (unlikely(size)) {
2000 				CPT_LOG_DP_ERR("Insufficient buffer space,"
2001 					       " size %d needed", size);
2002 				return;
2003 			}
2004 		}
2005 		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2006 		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2007 
2008 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2009 
2010 		/* This is DPTR len incase of SG mode */
2011 		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2012 
2013 		m_vaddr = (uint8_t *)m_vaddr + size;
2014 		m_dma += size;
2015 		m_size -= size;
2016 
2017 		/* cpt alternate completion address saved earlier */
2018 		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2019 		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2020 		rptr_dma = c_dma - 8;
2021 
2022 		req->ist.ei1 = dptr_dma;
2023 		req->ist.ei2 = rptr_dma;
2024 	}
2025 
2026 	/* First 16-bit swap then 64-bit swap */
2027 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2028 	 * to eliminate all the swapping
2029 	 */
2030 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2031 
2032 	/* vq command w3 */
2033 	vq_cmd_w3.u64 = 0;
2034 	vq_cmd_w3.s.grp = 0;
2035 	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2036 		offsetof(struct cpt_ctx, zs_ctx);
2037 
2038 	/* 16 byte aligned cpt res address */
2039 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2040 	*req->completion_addr = COMPLETION_CODE_INIT;
2041 	req->comp_baddr  = c_dma;
2042 
2043 	/* Fill microcode part of instruction */
2044 	req->ist.ei0 = vq_cmd_w0.u64;
2045 	req->ist.ei3 = vq_cmd_w3.u64;
2046 
2047 	req->op = op;
2048 
2049 	*prep_req = req;
2050 	return;
2051 }
2052 
2053 static __rte_always_inline void
2054 cpt_kasumi_enc_prep(uint32_t req_flags,
2055 		    uint64_t d_offs,
2056 		    uint64_t d_lens,
2057 		    fc_params_t *params,
2058 		    void *op,
2059 		    void **prep_req)
2060 {
2061 	uint32_t size;
2062 	int32_t inputlen = 0, outputlen = 0;
2063 	struct cpt_ctx *cpt_ctx;
2064 	uint32_t mac_len = 0;
2065 	uint8_t i = 0;
2066 	struct cpt_request_info *req;
2067 	buf_ptr_t *buf_p;
2068 	uint32_t encr_offset, auth_offset;
2069 	uint32_t encr_data_len, auth_data_len;
2070 	int flags, m_size;
2071 	uint8_t *iv_s, *iv_d, iv_len = 8;
2072 	uint8_t dir = 0;
2073 	void *m_vaddr, *c_vaddr;
2074 	uint64_t m_dma, c_dma;
2075 	uint64_t *offset_vaddr, offset_dma;
2076 	vq_cmd_word0_t vq_cmd_w0;
2077 	vq_cmd_word3_t vq_cmd_w3;
2078 	opcode_info_t opcode;
2079 	uint8_t *in_buffer;
2080 	uint32_t g_size_bytes, s_size_bytes;
2081 	uint64_t dptr_dma, rptr_dma;
2082 	sg_comp_t *gather_comp;
2083 	sg_comp_t *scatter_comp;
2084 
2085 	buf_p = &params->meta_buf;
2086 	m_vaddr = buf_p->vaddr;
2087 	m_dma = buf_p->dma_addr;
2088 	m_size = buf_p->size;
2089 
2090 	encr_offset = ENCR_OFFSET(d_offs) / 8;
2091 	auth_offset = AUTH_OFFSET(d_offs) / 8;
2092 	encr_data_len = ENCR_DLEN(d_lens);
2093 	auth_data_len = AUTH_DLEN(d_lens);
2094 
2095 	cpt_ctx = params->ctx_buf.vaddr;
2096 	flags = cpt_ctx->zsk_flags;
2097 	mac_len = cpt_ctx->mac_len;
2098 
2099 	if (flags == 0x0)
2100 		iv_s = params->iv_buf;
2101 	else
2102 		iv_s = params->auth_iv_buf;
2103 
2104 	dir = iv_s[8] & 0x1;
2105 
2106 	/*
2107 	 * Save initial space that followed app data for completion code &
2108 	 * alternate completion code to fall in same cache line as app data
2109 	 */
2110 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2111 	m_dma += COMPLETION_CODE_SIZE;
2112 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2113 		(uint8_t *)m_vaddr;
2114 
2115 	c_vaddr = (uint8_t *)m_vaddr + size;
2116 	c_dma = m_dma + size;
2117 	size += sizeof(cpt_res_s_t);
2118 
2119 	m_vaddr = (uint8_t *)m_vaddr + size;
2120 	m_dma += size;
2121 	m_size -= size;
2122 
2123 	/* Reserve memory for cpt request info */
2124 	req = m_vaddr;
2125 
2126 	size = sizeof(struct cpt_request_info);
2127 	m_vaddr = (uint8_t *)m_vaddr + size;
2128 	m_dma += size;
2129 	m_size -= size;
2130 
2131 	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2132 
2133 	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2134 	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2135 			  (dir << 4) | (0 << 3) | (flags & 0x7));
2136 
2137 	/*
2138 	 * GP op header, lengths are expected in bits.
2139 	 */
2140 	vq_cmd_w0.u64 = 0;
2141 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
2142 	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
2143 	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
2144 
2145 	/* consider iv len */
2146 	if (flags == 0x0) {
2147 		encr_offset += iv_len;
2148 		auth_offset += iv_len;
2149 	}
2150 
2151 	/* save space for offset ctrl and iv */
2152 	offset_vaddr = m_vaddr;
2153 	offset_dma = m_dma;
2154 
2155 	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2156 	m_dma += OFF_CTRL_LEN + iv_len;
2157 	m_size -= OFF_CTRL_LEN + iv_len;
2158 
2159 	/* DPTR has SG list */
2160 	in_buffer = m_vaddr;
2161 	dptr_dma = m_dma;
2162 
2163 	((uint16_t *)in_buffer)[0] = 0;
2164 	((uint16_t *)in_buffer)[1] = 0;
2165 
2166 	/* TODO Add error check if space will be sufficient */
2167 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2168 
2169 	/*
2170 	 * Input Gather List
2171 	 */
2172 	i = 0;
2173 
2174 	/* Offset control word followed by iv */
2175 
2176 	if (flags == 0x0) {
2177 		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2178 		outputlen = inputlen;
2179 		/* iv offset is 0 */
2180 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2181 	} else {
2182 		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2183 		outputlen = mac_len;
2184 		/* iv offset is 0 */
2185 		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2186 	}
2187 
2188 	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2189 
2190 	/* IV */
2191 	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2192 	memcpy(iv_d, iv_s, iv_len);
2193 
2194 	/* input data */
2195 	size = inputlen - iv_len;
2196 	if (size) {
2197 		i = fill_sg_comp_from_iov(gather_comp, i,
2198 					  params->src_iov, 0,
2199 					  &size, NULL, 0);
2200 
2201 		if (unlikely(size)) {
2202 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2203 				       " size %d needed", size);
2204 			return;
2205 		}
2206 	}
2207 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2208 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2209 
2210 	/*
2211 	 * Output Scatter List
2212 	 */
2213 
2214 	i = 0;
2215 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2216 
2217 	if (flags == 0x1) {
2218 		/* IV in SLIST only for F8 */
2219 		iv_len = 0;
2220 	}
2221 
2222 	/* IV */
2223 	if (iv_len) {
2224 		i = fill_sg_comp(scatter_comp, i,
2225 				 offset_dma + OFF_CTRL_LEN,
2226 				 iv_len);
2227 	}
2228 
2229 	/* Add output data */
2230 	if (req_flags & VALID_MAC_BUF) {
2231 		size = outputlen - iv_len - mac_len;
2232 		if (size) {
2233 			i = fill_sg_comp_from_iov(scatter_comp, i,
2234 						  params->dst_iov, 0,
2235 						  &size, NULL, 0);
2236 
2237 			if (unlikely(size)) {
2238 				CPT_LOG_DP_ERR("Insufficient buffer space,"
2239 					       " size %d needed", size);
2240 				return;
2241 			}
2242 		}
2243 
2244 		/* mac data */
2245 		if (mac_len) {
2246 			i = fill_sg_comp_from_buf(scatter_comp, i,
2247 						  &params->mac_buf);
2248 		}
2249 	} else {
2250 		/* Output including mac */
2251 		size = outputlen - iv_len;
2252 		if (size) {
2253 			i = fill_sg_comp_from_iov(scatter_comp, i,
2254 						  params->dst_iov, 0,
2255 						  &size, NULL, 0);
2256 
2257 			if (unlikely(size)) {
2258 				CPT_LOG_DP_ERR("Insufficient buffer space,"
2259 					       " size %d needed", size);
2260 				return;
2261 			}
2262 		}
2263 	}
2264 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2265 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2266 
2267 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2268 
2269 	/* This is DPTR len incase of SG mode */
2270 	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2271 
2272 	m_vaddr = (uint8_t *)m_vaddr + size;
2273 	m_dma += size;
2274 	m_size -= size;
2275 
2276 	/* cpt alternate completion address saved earlier */
2277 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2278 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2279 	rptr_dma = c_dma - 8;
2280 
2281 	req->ist.ei1 = dptr_dma;
2282 	req->ist.ei2 = rptr_dma;
2283 
2284 	/* First 16-bit swap then 64-bit swap */
2285 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2286 	 * to eliminate all the swapping
2287 	 */
2288 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2289 
2290 	/* vq command w3 */
2291 	vq_cmd_w3.u64 = 0;
2292 	vq_cmd_w3.s.grp = 0;
2293 	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2294 		offsetof(struct cpt_ctx, k_ctx);
2295 
2296 	/* 16 byte aligned cpt res address */
2297 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2298 	*req->completion_addr = COMPLETION_CODE_INIT;
2299 	req->comp_baddr  = c_dma;
2300 
2301 	/* Fill microcode part of instruction */
2302 	req->ist.ei0 = vq_cmd_w0.u64;
2303 	req->ist.ei3 = vq_cmd_w3.u64;
2304 
2305 	req->op = op;
2306 
2307 	*prep_req = req;
2308 	return;
2309 }
2310 
2311 static __rte_always_inline void
2312 cpt_kasumi_dec_prep(uint64_t d_offs,
2313 		    uint64_t d_lens,
2314 		    fc_params_t *params,
2315 		    void *op,
2316 		    void **prep_req)
2317 {
2318 	uint32_t size;
2319 	int32_t inputlen = 0, outputlen;
2320 	struct cpt_ctx *cpt_ctx;
2321 	uint8_t i = 0, iv_len = 8;
2322 	struct cpt_request_info *req;
2323 	buf_ptr_t *buf_p;
2324 	uint32_t encr_offset;
2325 	uint32_t encr_data_len;
2326 	int flags, m_size;
2327 	uint8_t dir = 0;
2328 	void *m_vaddr, *c_vaddr;
2329 	uint64_t m_dma, c_dma;
2330 	uint64_t *offset_vaddr, offset_dma;
2331 	vq_cmd_word0_t vq_cmd_w0;
2332 	vq_cmd_word3_t vq_cmd_w3;
2333 	opcode_info_t opcode;
2334 	uint8_t *in_buffer;
2335 	uint32_t g_size_bytes, s_size_bytes;
2336 	uint64_t dptr_dma, rptr_dma;
2337 	sg_comp_t *gather_comp;
2338 	sg_comp_t *scatter_comp;
2339 
2340 	buf_p = &params->meta_buf;
2341 	m_vaddr = buf_p->vaddr;
2342 	m_dma = buf_p->dma_addr;
2343 	m_size = buf_p->size;
2344 
2345 	encr_offset = ENCR_OFFSET(d_offs) / 8;
2346 	encr_data_len = ENCR_DLEN(d_lens);
2347 
2348 	cpt_ctx = params->ctx_buf.vaddr;
2349 	flags = cpt_ctx->zsk_flags;
2350 	/*
2351 	 * Save initial space that followed app data for completion code &
2352 	 * alternate completion code to fall in same cache line as app data
2353 	 */
2354 	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2355 	m_dma += COMPLETION_CODE_SIZE;
2356 	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2357 		(uint8_t *)m_vaddr;
2358 
2359 	c_vaddr = (uint8_t *)m_vaddr + size;
2360 	c_dma = m_dma + size;
2361 	size += sizeof(cpt_res_s_t);
2362 
2363 	m_vaddr = (uint8_t *)m_vaddr + size;
2364 	m_dma += size;
2365 	m_size -= size;
2366 
2367 	/* Reserve memory for cpt request info */
2368 	req = m_vaddr;
2369 
2370 	size = sizeof(struct cpt_request_info);
2371 	m_vaddr = (uint8_t *)m_vaddr + size;
2372 	m_dma += size;
2373 	m_size -= size;
2374 
2375 	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2376 
2377 	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2378 	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2379 			  (dir << 4) | (0 << 3) | (flags & 0x7));
2380 
2381 	/*
2382 	 * GP op header, lengths are expected in bits.
2383 	 */
2384 	vq_cmd_w0.u64 = 0;
2385 	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
2386 	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
2387 
2388 	/* consider iv len */
2389 	encr_offset += iv_len;
2390 
2391 	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2392 	outputlen = inputlen;
2393 
2394 	/* save space for offset ctrl & iv */
2395 	offset_vaddr = m_vaddr;
2396 	offset_dma = m_dma;
2397 
2398 	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2399 	m_dma += OFF_CTRL_LEN + iv_len;
2400 	m_size -= OFF_CTRL_LEN + iv_len;
2401 
2402 	/* DPTR has SG list */
2403 	in_buffer = m_vaddr;
2404 	dptr_dma = m_dma;
2405 
2406 	((uint16_t *)in_buffer)[0] = 0;
2407 	((uint16_t *)in_buffer)[1] = 0;
2408 
2409 	/* TODO Add error check if space will be sufficient */
2410 	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2411 
2412 	/*
2413 	 * Input Gather List
2414 	 */
2415 	i = 0;
2416 
2417 	/* Offset control word followed by iv */
2418 	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2419 
2420 	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2421 
2422 	/* IV */
2423 	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2424 	       params->iv_buf, iv_len);
2425 
2426 	/* Add input data */
2427 	size = inputlen - iv_len;
2428 	if (size) {
2429 		i = fill_sg_comp_from_iov(gather_comp, i,
2430 					  params->src_iov,
2431 					  0, &size, NULL, 0);
2432 		if (unlikely(size)) {
2433 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2434 				       " size %d needed", size);
2435 			return;
2436 		}
2437 	}
2438 	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2439 	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2440 
2441 	/*
2442 	 * Output Scatter List
2443 	 */
2444 
2445 	i = 0;
2446 	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2447 
2448 	/* IV */
2449 	i = fill_sg_comp(scatter_comp, i,
2450 			 offset_dma + OFF_CTRL_LEN,
2451 			 iv_len);
2452 
2453 	/* Add output data */
2454 	size = outputlen - iv_len;
2455 	if (size) {
2456 		i = fill_sg_comp_from_iov(scatter_comp, i,
2457 					  params->dst_iov, 0,
2458 					  &size, NULL, 0);
2459 		if (unlikely(size)) {
2460 			CPT_LOG_DP_ERR("Insufficient buffer space,"
2461 				       " size %d needed", size);
2462 			return;
2463 		}
2464 	}
2465 	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2466 	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2467 
2468 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2469 
2470 	/* This is DPTR len incase of SG mode */
2471 	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
2472 
2473 	m_vaddr = (uint8_t *)m_vaddr + size;
2474 	m_dma += size;
2475 	m_size -= size;
2476 
2477 	/* cpt alternate completion address saved earlier */
2478 	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2479 	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2480 	rptr_dma = c_dma - 8;
2481 
2482 	req->ist.ei1 = dptr_dma;
2483 	req->ist.ei2 = rptr_dma;
2484 
2485 	/* First 16-bit swap then 64-bit swap */
2486 	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
2487 	 * to eliminate all the swapping
2488 	 */
2489 	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
2490 
2491 	/* vq command w3 */
2492 	vq_cmd_w3.u64 = 0;
2493 	vq_cmd_w3.s.grp = 0;
2494 	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2495 		offsetof(struct cpt_ctx, k_ctx);
2496 
2497 	/* 16 byte aligned cpt res address */
2498 	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2499 	*req->completion_addr = COMPLETION_CODE_INIT;
2500 	req->comp_baddr  = c_dma;
2501 
2502 	/* Fill microcode part of instruction */
2503 	req->ist.ei0 = vq_cmd_w0.u64;
2504 	req->ist.ei3 = vq_cmd_w3.u64;
2505 
2506 	req->op = op;
2507 
2508 	*prep_req = req;
2509 	return;
2510 }
2511 
2512 static __rte_always_inline void *
2513 cpt_fc_dec_hmac_prep(uint32_t flags,
2514 		     uint64_t d_offs,
2515 		     uint64_t d_lens,
2516 		     fc_params_t *fc_params,
2517 		     void *op)
2518 {
2519 	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2520 	uint8_t fc_type;
2521 	void *prep_req = NULL;
2522 
2523 	fc_type = ctx->fc_type;
2524 
2525 	if (likely(fc_type == FC_GEN)) {
2526 		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2527 				  &prep_req);
2528 	} else if (fc_type == ZUC_SNOW3G) {
2529 		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2530 					&prep_req);
2531 	} else if (fc_type == KASUMI) {
2532 		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2533 	}
2534 
2535 	/*
2536 	 * For AUTH_ONLY case,
2537 	 * MC only supports digest generation and verification
2538 	 * should be done in software by memcmp()
2539 	 */
2540 
2541 	return prep_req;
2542 }
2543 
2544 static __rte_always_inline void *__hot
2545 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2546 		     fc_params_t *fc_params, void *op)
2547 {
2548 	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2549 	uint8_t fc_type;
2550 	void *prep_req = NULL;
2551 
2552 	fc_type = ctx->fc_type;
2553 
2554 	/* Common api for rest of the ops */
2555 	if (likely(fc_type == FC_GEN)) {
2556 		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2557 				  &prep_req);
2558 	} else if (fc_type == ZUC_SNOW3G) {
2559 		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2560 					&prep_req);
2561 	} else if (fc_type == KASUMI) {
2562 		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2563 				    &prep_req);
2564 	} else if (fc_type == HASH_HMAC) {
2565 		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2566 	}
2567 
2568 	return prep_req;
2569 }
2570 
2571 static __rte_always_inline int
2572 cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
2573 		    uint16_t key_len, uint16_t mac_len)
2574 {
2575 	struct cpt_ctx *cpt_ctx = ctx;
2576 	mc_fc_context_t *fctx = &cpt_ctx->fctx;
2577 	uint64_t *ctrl_flags = NULL;
2578 
2579 	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2580 		uint32_t keyx[4];
2581 
2582 		if (key_len != 16)
2583 			return -1;
2584 		/* No support for AEAD yet */
2585 		if (cpt_ctx->enc_cipher)
2586 			return -1;
2587 		/* For ZUC/SNOW3G/Kasumi */
2588 		switch (type) {
2589 		case SNOW3G_UIA2:
2590 			cpt_ctx->snow3g = 1;
2591 			gen_key_snow3g(key, keyx);
2592 			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2593 			cpt_ctx->fc_type = ZUC_SNOW3G;
2594 			cpt_ctx->zsk_flags = 0x1;
2595 			break;
2596 		case ZUC_EIA3:
2597 			cpt_ctx->snow3g = 0;
2598 			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2599 			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2600 			cpt_ctx->fc_type = ZUC_SNOW3G;
2601 			cpt_ctx->zsk_flags = 0x1;
2602 			break;
2603 		case KASUMI_F9_ECB:
2604 			/* Kasumi ECB mode */
2605 			cpt_ctx->k_ecb = 1;
2606 			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2607 			cpt_ctx->fc_type = KASUMI;
2608 			cpt_ctx->zsk_flags = 0x1;
2609 			break;
2610 		case KASUMI_F9_CBC:
2611 			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2612 			cpt_ctx->fc_type = KASUMI;
2613 			cpt_ctx->zsk_flags = 0x1;
2614 			break;
2615 		default:
2616 			return -1;
2617 		}
2618 		cpt_ctx->mac_len = 4;
2619 		cpt_ctx->hash_type = type;
2620 		return 0;
2621 	}
2622 
2623 	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2624 		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2625 			cpt_ctx->fc_type = HASH_HMAC;
2626 	}
2627 
2628 	ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
2629 	*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
2630 
2631 	/* For GMAC auth, cipher must be NULL */
2632 	if (type == GMAC_TYPE)
2633 		CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
2634 
2635 	CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
2636 	CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
2637 
2638 	if (key_len) {
2639 		cpt_ctx->hmac = 1;
2640 		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2641 		memcpy(cpt_ctx->auth_key, key, key_len);
2642 		cpt_ctx->auth_key_len = key_len;
2643 		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2644 		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2645 		memcpy(fctx->hmac.opad, key, key_len);
2646 		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
2647 	}
2648 	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
2649 	return 0;
2650 }
2651 
2652 static __rte_always_inline int
2653 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2654 		 struct cpt_sess_misc *sess)
2655 {
2656 	struct rte_crypto_aead_xform *aead_form;
2657 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2658 	auth_type_t auth_type = 0; /* NULL Auth type */
2659 	uint32_t cipher_key_len = 0;
2660 	uint8_t zsk_flag = 0, aes_gcm = 0;
2661 	aead_form = &xform->aead;
2662 	void *ctx;
2663 
2664 	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2665 	   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2666 		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2667 		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2668 	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2669 		aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2670 		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2671 		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2672 	} else {
2673 		CPT_LOG_DP_ERR("Unknown cipher operation\n");
2674 		return -1;
2675 	}
2676 	switch (aead_form->algo) {
2677 	case RTE_CRYPTO_AEAD_AES_GCM:
2678 		enc_type = AES_GCM;
2679 		cipher_key_len = 16;
2680 		aes_gcm = 1;
2681 		break;
2682 	case RTE_CRYPTO_AEAD_AES_CCM:
2683 		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2684 			       aead_form->algo);
2685 		return -1;
2686 	default:
2687 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2688 			       aead_form->algo);
2689 		return -1;
2690 	}
2691 	if (aead_form->key.length < cipher_key_len) {
2692 		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2693 			       (unsigned int long)aead_form->key.length);
2694 		return -1;
2695 	}
2696 	sess->zsk_flag = zsk_flag;
2697 	sess->aes_gcm = aes_gcm;
2698 	sess->mac_len = aead_form->digest_length;
2699 	sess->iv_offset = aead_form->iv.offset;
2700 	sess->iv_length = aead_form->iv.length;
2701 	sess->aad_length = aead_form->aad_length;
2702 	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
2703 
2704 	cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2705 			aead_form->key.length, NULL);
2706 
2707 	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
2708 
2709 	return 0;
2710 }
2711 
2712 static __rte_always_inline int
2713 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2714 		 struct cpt_sess_misc *sess)
2715 {
2716 	struct rte_crypto_cipher_xform *c_form;
2717 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2718 	uint32_t cipher_key_len = 0;
2719 	uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
2720 
2721 	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
2722 		return -1;
2723 
2724 	c_form = &xform->cipher;
2725 
2726 	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2727 		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2728 	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2729 		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2730 	else {
2731 		CPT_LOG_DP_ERR("Unknown cipher operation\n");
2732 		return -1;
2733 	}
2734 
2735 	switch (c_form->algo) {
2736 	case RTE_CRYPTO_CIPHER_AES_CBC:
2737 		enc_type = AES_CBC;
2738 		cipher_key_len = 16;
2739 		break;
2740 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2741 		enc_type = DES3_CBC;
2742 		cipher_key_len = 24;
2743 		break;
2744 	case RTE_CRYPTO_CIPHER_DES_CBC:
2745 		/* DES is implemented using 3DES in hardware */
2746 		enc_type = DES3_CBC;
2747 		cipher_key_len = 8;
2748 		break;
2749 	case RTE_CRYPTO_CIPHER_AES_CTR:
2750 		enc_type = AES_CTR;
2751 		cipher_key_len = 16;
2752 		aes_ctr = 1;
2753 		break;
2754 	case RTE_CRYPTO_CIPHER_NULL:
2755 		enc_type = 0;
2756 		is_null = 1;
2757 		break;
2758 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2759 		enc_type = KASUMI_F8_ECB;
2760 		cipher_key_len = 16;
2761 		zsk_flag = K_F8;
2762 		break;
2763 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2764 		enc_type = SNOW3G_UEA2;
2765 		cipher_key_len = 16;
2766 		zsk_flag = ZS_EA;
2767 		break;
2768 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2769 		enc_type = ZUC_EEA3;
2770 		cipher_key_len = 16;
2771 		zsk_flag = ZS_EA;
2772 		break;
2773 	case RTE_CRYPTO_CIPHER_AES_XTS:
2774 		enc_type = AES_XTS;
2775 		cipher_key_len = 16;
2776 		break;
2777 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2778 		enc_type = DES3_ECB;
2779 		cipher_key_len = 24;
2780 		break;
2781 	case RTE_CRYPTO_CIPHER_AES_ECB:
2782 		enc_type = AES_ECB;
2783 		cipher_key_len = 16;
2784 		break;
2785 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2786 	case RTE_CRYPTO_CIPHER_AES_F8:
2787 	case RTE_CRYPTO_CIPHER_ARC4:
2788 		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2789 			       c_form->algo);
2790 		return -1;
2791 	default:
2792 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2793 			       c_form->algo);
2794 		return -1;
2795 	}
2796 
2797 	if (c_form->key.length < cipher_key_len) {
2798 		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2799 			       (unsigned long) c_form->key.length);
2800 		return -1;
2801 	}
2802 
2803 	sess->zsk_flag = zsk_flag;
2804 	sess->aes_gcm = aes_gcm;
2805 	sess->aes_ctr = aes_ctr;
2806 	sess->iv_offset = c_form->iv.offset;
2807 	sess->iv_length = c_form->iv.length;
2808 	sess->is_null = is_null;
2809 
2810 	cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
2811 			    c_form->key.length, NULL);
2812 
2813 	return 0;
2814 }
2815 
2816 static __rte_always_inline int
2817 fill_sess_auth(struct rte_crypto_sym_xform *xform,
2818 	       struct cpt_sess_misc *sess)
2819 {
2820 	struct rte_crypto_auth_xform *a_form;
2821 	auth_type_t auth_type = 0; /* NULL Auth type */
2822 	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
2823 
2824 	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
2825 		goto error_out;
2826 
2827 	a_form = &xform->auth;
2828 
2829 	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2830 		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2831 	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2832 		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2833 	else {
2834 		CPT_LOG_DP_ERR("Unknown auth operation");
2835 		return -1;
2836 	}
2837 
2838 	if (a_form->key.length > 64) {
2839 		CPT_LOG_DP_ERR("Auth key length is big");
2840 		return -1;
2841 	}
2842 
2843 	switch (a_form->algo) {
2844 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2845 		/* Fall through */
2846 	case RTE_CRYPTO_AUTH_SHA1:
2847 		auth_type = SHA1_TYPE;
2848 		break;
2849 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2850 	case RTE_CRYPTO_AUTH_SHA256:
2851 		auth_type = SHA2_SHA256;
2852 		break;
2853 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2854 	case RTE_CRYPTO_AUTH_SHA512:
2855 		auth_type = SHA2_SHA512;
2856 		break;
2857 	case RTE_CRYPTO_AUTH_AES_GMAC:
2858 		auth_type = GMAC_TYPE;
2859 		aes_gcm = 1;
2860 		break;
2861 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2862 	case RTE_CRYPTO_AUTH_SHA224:
2863 		auth_type = SHA2_SHA224;
2864 		break;
2865 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2866 	case RTE_CRYPTO_AUTH_SHA384:
2867 		auth_type = SHA2_SHA384;
2868 		break;
2869 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2870 	case RTE_CRYPTO_AUTH_MD5:
2871 		auth_type = MD5_TYPE;
2872 		break;
2873 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2874 		auth_type = KASUMI_F9_ECB;
2875 		/*
2876 		 * Indicate that direction needs to be taken out
2877 		 * from end of src
2878 		 */
2879 		zsk_flag = K_F9;
2880 		break;
2881 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2882 		auth_type = SNOW3G_UIA2;
2883 		zsk_flag = ZS_IA;
2884 		break;
2885 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2886 		auth_type = ZUC_EIA3;
2887 		zsk_flag = ZS_IA;
2888 		break;
2889 	case RTE_CRYPTO_AUTH_NULL:
2890 		auth_type = 0;
2891 		is_null = 1;
2892 		break;
2893 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2894 	case RTE_CRYPTO_AUTH_AES_CMAC:
2895 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2896 		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
2897 			       a_form->algo);
2898 		goto error_out;
2899 	default:
2900 		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
2901 			       a_form->algo);
2902 		goto error_out;
2903 	}
2904 
2905 	sess->zsk_flag = zsk_flag;
2906 	sess->aes_gcm = aes_gcm;
2907 	sess->mac_len = a_form->digest_length;
2908 	sess->is_null = is_null;
2909 	if (zsk_flag) {
2910 		sess->auth_iv_offset = a_form->iv.offset;
2911 		sess->auth_iv_length = a_form->iv.length;
2912 	}
2913 	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
2914 			    a_form->key.length, a_form->digest_length);
2915 
2916 	return 0;
2917 
2918 error_out:
2919 	return -1;
2920 }
2921 
2922 static __rte_always_inline int
2923 fill_sess_gmac(struct rte_crypto_sym_xform *xform,
2924 		 struct cpt_sess_misc *sess)
2925 {
2926 	struct rte_crypto_auth_xform *a_form;
2927 	cipher_type_t enc_type = 0; /* NULL Cipher type */
2928 	auth_type_t auth_type = 0; /* NULL Auth type */
2929 	uint8_t zsk_flag = 0, aes_gcm = 0;
2930 	void *ctx;
2931 
2932 	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
2933 		return -1;
2934 
2935 	a_form = &xform->auth;
2936 
2937 	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2938 		sess->cpt_op |= CPT_OP_ENCODE;
2939 	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2940 		sess->cpt_op |= CPT_OP_DECODE;
2941 	else {
2942 		CPT_LOG_DP_ERR("Unknown auth operation");
2943 		return -1;
2944 	}
2945 
2946 	switch (a_form->algo) {
2947 	case RTE_CRYPTO_AUTH_AES_GMAC:
2948 		enc_type = AES_GCM;
2949 		auth_type = GMAC_TYPE;
2950 		break;
2951 	default:
2952 		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2953 			       a_form->algo);
2954 		return -1;
2955 	}
2956 
2957 	sess->zsk_flag = zsk_flag;
2958 	sess->aes_gcm = aes_gcm;
2959 	sess->is_gmac = 1;
2960 	sess->iv_offset = a_form->iv.offset;
2961 	sess->iv_length = a_form->iv.length;
2962 	sess->mac_len = a_form->digest_length;
2963 	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
2964 
2965 	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
2966 			a_form->key.length, NULL);
2967 	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
2968 
2969 	return 0;
2970 }
2971 
2972 static __rte_always_inline void *
2973 alloc_op_meta(struct rte_mbuf *m_src,
2974 	      buf_ptr_t *buf,
2975 	      int32_t len,
2976 	      struct rte_mempool *cpt_meta_pool)
2977 {
2978 	uint8_t *mdata;
2979 
2980 #ifndef CPT_ALWAYS_USE_SEPARATE_BUF
2981 	if (likely(m_src && (m_src->nb_segs == 1))) {
2982 		int32_t tailroom;
2983 		phys_addr_t mphys;
2984 
2985 		/* Check if tailroom is sufficient to hold meta data */
2986 		tailroom = rte_pktmbuf_tailroom(m_src);
2987 		if (likely(tailroom > len + 8)) {
2988 			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
2989 			mphys = m_src->buf_physaddr + m_src->buf_len;
2990 			mdata -= len;
2991 			mphys -= len;
2992 			buf->vaddr = mdata;
2993 			buf->dma_addr = mphys;
2994 			buf->size = len;
2995 			/* Indicate that this is a mbuf allocated mdata */
2996 			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
2997 			return mdata;
2998 		}
2999 	}
3000 #else
3001 	RTE_SET_USED(m_src);
3002 #endif
3003 
3004 	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
3005 		return NULL;
3006 
3007 	buf->vaddr = mdata;
3008 	buf->dma_addr = rte_mempool_virt2iova(mdata);
3009 	buf->size = len;
3010 
3011 	return mdata;
3012 }
3013 
3014 /**
3015  * cpt_free_metabuf - free metabuf to mempool.
3016  * @param instance: pointer to instance.
3017  * @param objp: pointer to the metabuf.
3018  */
3019 static __rte_always_inline void
3020 free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
3021 {
3022 	bool nofree = ((uintptr_t)mdata & 1ull);
3023 
3024 	if (likely(nofree))
3025 		return;
3026 	rte_mempool_put(cpt_meta_pool, mdata);
3027 }
3028 
3029 static __rte_always_inline uint32_t
3030 prepare_iov_from_pkt(struct rte_mbuf *pkt,
3031 		     iov_ptr_t *iovec, uint32_t start_offset)
3032 {
3033 	uint16_t index = 0;
3034 	void *seg_data = NULL;
3035 	phys_addr_t seg_phys;
3036 	int32_t seg_size = 0;
3037 
3038 	if (!pkt) {
3039 		iovec->buf_cnt = 0;
3040 		return 0;
3041 	}
3042 
3043 	if (!start_offset) {
3044 		seg_data = rte_pktmbuf_mtod(pkt, void *);
3045 		seg_phys = rte_pktmbuf_mtophys(pkt);
3046 		seg_size = pkt->data_len;
3047 	} else {
3048 		while (start_offset >= pkt->data_len) {
3049 			start_offset -= pkt->data_len;
3050 			pkt = pkt->next;
3051 		}
3052 
3053 		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
3054 		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
3055 		seg_size = pkt->data_len - start_offset;
3056 		if (!seg_size)
3057 			return 1;
3058 	}
3059 
3060 	/* first seg */
3061 	iovec->bufs[index].vaddr = seg_data;
3062 	iovec->bufs[index].dma_addr = seg_phys;
3063 	iovec->bufs[index].size = seg_size;
3064 	index++;
3065 	pkt = pkt->next;
3066 
3067 	while (unlikely(pkt != NULL)) {
3068 		seg_data = rte_pktmbuf_mtod(pkt, void *);
3069 		seg_phys = rte_pktmbuf_mtophys(pkt);
3070 		seg_size = pkt->data_len;
3071 		if (!seg_size)
3072 			break;
3073 
3074 		iovec->bufs[index].vaddr = seg_data;
3075 		iovec->bufs[index].dma_addr = seg_phys;
3076 		iovec->bufs[index].size = seg_size;
3077 
3078 		index++;
3079 
3080 		pkt = pkt->next;
3081 	}
3082 
3083 	iovec->buf_cnt = index;
3084 	return 0;
3085 }
3086 
3087 static __rte_always_inline uint32_t
3088 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
3089 			     fc_params_t *param,
3090 			     uint32_t *flags)
3091 {
3092 	uint16_t index = 0;
3093 	void *seg_data = NULL;
3094 	phys_addr_t seg_phys;
3095 	uint32_t seg_size = 0;
3096 	iov_ptr_t *iovec;
3097 
3098 	seg_data = rte_pktmbuf_mtod(pkt, void *);
3099 	seg_phys = rte_pktmbuf_mtophys(pkt);
3100 	seg_size = pkt->data_len;
3101 
3102 	/* first seg */
3103 	if (likely(!pkt->next)) {
3104 		uint32_t headroom, tailroom;
3105 
3106 		*flags |= SINGLE_BUF_INPLACE;
3107 		headroom = rte_pktmbuf_headroom(pkt);
3108 		tailroom = rte_pktmbuf_tailroom(pkt);
3109 		if (likely((headroom >= 24) &&
3110 		    (tailroom >= 8))) {
3111 			/* In 83XX this is prerequivisit for Direct mode */
3112 			*flags |= SINGLE_BUF_HEADTAILROOM;
3113 		}
3114 		param->bufs[0].vaddr = seg_data;
3115 		param->bufs[0].dma_addr = seg_phys;
3116 		param->bufs[0].size = seg_size;
3117 		return 0;
3118 	}
3119 	iovec = param->src_iov;
3120 	iovec->bufs[index].vaddr = seg_data;
3121 	iovec->bufs[index].dma_addr = seg_phys;
3122 	iovec->bufs[index].size = seg_size;
3123 	index++;
3124 	pkt = pkt->next;
3125 
3126 	while (unlikely(pkt != NULL)) {
3127 		seg_data = rte_pktmbuf_mtod(pkt, void *);
3128 		seg_phys = rte_pktmbuf_mtophys(pkt);
3129 		seg_size = pkt->data_len;
3130 
3131 		if (!seg_size)
3132 			break;
3133 
3134 		iovec->bufs[index].vaddr = seg_data;
3135 		iovec->bufs[index].dma_addr = seg_phys;
3136 		iovec->bufs[index].size = seg_size;
3137 
3138 		index++;
3139 
3140 		pkt = pkt->next;
3141 	}
3142 
3143 	iovec->buf_cnt = index;
3144 	return 0;
3145 }
3146 
3147 static __rte_always_inline int
3148 fill_fc_params(struct rte_crypto_op *cop,
3149 	       struct cpt_sess_misc *sess_misc,
3150 	       struct cpt_qp_meta_info *m_info,
3151 	       void **mdata_ptr,
3152 	       void **prep_req)
3153 {
3154 	uint32_t space = 0;
3155 	struct rte_crypto_sym_op *sym_op = cop->sym;
3156 	void *mdata = NULL;
3157 	uintptr_t *op;
3158 	uint32_t mc_hash_off;
3159 	uint32_t flags = 0;
3160 	uint64_t d_offs, d_lens;
3161 	struct rte_mbuf *m_src, *m_dst;
3162 	uint8_t cpt_op = sess_misc->cpt_op;
3163 	uint8_t zsk_flag = sess_misc->zsk_flag;
3164 	uint8_t aes_gcm = sess_misc->aes_gcm;
3165 	uint16_t mac_len = sess_misc->mac_len;
3166 #ifdef CPT_ALWAYS_USE_SG_MODE
3167 	uint8_t inplace = 0;
3168 #else
3169 	uint8_t inplace = 1;
3170 #endif
3171 	fc_params_t fc_params;
3172 	char src[SRC_IOV_SIZE];
3173 	char dst[SRC_IOV_SIZE];
3174 	uint32_t iv_buf[4];
3175 	int ret;
3176 
3177 	if (likely(sess_misc->iv_length)) {
3178 		flags |= VALID_IV_BUF;
3179 		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
3180 				   uint8_t *, sess_misc->iv_offset);
3181 		if (sess_misc->aes_ctr &&
3182 		    unlikely(sess_misc->iv_length != 16)) {
3183 			memcpy((uint8_t *)iv_buf,
3184 				rte_crypto_op_ctod_offset(cop,
3185 				uint8_t *, sess_misc->iv_offset), 12);
3186 			iv_buf[3] = rte_cpu_to_be_32(0x1);
3187 			fc_params.iv_buf = iv_buf;
3188 		}
3189 	}
3190 
3191 	if (zsk_flag) {
3192 		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3193 					uint8_t *,
3194 					sess_misc->auth_iv_offset);
3195 		if (zsk_flag == K_F9) {
3196 			CPT_LOG_DP_ERR("Should not reach here for "
3197 			"kasumi F9\n");
3198 		}
3199 		if (zsk_flag != ZS_EA)
3200 			inplace = 0;
3201 	}
3202 	m_src = sym_op->m_src;
3203 	m_dst = sym_op->m_dst;
3204 
3205 	if (aes_gcm) {
3206 		uint8_t *salt;
3207 		uint8_t *aad_data;
3208 		uint16_t aad_len;
3209 
3210 		d_offs = sym_op->aead.data.offset;
3211 		d_lens = sym_op->aead.data.length;
3212 		mc_hash_off = sym_op->aead.data.offset +
3213 			      sym_op->aead.data.length;
3214 
3215 		aad_data = sym_op->aead.aad.data;
3216 		aad_len = sess_misc->aad_length;
3217 		if (likely((aad_data + aad_len) ==
3218 			   rte_pktmbuf_mtod_offset(m_src,
3219 				uint8_t *,
3220 				sym_op->aead.data.offset))) {
3221 			d_offs = (d_offs - aad_len) | (d_offs << 16);
3222 			d_lens = (d_lens + aad_len) | (d_lens << 32);
3223 		} else {
3224 			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
3225 			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
3226 			fc_params.aad_buf.size = aad_len;
3227 			flags |= VALID_AAD_BUF;
3228 			inplace = 0;
3229 			d_offs = d_offs << 16;
3230 			d_lens = d_lens << 32;
3231 		}
3232 
3233 		salt = fc_params.iv_buf;
3234 		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3235 			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3236 			sess_misc->salt = *(uint32_t *)salt;
3237 		}
3238 		fc_params.iv_buf = salt + 4;
3239 		if (likely(mac_len)) {
3240 			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
3241 					     m_src;
3242 
3243 			if (!m)
3244 				m = m_src;
3245 
3246 			/* hmac immediately following data is best case */
3247 			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
3248 			    mc_hash_off !=
3249 			    (uint8_t *)sym_op->aead.digest.data)) {
3250 				flags |= VALID_MAC_BUF;
3251 				fc_params.mac_buf.size = sess_misc->mac_len;
3252 				fc_params.mac_buf.vaddr =
3253 				  sym_op->aead.digest.data;
3254 				fc_params.mac_buf.dma_addr =
3255 				 sym_op->aead.digest.phys_addr;
3256 				inplace = 0;
3257 			}
3258 		}
3259 	} else {
3260 		d_offs = sym_op->cipher.data.offset;
3261 		d_lens = sym_op->cipher.data.length;
3262 		mc_hash_off = sym_op->cipher.data.offset +
3263 			      sym_op->cipher.data.length;
3264 		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
3265 		d_lens = (d_lens << 32) | sym_op->auth.data.length;
3266 
3267 		if (mc_hash_off < (sym_op->auth.data.offset +
3268 				   sym_op->auth.data.length)){
3269 			mc_hash_off = (sym_op->auth.data.offset +
3270 				       sym_op->auth.data.length);
3271 		}
3272 		/* for gmac, salt should be updated like in gcm */
3273 		if (unlikely(sess_misc->is_gmac)) {
3274 			uint8_t *salt;
3275 			salt = fc_params.iv_buf;
3276 			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
3277 				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
3278 				sess_misc->salt = *(uint32_t *)salt;
3279 			}
3280 			fc_params.iv_buf = salt + 4;
3281 		}
3282 		if (likely(mac_len)) {
3283 			struct rte_mbuf *m;
3284 
3285 			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
3286 			if (!m)
3287 				m = m_src;
3288 
3289 			/* hmac immediately following data is best case */
3290 			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
3291 			    mc_hash_off !=
3292 			     (uint8_t *)sym_op->auth.digest.data)) {
3293 				flags |= VALID_MAC_BUF;
3294 				fc_params.mac_buf.size =
3295 					sess_misc->mac_len;
3296 				fc_params.mac_buf.vaddr =
3297 					sym_op->auth.digest.data;
3298 				fc_params.mac_buf.dma_addr =
3299 				sym_op->auth.digest.phys_addr;
3300 				inplace = 0;
3301 			}
3302 		}
3303 	}
3304 	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
3305 	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
3306 
3307 	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
3308 		inplace = 0;
3309 
3310 	if (likely(!m_dst && inplace)) {
3311 		/* Case of single buffer without AAD buf or
3312 		 * separate mac buf in place and
3313 		 * not air crypto
3314 		 */
3315 		fc_params.dst_iov = fc_params.src_iov = (void *)src;
3316 
3317 		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
3318 							  &fc_params,
3319 							  &flags))) {
3320 			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
3321 			ret = -EINVAL;
3322 			goto err_exit;
3323 		}
3324 
3325 	} else {
3326 		/* Out of place processing */
3327 		fc_params.src_iov = (void *)src;
3328 		fc_params.dst_iov = (void *)dst;
3329 
3330 		/* Store SG I/O in the api for reuse */
3331 		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
3332 			CPT_LOG_DP_ERR("Prepare src iov failed");
3333 			ret = -EINVAL;
3334 			goto err_exit;
3335 		}
3336 
3337 		if (unlikely(m_dst != NULL)) {
3338 			uint32_t pkt_len;
3339 
3340 			/* Try to make room as much as src has */
3341 			m_dst = sym_op->m_dst;
3342 			pkt_len = rte_pktmbuf_pkt_len(m_dst);
3343 
3344 			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
3345 				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
3346 				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
3347 					CPT_LOG_DP_ERR("Not enough space in "
3348 						       "m_dst %p, need %u"
3349 						       " more",
3350 						       m_dst, pkt_len);
3351 					ret = -EINVAL;
3352 					goto err_exit;
3353 				}
3354 			}
3355 
3356 			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
3357 				CPT_LOG_DP_ERR("Prepare dst iov failed for "
3358 					       "m_dst %p", m_dst);
3359 				ret = -EINVAL;
3360 				goto err_exit;
3361 			}
3362 		} else {
3363 			fc_params.dst_iov = (void *)src;
3364 		}
3365 	}
3366 
3367 	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
3368 		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
3369 				      m_info->lb_mlen, m_info->pool);
3370 	else
3371 		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
3372 				      m_info->sg_mlen, m_info->pool);
3373 
3374 	if (unlikely(mdata == NULL)) {
3375 		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
3376 		ret = -ENOMEM;
3377 		goto err_exit;
3378 	}
3379 
3380 	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
3381 	op[0] = (uintptr_t)mdata;
3382 	op[1] = (uintptr_t)cop;
3383 	op[2] = op[3] = 0; /* Used to indicate auth verify */
3384 	space += 4 * sizeof(uint64_t);
3385 
3386 	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
3387 	fc_params.meta_buf.dma_addr += space;
3388 	fc_params.meta_buf.size -= space;
3389 
3390 	/* Finally prepare the instruction */
3391 	if (cpt_op & CPT_OP_ENCODE)
3392 		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
3393 						 &fc_params, op);
3394 	else
3395 		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
3396 						 &fc_params, op);
3397 
3398 	if (unlikely(*prep_req == NULL)) {
3399 		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
3400 		ret = -EINVAL;
3401 		goto free_mdata_and_exit;
3402 	}
3403 
3404 	*mdata_ptr = mdata;
3405 
3406 	return 0;
3407 
3408 free_mdata_and_exit:
3409 	free_op_meta(mdata, m_info->pool);
3410 err_exit:
3411 	return ret;
3412 }
3413 
3414 static __rte_always_inline void
3415 compl_auth_verify(struct rte_crypto_op *op,
3416 		      uint8_t *gen_mac,
3417 		      uint64_t mac_len)
3418 {
3419 	uint8_t *mac;
3420 	struct rte_crypto_sym_op *sym_op = op->sym;
3421 
3422 	if (sym_op->auth.digest.data)
3423 		mac = sym_op->auth.digest.data;
3424 	else
3425 		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
3426 					      uint8_t *,
3427 					      sym_op->auth.data.length +
3428 					      sym_op->auth.data.offset);
3429 	if (!mac) {
3430 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3431 		return;
3432 	}
3433 
3434 	if (memcmp(mac, gen_mac, mac_len))
3435 		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
3436 	else
3437 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3438 }
3439 
3440 static __rte_always_inline int
3441 instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
3442 {
3443 	struct rte_crypto_sym_xform *chain;
3444 
3445 	CPT_PMD_INIT_FUNC_TRACE();
3446 
3447 	if (cpt_is_algo_supported(xform))
3448 		goto err;
3449 
3450 	chain = xform;
3451 	while (chain) {
3452 		switch (chain->type) {
3453 		case RTE_CRYPTO_SYM_XFORM_AEAD:
3454 			if (fill_sess_aead(chain, sess))
3455 				goto err;
3456 			break;
3457 		case RTE_CRYPTO_SYM_XFORM_CIPHER:
3458 			if (fill_sess_cipher(chain, sess))
3459 				goto err;
3460 			break;
3461 		case RTE_CRYPTO_SYM_XFORM_AUTH:
3462 			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
3463 				if (fill_sess_gmac(chain, sess))
3464 					goto err;
3465 			} else {
3466 				if (fill_sess_auth(chain, sess))
3467 					goto err;
3468 			}
3469 			break;
3470 		default:
3471 			CPT_LOG_DP_ERR("Invalid crypto xform type");
3472 			break;
3473 		}
3474 		chain = chain->next;
3475 	}
3476 
3477 	return 0;
3478 
3479 err:
3480 	return -1;
3481 }
3482 
3483 static __rte_always_inline void
3484 find_kasumif9_direction_and_length(uint8_t *src,
3485 				   uint32_t counter_num_bytes,
3486 				   uint32_t *addr_length_in_bits,
3487 				   uint8_t *addr_direction)
3488 {
3489 	uint8_t found = 0;
3490 	uint32_t pos;
3491 	uint8_t last_byte;
3492 	while (!found && counter_num_bytes > 0) {
3493 		counter_num_bytes--;
3494 		if (src[counter_num_bytes] == 0x00)
3495 			continue;
3496 		pos = rte_bsf32(src[counter_num_bytes]);
3497 		if (pos == 7) {
3498 			if (likely(counter_num_bytes > 0)) {
3499 				last_byte = src[counter_num_bytes - 1];
3500 				*addr_direction  =  last_byte & 0x1;
3501 				*addr_length_in_bits = counter_num_bytes * 8
3502 							- 1;
3503 			}
3504 		} else {
3505 			last_byte = src[counter_num_bytes];
3506 			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
3507 			*addr_length_in_bits = counter_num_bytes * 8
3508 						+ (8 - (pos + 2));
3509 		}
3510 		found = 1;
3511 	}
3512 }
3513 
3514 /*
3515  * This handles all auth only except AES_GMAC
3516  */
3517 static __rte_always_inline int
3518 fill_digest_params(struct rte_crypto_op *cop,
3519 		   struct cpt_sess_misc *sess,
3520 		   struct cpt_qp_meta_info *m_info,
3521 		   void **mdata_ptr,
3522 		   void **prep_req)
3523 {
3524 	uint32_t space = 0;
3525 	struct rte_crypto_sym_op *sym_op = cop->sym;
3526 	void *mdata;
3527 	phys_addr_t mphys;
3528 	uint64_t *op;
3529 	uint32_t auth_range_off;
3530 	uint32_t flags = 0;
3531 	uint64_t d_offs = 0, d_lens;
3532 	struct rte_mbuf *m_src, *m_dst;
3533 	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
3534 	uint8_t zsk_flag = sess->zsk_flag;
3535 	uint16_t mac_len = sess->mac_len;
3536 	fc_params_t params;
3537 	char src[SRC_IOV_SIZE];
3538 	uint8_t iv_buf[16];
3539 	int ret;
3540 
3541 	memset(&params, 0, sizeof(fc_params_t));
3542 
3543 	m_src = sym_op->m_src;
3544 
3545 	/* For just digest lets force mempool alloc */
3546 	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
3547 			      m_info->pool);
3548 	if (mdata == NULL) {
3549 		ret = -ENOMEM;
3550 		goto err_exit;
3551 	}
3552 
3553 	mphys = params.meta_buf.dma_addr;
3554 
3555 	op = mdata;
3556 	op[0] = (uintptr_t)mdata;
3557 	op[1] = (uintptr_t)cop;
3558 	op[2] = op[3] = 0; /* Used to indicate auth verify */
3559 	space += 4 * sizeof(uint64_t);
3560 
3561 	auth_range_off = sym_op->auth.data.offset;
3562 
3563 	flags = VALID_MAC_BUF;
3564 	params.src_iov = (void *)src;
3565 	if (unlikely(zsk_flag)) {
3566 		/*
3567 		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
3568 		 * we will send pass through even for auth only case,
3569 		 * let MC handle it
3570 		 */
3571 		d_offs = auth_range_off;
3572 		auth_range_off = 0;
3573 		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
3574 					uint8_t *, sess->auth_iv_offset);
3575 		if (zsk_flag == K_F9) {
3576 			uint32_t length_in_bits, num_bytes;
3577 			uint8_t *src, direction = 0;
3578 			uint32_t counter_num_bytes;
3579 
3580 			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
3581 							uint8_t *), 8);
3582 			/*
3583 			 * This is kasumi f9, take direction from
3584 			 * source buffer
3585 			 */
3586 			length_in_bits = cop->sym->auth.data.length;
3587 			num_bytes = (length_in_bits >> 3);
3588 			counter_num_bytes = num_bytes;
3589 			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
3590 			find_kasumif9_direction_and_length(src,
3591 						counter_num_bytes,
3592 						&length_in_bits,
3593 						&direction);
3594 			length_in_bits -= 64;
3595 			cop->sym->auth.data.offset += 64;
3596 			d_offs = cop->sym->auth.data.offset;
3597 			auth_range_off = d_offs / 8;
3598 			cop->sym->auth.data.length = length_in_bits;
3599 
3600 			/* Store it at end of auth iv */
3601 			iv_buf[8] = direction;
3602 			params.auth_iv_buf = iv_buf;
3603 		}
3604 	}
3605 
3606 	d_lens = sym_op->auth.data.length;
3607 
3608 	params.ctx_buf.vaddr = SESS_PRIV(sess);
3609 	params.ctx_buf.dma_addr = sess->ctx_dma_addr;
3610 
3611 	if (auth_op == CPT_OP_AUTH_GENERATE) {
3612 		if (sym_op->auth.digest.data) {
3613 			/*
3614 			 * Digest to be generated
3615 			 * in separate buffer
3616 			 */
3617 			params.mac_buf.size =
3618 				sess->mac_len;
3619 			params.mac_buf.vaddr =
3620 				sym_op->auth.digest.data;
3621 			params.mac_buf.dma_addr =
3622 				sym_op->auth.digest.phys_addr;
3623 		} else {
3624 			uint32_t off = sym_op->auth.data.offset +
3625 				sym_op->auth.data.length;
3626 			int32_t dlen, space;
3627 
3628 			m_dst = sym_op->m_dst ?
3629 				sym_op->m_dst : sym_op->m_src;
3630 			dlen = rte_pktmbuf_pkt_len(m_dst);
3631 
3632 			space = off + mac_len - dlen;
3633 			if (space > 0)
3634 				if (!rte_pktmbuf_append(m_dst, space)) {
3635 					CPT_LOG_DP_ERR("Failed to extend "
3636 						       "mbuf by %uB", space);
3637 					ret = -EINVAL;
3638 					goto free_mdata_and_exit;
3639 				}
3640 
3641 			params.mac_buf.vaddr =
3642 				rte_pktmbuf_mtod_offset(m_dst, void *, off);
3643 			params.mac_buf.dma_addr =
3644 				rte_pktmbuf_mtophys_offset(m_dst, off);
3645 			params.mac_buf.size = mac_len;
3646 		}
3647 	} else {
3648 		/* Need space for storing generated mac */
3649 		params.mac_buf.vaddr = (uint8_t *)mdata + space;
3650 		params.mac_buf.dma_addr = mphys + space;
3651 		params.mac_buf.size = mac_len;
3652 		space += RTE_ALIGN_CEIL(mac_len, 8);
3653 		op[2] = (uintptr_t)params.mac_buf.vaddr;
3654 		op[3] = mac_len;
3655 	}
3656 
3657 	params.meta_buf.vaddr = (uint8_t *)mdata + space;
3658 	params.meta_buf.dma_addr = mphys + space;
3659 	params.meta_buf.size -= space;
3660 
3661 	/* Out of place processing */
3662 	params.src_iov = (void *)src;
3663 
3664 	/*Store SG I/O in the api for reuse */
3665 	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
3666 		CPT_LOG_DP_ERR("Prepare src iov failed");
3667 		ret = -EINVAL;
3668 		goto free_mdata_and_exit;
3669 	}
3670 
3671 	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
3672 	if (unlikely(*prep_req == NULL)) {
3673 		ret = -EINVAL;
3674 		goto free_mdata_and_exit;
3675 	}
3676 
3677 	*mdata_ptr = mdata;
3678 
3679 	return 0;
3680 
3681 free_mdata_and_exit:
3682 	free_op_meta(mdata, m_info->pool);
3683 err_exit:
3684 	return ret;
3685 }
3686 
3687 #endif /*_CPT_UCODE_H_ */
3688