xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision 9e06e39b3c6fbcf03d233cbe1fb9604d45dc866f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35 
36 /* Required types */
37 typedef uint64_t	dma_addr_t;
38 
39 /* RTA header files */
40 #include <desc/ipsec.h>
41 #include <desc/pdcp.h>
42 #include <desc/algo.h>
43 
44 /* A minimum job descriptor consists of a one-word job descriptor HEADER
45  * and a pointer to the shared descriptor.
46  */
47 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52 
53 #define NO_PREFETCH 0
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS	32000
56 #define FLE_POOL_BUF_SIZE	256
57 #define FLE_POOL_CACHE_SIZE	512
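/* Each FLE/SGE is a 32-byte qbman_fle, so the SG descriptor memory is the
 * base FLE area plus one entry per segment.
 */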
58 #define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
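/* Data head room (DHR) deltas applied to data_off when a protocol-offload
 * (simple FD) frame is dequeued: outbound output starts 114 bytes earlier,
 * presumably to cover the headers SEC prepends; inbound needs no shift.
 */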
59 #define SEC_FLC_DHR_OUTBOUND	-114
60 #define SEC_FLC_DHR_INBOUND	0
61 
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
63 
64 static uint8_t cryptodev_driver_id;
65 
66 int dpaa2_logtype_sec;
67 
68 #ifdef RTE_LIBRTE_SECURITY
69 static inline int
70 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
71 			   struct rte_crypto_op *op,
72 			   struct qbman_fd *fd, uint16_t bpid)
73 {
74 	struct rte_crypto_sym_op *sym_op = op->sym;
75 	struct ctxt_priv *priv = sess->ctxt;
76 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
77 	struct sec_flow_context *flc;
78 	struct rte_mbuf *mbuf;
79 	uint32_t in_len = 0, out_len = 0;
80 
81 	if (sym_op->m_dst)
82 		mbuf = sym_op->m_dst;
83 	else
84 		mbuf = sym_op->m_src;
85 
86 	/* first FLE entry used to store mbuf and session ctxt */
87 	fle = (struct qbman_fle *)rte_malloc(NULL,
88 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
89 			RTE_CACHE_LINE_SIZE);
90 	if (unlikely(!fle)) {
91 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
92 		return -1;
93 	}
94 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
95 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
96 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
97 
98 	/* Save the shared descriptor */
99 	flc = &priv->flc_desc[0].flc;
100 
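	/* Compound-frame FLE layout: fle[0] holds bookkeeping (the op pointer
	 * and session ctxt), fle[1] is the output frame-list entry, fle[2]
	 * the input entry, and fle[3..] the scatter/gather table they share.
	 */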
101 	op_fle = fle + 1;
102 	ip_fle = fle + 2;
103 	sge = fle + 3;
104 
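	/* A valid hardware pool id lets QBMAN free the buffers itself; the
	 * invalid-BPID (IVP) bit marks them as software managed instead.
	 */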
105 	if (likely(bpid < MAX_BPID)) {
106 		DPAA2_SET_FD_BPID(fd, bpid);
107 		DPAA2_SET_FLE_BPID(op_fle, bpid);
108 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
109 	} else {
110 		DPAA2_SET_FD_IVP(fd);
111 		DPAA2_SET_FLE_IVP(op_fle);
112 		DPAA2_SET_FLE_IVP(ip_fle);
113 	}
114 
115 	/* Configure FD as a FRAME LIST */
116 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
117 	DPAA2_SET_FD_COMPOUND_FMT(fd);
118 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
119 
120 	/* Configure Output FLE with Scatter/Gather Entry */
121 	DPAA2_SET_FLE_SG_EXT(op_fle);
122 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
123 
124 	/* Configure Output SGE for Encap/Decap */
125 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
126 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
127 	/* o/p segs */
128 	while (mbuf->next) {
129 		sge->length = mbuf->data_len;
130 		out_len += sge->length;
131 		sge++;
132 		mbuf = mbuf->next;
133 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
134 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
135 	}
136 	/* using buf_len for last buf - so that extra data can be added */
137 	sge->length = mbuf->buf_len - mbuf->data_off;
138 	out_len += sge->length;
139 
140 	DPAA2_SET_FLE_FIN(sge);
141 	op_fle->length = out_len;
142 
143 	sge++;
144 	mbuf = sym_op->m_src;
145 
146 	/* Configure Input FLE with Scatter/Gather Entry */
147 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
148 	DPAA2_SET_FLE_SG_EXT(ip_fle);
149 	DPAA2_SET_FLE_FIN(ip_fle);
150 
151 	/* Configure input SGE for Encap/Decap */
152 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
153 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
154 	sge->length = mbuf->data_len;
155 	in_len += sge->length;
156 
157 	mbuf = mbuf->next;
158 	/* i/p segs */
159 	while (mbuf) {
160 		sge++;
161 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
162 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
163 		sge->length = mbuf->data_len;
164 		in_len += sge->length;
165 		mbuf = mbuf->next;
166 	}
167 	ip_fle->length = in_len;
168 	DPAA2_SET_FLE_FIN(sge);
169 
170 	/* In case of PDCP, the per-packet HFN is stored in the
171 	 * crypto op private area, right after sym_op.
172 	 */
173 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
174 		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
175 		/* enable HFN override */
176 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
177 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
178 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
179 	}
180 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
181 
182 	return 0;
183 }
184 
185 static inline int
186 build_proto_compound_fd(dpaa2_sec_session *sess,
187 	       struct rte_crypto_op *op,
188 	       struct qbman_fd *fd, uint16_t bpid)
189 {
190 	struct rte_crypto_sym_op *sym_op = op->sym;
191 	struct ctxt_priv *priv = sess->ctxt;
192 	struct qbman_fle *fle, *ip_fle, *op_fle;
193 	struct sec_flow_context *flc;
194 	struct rte_mbuf *src_mbuf = sym_op->m_src;
195 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
196 	int retval;
197 
198 	if (!dst_mbuf)
199 		dst_mbuf = src_mbuf;
200 
201 	/* Save the shared descriptor */
202 	flc = &priv->flc_desc[0].flc;
203 
204 	/* we are using the first FLE entry to store Mbuf */
205 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
206 	if (retval) {
207 		DPAA2_SEC_DP_ERR("Memory alloc failed");
208 		return -1;
209 	}
210 	memset(fle, 0, FLE_POOL_BUF_SIZE);
211 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
212 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
213 
214 	op_fle = fle + 1;
215 	ip_fle = fle + 2;
216 
217 	if (likely(bpid < MAX_BPID)) {
218 		DPAA2_SET_FD_BPID(fd, bpid);
219 		DPAA2_SET_FLE_BPID(op_fle, bpid);
220 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
221 	} else {
222 		DPAA2_SET_FD_IVP(fd);
223 		DPAA2_SET_FLE_IVP(op_fle);
224 		DPAA2_SET_FLE_IVP(ip_fle);
225 	}
226 
227 	/* Configure FD as a FRAME LIST */
228 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
229 	DPAA2_SET_FD_COMPOUND_FMT(fd);
230 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
231 
232 	/* Configure Output FLE with dst mbuf data  */
233 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
234 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
235 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
236 
237 	/* Configure Input FLE with src mbuf data */
238 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
239 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
240 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
241 
242 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
243 	DPAA2_SET_FLE_FIN(ip_fle);
244 
245 	/* In case of PDCP, the per-packet HFN is stored in the
246 	 * crypto op private area, right after sym_op.
247 	 */
248 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
249 		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
250 		/* enable HFN override */
251 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
252 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
253 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
254 	}
255 
256 	return 0;
257 
258 }
259 
260 static inline int
261 build_proto_fd(dpaa2_sec_session *sess,
262 	       struct rte_crypto_op *op,
263 	       struct qbman_fd *fd, uint16_t bpid)
264 {
265 	struct rte_crypto_sym_op *sym_op = op->sym;
266 	if (sym_op->m_dst)
267 		return build_proto_compound_fd(sess, op, fd, bpid);
268 
269 	struct ctxt_priv *priv = sess->ctxt;
270 	struct sec_flow_context *flc;
271 	struct rte_mbuf *mbuf = sym_op->m_src;
272 
273 	if (likely(bpid < MAX_BPID))
274 		DPAA2_SET_FD_BPID(fd, bpid);
275 	else
276 		DPAA2_SET_FD_IVP(fd);
277 
278 	/* Save the shared descriptor */
279 	flc = &priv->flc_desc[0].flc;
280 
281 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
282 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
283 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
284 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
285 
286 	/* Save the mbuf IOVA in digest.phys_addr and stash the op pointer
	 * in buf_iova; both are restored on dequeue in sec_simple_fd_to_mbuf().
	 */
287 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
288 	mbuf->buf_iova = (size_t)op;
289 
290 	return 0;
291 }
292 #endif
293 
294 static inline int
295 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
296 		 struct rte_crypto_op *op,
297 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
298 {
299 	struct rte_crypto_sym_op *sym_op = op->sym;
300 	struct ctxt_priv *priv = sess->ctxt;
301 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
302 	struct sec_flow_context *flc;
303 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
304 	int icv_len = sess->digest_length;
305 	uint8_t *old_icv;
306 	struct rte_mbuf *mbuf;
307 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
308 			sess->iv.offset);
309 
310 	if (sym_op->m_dst)
311 		mbuf = sym_op->m_dst;
312 	else
313 		mbuf = sym_op->m_src;
314 
315 	/* first FLE entry used to store mbuf and session ctxt */
316 	fle = (struct qbman_fle *)rte_malloc(NULL,
317 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
318 			RTE_CACHE_LINE_SIZE);
319 	if (unlikely(!fle)) {
320 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
321 		return -1;
322 	}
323 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
324 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
325 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
326 
327 	op_fle = fle + 1;
328 	ip_fle = fle + 2;
329 	sge = fle + 3;
330 
331 	/* Save the shared descriptor */
332 	flc = &priv->flc_desc[0].flc;
333 
334 	/* Configure FD as a FRAME LIST */
335 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
336 	DPAA2_SET_FD_COMPOUND_FMT(fd);
337 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
338 
339 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
340 		   "iv-len=%d data_off: 0x%x\n",
341 		   sym_op->aead.data.offset,
342 		   sym_op->aead.data.length,
343 		   sess->digest_length,
344 		   sess->iv.length,
345 		   sym_op->m_src->data_off);
346 
347 	/* Configure Output FLE with Scatter/Gather Entry */
348 	DPAA2_SET_FLE_SG_EXT(op_fle);
349 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
350 
351 	if (auth_only_len)
352 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
353 
354 	op_fle->length = (sess->dir == DIR_ENC) ?
355 			(sym_op->aead.data.length + icv_len) :
356 			sym_op->aead.data.length;
357 
358 	/* Configure Output SGE for Encap/Decap */
359 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
360 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
361 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
362 
363 	mbuf = mbuf->next;
364 	/* o/p segs */
365 	while (mbuf) {
366 		sge++;
367 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
368 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
369 		sge->length = mbuf->data_len;
370 		mbuf = mbuf->next;
371 	}
372 	sge->length -= icv_len;
373 
374 	if (sess->dir == DIR_ENC) {
375 		sge++;
376 		DPAA2_SET_FLE_ADDR(sge,
377 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
378 		sge->length = icv_len;
379 	}
380 	DPAA2_SET_FLE_FIN(sge);
381 
382 	sge++;
383 	mbuf = sym_op->m_src;
384 
385 	/* Configure Input FLE with Scatter/Gather Entry */
386 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
387 	DPAA2_SET_FLE_SG_EXT(ip_fle);
388 	DPAA2_SET_FLE_FIN(ip_fle);
389 	ip_fle->length = (sess->dir == DIR_ENC) ?
390 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
391 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
392 		 icv_len);
393 
394 	/* Configure Input SGE for Encap/Decap */
395 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
396 	sge->length = sess->iv.length;
397 
398 	sge++;
399 	if (auth_only_len) {
400 		DPAA2_SET_FLE_ADDR(sge,
401 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
402 		sge->length = auth_only_len;
403 		sge++;
404 	}
405 
406 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
407 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
408 				mbuf->data_off);
409 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
410 
411 	mbuf = mbuf->next;
412 	/* i/p segs */
413 	while (mbuf) {
414 		sge++;
415 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
416 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
417 		sge->length = mbuf->data_len;
418 		mbuf = mbuf->next;
419 	}
420 
421 	if (sess->dir == DIR_DEC) {
422 		sge++;
423 		old_icv = (uint8_t *)(sge + 1);
424 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
425 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
426 		sge->length = icv_len;
427 	}
428 
429 	DPAA2_SET_FLE_FIN(sge);
430 	if (auth_only_len) {
431 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
432 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
433 	}
434 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
435 
436 	return 0;
437 }
438 
439 static inline int
440 build_authenc_gcm_fd(dpaa2_sec_session *sess,
441 		     struct rte_crypto_op *op,
442 		     struct qbman_fd *fd, uint16_t bpid)
443 {
444 	struct rte_crypto_sym_op *sym_op = op->sym;
445 	struct ctxt_priv *priv = sess->ctxt;
446 	struct qbman_fle *fle, *sge;
447 	struct sec_flow_context *flc;
448 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
449 	int icv_len = sess->digest_length, retval;
450 	uint8_t *old_icv;
451 	struct rte_mbuf *dst;
452 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
453 			sess->iv.offset);
454 
455 	if (sym_op->m_dst)
456 		dst = sym_op->m_dst;
457 	else
458 		dst = sym_op->m_src;
459 
460 	/* TODO: we are using the first FLE entry to store the mbuf and session
461 	 * ctxt. Currently we do not know which FLE has the mbuf stored.
462 	 * So while retrieving we can go back one FLE from the FD ADDR
463 	 * to get the mbuf address from the previous FLE.
464 	 * A better approach would be to use the inline mbuf.
465 	 */
466 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
467 	if (retval) {
468 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
469 		return -1;
470 	}
471 	memset(fle, 0, FLE_POOL_BUF_SIZE);
472 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
473 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
474 	fle = fle + 1;
475 	sge = fle + 2;
476 	if (likely(bpid < MAX_BPID)) {
477 		DPAA2_SET_FD_BPID(fd, bpid);
478 		DPAA2_SET_FLE_BPID(fle, bpid);
479 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
480 		DPAA2_SET_FLE_BPID(sge, bpid);
481 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
482 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
483 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
484 	} else {
485 		DPAA2_SET_FD_IVP(fd);
486 		DPAA2_SET_FLE_IVP(fle);
487 		DPAA2_SET_FLE_IVP((fle + 1));
488 		DPAA2_SET_FLE_IVP(sge);
489 		DPAA2_SET_FLE_IVP((sge + 1));
490 		DPAA2_SET_FLE_IVP((sge + 2));
491 		DPAA2_SET_FLE_IVP((sge + 3));
492 	}
493 
494 	/* Save the shared descriptor */
495 	flc = &priv->flc_desc[0].flc;
496 	/* Configure FD as a FRAME LIST */
497 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
498 	DPAA2_SET_FD_COMPOUND_FMT(fd);
499 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
500 
501 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
502 		   "iv-len=%d data_off: 0x%x\n",
503 		   sym_op->aead.data.offset,
504 		   sym_op->aead.data.length,
505 		   sess->digest_length,
506 		   sess->iv.length,
507 		   sym_op->m_src->data_off);
508 
509 	/* Configure Output FLE with Scatter/Gather Entry */
510 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
511 	if (auth_only_len)
512 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
513 	fle->length = (sess->dir == DIR_ENC) ?
514 			(sym_op->aead.data.length + icv_len) :
515 			sym_op->aead.data.length;
516 
517 	DPAA2_SET_FLE_SG_EXT(fle);
518 
519 	/* Configure Output SGE for Encap/Decap */
520 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
521 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
522 	sge->length = sym_op->aead.data.length;
523 
524 	if (sess->dir == DIR_ENC) {
525 		sge++;
526 		DPAA2_SET_FLE_ADDR(sge,
527 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
528 		sge->length = sess->digest_length;
529 	}
530 	DPAA2_SET_FLE_FIN(sge);
531 
532 	sge++;
533 	fle++;
534 
535 	/* Configure Input FLE with Scatter/Gather Entry */
536 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
537 	DPAA2_SET_FLE_SG_EXT(fle);
538 	DPAA2_SET_FLE_FIN(fle);
539 	fle->length = (sess->dir == DIR_ENC) ?
540 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
541 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
542 		 sess->digest_length);
543 
544 	/* Configure Input SGE for Encap/Decap */
545 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
546 	sge->length = sess->iv.length;
547 	sge++;
548 	if (auth_only_len) {
549 		DPAA2_SET_FLE_ADDR(sge,
550 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
551 		sge->length = auth_only_len;
552 		DPAA2_SET_FLE_BPID(sge, bpid);
553 		sge++;
554 	}
555 
556 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
557 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
558 				sym_op->m_src->data_off);
559 	sge->length = sym_op->aead.data.length;
560 	if (sess->dir == DIR_DEC) {
561 		sge++;
562 		old_icv = (uint8_t *)(sge + 1);
563 		memcpy(old_icv,	sym_op->aead.digest.data,
564 		       sess->digest_length);
565 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
566 		sge->length = sess->digest_length;
567 	}
568 	DPAA2_SET_FLE_FIN(sge);
569 
570 	if (auth_only_len) {
571 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
572 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
573 	}
574 
575 	DPAA2_SET_FD_LEN(fd, fle->length);
576 	return 0;
577 }
578 
579 static inline int
580 build_authenc_sg_fd(dpaa2_sec_session *sess,
581 		 struct rte_crypto_op *op,
582 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
583 {
584 	struct rte_crypto_sym_op *sym_op = op->sym;
585 	struct ctxt_priv *priv = sess->ctxt;
586 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
587 	struct sec_flow_context *flc;
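	/* auth_only_len packs the authenticate-only regions surrounding the
	 * cipher data: the low 16 bits carry the header bytes before the
	 * cipher offset, the high 16 bits the tail bytes after it.
	 */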
588 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
589 				sym_op->auth.data.offset;
590 	uint16_t auth_tail_len = sym_op->auth.data.length -
591 				sym_op->cipher.data.length - auth_hdr_len;
592 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
593 	int icv_len = sess->digest_length;
594 	uint8_t *old_icv;
595 	struct rte_mbuf *mbuf;
596 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
597 			sess->iv.offset);
598 
599 	if (sym_op->m_dst)
600 		mbuf = sym_op->m_dst;
601 	else
602 		mbuf = sym_op->m_src;
603 
604 	/* first FLE entry used to store mbuf and session ctxt */
605 	fle = (struct qbman_fle *)rte_malloc(NULL,
606 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
607 			RTE_CACHE_LINE_SIZE);
608 	if (unlikely(!fle)) {
609 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
610 		return -1;
611 	}
612 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
613 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
614 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
615 
616 	op_fle = fle + 1;
617 	ip_fle = fle + 2;
618 	sge = fle + 3;
619 
620 	/* Save the shared descriptor */
621 	flc = &priv->flc_desc[0].flc;
622 
623 	/* Configure FD as a FRAME LIST */
624 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
625 	DPAA2_SET_FD_COMPOUND_FMT(fd);
626 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
627 
628 	DPAA2_SEC_DP_DEBUG(
629 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
630 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
631 		sym_op->auth.data.offset,
632 		sym_op->auth.data.length,
633 		sess->digest_length,
634 		sym_op->cipher.data.offset,
635 		sym_op->cipher.data.length,
636 		sess->iv.length,
637 		sym_op->m_src->data_off);
638 
639 	/* Configure Output FLE with Scatter/Gather Entry */
640 	DPAA2_SET_FLE_SG_EXT(op_fle);
641 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
642 
643 	if (auth_only_len)
644 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
645 
646 	op_fle->length = (sess->dir == DIR_ENC) ?
647 			(sym_op->cipher.data.length + icv_len) :
648 			sym_op->cipher.data.length;
649 
650 	/* Configure Output SGE for Encap/Decap */
651 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
652 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
653 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
654 
655 	mbuf = mbuf->next;
656 	/* o/p segs */
657 	while (mbuf) {
658 		sge++;
659 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
660 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
661 		sge->length = mbuf->data_len;
662 		mbuf = mbuf->next;
663 	}
664 	sge->length -= icv_len;
665 
666 	if (sess->dir == DIR_ENC) {
667 		sge++;
668 		DPAA2_SET_FLE_ADDR(sge,
669 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
670 		sge->length = icv_len;
671 	}
672 	DPAA2_SET_FLE_FIN(sge);
673 
674 	sge++;
675 	mbuf = sym_op->m_src;
676 
677 	/* Configure Input FLE with Scatter/Gather Entry */
678 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
679 	DPAA2_SET_FLE_SG_EXT(ip_fle);
680 	DPAA2_SET_FLE_FIN(ip_fle);
681 	ip_fle->length = (sess->dir == DIR_ENC) ?
682 			(sym_op->auth.data.length + sess->iv.length) :
683 			(sym_op->auth.data.length + sess->iv.length +
684 			 icv_len);
685 
686 	/* Configure Input SGE for Encap/Decap */
687 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
688 	sge->length = sess->iv.length;
689 
690 	sge++;
691 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
692 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
693 				mbuf->data_off);
694 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
695 
696 	mbuf = mbuf->next;
697 	/* i/p segs */
698 	while (mbuf) {
699 		sge++;
700 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
701 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
702 		sge->length = mbuf->data_len;
703 		mbuf = mbuf->next;
704 	}
705 	sge->length -= icv_len;
706 
707 	if (sess->dir == DIR_DEC) {
708 		sge++;
709 		old_icv = (uint8_t *)(sge + 1);
710 		memcpy(old_icv,	sym_op->auth.digest.data,
711 		       icv_len);
712 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
713 		sge->length = icv_len;
714 	}
715 
716 	DPAA2_SET_FLE_FIN(sge);
717 	if (auth_only_len) {
718 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
719 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
720 	}
721 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
722 
723 	return 0;
724 }
725 
726 static inline int
727 build_authenc_fd(dpaa2_sec_session *sess,
728 		 struct rte_crypto_op *op,
729 		 struct qbman_fd *fd, uint16_t bpid)
730 {
731 	struct rte_crypto_sym_op *sym_op = op->sym;
732 	struct ctxt_priv *priv = sess->ctxt;
733 	struct qbman_fle *fle, *sge;
734 	struct sec_flow_context *flc;
735 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
736 				sym_op->auth.data.offset;
737 	uint16_t auth_tail_len = sym_op->auth.data.length -
738 				sym_op->cipher.data.length - auth_hdr_len;
739 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
740 
741 	int icv_len = sess->digest_length, retval;
742 	uint8_t *old_icv;
743 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
744 			sess->iv.offset);
745 	struct rte_mbuf *dst;
746 
747 	if (sym_op->m_dst)
748 		dst = sym_op->m_dst;
749 	else
750 		dst = sym_op->m_src;
751 
752 	/* We are using the first FLE entry to store the mbuf.
753 	 * Currently we do not know which FLE has the mbuf stored.
754 	 * So while retrieving we can go back one FLE from the FD ADDR
755 	 * to get the mbuf address from the previous FLE.
756 	 * A better approach would be to use the inline mbuf.
757 	 */
758 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
759 	if (retval) {
760 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
761 		return -1;
762 	}
763 	memset(fle, 0, FLE_POOL_BUF_SIZE);
764 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
765 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
766 	fle = fle + 1;
767 	sge = fle + 2;
768 	if (likely(bpid < MAX_BPID)) {
769 		DPAA2_SET_FD_BPID(fd, bpid);
770 		DPAA2_SET_FLE_BPID(fle, bpid);
771 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
772 		DPAA2_SET_FLE_BPID(sge, bpid);
773 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
774 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
775 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
776 	} else {
777 		DPAA2_SET_FD_IVP(fd);
778 		DPAA2_SET_FLE_IVP(fle);
779 		DPAA2_SET_FLE_IVP((fle + 1));
780 		DPAA2_SET_FLE_IVP(sge);
781 		DPAA2_SET_FLE_IVP((sge + 1));
782 		DPAA2_SET_FLE_IVP((sge + 2));
783 		DPAA2_SET_FLE_IVP((sge + 3));
784 	}
785 
786 	/* Save the shared descriptor */
787 	flc = &priv->flc_desc[0].flc;
788 	/* Configure FD as a FRAME LIST */
789 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
790 	DPAA2_SET_FD_COMPOUND_FMT(fd);
791 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
792 
793 	DPAA2_SEC_DP_DEBUG(
794 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
795 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
796 		sym_op->auth.data.offset,
797 		sym_op->auth.data.length,
798 		sess->digest_length,
799 		sym_op->cipher.data.offset,
800 		sym_op->cipher.data.length,
801 		sess->iv.length,
802 		sym_op->m_src->data_off);
803 
804 	/* Configure Output FLE with Scatter/Gather Entry */
805 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
806 	if (auth_only_len)
807 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
808 	fle->length = (sess->dir == DIR_ENC) ?
809 			(sym_op->cipher.data.length + icv_len) :
810 			sym_op->cipher.data.length;
811 
812 	DPAA2_SET_FLE_SG_EXT(fle);
813 
814 	/* Configure Output SGE for Encap/Decap */
815 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
816 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
817 				dst->data_off);
818 	sge->length = sym_op->cipher.data.length;
819 
820 	if (sess->dir == DIR_ENC) {
821 		sge++;
822 		DPAA2_SET_FLE_ADDR(sge,
823 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
824 		sge->length = sess->digest_length;
825 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
826 					sess->iv.length));
827 	}
828 	DPAA2_SET_FLE_FIN(sge);
829 
830 	sge++;
831 	fle++;
832 
833 	/* Configure Input FLE with Scatter/Gather Entry */
834 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
835 	DPAA2_SET_FLE_SG_EXT(fle);
836 	DPAA2_SET_FLE_FIN(fle);
837 	fle->length = (sess->dir == DIR_ENC) ?
838 			(sym_op->auth.data.length + sess->iv.length) :
839 			(sym_op->auth.data.length + sess->iv.length +
840 			 sess->digest_length);
841 
842 	/* Configure Input SGE for Encap/Decap */
843 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
844 	sge->length = sess->iv.length;
845 	sge++;
846 
847 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
848 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
849 				sym_op->m_src->data_off);
850 	sge->length = sym_op->auth.data.length;
851 	if (sess->dir == DIR_DEC) {
852 		sge++;
853 		old_icv = (uint8_t *)(sge + 1);
854 		memcpy(old_icv,	sym_op->auth.digest.data,
855 		       sess->digest_length);
856 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
857 		sge->length = sess->digest_length;
858 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
859 				 sess->digest_length +
860 				 sess->iv.length));
861 	}
862 	DPAA2_SET_FLE_FIN(sge);
863 	if (auth_only_len) {
864 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
865 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
866 	}
867 	return 0;
868 }
869 
870 static inline int build_auth_sg_fd(
871 		dpaa2_sec_session *sess,
872 		struct rte_crypto_op *op,
873 		struct qbman_fd *fd,
874 		__rte_unused uint16_t bpid)
875 {
876 	struct rte_crypto_sym_op *sym_op = op->sym;
877 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
878 	struct sec_flow_context *flc;
879 	struct ctxt_priv *priv = sess->ctxt;
880 	int data_len, data_offset;
881 	uint8_t *old_digest;
882 	struct rte_mbuf *mbuf;
883 
884 	data_len = sym_op->auth.data.length;
885 	data_offset = sym_op->auth.data.offset;
886 
887 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
888 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
889 		if ((data_len & 7) || (data_offset & 7)) {
890 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
891 			return -1;
892 		}
893 
894 		data_len = data_len >> 3;
895 		data_offset = data_offset >> 3;
896 	}
897 
898 	mbuf = sym_op->m_src;
899 	fle = (struct qbman_fle *)rte_malloc(NULL,
900 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
901 			RTE_CACHE_LINE_SIZE);
902 	if (unlikely(!fle)) {
903 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
904 		return -1;
905 	}
906 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
907 	/* first FLE entry used to store mbuf and session ctxt */
908 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
909 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
910 	op_fle = fle + 1;
911 	ip_fle = fle + 2;
912 	sge = fle + 3;
913 
914 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
915 	/* sg FD */
916 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
917 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
918 	DPAA2_SET_FD_COMPOUND_FMT(fd);
919 
920 	/* o/p fle */
921 	DPAA2_SET_FLE_ADDR(op_fle,
922 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
923 	op_fle->length = sess->digest_length;
924 
925 	/* i/p fle */
926 	DPAA2_SET_FLE_SG_EXT(ip_fle);
927 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
928 	ip_fle->length = data_len;
929 
930 	if (sess->iv.length) {
931 		uint8_t *iv_ptr;
932 
933 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
934 						   sess->iv.offset);
935 
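		/* The conv_to_*_iv() helpers repack the API-level IV into the
		 * condensed 12-byte SNOW f9 / 8-byte ZUC EIA layouts expected
		 * by the SEC authentication descriptors.
		 */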
936 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
937 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
938 			sge->length = 12;
939 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
940 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
941 			sge->length = 8;
942 		} else {
943 			sge->length = sess->iv.length;
944 		}
945 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
946 		ip_fle->length += sge->length;
947 		sge++;
948 	}
949 	/* i/p 1st seg */
950 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
951 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
952 
953 	if (data_len <= (mbuf->data_len - data_offset)) {
954 		sge->length = data_len;
955 		data_len = 0;
956 	} else {
957 		sge->length = mbuf->data_len - data_offset;
958 
959 		/* remaining i/p segs */
960 		while ((data_len = data_len - sge->length) &&
961 		       (mbuf = mbuf->next)) {
962 			sge++;
963 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
964 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
965 			if (data_len > mbuf->data_len)
966 				sge->length = mbuf->data_len;
967 			else
968 				sge->length = data_len;
969 		}
970 	}
971 
972 	if (sess->dir == DIR_DEC) {
973 		/* Digest verification case */
974 		sge++;
975 		old_digest = (uint8_t *)(sge + 1);
976 		rte_memcpy(old_digest, sym_op->auth.digest.data,
977 			   sess->digest_length);
978 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
979 		sge->length = sess->digest_length;
980 		ip_fle->length += sess->digest_length;
981 	}
982 	DPAA2_SET_FLE_FIN(sge);
983 	DPAA2_SET_FLE_FIN(ip_fle);
984 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
985 
986 	return 0;
987 }
988 
989 static inline int
990 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
991 	      struct qbman_fd *fd, uint16_t bpid)
992 {
993 	struct rte_crypto_sym_op *sym_op = op->sym;
994 	struct qbman_fle *fle, *sge;
995 	struct sec_flow_context *flc;
996 	struct ctxt_priv *priv = sess->ctxt;
997 	int data_len, data_offset;
998 	uint8_t *old_digest;
999 	int retval;
1000 
1001 	data_len = sym_op->auth.data.length;
1002 	data_offset = sym_op->auth.data.offset;
1003 
1004 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1005 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1006 		if ((data_len & 7) || (data_offset & 7)) {
1007 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1008 			return -1;
1009 		}
1010 
1011 		data_len = data_len >> 3;
1012 		data_offset = data_offset >> 3;
1013 	}
1014 
1015 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1016 	if (retval) {
1017 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1018 		return -1;
1019 	}
1020 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1021 	/* TODO: we are using the first FLE entry to store the mbuf.
1022 	 * Currently we do not know which FLE has the mbuf stored.
1023 	 * So while retrieving we can go back one FLE from the FD ADDR
1024 	 * to get the mbuf address from the previous FLE.
1025 	 * A better approach would be to use the inline mbuf.
1026 	 */
1027 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1028 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1029 	fle = fle + 1;
1030 	sge = fle + 2;
1031 
1032 	if (likely(bpid < MAX_BPID)) {
1033 		DPAA2_SET_FD_BPID(fd, bpid);
1034 		DPAA2_SET_FLE_BPID(fle, bpid);
1035 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1036 		DPAA2_SET_FLE_BPID(sge, bpid);
1037 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1038 	} else {
1039 		DPAA2_SET_FD_IVP(fd);
1040 		DPAA2_SET_FLE_IVP(fle);
1041 		DPAA2_SET_FLE_IVP((fle + 1));
1042 		DPAA2_SET_FLE_IVP(sge);
1043 		DPAA2_SET_FLE_IVP((sge + 1));
1044 	}
1045 
1046 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1047 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1048 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1049 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1050 
1051 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1052 	fle->length = sess->digest_length;
1053 	fle++;
1054 
1055 	/* Setting input FLE */
1056 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1057 	DPAA2_SET_FLE_SG_EXT(fle);
1058 	fle->length = data_len;
1059 
1060 	if (sess->iv.length) {
1061 		uint8_t *iv_ptr;
1062 
1063 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1064 						   sess->iv.offset);
1065 
1066 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1067 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1068 			sge->length = 12;
1069 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1070 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1071 			sge->length = 8;
1072 		} else {
1073 			sge->length = sess->iv.length;
1074 		}
1075 
1076 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1077 		fle->length = fle->length + sge->length;
1078 		sge++;
1079 	}
1080 
1081 	/* Setting data to authenticate */
1082 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1083 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1084 	sge->length = data_len;
1085 
1086 	if (sess->dir == DIR_DEC) {
1087 		sge++;
1088 		old_digest = (uint8_t *)(sge + 1);
1089 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1090 			   sess->digest_length);
1091 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1092 		sge->length = sess->digest_length;
1093 		fle->length = fle->length + sess->digest_length;
1094 	}
1095 
1096 	DPAA2_SET_FLE_FIN(sge);
1097 	DPAA2_SET_FLE_FIN(fle);
1098 	DPAA2_SET_FD_LEN(fd, fle->length);
1099 
1100 	return 0;
1101 }
1102 
1103 static int
1104 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1105 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1106 {
1107 	struct rte_crypto_sym_op *sym_op = op->sym;
1108 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1109 	int data_len, data_offset;
1110 	struct sec_flow_context *flc;
1111 	struct ctxt_priv *priv = sess->ctxt;
1112 	struct rte_mbuf *mbuf;
1113 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1114 			sess->iv.offset);
1115 
1116 	data_len = sym_op->cipher.data.length;
1117 	data_offset = sym_op->cipher.data.offset;
1118 
1119 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1120 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1121 		if ((data_len & 7) || (data_offset & 7)) {
1122 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1123 			return -1;
1124 		}
1125 
1126 		data_len = data_len >> 3;
1127 		data_offset = data_offset >> 3;
1128 	}
1129 
1130 	if (sym_op->m_dst)
1131 		mbuf = sym_op->m_dst;
1132 	else
1133 		mbuf = sym_op->m_src;
1134 
1135 	/* first FLE entry used to store mbuf and session ctxt */
1136 	fle = (struct qbman_fle *)rte_malloc(NULL,
1137 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1138 			RTE_CACHE_LINE_SIZE);
1139 	if (!fle) {
1140 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1141 		return -1;
1142 	}
1143 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1145 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1146 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1147 
1148 	op_fle = fle + 1;
1149 	ip_fle = fle + 2;
1150 	sge = fle + 3;
1151 
1152 	flc = &priv->flc_desc[0].flc;
1153 
1154 	DPAA2_SEC_DP_DEBUG(
1155 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1156 		" data_off: 0x%x\n",
1157 		data_offset,
1158 		data_len,
1159 		sess->iv.length,
1160 		sym_op->m_src->data_off);
1161 
1162 	/* o/p fle */
1163 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1164 	op_fle->length = data_len;
1165 	DPAA2_SET_FLE_SG_EXT(op_fle);
1166 
1167 	/* o/p 1st seg */
1168 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1169 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1170 	sge->length = mbuf->data_len - data_offset;
1171 
1172 	mbuf = mbuf->next;
1173 	/* o/p segs */
1174 	while (mbuf) {
1175 		sge++;
1176 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1177 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1178 		sge->length = mbuf->data_len;
1179 		mbuf = mbuf->next;
1180 	}
1181 	DPAA2_SET_FLE_FIN(sge);
1182 
1183 	DPAA2_SEC_DP_DEBUG(
1184 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1185 		flc, fle, fle->addr_hi, fle->addr_lo,
1186 		fle->length);
1187 
1188 	/* i/p fle */
1189 	mbuf = sym_op->m_src;
1190 	sge++;
1191 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1192 	ip_fle->length = sess->iv.length + data_len;
1193 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1194 
1195 	/* i/p IV */
1196 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1197 	DPAA2_SET_FLE_OFFSET(sge, 0);
1198 	sge->length = sess->iv.length;
1199 
1200 	sge++;
1201 
1202 	/* i/p 1st seg */
1203 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1204 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1205 	sge->length = mbuf->data_len - data_offset;
1206 
1207 	mbuf = mbuf->next;
1208 	/* i/p segs */
1209 	while (mbuf) {
1210 		sge++;
1211 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1212 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1213 		sge->length = mbuf->data_len;
1214 		mbuf = mbuf->next;
1215 	}
1216 	DPAA2_SET_FLE_FIN(sge);
1217 	DPAA2_SET_FLE_FIN(ip_fle);
1218 
1219 	/* sg fd */
1220 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1221 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1222 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1223 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1224 
1225 	DPAA2_SEC_DP_DEBUG(
1226 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1227 		" off =%d, len =%d\n",
1228 		DPAA2_GET_FD_ADDR(fd),
1229 		DPAA2_GET_FD_BPID(fd),
1230 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1231 		DPAA2_GET_FD_OFFSET(fd),
1232 		DPAA2_GET_FD_LEN(fd));
1233 	return 0;
1234 }
1235 
1236 static int
1237 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1238 		struct qbman_fd *fd, uint16_t bpid)
1239 {
1240 	struct rte_crypto_sym_op *sym_op = op->sym;
1241 	struct qbman_fle *fle, *sge;
1242 	int retval, data_len, data_offset;
1243 	struct sec_flow_context *flc;
1244 	struct ctxt_priv *priv = sess->ctxt;
1245 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1246 			sess->iv.offset);
1247 	struct rte_mbuf *dst;
1248 
1249 	data_len = sym_op->cipher.data.length;
1250 	data_offset = sym_op->cipher.data.offset;
1251 
1252 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1253 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1254 		if ((data_len & 7) || (data_offset & 7)) {
1255 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1256 			return -1;
1257 		}
1258 
1259 		data_len = data_len >> 3;
1260 		data_offset = data_offset >> 3;
1261 	}
1262 
1263 	if (sym_op->m_dst)
1264 		dst = sym_op->m_dst;
1265 	else
1266 		dst = sym_op->m_src;
1267 
1268 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1269 	if (retval) {
1270 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1271 		return -1;
1272 	}
1273 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1274 	/* TODO: we are using the first FLE entry to store the mbuf.
1275 	 * Currently we do not know which FLE has the mbuf stored.
1276 	 * So while retrieving we can go back one FLE from the FD ADDR
1277 	 * to get the mbuf address from the previous FLE.
1278 	 * A better approach would be to use the inline mbuf.
1279 	 */
1280 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1281 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1282 	fle = fle + 1;
1283 	sge = fle + 2;
1284 
1285 	if (likely(bpid < MAX_BPID)) {
1286 		DPAA2_SET_FD_BPID(fd, bpid);
1287 		DPAA2_SET_FLE_BPID(fle, bpid);
1288 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1289 		DPAA2_SET_FLE_BPID(sge, bpid);
1290 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1291 	} else {
1292 		DPAA2_SET_FD_IVP(fd);
1293 		DPAA2_SET_FLE_IVP(fle);
1294 		DPAA2_SET_FLE_IVP((fle + 1));
1295 		DPAA2_SET_FLE_IVP(sge);
1296 		DPAA2_SET_FLE_IVP((sge + 1));
1297 	}
1298 
1299 	flc = &priv->flc_desc[0].flc;
1300 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1301 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1302 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1303 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1304 
1305 	DPAA2_SEC_DP_DEBUG(
1306 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1307 		" data_off: 0x%x\n",
1308 		data_offset,
1309 		data_len,
1310 		sess->iv.length,
1311 		sym_op->m_src->data_off);
1312 
1313 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1314 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1315 
1316 	fle->length = data_len + sess->iv.length;
1317 
1318 	DPAA2_SEC_DP_DEBUG(
1319 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1320 		flc, fle, fle->addr_hi, fle->addr_lo,
1321 		fle->length);
1322 
1323 	fle++;
1324 
1325 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1326 	fle->length = data_len + sess->iv.length;
1327 
1328 	DPAA2_SET_FLE_SG_EXT(fle);
1329 
1330 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1331 	sge->length = sess->iv.length;
1332 
1333 	sge++;
1334 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1335 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1336 
1337 	sge->length = data_len;
1338 	DPAA2_SET_FLE_FIN(sge);
1339 	DPAA2_SET_FLE_FIN(fle);
1340 
1341 	DPAA2_SEC_DP_DEBUG(
1342 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1343 		" off =%d, len =%d\n",
1344 		DPAA2_GET_FD_ADDR(fd),
1345 		DPAA2_GET_FD_BPID(fd),
1346 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1347 		DPAA2_GET_FD_OFFSET(fd),
1348 		DPAA2_GET_FD_LEN(fd));
1349 
1350 	return 0;
1351 }
1352 
1353 static inline int
1354 build_sec_fd(struct rte_crypto_op *op,
1355 	     struct qbman_fd *fd, uint16_t bpid)
1356 {
1357 	int ret = -1;
1358 	dpaa2_sec_session *sess;
1359 
1360 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1361 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1362 				op->sym->session, cryptodev_driver_id);
1363 #ifdef RTE_LIBRTE_SECURITY
1364 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1365 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1366 				op->sym->sec_session);
1367 #endif
1368 	else
1369 		return -1;
1370 
1371 	/* Check if any of the buffers is segmented */
1372 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1373 		  ((op->sym->m_dst != NULL) &&
1374 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1375 		switch (sess->ctxt_type) {
1376 		case DPAA2_SEC_CIPHER:
1377 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1378 			break;
1379 		case DPAA2_SEC_AUTH:
1380 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1381 			break;
1382 		case DPAA2_SEC_AEAD:
1383 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1384 			break;
1385 		case DPAA2_SEC_CIPHER_HASH:
1386 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1387 			break;
1388 #ifdef RTE_LIBRTE_SECURITY
1389 		case DPAA2_SEC_IPSEC:
1390 		case DPAA2_SEC_PDCP:
1391 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1392 			break;
1393 #endif
1394 		case DPAA2_SEC_HASH_CIPHER:
1395 		default:
1396 			DPAA2_SEC_ERR("error: Unsupported session");
1397 		}
1398 	} else {
1399 		switch (sess->ctxt_type) {
1400 		case DPAA2_SEC_CIPHER:
1401 			ret = build_cipher_fd(sess, op, fd, bpid);
1402 			break;
1403 		case DPAA2_SEC_AUTH:
1404 			ret = build_auth_fd(sess, op, fd, bpid);
1405 			break;
1406 		case DPAA2_SEC_AEAD:
1407 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1408 			break;
1409 		case DPAA2_SEC_CIPHER_HASH:
1410 			ret = build_authenc_fd(sess, op, fd, bpid);
1411 			break;
1412 #ifdef RTE_LIBRTE_SECURITY
1413 		case DPAA2_SEC_IPSEC:
1414 			ret = build_proto_fd(sess, op, fd, bpid);
1415 			break;
1416 		case DPAA2_SEC_PDCP:
1417 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1418 			break;
1419 #endif
1420 		case DPAA2_SEC_HASH_CIPHER:
1421 		default:
1422 			DPAA2_SEC_ERR("error: Unsupported session");
1423 		}
1424 	}
1425 	return ret;
1426 }
1427 
1428 static uint16_t
1429 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1430 			uint16_t nb_ops)
1431 {
1432 	/* Function to transmit the frames to the given device and VQ */
1433 	uint32_t loop;
1434 	int32_t ret;
1435 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1436 	uint32_t frames_to_send;
1437 	struct qbman_eq_desc eqdesc;
1438 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1439 	struct qbman_swp *swp;
1440 	uint16_t num_tx = 0;
1441 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1442 	/*todo - need to support multiple buffer pools */
1443 	uint16_t bpid;
1444 	struct rte_mempool *mb_pool;
1445 
1446 	if (unlikely(nb_ops == 0))
1447 		return 0;
1448 
1449 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1450 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1451 		return 0;
1452 	}
1453 	/*Prepare enqueue descriptor*/
1454 	qbman_eq_desc_clear(&eqdesc);
1455 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1456 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1457 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1458 
1459 	if (!DPAA2_PER_LCORE_DPIO) {
1460 		ret = dpaa2_affine_qbman_swp();
1461 		if (ret) {
1462 			DPAA2_SEC_ERR("Failure in affining portal");
1463 			return 0;
1464 		}
1465 	}
1466 	swp = DPAA2_PER_LCORE_PORTAL;
1467 
1468 	while (nb_ops) {
1469 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1470 			dpaa2_eqcr_size : nb_ops;
1471 
1472 		for (loop = 0; loop < frames_to_send; loop++) {
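			/* A non-zero seqn means this mbuf still holds a DQRR
			 * entry (set by the atomic/ordered event dequeue
			 * path): enqueue with the DCA flag so QBMAN consumes
			 * that entry on transmit, and drop our local hold.
			 */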
1473 			if ((*ops)->sym->m_src->seqn) {
1474 				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1475 
1476 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1477 				DPAA2_PER_LCORE_DQRR_SIZE--;
1478 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1479 				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1480 			}
1481 
1482 			/*Clear the unused FD fields before sending*/
1483 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1484 			mb_pool = (*ops)->sym->m_src->pool;
1485 			bpid = mempool_to_bpid(mb_pool);
1486 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1487 			if (ret) {
1488 				DPAA2_SEC_ERR("error: Improper packet contents"
1489 					      " for crypto operation");
1490 				goto skip_tx;
1491 			}
1492 			ops++;
1493 		}
1494 		loop = 0;
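		/* qbman_swp_enqueue_multiple() may accept fewer FDs than
		 * requested (e.g. when the enqueue ring is full), so loop
		 * until the whole batch has been pushed.
		 */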
1495 		while (loop < frames_to_send) {
1496 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1497 							&fd_arr[loop],
1498 							&flags[loop],
1499 							frames_to_send - loop);
1500 		}
1501 
1502 		num_tx += frames_to_send;
1503 		nb_ops -= frames_to_send;
1504 	}
1505 skip_tx:
1506 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1507 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1508 	return num_tx;
1509 }
1510 
1511 #ifdef RTE_LIBRTE_SECURITY
1512 static inline struct rte_crypto_op *
1513 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1514 {
1515 	struct rte_crypto_op *op;
1516 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1517 	uint16_t diff = 0;
1518 	dpaa2_sec_session *sess_priv __rte_unused;
1519 
1520 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1521 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1522 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1523 
1524 	diff = len - mbuf->pkt_len;
1525 	mbuf->pkt_len += diff;
1526 	mbuf->data_len += diff;
1527 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1528 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1529 	op->sym->aead.digest.phys_addr = 0L;
1530 
1531 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1532 				op->sym->sec_session);
1533 	if (sess_priv->dir == DIR_ENC)
1534 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1535 	else
1536 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1537 
1538 	return op;
1539 }
1540 #endif
1541 
1542 static inline struct rte_crypto_op *
1543 sec_fd_to_mbuf(const struct qbman_fd *fd)
1544 {
1545 	struct qbman_fle *fle;
1546 	struct rte_crypto_op *op;
1547 	struct ctxt_priv *priv;
1548 	struct rte_mbuf *dst, *src;
1549 
1550 #ifdef RTE_LIBRTE_SECURITY
1551 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1552 		return sec_simple_fd_to_mbuf(fd);
1553 #endif
1554 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1555 
1556 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1557 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1558 
1559 	/* We are using the first FLE entry to store the mbuf.
1560 	 * Currently we do not know which FLE has the mbuf stored.
1561 	 * So while retrieving we can go back one FLE from the FD ADDR
1562 	 * to get the mbuf address from the previous FLE.
1563 	 * A better approach would be to use the inline mbuf.
1564 	 */
1565 
1566 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1567 		/* TODO complete it. */
1568 		DPAA2_SEC_ERR("error: non inline buffer");
1569 		return NULL;
1570 	}
1571 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1572 
1573 	/* Prefetch op */
1574 	src = op->sym->m_src;
1575 	rte_prefetch0(src);
1576 
1577 	if (op->sym->m_dst) {
1578 		dst = op->sym->m_dst;
1579 		rte_prefetch0(dst);
1580 	} else
1581 		dst = src;
1582 
1583 #ifdef RTE_LIBRTE_SECURITY
1584 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1585 		dpaa2_sec_session *sess = (dpaa2_sec_session *)
1586 			get_sec_session_private_data(op->sym->sec_session);
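		/* For protocol offload the FD length is the authoritative
		 * output size: propagate it to pkt_len and let the last
		 * segment absorb whatever remains after the earlier ones.
		 */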
1587 		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
1588 				sess->ctxt_type == DPAA2_SEC_PDCP) {
1589 			uint16_t len = DPAA2_GET_FD_LEN(fd);
1590 			dst->pkt_len = len;
1591 			while (dst->next != NULL) {
1592 				len -= dst->data_len;
1593 				dst = dst->next;
1594 			}
1595 			dst->data_len = len;
1596 		}
1597 	}
1598 #endif
1599 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1600 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1601 		(void *)dst,
1602 		dst->buf_addr,
1603 		DPAA2_GET_FD_ADDR(fd),
1604 		DPAA2_GET_FD_BPID(fd),
1605 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1606 		DPAA2_GET_FD_OFFSET(fd),
1607 		DPAA2_GET_FD_LEN(fd));
1608 
1609 	/* free the fle memory */
1610 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1611 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1612 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1613 	} else
1614 		rte_free((void *)(fle-1));
1615 
1616 	return op;
1617 }
1618 
1619 static uint16_t
1620 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1621 			uint16_t nb_ops)
1622 {
1623 	/* Function responsible for receiving frames for a given device and VQ */
1624 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1625 	struct qbman_result *dq_storage;
1626 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1627 	int ret, num_rx = 0;
1628 	uint8_t is_last = 0, status;
1629 	struct qbman_swp *swp;
1630 	const struct qbman_fd *fd;
1631 	struct qbman_pull_desc pulldesc;
1632 
1633 	if (!DPAA2_PER_LCORE_DPIO) {
1634 		ret = dpaa2_affine_qbman_swp();
1635 		if (ret) {
1636 			DPAA2_SEC_ERR("Failure in affining portal");
1637 			return 0;
1638 		}
1639 	}
1640 	swp = DPAA2_PER_LCORE_PORTAL;
1641 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1642 
1643 	qbman_pull_desc_clear(&pulldesc);
1644 	qbman_pull_desc_set_numframes(&pulldesc,
1645 				      (nb_ops > dpaa2_dqrr_size) ?
1646 				      dpaa2_dqrr_size : nb_ops);
1647 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1648 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1649 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1650 				    1);
1651 
1652 	/*Issue a volatile dequeue command. */
1653 	while (1) {
1654 		if (qbman_swp_pull(swp, &pulldesc)) {
1655 			DPAA2_SEC_WARN(
1656 				"SEC VDQ command is not issued : QBMAN busy");
1657 			/* Portal was busy, try again */
1658 			continue;
1659 		}
1660 		break;
1661 	}
1662 
1663 	/* Receive packets until the last dequeue entry is found with
1664 	 * respect to the above issued PULL command.
1665 	 */
1666 	while (!is_last) {
1667 		/* Check if the previously issued command has completed.
1668 		 * Note that the SWP may be shared between the Ethernet
1669 		 * and SEC drivers.
1670 		 */
1671 		while (!qbman_check_command_complete(dq_storage))
1672 			;
1673 
1674 		/* Loop until the dq_storage is updated with
1675 		 * new token by QBMAN
1676 		 */
1677 		while (!qbman_check_new_result(dq_storage))
1678 			;
1679 		/* Check whether the last pull command has expired, and set
1680 		 * the loop-termination condition.
1681 		 */
1682 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1683 			is_last = 1;
1684 			/* Check for valid frame. */
1685 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1686 			if (unlikely(
1687 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1688 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1689 				continue;
1690 			}
1691 		}
1692 
1693 		fd = qbman_result_DQ_fd(dq_storage);
1694 		ops[num_rx] = sec_fd_to_mbuf(fd);
1695 
1696 		if (unlikely(fd->simple.frc)) {
1697 			/* TODO Parse SEC errors */
1698 			DPAA2_SEC_ERR("SEC returned Error - %x",
1699 				      fd->simple.frc);
1700 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1701 		} else {
1702 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1703 		}
1704 
1705 		num_rx++;
1706 		dq_storage++;
1707 	} /* End of Packet Rx loop */
1708 
1709 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1710 
1711 	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1712 	/*Return the total number of packets received to DPAA2 app*/
1713 	return num_rx;
1714 }
1715 
1716 /** Release queue pair */
1717 static int
1718 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1719 {
1720 	struct dpaa2_sec_qp *qp =
1721 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1722 
1723 	PMD_INIT_FUNC_TRACE();
1724 
1725 	if (qp->rx_vq.q_storage) {
1726 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1727 		rte_free(qp->rx_vq.q_storage);
1728 	}
1729 	rte_free(qp);
1730 
1731 	dev->data->queue_pairs[queue_pair_id] = NULL;
1732 
1733 	return 0;
1734 }
1735 
1736 /** Setup a queue pair */
1737 static int
1738 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1739 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1740 		__rte_unused int socket_id)
1741 {
1742 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1743 	struct dpaa2_sec_qp *qp;
1744 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1745 	struct dpseci_rx_queue_cfg cfg;
1746 	int32_t retcode;
1747 
1748 	PMD_INIT_FUNC_TRACE();
1749 
1750 	/* If the qp is already set up, there is nothing more to do. */
1751 	if (dev->data->queue_pairs[qp_id] != NULL) {
1752 		DPAA2_SEC_INFO("QP already setup");
1753 		return 0;
1754 	}
1755 
1756 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1757 		    dev, qp_id, qp_conf);
1758 
1759 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1760 
1761 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1762 			RTE_CACHE_LINE_SIZE);
1763 	if (!qp) {
1764 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1765 		return -1;
1766 	}
1767 
1768 	qp->rx_vq.crypto_data = dev->data;
1769 	qp->tx_vq.crypto_data = dev->data;
1770 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1771 		sizeof(struct queue_storage_info_t),
1772 		RTE_CACHE_LINE_SIZE);
1773 	if (!qp->rx_vq.q_storage) {
1774 		DPAA2_SEC_ERR("malloc failed for q_storage");
1775 		return -1;
1776 	}
1777 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1778 
1779 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1780 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1781 		return -1;
1782 	}
1783 
1784 	dev->data->queue_pairs[qp_id] = qp;
1785 
1786 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1787 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1788 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1789 				      qp_id, &cfg);
1790 	return retcode;
1791 }
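
/*
 * Illustrative usage sketch, not part of the driver: configuring the
 * queue pairs from an application. dpaa2_sec_queue_pair_setup() above
 * ignores qp_conf (marked __rte_unused), but a conf must still be
 * supplied through the generic API; the descriptor count is an
 * assumption.
 */
static __rte_unused int
example_setup_qps(uint8_t dev_id, uint16_t nb_qps)
{
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
	uint16_t qp_id;
	int ret;

	for (qp_id = 0; qp_id < nb_qps; qp_id++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, qp_id,
						     &qp_conf, SOCKET_ID_ANY);
		if (ret < 0)
			return ret;
	}
	return 0;
}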
1792 
1793 /** Return the number of allocated queue pairs */
1794 static uint32_t
1795 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1796 {
1797 	PMD_INIT_FUNC_TRACE();
1798 
1799 	return dev->data->nb_queue_pairs;
1800 }
1801 
1802 /** Returns the size of the dpaa2_sec session structure */
1803 static unsigned int
1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1805 {
1806 	PMD_INIT_FUNC_TRACE();
1807 
1808 	return sizeof(dpaa2_sec_session);
1809 }
1810 
1811 static int
1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1813 		      struct rte_crypto_sym_xform *xform,
1814 		      dpaa2_sec_session *session)
1815 {
1816 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1817 	struct alginfo cipherdata;
1818 	int bufsize;
1819 	struct ctxt_priv *priv;
1820 	struct sec_flow_context *flc;
1821 
1822 	PMD_INIT_FUNC_TRACE();
1823 
1824 	/* For SEC CIPHER only one descriptor is required. */
1825 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1826 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1827 			RTE_CACHE_LINE_SIZE);
1828 	if (priv == NULL) {
1829 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1830 		return -1;
1831 	}
1832 
1833 	priv->fle_pool = dev_priv->fle_pool;
1834 
1835 	flc = &priv->flc_desc[0].flc;
1836 
1837 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1838 			RTE_CACHE_LINE_SIZE);
1839 	if (session->cipher_key.data == NULL) {
1840 		DPAA2_SEC_ERR("No Memory for cipher key");
1841 		rte_free(priv);
1842 		return -1;
1843 	}
1844 	session->cipher_key.length = xform->cipher.key.length;
1845 
1846 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1847 	       xform->cipher.key.length);
1848 	cipherdata.key = (size_t)session->cipher_key.data;
1849 	cipherdata.keylen = session->cipher_key.length;
1850 	cipherdata.key_enc_flags = 0;
1851 	cipherdata.key_type = RTA_DATA_IMM;
1852 
1853 	/* Set IV parameters */
1854 	session->iv.offset = xform->cipher.iv.offset;
1855 	session->iv.length = xform->cipher.iv.length;
1856 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1857 				DIR_ENC : DIR_DEC;
1858 
1859 	switch (xform->cipher.algo) {
1860 	case RTE_CRYPTO_CIPHER_AES_CBC:
1861 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1862 		cipherdata.algmode = OP_ALG_AAI_CBC;
1863 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1864 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1865 						SHR_NEVER, &cipherdata, NULL,
1866 						session->iv.length,
1867 						session->dir);
1868 		break;
1869 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1870 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1871 		cipherdata.algmode = OP_ALG_AAI_CBC;
1872 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1873 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1874 						SHR_NEVER, &cipherdata, NULL,
1875 						session->iv.length,
1876 						session->dir);
1877 		break;
1878 	case RTE_CRYPTO_CIPHER_AES_CTR:
1879 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1880 		cipherdata.algmode = OP_ALG_AAI_CTR;
1881 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1882 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1883 						SHR_NEVER, &cipherdata, NULL,
1884 						session->iv.length,
1885 						session->dir);
1886 		break;
1887 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1888 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1889 		cipherdata.algmode = OP_ALG_AAI_CTR;
1890 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
1891 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1892 						SHR_NEVER, &cipherdata, NULL,
1893 						session->iv.length,
1894 						session->dir);
1895 		break;
1896 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1897 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1898 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1899 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1900 					      &cipherdata,
1901 					      session->dir);
1902 		break;
1903 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1904 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1905 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1906 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1907 					      &cipherdata,
1908 					      session->dir);
1909 		break;
1910 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1911 	case RTE_CRYPTO_CIPHER_AES_F8:
1912 	case RTE_CRYPTO_CIPHER_AES_ECB:
1913 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1914 	case RTE_CRYPTO_CIPHER_AES_XTS:
1915 	case RTE_CRYPTO_CIPHER_ARC4:
1916 	case RTE_CRYPTO_CIPHER_NULL:
1917 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1918 			xform->cipher.algo);
1919 		goto error_out;
1920 	default:
1921 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1922 			xform->cipher.algo);
1923 		goto error_out;
1924 	}
1925 
1926 	if (bufsize < 0) {
1927 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1928 		goto error_out;
1929 	}
1930 
1931 	flc->word1_sdl = (uint8_t)bufsize;
1932 	session->ctxt = priv;
1933 
1934 #ifdef CAAM_DESC_DEBUG
1935 	int i;
1936 	for (i = 0; i < bufsize; i++)
1937 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1938 #endif
1939 	return 0;
1940 
1941 error_out:
1942 	rte_free(session->cipher_key.data);
1943 	rte_free(priv);
1944 	return -1;
1945 }
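
/*
 * Illustrative sketch, not part of the driver: a cipher-only transform
 * of the kind dpaa2_sec_cipher_init() above consumes. The example_*
 * helper and the AES-128-CBC key/IV sizes are assumptions.
 */
static __rte_unused void
example_fill_cipher_xform(struct rte_crypto_sym_xform *xform,
			  uint8_t *key, uint16_t keylen)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next = NULL;			/* cipher-only chain */
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; /* -> DIR_ENC */
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform->cipher.key.data = key;
	xform->cipher.key.length = keylen;	/* e.g. 16 for AES-128 */
	/* IV is carried in the op private area at this offset */
	xform->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				  sizeof(struct rte_crypto_sym_op);
	xform->cipher.iv.length = 16;
}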
1946 
1947 static int
1948 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1949 		    struct rte_crypto_sym_xform *xform,
1950 		    dpaa2_sec_session *session)
1951 {
1952 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1953 	struct alginfo authdata;
1954 	int bufsize;
1955 	struct ctxt_priv *priv;
1956 	struct sec_flow_context *flc;
1957 
1958 	PMD_INIT_FUNC_TRACE();
1959 
1960 	/* For SEC AUTH three descriptors are required for various stages */
1961 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1962 			sizeof(struct ctxt_priv) + 3 *
1963 			sizeof(struct sec_flc_desc),
1964 			RTE_CACHE_LINE_SIZE);
1965 	if (priv == NULL) {
1966 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1967 		return -1;
1968 	}
1969 
1970 	priv->fle_pool = dev_priv->fle_pool;
1971 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1972 
1973 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1974 			RTE_CACHE_LINE_SIZE);
1975 	if (session->auth_key.data == NULL) {
1976 		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1977 		rte_free(priv);
1978 		return -1;
1979 	}
1980 	session->auth_key.length = xform->auth.key.length;
1981 
1982 	memcpy(session->auth_key.data, xform->auth.key.data,
1983 	       xform->auth.key.length);
1984 	authdata.key = (size_t)session->auth_key.data;
1985 	authdata.keylen = session->auth_key.length;
1986 	authdata.key_enc_flags = 0;
1987 	authdata.key_type = RTA_DATA_IMM;
1988 
1989 	session->digest_length = xform->auth.digest_length;
1990 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1991 				DIR_ENC : DIR_DEC;
1992 
1993 	switch (xform->auth.algo) {
1994 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1995 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1996 		authdata.algmode = OP_ALG_AAI_HMAC;
1997 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1998 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1999 					   1, 0, SHR_NEVER, &authdata,
2000 					   !session->dir,
2001 					   session->digest_length);
2002 		break;
2003 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2004 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2005 		authdata.algmode = OP_ALG_AAI_HMAC;
2006 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2007 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2008 					   1, 0, SHR_NEVER, &authdata,
2009 					   !session->dir,
2010 					   session->digest_length);
2011 		break;
2012 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2013 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2014 		authdata.algmode = OP_ALG_AAI_HMAC;
2015 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2016 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2017 					   1, 0, SHR_NEVER, &authdata,
2018 					   !session->dir,
2019 					   session->digest_length);
2020 		break;
2021 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2022 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2023 		authdata.algmode = OP_ALG_AAI_HMAC;
2024 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2025 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2026 					   1, 0, SHR_NEVER, &authdata,
2027 					   !session->dir,
2028 					   session->digest_length);
2029 		break;
2030 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2031 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2032 		authdata.algmode = OP_ALG_AAI_HMAC;
2033 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2034 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2035 					   1, 0, SHR_NEVER, &authdata,
2036 					   !session->dir,
2037 					   session->digest_length);
2038 		break;
2039 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2040 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2041 		authdata.algmode = OP_ALG_AAI_HMAC;
2042 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2043 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2044 					   1, 0, SHR_NEVER, &authdata,
2045 					   !session->dir,
2046 					   session->digest_length);
2047 		break;
2048 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2049 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2050 		authdata.algmode = OP_ALG_AAI_F9;
2051 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2052 		session->iv.offset = xform->auth.iv.offset;
2053 		session->iv.length = xform->auth.iv.length;
2054 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2055 					      1, 0, &authdata,
2056 					      !session->dir,
2057 					      session->digest_length);
2058 		break;
2059 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2060 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2061 		authdata.algmode = OP_ALG_AAI_F9;
2062 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2063 		session->iv.offset = xform->auth.iv.offset;
2064 		session->iv.length = xform->auth.iv.length;
2065 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2066 					   1, 0, &authdata,
2067 					   !session->dir,
2068 					   session->digest_length);
2069 		break;
2070 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2071 	case RTE_CRYPTO_AUTH_NULL:
2072 	case RTE_CRYPTO_AUTH_SHA1:
2073 	case RTE_CRYPTO_AUTH_SHA256:
2074 	case RTE_CRYPTO_AUTH_SHA512:
2075 	case RTE_CRYPTO_AUTH_SHA224:
2076 	case RTE_CRYPTO_AUTH_SHA384:
2077 	case RTE_CRYPTO_AUTH_MD5:
2078 	case RTE_CRYPTO_AUTH_AES_GMAC:
2079 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2080 	case RTE_CRYPTO_AUTH_AES_CMAC:
2081 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2082 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2083 			      xform->auth.algo);
2084 		goto error_out;
2085 	default:
2086 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2087 			      xform->auth.algo);
2088 		goto error_out;
2089 	}
2090 
2091 	if (bufsize < 0) {
2092 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2093 		goto error_out;
2094 	}
2095 
2096 	flc->word1_sdl = (uint8_t)bufsize;
2097 	session->ctxt = priv;
2098 #ifdef CAAM_DESC_DEBUG
2099 	int i;
2100 	for (i = 0; i < bufsize; i++)
2101 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2102 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2103 #endif
2104 
2105 	return 0;
2106 
2107 error_out:
2108 	rte_free(session->auth_key.data);
2109 	rte_free(priv);
2110 	return -1;
2111 }
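
/*
 * Illustrative sketch, not part of the driver: an auth-only transform
 * for dpaa2_sec_auth_init() above. The 20-byte digest is an assumption
 * matching a full SHA1-HMAC tag.
 */
static __rte_unused void
example_fill_auth_xform(struct rte_crypto_sym_xform *xform,
			uint8_t *key, uint16_t keylen)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;	/* -> DIR_ENC */
	xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	xform->auth.key.data = key;
	xform->auth.key.length = keylen;
	xform->auth.digest_length = 20;
}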
2112 
2113 static int
2114 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2115 		    struct rte_crypto_sym_xform *xform,
2116 		    dpaa2_sec_session *session)
2117 {
2118 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2119 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2120 	struct alginfo aeaddata;
2121 	int bufsize;
2122 	struct ctxt_priv *priv;
2123 	struct sec_flow_context *flc;
2124 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2125 	int err;
2126 
2127 	PMD_INIT_FUNC_TRACE();
2128 
2129 	/* Set IV parameters */
2130 	session->iv.offset = aead_xform->iv.offset;
2131 	session->iv.length = aead_xform->iv.length;
2132 	session->ctxt_type = DPAA2_SEC_AEAD;
2133 
2134 	/* For SEC AEAD only one descriptor is required */
2135 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2136 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2137 			RTE_CACHE_LINE_SIZE);
2138 	if (priv == NULL) {
2139 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2140 		return -1;
2141 	}
2142 
2143 	priv->fle_pool = dev_priv->fle_pool;
2144 	flc = &priv->flc_desc[0].flc;
2145 
2146 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2147 					       RTE_CACHE_LINE_SIZE);
2148 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2149 		DPAA2_SEC_ERR("No Memory for aead key");
2150 		rte_free(priv);
2151 		return -1;
2152 	}
2153 	memcpy(session->aead_key.data, aead_xform->key.data,
2154 	       aead_xform->key.length);
2155 
2156 	session->digest_length = aead_xform->digest_length;
2157 	session->aead_key.length = aead_xform->key.length;
2158 	ctxt->auth_only_len = aead_xform->aad_length;
2159 
2160 	aeaddata.key = (size_t)session->aead_key.data;
2161 	aeaddata.keylen = session->aead_key.length;
2162 	aeaddata.key_enc_flags = 0;
2163 	aeaddata.key_type = RTA_DATA_IMM;
2164 
2165 	switch (aead_xform->algo) {
2166 	case RTE_CRYPTO_AEAD_AES_GCM:
2167 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2168 		aeaddata.algmode = OP_ALG_AAI_GCM;
2169 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2170 		break;
2171 	case RTE_CRYPTO_AEAD_AES_CCM:
2172 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2173 			      aead_xform->algo);
2174 		goto error_out;
2175 	default:
2176 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2177 			      aead_xform->algo);
2178 		goto error_out;
2179 	}
2180 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2181 				DIR_ENC : DIR_DEC;
2182 
2183 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2184 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2185 			       MIN_JOB_DESC_SIZE,
2186 			       (unsigned int *)priv->flc_desc[0].desc,
2187 			       &priv->flc_desc[0].desc[1], 1);
2188 
2189 	if (err < 0) {
2190 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2191 		goto error_out;
2192 	}
2193 	if (priv->flc_desc[0].desc[1] & 1) {
2194 		aeaddata.key_type = RTA_DATA_IMM;
2195 	} else {
2196 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2197 		aeaddata.key_type = RTA_DATA_PTR;
2198 	}
2199 	priv->flc_desc[0].desc[0] = 0;
2200 	priv->flc_desc[0].desc[1] = 0;
2201 
2202 	if (session->dir == DIR_ENC)
2203 		bufsize = cnstr_shdsc_gcm_encap(
2204 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2205 				&aeaddata, session->iv.length,
2206 				session->digest_length);
2207 	else
2208 		bufsize = cnstr_shdsc_gcm_decap(
2209 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2210 				&aeaddata, session->iv.length,
2211 				session->digest_length);
2212 	if (bufsize < 0) {
2213 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2214 		goto error_out;
2215 	}
2216 
2217 	flc->word1_sdl = (uint8_t)bufsize;
2218 	session->ctxt = priv;
2219 #ifdef CAAM_DESC_DEBUG
2220 	int i;
2221 	for (i = 0; i < bufsize; i++)
2222 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2223 			    i, priv->flc_desc[0].desc[i]);
2224 #endif
2225 	return 0;
2226 
2227 error_out:
2228 	rte_free(session->aead_key.data);
2229 	rte_free(priv);
2230 	return -1;
2231 }
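
/*
 * Illustrative sketch, not part of the driver: an AES-GCM AEAD transform
 * for dpaa2_sec_aead_init() above. The 12-byte IV, 16-byte tag and the
 * AAD length are assumptions typical for GCM.
 */
static __rte_unused void
example_fill_gcm_xform(struct rte_crypto_sym_xform *xform,
		       uint8_t *key, uint16_t keylen)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = key;
	xform->aead.key.length = keylen;
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 8;	/* kept in ctxt->auth_only_len above */
}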
2232 
2233 
2234 static int
2235 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2236 		    struct rte_crypto_sym_xform *xform,
2237 		    dpaa2_sec_session *session)
2238 {
2239 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2240 	struct alginfo authdata, cipherdata;
2241 	int bufsize;
2242 	struct ctxt_priv *priv;
2243 	struct sec_flow_context *flc;
2244 	struct rte_crypto_cipher_xform *cipher_xform;
2245 	struct rte_crypto_auth_xform *auth_xform;
2246 	int err;
2247 
2248 	PMD_INIT_FUNC_TRACE();
2249 
2250 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2251 		cipher_xform = &xform->cipher;
2252 		auth_xform = &xform->next->auth;
2253 		session->ctxt_type =
2254 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2255 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2256 	} else {
2257 		cipher_xform = &xform->next->cipher;
2258 		auth_xform = &xform->auth;
2259 		session->ctxt_type =
2260 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2261 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2262 	}
2263 
2264 	/* Set IV parameters */
2265 	session->iv.offset = cipher_xform->iv.offset;
2266 	session->iv.length = cipher_xform->iv.length;
2267 
2268 	/* For SEC cipher+auth chaining only one descriptor is required */
2269 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2270 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2271 			RTE_CACHE_LINE_SIZE);
2272 	if (priv == NULL) {
2273 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2274 		return -1;
2275 	}
2276 
2277 	priv->fle_pool = dev_priv->fle_pool;
2278 	flc = &priv->flc_desc[0].flc;
2279 
2280 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2281 					       RTE_CACHE_LINE_SIZE);
2282 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2283 		DPAA2_SEC_ERR("No Memory for cipher key");
2284 		rte_free(priv);
2285 		return -1;
2286 	}
2287 	session->cipher_key.length = cipher_xform->key.length;
2288 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2289 					     RTE_CACHE_LINE_SIZE);
2290 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2291 		DPAA2_SEC_ERR("No Memory for auth key");
2292 		rte_free(session->cipher_key.data);
2293 		rte_free(priv);
2294 		return -1;
2295 	}
2296 	session->auth_key.length = auth_xform->key.length;
2297 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2298 	       cipher_xform->key.length);
2299 	memcpy(session->auth_key.data, auth_xform->key.data,
2300 	       auth_xform->key.length);
2301 
2302 	authdata.key = (size_t)session->auth_key.data;
2303 	authdata.keylen = session->auth_key.length;
2304 	authdata.key_enc_flags = 0;
2305 	authdata.key_type = RTA_DATA_IMM;
2306 
2307 	session->digest_length = auth_xform->digest_length;
2308 
2309 	switch (auth_xform->algo) {
2310 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2311 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2312 		authdata.algmode = OP_ALG_AAI_HMAC;
2313 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2314 		break;
2315 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2316 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2317 		authdata.algmode = OP_ALG_AAI_HMAC;
2318 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2319 		break;
2320 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2321 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2322 		authdata.algmode = OP_ALG_AAI_HMAC;
2323 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2324 		break;
2325 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2326 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2327 		authdata.algmode = OP_ALG_AAI_HMAC;
2328 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2329 		break;
2330 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2331 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2332 		authdata.algmode = OP_ALG_AAI_HMAC;
2333 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2334 		break;
2335 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2336 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2337 		authdata.algmode = OP_ALG_AAI_HMAC;
2338 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2339 		break;
2340 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2341 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2342 	case RTE_CRYPTO_AUTH_NULL:
2343 	case RTE_CRYPTO_AUTH_SHA1:
2344 	case RTE_CRYPTO_AUTH_SHA256:
2345 	case RTE_CRYPTO_AUTH_SHA512:
2346 	case RTE_CRYPTO_AUTH_SHA224:
2347 	case RTE_CRYPTO_AUTH_SHA384:
2348 	case RTE_CRYPTO_AUTH_MD5:
2349 	case RTE_CRYPTO_AUTH_AES_GMAC:
2350 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2351 	case RTE_CRYPTO_AUTH_AES_CMAC:
2352 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2353 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2354 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2355 			      auth_xform->algo);
2356 		goto error_out;
2357 	default:
2358 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2359 			      auth_xform->algo);
2360 		goto error_out;
2361 	}
2362 	cipherdata.key = (size_t)session->cipher_key.data;
2363 	cipherdata.keylen = session->cipher_key.length;
2364 	cipherdata.key_enc_flags = 0;
2365 	cipherdata.key_type = RTA_DATA_IMM;
2366 
2367 	switch (cipher_xform->algo) {
2368 	case RTE_CRYPTO_CIPHER_AES_CBC:
2369 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2370 		cipherdata.algmode = OP_ALG_AAI_CBC;
2371 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2372 		break;
2373 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2374 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2375 		cipherdata.algmode = OP_ALG_AAI_CBC;
2376 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2377 		break;
2378 	case RTE_CRYPTO_CIPHER_AES_CTR:
2379 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2380 		cipherdata.algmode = OP_ALG_AAI_CTR;
2381 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2382 		break;
2383 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2384 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2385 	case RTE_CRYPTO_CIPHER_NULL:
2386 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2387 	case RTE_CRYPTO_CIPHER_AES_ECB:
2388 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2389 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2390 			      cipher_xform->algo);
2391 		goto error_out;
2392 	default:
2393 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2394 			      cipher_xform->algo);
2395 		goto error_out;
2396 	}
2397 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2398 				DIR_ENC : DIR_DEC;
2399 
2400 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2401 	priv->flc_desc[0].desc[1] = authdata.keylen;
2402 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2403 			       MIN_JOB_DESC_SIZE,
2404 			       (unsigned int *)priv->flc_desc[0].desc,
2405 			       &priv->flc_desc[0].desc[2], 2);
2406 
2407 	if (err < 0) {
2408 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2409 		goto error_out;
2410 	}
2411 	if (priv->flc_desc[0].desc[2] & 1) {
2412 		cipherdata.key_type = RTA_DATA_IMM;
2413 	} else {
2414 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2415 		cipherdata.key_type = RTA_DATA_PTR;
2416 	}
2417 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2418 		authdata.key_type = RTA_DATA_IMM;
2419 	} else {
2420 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2421 		authdata.key_type = RTA_DATA_PTR;
2422 	}
2423 	priv->flc_desc[0].desc[0] = 0;
2424 	priv->flc_desc[0].desc[1] = 0;
2425 	priv->flc_desc[0].desc[2] = 0;
2426 
2427 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2428 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2429 					      0, SHR_SERIAL,
2430 					      &cipherdata, &authdata,
2431 					      session->iv.length,
2432 					      session->digest_length,
2433 					      session->dir);
2434 		if (bufsize < 0) {
2435 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2436 			goto error_out;
2437 		}
2438 	} else {
2439 		DPAA2_SEC_ERR("Hash before cipher not supported");
2440 		goto error_out;
2441 	}
2442 
2443 	flc->word1_sdl = (uint8_t)bufsize;
2444 	session->ctxt = priv;
2445 #ifdef CAAM_DESC_DEBUG
2446 	int i;
2447 	for (i = 0; i < bufsize; i++)
2448 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2449 			    i, priv->flc_desc[0].desc[i]);
2450 #endif
2451 
2452 	return 0;
2453 
2454 error_out:
2455 	rte_free(session->cipher_key.data);
2456 	rte_free(session->auth_key.data);
2457 	rte_free(priv);
2458 	return -1;
2459 }
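
/*
 * Illustrative sketch, not part of the driver: chaining cipher and auth
 * transforms for dpaa2_sec_aead_chain_init(). With encryption, only the
 * cipher-then-authenticate order builds a descriptor above; keys, IV
 * and digest length are left zeroed here and would be filled as in the
 * earlier sketches.
 */
static __rte_unused void
example_fill_chain_xforms(struct rte_crypto_sym_xform *cipher,
			  struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->next = auth;		/* cipher first, then authenticate */

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->next = NULL;
}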
2460 
2461 static int
2462 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2463 			    struct rte_crypto_sym_xform *xform,	void *sess)
2464 {
2465 	dpaa2_sec_session *session = sess;
2466 	int ret;
2467 
2468 	PMD_INIT_FUNC_TRACE();
2469 
2470 	if (unlikely(sess == NULL)) {
2471 		DPAA2_SEC_ERR("Invalid session struct");
2472 		return -1;
2473 	}
2474 
2475 	memset(session, 0, sizeof(dpaa2_sec_session));
2476 	/* Default IV length = 0 */
2477 	session->iv.length = 0;
2478 
2479 	/* Cipher Only */
2480 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2481 		session->ctxt_type = DPAA2_SEC_CIPHER;
2482 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2483 
2484 	/* Authentication Only */
2485 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2486 		   xform->next == NULL) {
2487 		session->ctxt_type = DPAA2_SEC_AUTH;
2488 		ret = dpaa2_sec_auth_init(dev, xform, session);
2489 
2490 	/* Cipher then Authenticate */
2491 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2492 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2493 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2494 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2495 
2496 	/* Authenticate then Cipher */
2497 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2498 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2499 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2500 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2501 
2502 	/* AEAD operation for algorithms such as AES-GCM */
2503 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2504 		   xform->next == NULL) {
2505 		ret = dpaa2_sec_aead_init(dev, xform, session);
2506 
2507 	} else {
2508 		DPAA2_SEC_ERR("Invalid crypto type");
2509 		return -EINVAL;
2510 	}
2511 
2512 	return ret;
2513 }
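
/*
 * Illustrative usage sketch, not part of the driver: the generic calls
 * that reach dpaa2_sec_set_session_parameters() above through
 * dpaa2_sec_sym_session_configure() further below. The session mempool
 * is assumed to be created by the application.
 */
static __rte_unused struct rte_cryptodev_sym_session *
example_create_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		       struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Drives the xform-type dispatch implemented above */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp)) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}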
2514 
2515 #ifdef RTE_LIBRTE_SECURITY
2516 static int
2517 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2518 			dpaa2_sec_session *session,
2519 			struct alginfo *aeaddata)
2520 {
2521 	PMD_INIT_FUNC_TRACE();
2522 
2523 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2524 					       RTE_CACHE_LINE_SIZE);
2525 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2526 		DPAA2_SEC_ERR("No Memory for aead key");
2527 		return -1;
2528 	}
2529 	memcpy(session->aead_key.data, aead_xform->key.data,
2530 	       aead_xform->key.length);
2531 
2532 	session->digest_length = aead_xform->digest_length;
2533 	session->aead_key.length = aead_xform->key.length;
2534 
2535 	aeaddata->key = (size_t)session->aead_key.data;
2536 	aeaddata->keylen = session->aead_key.length;
2537 	aeaddata->key_enc_flags = 0;
2538 	aeaddata->key_type = RTA_DATA_IMM;
2539 
2540 	switch (aead_xform->algo) {
2541 	case RTE_CRYPTO_AEAD_AES_GCM:
2542 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2543 		aeaddata->algmode = OP_ALG_AAI_GCM;
2544 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2545 		break;
2546 	case RTE_CRYPTO_AEAD_AES_CCM:
2547 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2548 		aeaddata->algmode = OP_ALG_AAI_CCM;
2549 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2550 		break;
2551 	default:
2552 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2553 			      aead_xform->algo);
2554 		return -1;
2555 	}
2556 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2557 				DIR_ENC : DIR_DEC;
2558 
2559 	return 0;
2560 }
2561 
2562 static int
2563 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2564 	struct rte_crypto_auth_xform *auth_xform,
2565 	dpaa2_sec_session *session,
2566 	struct alginfo *cipherdata,
2567 	struct alginfo *authdata)
2568 {
2569 	if (cipher_xform) {
2570 		session->cipher_key.data = rte_zmalloc(NULL,
2571 						       cipher_xform->key.length,
2572 						       RTE_CACHE_LINE_SIZE);
2573 		if (session->cipher_key.data == NULL &&
2574 				cipher_xform->key.length > 0) {
2575 			DPAA2_SEC_ERR("No Memory for cipher key");
2576 			return -ENOMEM;
2577 		}
2578 
2579 		session->cipher_key.length = cipher_xform->key.length;
2580 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2581 				cipher_xform->key.length);
2582 		session->cipher_alg = cipher_xform->algo;
2583 	} else {
2584 		session->cipher_key.data = NULL;
2585 		session->cipher_key.length = 0;
2586 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2587 	}
2588 
2589 	if (auth_xform) {
2590 		session->auth_key.data = rte_zmalloc(NULL,
2591 						auth_xform->key.length,
2592 						RTE_CACHE_LINE_SIZE);
2593 		if (session->auth_key.data == NULL &&
2594 				auth_xform->key.length > 0) {
2595 			DPAA2_SEC_ERR("No Memory for auth key");
2596 			return -ENOMEM;
2597 		}
2598 		session->auth_key.length = auth_xform->key.length;
2599 		memcpy(session->auth_key.data, auth_xform->key.data,
2600 				auth_xform->key.length);
2601 		session->auth_alg = auth_xform->algo;
2602 	} else {
2603 		session->auth_key.data = NULL;
2604 		session->auth_key.length = 0;
2605 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2606 	}
2607 
2608 	authdata->key = (size_t)session->auth_key.data;
2609 	authdata->keylen = session->auth_key.length;
2610 	authdata->key_enc_flags = 0;
2611 	authdata->key_type = RTA_DATA_IMM;
2612 	switch (session->auth_alg) {
2613 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2614 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2615 		authdata->algmode = OP_ALG_AAI_HMAC;
2616 		break;
2617 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2618 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2619 		authdata->algmode = OP_ALG_AAI_HMAC;
2620 		break;
2621 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2622 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2623 		authdata->algmode = OP_ALG_AAI_HMAC;
2624 		break;
2625 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2626 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2627 		authdata->algmode = OP_ALG_AAI_HMAC;
2628 		break;
2629 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2630 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2631 		authdata->algmode = OP_ALG_AAI_HMAC;
2632 		break;
2633 	case RTE_CRYPTO_AUTH_AES_CMAC:
2634 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2635 		break;
2636 	case RTE_CRYPTO_AUTH_NULL:
2637 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2638 		break;
2639 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2640 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2641 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2642 	case RTE_CRYPTO_AUTH_SHA1:
2643 	case RTE_CRYPTO_AUTH_SHA256:
2644 	case RTE_CRYPTO_AUTH_SHA512:
2645 	case RTE_CRYPTO_AUTH_SHA224:
2646 	case RTE_CRYPTO_AUTH_SHA384:
2647 	case RTE_CRYPTO_AUTH_MD5:
2648 	case RTE_CRYPTO_AUTH_AES_GMAC:
2649 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2650 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2651 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2652 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2653 			      session->auth_alg);
2654 		return -1;
2655 	default:
2656 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2657 			      session->auth_alg);
2658 		return -1;
2659 	}
2660 	cipherdata->key = (size_t)session->cipher_key.data;
2661 	cipherdata->keylen = session->cipher_key.length;
2662 	cipherdata->key_enc_flags = 0;
2663 	cipherdata->key_type = RTA_DATA_IMM;
2664 
2665 	switch (session->cipher_alg) {
2666 	case RTE_CRYPTO_CIPHER_AES_CBC:
2667 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2668 		cipherdata->algmode = OP_ALG_AAI_CBC;
2669 		break;
2670 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2671 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2672 		cipherdata->algmode = OP_ALG_AAI_CBC;
2673 		break;
2674 	case RTE_CRYPTO_CIPHER_AES_CTR:
2675 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2676 		cipherdata->algmode = OP_ALG_AAI_CTR;
2677 		break;
2678 	case RTE_CRYPTO_CIPHER_NULL:
2679 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2680 		break;
2681 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2682 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2683 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2684 	case RTE_CRYPTO_CIPHER_AES_ECB:
2685 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2686 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2687 			      session->cipher_alg);
2688 		return -1;
2689 	default:
2690 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2691 			      session->cipher_alg);
2692 		return -1;
2693 	}
2694 
2695 	return 0;
2696 }
2697 
2698 #ifdef RTE_LIBRTE_SECURITY_TEST
2699 static uint8_t aes_cbc_iv[] = {
2700 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2701 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2702 #endif
2703 
2704 static int
2705 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2706 			    struct rte_security_session_conf *conf,
2707 			    void *sess)
2708 {
2709 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2710 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2711 	struct rte_crypto_auth_xform *auth_xform = NULL;
2712 	struct rte_crypto_aead_xform *aead_xform = NULL;
2713 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2714 	struct ctxt_priv *priv;
2715 	struct ipsec_encap_pdb encap_pdb;
2716 	struct ipsec_decap_pdb decap_pdb;
2717 	struct alginfo authdata, cipherdata;
2718 	int bufsize;
2719 	struct sec_flow_context *flc;
2720 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2721 	int ret = -1;
2722 
2723 	PMD_INIT_FUNC_TRACE();
2724 
2725 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2726 				sizeof(struct ctxt_priv) +
2727 				sizeof(struct sec_flc_desc),
2728 				RTE_CACHE_LINE_SIZE);
2729 
2730 	if (priv == NULL) {
2731 		DPAA2_SEC_ERR("No memory for priv CTXT");
2732 		return -ENOMEM;
2733 	}
2734 
2735 	priv->fle_pool = dev_priv->fle_pool;
2736 	flc = &priv->flc_desc[0].flc;
2737 
2738 	memset(session, 0, sizeof(dpaa2_sec_session));
2739 
2740 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2741 		cipher_xform = &conf->crypto_xform->cipher;
2742 		if (conf->crypto_xform->next)
2743 			auth_xform = &conf->crypto_xform->next->auth;
2744 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2745 					session, &cipherdata, &authdata);
2746 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2747 		auth_xform = &conf->crypto_xform->auth;
2748 		if (conf->crypto_xform->next)
2749 			cipher_xform = &conf->crypto_xform->next->cipher;
2750 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2751 					session, &cipherdata, &authdata);
2752 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2753 		aead_xform = &conf->crypto_xform->aead;
2754 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2755 					session, &cipherdata);
2756 	} else {
2757 		DPAA2_SEC_ERR("XFORM not specified");
2758 		ret = -EINVAL;
2759 		goto out;
2760 	}
2761 	if (ret) {
2762 		DPAA2_SEC_ERR("Failed to process xform");
2763 		goto out;
2764 	}
2765 
2766 	session->ctxt_type = DPAA2_SEC_IPSEC;
2767 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2768 		uint8_t *hdr = NULL;
2769 		struct ip ip4_hdr;
2770 		struct rte_ipv6_hdr ip6_hdr;
2771 
2772 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2773 		/* For Sec Proto only one descriptor is required. */
2774 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2775 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2776 			PDBOPTS_ESP_OIHI_PDB_INL |
2777 			PDBOPTS_ESP_IVSRC |
2778 			PDBHMO_ESP_ENCAP_DTTL |
2779 			PDBHMO_ESP_SNR;
2780 		if (ipsec_xform->options.esn)
2781 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2782 		encap_pdb.spi = ipsec_xform->spi;
2783 		session->dir = DIR_ENC;
2784 		if (ipsec_xform->tunnel.type ==
2785 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2786 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2787 			ip4_hdr.ip_v = IPVERSION;
2788 			ip4_hdr.ip_hl = 5;
2789 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2790 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2791 			ip4_hdr.ip_id = 0;
2792 			ip4_hdr.ip_off = 0;
2793 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2794 			ip4_hdr.ip_p = IPPROTO_ESP;
2795 			ip4_hdr.ip_sum = 0;
2796 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2797 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2798 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2799 					&ip4_hdr, sizeof(struct ip));
2800 			hdr = (uint8_t *)&ip4_hdr;
2801 		} else if (ipsec_xform->tunnel.type ==
2802 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2803 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2804 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2805 				((ipsec_xform->tunnel.ipv6.dscp <<
2806 					RTE_IPV6_HDR_TC_SHIFT) &
2807 					RTE_IPV6_HDR_TC_MASK) |
2808 				((ipsec_xform->tunnel.ipv6.flabel <<
2809 					RTE_IPV6_HDR_FL_SHIFT) &
2810 					RTE_IPV6_HDR_FL_MASK));
2811 			/* Payload length will be updated by HW */
2812 			ip6_hdr.payload_len = 0;
2813 			ip6_hdr.hop_limits =
2814 					ipsec_xform->tunnel.ipv6.hlimit;
2815 			ip6_hdr.proto = (ipsec_xform->proto ==
2816 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2817 					IPPROTO_ESP : IPPROTO_AH;
2818 			memcpy(&ip6_hdr.src_addr,
2819 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2820 			memcpy(&ip6_hdr.dst_addr,
2821 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2822 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2823 			hdr = (uint8_t *)&ip6_hdr;
2824 		}
2825 
2826 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2827 				1, 0, SHR_SERIAL, &encap_pdb,
2828 				hdr, &cipherdata, &authdata);
2829 	} else if (ipsec_xform->direction ==
2830 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2831 		flc->dhr = SEC_FLC_DHR_INBOUND;
2832 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2833 		decap_pdb.options = (ipsec_xform->tunnel.type ==
2834 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2835 				sizeof(struct ip) << 16 :
2836 				sizeof(struct rte_ipv6_hdr) << 16;
2837 		if (ipsec_xform->options.esn)
2838 			decap_pdb.options |= PDBOPTS_ESP_ESN;
2839 		session->dir = DIR_DEC;
2840 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2841 				1, 0, SHR_SERIAL,
2842 				&decap_pdb, &cipherdata, &authdata);
2843 	} else
2844 		goto out;
2845 
2846 	if (bufsize < 0) {
2847 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2848 		goto out;
2849 	}
2850 
2851 	flc->word1_sdl = (uint8_t)bufsize;
2852 
2853 	/* Enable the stashing control bit */
2854 	DPAA2_SET_FLC_RSC(flc);
2855 	flc->word2_rflc_31_0 = lower_32_bits(
2856 			(size_t)&(((struct dpaa2_sec_qp *)
2857 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2858 	flc->word3_rflc_63_32 = upper_32_bits(
2859 			(size_t)&(((struct dpaa2_sec_qp *)
2860 			dev->data->queue_pairs[0])->rx_vq));
2861 
2862 	/* Set EWS bit i.e. enable write-safe */
2863 	DPAA2_SET_FLC_EWS(flc);
2864 	/* Set BS = 1 i.e reuse input buffers as output buffers */
2865 	DPAA2_SET_FLC_REUSE_BS(flc);
2866 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2867 	DPAA2_SET_FLC_REUSE_FF(flc);
2868 
2869 	session->ctxt = priv;
2870 
2871 	return 0;
2872 out:
2873 	rte_free(session->auth_key.data);
2874 	rte_free(session->cipher_key.data);
2875 	rte_free(priv);
2876 	return ret;
2877 }
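
/*
 * Illustrative sketch, not part of the driver: an egress IPv4 tunnel
 * configuration of the kind dpaa2_sec_set_ipsec_session() above turns
 * into an encap PDB. The SPI and TTL values are assumptions, and the
 * tunnel addresses are left zeroed for brevity.
 */
static __rte_unused struct rte_security_session *
example_create_ipsec_session(uint8_t dev_id,
			     struct rte_crypto_sym_xform *crypto_xform,
			     struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4.ttl = 64,
			},
		},
		.crypto_xform = crypto_xform,
	};

	/* Lands in dpaa2_sec_security_session_create() further below */
	return rte_security_session_create(ctx, &conf, sess_mp);
}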
2878 
2879 static int
2880 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2881 			   struct rte_security_session_conf *conf,
2882 			   void *sess)
2883 {
2884 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2885 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2886 	struct rte_crypto_auth_xform *auth_xform = NULL;
2887 	struct rte_crypto_cipher_xform *cipher_xform;
2888 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2889 	struct ctxt_priv *priv;
2890 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2891 	struct alginfo authdata, cipherdata;
2892 	struct alginfo *p_authdata = NULL;
2893 	int bufsize = -1;
2894 	struct sec_flow_context *flc;
2895 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2896 	int swap = true;
2897 #else
2898 	int swap = false;
2899 #endif
2900 
2901 	PMD_INIT_FUNC_TRACE();
2902 
2903 	memset(session, 0, sizeof(dpaa2_sec_session));
2904 
2905 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2906 				sizeof(struct ctxt_priv) +
2907 				sizeof(struct sec_flc_desc),
2908 				RTE_CACHE_LINE_SIZE);
2909 
2910 	if (priv == NULL) {
2911 		DPAA2_SEC_ERR("No memory for priv CTXT");
2912 		return -ENOMEM;
2913 	}
2914 
2915 	priv->fle_pool = dev_priv->fle_pool;
2916 	flc = &priv->flc_desc[0].flc;
2917 
2918 	/* find xform types */
2919 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2920 		cipher_xform = &xform->cipher;
2921 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2922 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2923 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2924 		cipher_xform = &xform->cipher;
2925 		auth_xform = &xform->next->auth;
2926 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2927 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2928 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2929 		cipher_xform = &xform->next->cipher;
2930 		auth_xform = &xform->auth;
2931 	} else {
2932 		DPAA2_SEC_ERR("Invalid crypto type");
2933 		return -EINVAL;
2934 	}
2935 
2936 	session->ctxt_type = DPAA2_SEC_PDCP;
2937 	if (cipher_xform) {
2938 		session->cipher_key.data = rte_zmalloc(NULL,
2939 					       cipher_xform->key.length,
2940 					       RTE_CACHE_LINE_SIZE);
2941 		if (session->cipher_key.data == NULL &&
2942 				cipher_xform->key.length > 0) {
2943 			DPAA2_SEC_ERR("No Memory for cipher key");
2944 			rte_free(priv);
2945 			return -ENOMEM;
2946 		}
2947 		session->cipher_key.length = cipher_xform->key.length;
2948 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2949 			cipher_xform->key.length);
2950 		session->dir =
2951 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2952 					DIR_ENC : DIR_DEC;
2953 		session->cipher_alg = cipher_xform->algo;
2954 	} else {
2955 		session->cipher_key.data = NULL;
2956 		session->cipher_key.length = 0;
2957 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2958 		session->dir = DIR_ENC;
2959 	}
2960 
2961 	session->pdcp.domain = pdcp_xform->domain;
2962 	session->pdcp.bearer = pdcp_xform->bearer;
2963 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2964 	session->pdcp.sn_size = pdcp_xform->sn_size;
2965 	session->pdcp.hfn = pdcp_xform->hfn;
2966 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2967 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2968 	/* hfn ovd offset location is stored in the iv.offset value */
2969 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2970 
2971 	cipherdata.key = (size_t)session->cipher_key.data;
2972 	cipherdata.keylen = session->cipher_key.length;
2973 	cipherdata.key_enc_flags = 0;
2974 	cipherdata.key_type = RTA_DATA_IMM;
2975 
2976 	switch (session->cipher_alg) {
2977 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2978 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2979 		break;
2980 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2981 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2982 		break;
2983 	case RTE_CRYPTO_CIPHER_AES_CTR:
2984 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2985 		break;
2986 	case RTE_CRYPTO_CIPHER_NULL:
2987 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2988 		break;
2989 	default:
2990 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2991 			      session->cipher_alg);
2992 		goto out;
2993 	}
2994 
2995 	if (auth_xform) {
2996 		session->auth_key.data = rte_zmalloc(NULL,
2997 						     auth_xform->key.length,
2998 						     RTE_CACHE_LINE_SIZE);
2999 		if (!session->auth_key.data &&
3000 		    auth_xform->key.length > 0) {
3001 			DPAA2_SEC_ERR("No Memory for auth key");
3002 			rte_free(session->cipher_key.data);
3003 			rte_free(priv);
3004 			return -ENOMEM;
3005 		}
3006 		session->auth_key.length = auth_xform->key.length;
3007 		memcpy(session->auth_key.data, auth_xform->key.data,
3008 		       auth_xform->key.length);
3009 		session->auth_alg = auth_xform->algo;
3010 	} else {
3011 		session->auth_key.data = NULL;
3012 		session->auth_key.length = 0;
3013 		session->auth_alg = 0;
3014 	}
3015 	authdata.key = (size_t)session->auth_key.data;
3016 	authdata.keylen = session->auth_key.length;
3017 	authdata.key_enc_flags = 0;
3018 	authdata.key_type = RTA_DATA_IMM;
3019 
3020 	if (session->auth_alg) {
3021 		switch (session->auth_alg) {
3022 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3023 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3024 			break;
3025 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3026 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3027 			break;
3028 		case RTE_CRYPTO_AUTH_AES_CMAC:
3029 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3030 			break;
3031 		case RTE_CRYPTO_AUTH_NULL:
3032 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3033 			break;
3034 		default:
3035 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3036 				      session->auth_alg);
3037 			goto out;
3038 		}
3039 
3040 		p_authdata = &authdata;
3041 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3042 		DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3043 		goto out;
3044 	}
3045 
3046 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3047 		if (session->dir == DIR_ENC)
3048 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3049 					priv->flc_desc[0].desc, 1, swap,
3050 					pdcp_xform->hfn,
3051 					session->pdcp.sn_size,
3052 					pdcp_xform->bearer,
3053 					pdcp_xform->pkt_dir,
3054 					pdcp_xform->hfn_threshold,
3055 					&cipherdata, &authdata,
3056 					0);
3057 		else if (session->dir == DIR_DEC)
3058 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3059 					priv->flc_desc[0].desc, 1, swap,
3060 					pdcp_xform->hfn,
3061 					session->pdcp.sn_size,
3062 					pdcp_xform->bearer,
3063 					pdcp_xform->pkt_dir,
3064 					pdcp_xform->hfn_threshold,
3065 					&cipherdata, &authdata,
3066 					0);
3067 	} else {
3068 		if (session->dir == DIR_ENC)
3069 			bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3070 					priv->flc_desc[0].desc, 1, swap,
3071 					session->pdcp.sn_size,
3072 					pdcp_xform->hfn,
3073 					pdcp_xform->bearer,
3074 					pdcp_xform->pkt_dir,
3075 					pdcp_xform->hfn_threshold,
3076 					&cipherdata, p_authdata, 0);
3077 		else if (session->dir == DIR_DEC)
3078 			bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3079 					priv->flc_desc[0].desc, 1, swap,
3080 					session->pdcp.sn_size,
3081 					pdcp_xform->hfn,
3082 					pdcp_xform->bearer,
3083 					pdcp_xform->pkt_dir,
3084 					pdcp_xform->hfn_threshold,
3085 					&cipherdata, p_authdata, 0);
3086 	}
3087 
3088 	if (bufsize < 0) {
3089 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3090 		goto out;
3091 	}
3092 
3093 	/* Enable the stashing control bit */
3094 	DPAA2_SET_FLC_RSC(flc);
3095 	flc->word2_rflc_31_0 = lower_32_bits(
3096 			(size_t)&(((struct dpaa2_sec_qp *)
3097 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3098 	flc->word3_rflc_63_32 = upper_32_bits(
3099 			(size_t)&(((struct dpaa2_sec_qp *)
3100 			dev->data->queue_pairs[0])->rx_vq));
3101 
3102 	flc->word1_sdl = (uint8_t)bufsize;
3103 
3104 	/* TODO - check the perf impact, or align as per the
3105 	 * descriptor type. The EWS bit (enable write-safe) is
3106 	 * intentionally left unset for now:
3107 	 * DPAA2_SET_FLC_EWS(flc);
3108 	 */
3109 
3110 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3111 	DPAA2_SET_FLC_REUSE_BS(flc);
3112 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3113 	DPAA2_SET_FLC_REUSE_FF(flc);
3114 
3115 	session->ctxt = priv;
3116 
3117 	return 0;
3118 out:
3119 	rte_free(session->auth_key.data);
3120 	rte_free(session->cipher_key.data);
3121 	rte_free(priv);
3122 	return -1;
3123 }
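
/*
 * Illustrative sketch, not part of the driver: a PDCP u-plane (data)
 * configuration of the kind dpaa2_sec_set_pdcp_session() above turns
 * into a u-plane descriptor. Bearer, HFN and SN size are assumptions;
 * c-plane additionally requires an integrity transform, as enforced
 * above.
 */
static __rte_unused struct rte_security_session *
example_create_pdcp_session(uint8_t dev_id,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.bearer = 0x1,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
			.hfn = 0x1,
			.hfn_threshold = 0x70c0a,
		},
		.crypto_xform = crypto_xform,
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}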
3124 
3125 static int
3126 dpaa2_sec_security_session_create(void *dev,
3127 				  struct rte_security_session_conf *conf,
3128 				  struct rte_security_session *sess,
3129 				  struct rte_mempool *mempool)
3130 {
3131 	void *sess_private_data;
3132 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3133 	int ret;
3134 
3135 	if (rte_mempool_get(mempool, &sess_private_data)) {
3136 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3137 		return -ENOMEM;
3138 	}
3139 
3140 	switch (conf->protocol) {
3141 	case RTE_SECURITY_PROTOCOL_IPSEC:
3142 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3143 				sess_private_data);
3144 		break;
3145 	case RTE_SECURITY_PROTOCOL_MACSEC:
3146 		return -ENOTSUP;
3147 	case RTE_SECURITY_PROTOCOL_PDCP:
3148 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3149 				sess_private_data);
3150 		break;
3151 	default:
3152 		return -EINVAL;
3153 	}
3154 	if (ret != 0) {
3155 		DPAA2_SEC_ERR("Failed to configure session parameters");
3156 		/* Return session to mempool */
3157 		rte_mempool_put(mempool, sess_private_data);
3158 		return ret;
3159 	}
3160 
3161 	set_sec_session_private_data(sess, sess_private_data);
3162 
3163 	return ret;
3164 }
3165 
3166 /** Clear the memory of the session so it doesn't leave key material behind */
3167 static int
3168 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3169 		struct rte_security_session *sess)
3170 {
3171 	PMD_INIT_FUNC_TRACE();
3172 	void *sess_priv = get_sec_session_private_data(sess);
3173 
3174 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3175 
3176 	if (sess_priv) {
3177 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3178 
3179 		rte_free(s->ctxt);
3180 		rte_free(s->cipher_key.data);
3181 		rte_free(s->auth_key.data);
3182 		memset(s, 0, sizeof(dpaa2_sec_session));
3183 		set_sec_session_private_data(sess, NULL);
3184 		rte_mempool_put(sess_mp, sess_priv);
3185 	}
3186 	return 0;
3187 }
3188 #endif
3189 static int
3190 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3191 		struct rte_crypto_sym_xform *xform,
3192 		struct rte_cryptodev_sym_session *sess,
3193 		struct rte_mempool *mempool)
3194 {
3195 	void *sess_private_data;
3196 	int ret;
3197 
3198 	if (rte_mempool_get(mempool, &sess_private_data)) {
3199 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3200 		return -ENOMEM;
3201 	}
3202 
3203 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3204 	if (ret != 0) {
3205 		DPAA2_SEC_ERR("Failed to configure session parameters");
3206 		/* Return session to mempool */
3207 		rte_mempool_put(mempool, sess_private_data);
3208 		return ret;
3209 	}
3210 
3211 	set_sym_session_private_data(sess, dev->driver_id,
3212 		sess_private_data);
3213 
3214 	return 0;
3215 }
3216 
3217 /** Clear the memory of the session so it doesn't leave key material behind */
3218 static void
3219 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3220 		struct rte_cryptodev_sym_session *sess)
3221 {
3222 	PMD_INIT_FUNC_TRACE();
3223 	uint8_t index = dev->driver_id;
3224 	void *sess_priv = get_sym_session_private_data(sess, index);
3225 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3226 
3227 	if (sess_priv) {
3228 		rte_free(s->ctxt);
3229 		rte_free(s->cipher_key.data);
3230 		rte_free(s->auth_key.data);
3231 		memset(s, 0, sizeof(dpaa2_sec_session));
3232 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3233 		set_sym_session_private_data(sess, index, NULL);
3234 		rte_mempool_put(sess_mp, sess_priv);
3235 	}
3236 }
3237 
3238 static int
3239 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3240 			struct rte_cryptodev_config *config __rte_unused)
3241 {
3242 	PMD_INIT_FUNC_TRACE();
3243 
3244 	return 0;
3245 }
3246 
3247 static int
3248 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3249 {
3250 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3251 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3252 	struct dpseci_attr attr;
3253 	struct dpaa2_queue *dpaa2_q;
3254 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3255 					dev->data->queue_pairs;
3256 	struct dpseci_rx_queue_attr rx_attr;
3257 	struct dpseci_tx_queue_attr tx_attr;
3258 	int ret, i;
3259 
3260 	PMD_INIT_FUNC_TRACE();
3261 
3262 	memset(&attr, 0, sizeof(struct dpseci_attr));
3263 
3264 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3265 	if (ret) {
3266 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3267 			      priv->hw_id);
3268 		goto get_attr_failure;
3269 	}
3270 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3271 	if (ret) {
3272 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3273 		goto get_attr_failure;
3274 	}
3275 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3276 		dpaa2_q = &qp[i]->rx_vq;
3277 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3278 				    &rx_attr);
3279 		dpaa2_q->fqid = rx_attr.fqid;
3280 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3281 	}
3282 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3283 		dpaa2_q = &qp[i]->tx_vq;
3284 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3285 				    &tx_attr);
3286 		dpaa2_q->fqid = tx_attr.fqid;
3287 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3288 	}
3289 
3290 	return 0;
3291 get_attr_failure:
3292 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3293 	return -1;
3294 }
3295 
3296 static void
3297 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3298 {
3299 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3300 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3301 	int ret;
3302 
3303 	PMD_INIT_FUNC_TRACE();
3304 
3305 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3306 	if (ret) {
3307 		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3308 			     priv->hw_id);
3309 		return;
3310 	}
3311 
3312 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3313 	if (ret < 0) {
3314 		DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3315 		return;
3316 	}
3317 }
3318 
3319 static int
3320 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3321 {
3322 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3323 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3324 	int ret;
3325 
3326 	PMD_INIT_FUNC_TRACE();
3327 
3328 	/* Function is reverse of dpaa2_sec_dev_init.
3329 	 * It does the following:
3330 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3331 	 * 2. Close the DPSECI device
3332 	 * 3. Free the allocated resources.
3333 	 */
3334 
3335 	/* Close the device at the underlying layer */
3336 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3337 	if (ret) {
3338 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3339 		return -1;
3340 	}
3341 
3342 	/* Free the allocated memory for the dpseci object */
3343 	priv->hw = NULL;
3344 	rte_free(dpseci);
3345 
3346 	return 0;
3347 }
3348 
3349 static void
3350 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3351 			struct rte_cryptodev_info *info)
3352 {
3353 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3354 
3355 	PMD_INIT_FUNC_TRACE();
3356 	if (info != NULL) {
3357 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3358 		info->feature_flags = dev->feature_flags;
3359 		info->capabilities = dpaa2_sec_capabilities;
3360 		/* No limit of number of sessions */
3361 		info->sym.max_nb_sessions = 0;
3362 		info->driver_id = cryptodev_driver_id;
3363 	}
3364 }
3365 
3366 static
3367 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3368 			 struct rte_cryptodev_stats *stats)
3369 {
3370 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3371 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3372 	struct dpseci_sec_counters counters = {0};
3373 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3374 					dev->data->queue_pairs;
3375 	int ret, i;
3376 
3377 	PMD_INIT_FUNC_TRACE();
3378 	if (stats == NULL) {
3379 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3380 		return;
3381 	}
3382 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3383 		if (qp[i] == NULL) {
3384 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3385 			continue;
3386 		}
3387 
3388 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3389 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3390 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3391 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3392 	}
3393 
3394 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3395 				      &counters);
3396 	if (ret) {
3397 		DPAA2_SEC_ERR("SEC counters failed");
3398 	} else {
3399 		DPAA2_SEC_INFO("dpseci hardware stats:"
3400 			    "\n\tNum of Requests Dequeued = %" PRIu64
3401 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3402 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3403 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3404 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3405 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3406 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3407 			    counters.dequeued_requests,
3408 			    counters.ob_enc_requests,
3409 			    counters.ib_dec_requests,
3410 			    counters.ob_enc_bytes,
3411 			    counters.ob_prot_bytes,
3412 			    counters.ib_dec_bytes,
3413 			    counters.ib_valid_bytes);
3414 	}
3415 }
3416 
3417 static
3418 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3419 {
3420 	int i;
3421 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3422 				   (dev->data->queue_pairs);
3423 
3424 	PMD_INIT_FUNC_TRACE();
3425 
3426 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3427 		if (qp[i] == NULL) {
3428 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3429 			continue;
3430 		}
3431 		qp[i]->tx_vq.rx_pkts = 0;
3432 		qp[i]->tx_vq.tx_pkts = 0;
3433 		qp[i]->tx_vq.err_pkts = 0;
3434 		qp[i]->rx_vq.rx_pkts = 0;
3435 		qp[i]->rx_vq.tx_pkts = 0;
3436 		qp[i]->rx_vq.err_pkts = 0;
3437 	}
3438 }
3439 
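/* Event dequeue callback for RTE_SCHED_TYPE_PARALLEL queues: it fills in
 * the rte_event from the per-queue template in rxq->ev, translates the FD
 * back into the completed crypto operation, and consumes the DQRR entry
 * immediately, since parallel events need no ordering guarantee.
 */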
3440 static void __attribute__((hot))
3441 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3442 				 const struct qbman_fd *fd,
3443 				 const struct qbman_result *dq,
3444 				 struct dpaa2_queue *rxq,
3445 				 struct rte_event *ev)
3446 {
3447 	/* Prefetching mbuf */
3448 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3449 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3450 
3451 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3452 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3453 
3454 	ev->flow_id = rxq->ev.flow_id;
3455 	ev->sub_event_type = rxq->ev.sub_event_type;
3456 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3457 	ev->op = RTE_EVENT_OP_NEW;
3458 	ev->sched_type = rxq->ev.sched_type;
3459 	ev->queue_id = rxq->ev.queue_id;
3460 	ev->priority = rxq->ev.priority;
3461 	ev->event_ptr = sec_fd_to_mbuf(fd);
3462 
3463 	qbman_swp_dqrr_consume(swp, dq);
3464 }
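
/* Event dequeue callback for RTE_SCHED_TYPE_ATOMIC queues: unlike the
 * parallel variant above, the DQRR entry is NOT consumed here.  Its index
 * is parked in the mbuf and in the per-lcore DQRR-held bitmap so the entry
 * can be consumed later, when the application releases the event; this is
 * what preserves atomic ordering.
 */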
3465 static void
3466 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3467 				 const struct qbman_fd *fd,
3468 				 const struct qbman_result *dq,
3469 				 struct dpaa2_queue *rxq,
3470 				 struct rte_event *ev)
3471 {
3472 	uint8_t dqrr_index;
3473 	struct rte_crypto_op *crypto_op;
3474 	/* Prefetching mbuf */
3475 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3476 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3477 
3478 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3479 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3480 
3481 	ev->flow_id = rxq->ev.flow_id;
3482 	ev->sub_event_type = rxq->ev.sub_event_type;
3483 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3484 	ev->op = RTE_EVENT_OP_NEW;
3485 	ev->sched_type = rxq->ev.sched_type;
3486 	ev->queue_id = rxq->ev.queue_id;
3487 	ev->priority = rxq->ev.priority;
3488 
3489 	ev->event_ptr = sec_fd_to_mbuf(fd);
	/* Fetch the op only after event_ptr has been rebuilt from the FD;
	 * on entry ev->event_ptr is not yet valid.
	 */
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3490 	dqrr_index = qbman_get_dqrr_idx(dq);
3491 	crypto_op->sym->m_src->seqn = dqrr_index + 1;
3492 	DPAA2_PER_LCORE_DQRR_SIZE++;
3493 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3494 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3495 }
3496 
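/* Attach queue pair qp_id of this DPSECI to a DPCON object so that
 * completions are pushed through the event device.  Note the priority
 * scaling below assumes a non-zero event->priority
 * (RTE_EVENT_DEV_PRIORITY_HIGHEST is 0 and would divide by zero), and
 * maps RTE_EVENT_DEV_PRIORITY_LOWEST to DPCON priority num_priorities - 1.
 */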
3497 int
3498 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3499 		int qp_id,
3500 		struct dpaa2_dpcon_dev *dpcon,
3501 		const struct rte_event *event)
3502 {
3503 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3504 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3505 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3506 	struct dpseci_rx_queue_cfg cfg;
3507 	uint8_t priority;
3508 	int ret;
3509 
3510 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3511 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3512 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3513 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3514 	else
3515 		return -EINVAL;
3516 
3517 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3518 		   (dpcon->num_priorities - 1);
3519 
3520 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3521 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3522 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3523 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3524 	cfg.dest_cfg.priority = priority;
3525 
3526 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3527 	cfg.user_ctx = (size_t)(qp);
3528 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3529 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3530 		cfg.order_preservation_en = 1;
3531 	}
3532 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3533 				  qp_id, &cfg);
3534 	if (ret) {
3535 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3536 		return ret;
3537 	}
3538 
3539 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3540 
3541 	return 0;
3542 }
3543 
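/* Detach the queue pair from its DPCON: resetting the destination to
 * DPSECI_DEST_NONE means completions are no longer pushed to the event
 * device and must be pulled via the normal dequeue path instead.
 */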
3544 int
3545 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3546 			int qp_id)
3547 {
3548 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3549 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3550 	struct dpseci_rx_queue_cfg cfg;
3551 	int ret;
3552 
3553 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3554 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3555 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3556 
3557 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3558 				  qp_id, &cfg);
3559 	if (ret)
3560 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3561 
3562 	return ret;
3563 }
3564 
3565 static struct rte_cryptodev_ops crypto_ops = {
3566 	.dev_configure	      = dpaa2_sec_dev_configure,
3567 	.dev_start	      = dpaa2_sec_dev_start,
3568 	.dev_stop	      = dpaa2_sec_dev_stop,
3569 	.dev_close	      = dpaa2_sec_dev_close,
3570 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3571 	.stats_get	      = dpaa2_sec_stats_get,
3572 	.stats_reset	      = dpaa2_sec_stats_reset,
3573 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3574 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3575 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
3576 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3577 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3578 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3579 };
3580 
3581 #ifdef RTE_LIBRTE_SECURITY
3582 static const struct rte_security_capability *
3583 dpaa2_sec_capabilities_get(void *device __rte_unused)
3584 {
3585 	return dpaa2_sec_security_cap;
3586 }
3587 
3588 static const struct rte_security_ops dpaa2_sec_security_ops = {
3589 	.session_create = dpaa2_sec_security_session_create,
3590 	.session_update = NULL,
3591 	.session_stats_get = NULL,
3592 	.session_destroy = dpaa2_sec_security_session_destroy,
3593 	.set_pkt_metadata = NULL,
3594 	.capabilities_get = dpaa2_sec_capabilities_get
3595 };
3596 #endif
3597 
3598 static int
3599 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3600 {
3601 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3602 
3603 	rte_free(dev->security_ctx);
3604 
3605 	rte_mempool_free(internals->fle_pool);
3606 
3607 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3608 		       dev->data->name, rte_socket_id());
3609 
3610 	return 0;
3611 }
3612 
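/* Probe-time initialisation: set up the cryptodev ops and feature flags,
 * open the DPSECI object through the MC portal, size the queue pairs from
 * its attributes, and create the per-device FLE pool that the datapath
 * uses to build frame descriptors.
 */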
3613 static int
3614 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3615 {
3616 	struct dpaa2_sec_dev_private *internals;
3617 	struct rte_device *dev = cryptodev->device;
3618 	struct rte_dpaa2_device *dpaa2_dev;
3619 #ifdef RTE_LIBRTE_SECURITY
3620 	struct rte_security_ctx *security_instance;
3621 #endif
3622 	struct fsl_mc_io *dpseci;
3623 	uint16_t token;
3624 	struct dpseci_attr attr;
3625 	int retcode, hw_id;
3626 	char str[30];
3627 
3628 	PMD_INIT_FUNC_TRACE();
3629 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3630 	if (dpaa2_dev == NULL) {
3631 		DPAA2_SEC_ERR("DPAA2 SEC device not found");
3632 		return -1;
3633 	}
3634 	hw_id = dpaa2_dev->object_id;
3635 
3636 	cryptodev->driver_id = cryptodev_driver_id;
3637 	cryptodev->dev_ops = &crypto_ops;
3638 
3639 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3640 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3641 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3642 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3643 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3644 			RTE_CRYPTODEV_FF_SECURITY |
3645 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3646 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3647 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3648 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3649 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3650 
3651 	internals = cryptodev->data->dev_private;
3652 
3653 	/*
3654 	 * For secondary processes, we don't initialise any further, as the
3655 	 * primary has already done this work; the ops and burst functions
3656 	 * set above are all a secondary process needs.
3657 	 */
3658 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3659 		DPAA2_SEC_DEBUG("Device already init by primary process");
3660 		return 0;
3661 	}
3662 #ifdef RTE_LIBRTE_SECURITY
3663 	/* Initialize security_ctx only for primary process*/
3664 	security_instance = rte_malloc("rte_security_instances_ops",
3665 				sizeof(struct rte_security_ctx), 0);
3666 	if (security_instance == NULL)
3667 		return -ENOMEM;
3668 	security_instance->device = (void *)cryptodev;
3669 	security_instance->ops = &dpaa2_sec_security_ops;
3670 	security_instance->sess_cnt = 0;
3671 	cryptodev->security_ctx = security_instance;
3672 #endif
3673 	/* Open the rte device via MC and save the handle for further use */
3674 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3675 				sizeof(struct fsl_mc_io), 0);
3676 	if (!dpseci) {
3677 		DPAA2_SEC_ERR(
3678 			"Error in allocating memory for the dpseci object");
3679 		return -1;
3680 	}
3681 	dpseci->regs = rte_mcp_ptr_list[0];
3682 
3683 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3684 	if (retcode != 0) {
3685 		DPAA2_SEC_ERR("Cannot open the dpseci device: Error = %x",
3686 			      retcode);
3687 		goto init_error;
3688 	}
3689 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3690 	if (retcode != 0) {
3691 		DPAA2_SEC_ERR(
3692 			     "Cannot get dpseci device attributes: Error = %x",
3693 			     retcode);
3694 		goto init_error;
3695 	}
3696 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3697 			"dpsec-%u", hw_id);
3698 
3699 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3700 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3701 	internals->hw = dpseci;
3702 	internals->token = token;
3703 
3704 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3705 			getpid(), cryptodev->data->dev_id);
3706 	internals->fle_pool = rte_mempool_create((const char *)str,
3707 			FLE_POOL_NUM_BUFS,
3708 			FLE_POOL_BUF_SIZE,
3709 			FLE_POOL_CACHE_SIZE, 0,
3710 			NULL, NULL, NULL, NULL,
3711 			SOCKET_ID_ANY, 0);
3712 	if (!internals->fle_pool) {
3713 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3714 		goto init_error;
3715 	}
3716 
3717 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3718 	return 0;
3719 
3720 init_error:
3721 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3722 
3723 	/* dpaa2_sec_uninit(crypto_dev_name); */
3724 	return -EFAULT;
3725 }
3726 
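/* Bus probe hook: allocates the cryptodev shell (private data only in the
 * primary process), selects the SEC era for the detected SoC, and then
 * runs dpaa2_sec_dev_init() above; on failure the shell is torn down and
 * the device is marked detached.
 */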
3727 static int
3728 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3729 			  struct rte_dpaa2_device *dpaa2_dev)
3730 {
3731 	struct rte_cryptodev *cryptodev;
3732 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3733 
3734 	int retval;
3735 
3736 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3737 			dpaa2_dev->object_id);
3738 
3739 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3740 	if (cryptodev == NULL)
3741 		return -ENOMEM;
3742 
3743 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3744 		cryptodev->data->dev_private = rte_zmalloc_socket(
3745 					"cryptodev private structure",
3746 					sizeof(struct dpaa2_sec_dev_private),
3747 					RTE_CACHE_LINE_SIZE,
3748 					rte_socket_id());
3749 
3750 		if (cryptodev->data->dev_private == NULL)
3751 			rte_panic("Cannot allocate memzone for private "
3752 				  "device data");
3753 	}
3754 
3755 	dpaa2_dev->cryptodev = cryptodev;
3756 	cryptodev->device = &dpaa2_dev->device;
3757 
3758 	/* init user callbacks */
3759 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3760 
3761 	if (dpaa2_svr_family == SVR_LX2160A)
3762 		rta_set_sec_era(RTA_SEC_ERA_10);
3763 
3764 	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
3765 
3766 	/* Invoke PMD device initialization function */
3767 	retval = dpaa2_sec_dev_init(cryptodev);
3768 	if (retval == 0)
3769 		return 0;
3770 
3771 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3772 		rte_free(cryptodev->data->dev_private);
3773 
3774 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3775 
3776 	return -ENXIO;
3777 }
3778 
3779 static int
3780 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3781 {
3782 	struct rte_cryptodev *cryptodev;
3783 	int ret;
3784 
3785 	cryptodev = dpaa2_dev->cryptodev;
3786 	if (cryptodev == NULL)
3787 		return -ENODEV;
3788 
3789 	ret = dpaa2_sec_uninit(cryptodev);
3790 	if (ret)
3791 		return ret;
3792 
3793 	return rte_cryptodev_pmd_destroy(cryptodev);
3794 }
3795 
3796 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3797 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3798 	.drv_type = DPAA2_CRYPTO,
3799 	.driver = {
3800 		.name = "DPAA2 SEC PMD"
3801 	},
3802 	.probe = cryptodev_dpaa2_sec_probe,
3803 	.remove = cryptodev_dpaa2_sec_remove,
3804 };
3805 
3806 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3807 
3808 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3809 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3810 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3811 
3812 RTE_INIT(dpaa2_sec_init_log)
3813 {
3814 	/* Crypto PMD log type */
3815 	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3816 	if (dpaa2_logtype_sec >= 0)
3817 		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3818 }
3819