xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision 27b549c12df2ef2db6b271795b4df7b14a2d9c2c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35 
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
40 #include <desc/algo.h>
41 
42 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
43  * a pointer to the shared descriptor.
44  */
45 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
46 #define FSL_VENDOR_ID           0x1957
47 #define FSL_DEVICE_ID           0x410
48 #define FSL_SUBSYSTEM_SEC       1
49 #define FSL_MC_DPSECI_DEVID     3
50 
51 #define NO_PREFETCH 0
52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
53 #define FLE_POOL_NUM_BUFS	32000
54 #define FLE_POOL_BUF_SIZE	256
55 #define FLE_POOL_CACHE_SIZE	512
56 #define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
57 #define SEC_FLC_DHR_OUTBOUND	-114
58 #define SEC_FLC_DHR_INBOUND	0
59 
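/* A sketch of the FLE layout shared by the frame-descriptor builders in
 * this file (the exact qbman_fle definition comes from the QBMAN headers;
 * 32 bytes per entry is assumed here, matching the macro above):
 *
 *   fle[0]   - bookkeeping: crypto op pointer + session ctxt pointer
 *   fle[1]   - output frame-list entry (op_fle)
 *   fle[2]   - input frame-list entry (ip_fle)
 *   fle[3..] - scatter/gather entries for both directions
 *
 * FLE_SG_MEM_SIZE() reserves FLE_POOL_BUF_SIZE bytes for the fixed part
 * plus 32 bytes per segment, e.g. for a 3-segment source and 3-segment
 * destination: FLE_SG_MEM_SIZE(3 + 3) = 256 + 6 * 32 = 448 bytes.
 */
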
60 static uint8_t cryptodev_driver_id;
61 
62 #ifdef RTE_LIB_SECURITY
63 static inline int
64 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
65 			   struct rte_crypto_op *op,
66 			   struct qbman_fd *fd, uint16_t bpid)
67 {
68 	struct rte_crypto_sym_op *sym_op = op->sym;
69 	struct ctxt_priv *priv = sess->ctxt;
70 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
71 	struct sec_flow_context *flc;
72 	struct rte_mbuf *mbuf;
73 	uint32_t in_len = 0, out_len = 0;
74 
75 	if (sym_op->m_dst)
76 		mbuf = sym_op->m_dst;
77 	else
78 		mbuf = sym_op->m_src;
79 
80 	/* first FLE entry used to store mbuf and session ctxt */
81 	fle = (struct qbman_fle *)rte_malloc(NULL,
82 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
83 			RTE_CACHE_LINE_SIZE);
84 	if (unlikely(!fle)) {
85 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
86 		return -ENOMEM;
87 	}
88 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
89 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
90 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
91 
92 	/* Save the shared descriptor */
93 	flc = &priv->flc_desc[0].flc;
94 
95 	op_fle = fle + 1;
96 	ip_fle = fle + 2;
97 	sge = fle + 3;
98 
99 	if (likely(bpid < MAX_BPID)) {
100 		DPAA2_SET_FD_BPID(fd, bpid);
101 		DPAA2_SET_FLE_BPID(op_fle, bpid);
102 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
103 	} else {
104 		DPAA2_SET_FD_IVP(fd);
105 		DPAA2_SET_FLE_IVP(op_fle);
106 		DPAA2_SET_FLE_IVP(ip_fle);
107 	}
108 
109 	/* Configure FD as a FRAME LIST */
110 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
111 	DPAA2_SET_FD_COMPOUND_FMT(fd);
112 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
113 
114 	/* Configure Output FLE with Scatter/Gather Entry */
115 	DPAA2_SET_FLE_SG_EXT(op_fle);
116 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
117 
118 	/* Configure Output SGE for Encap/Decap */
119 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
120 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
121 	/* o/p segs */
122 	while (mbuf->next) {
123 		sge->length = mbuf->data_len;
124 		out_len += sge->length;
125 		sge++;
126 		mbuf = mbuf->next;
127 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
128 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
129 	}
130 	/* using buf_len for last buf - so that extra data can be added */
131 	sge->length = mbuf->buf_len - mbuf->data_off;
132 	out_len += sge->length;
133 
134 	DPAA2_SET_FLE_FIN(sge);
135 	op_fle->length = out_len;
136 
137 	sge++;
138 	mbuf = sym_op->m_src;
139 
140 	/* Configure Input FLE with Scatter/Gather Entry */
141 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
142 	DPAA2_SET_FLE_SG_EXT(ip_fle);
143 	DPAA2_SET_FLE_FIN(ip_fle);
144 
145 	/* Configure input SGE for Encap/Decap */
146 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
147 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
148 	sge->length = mbuf->data_len;
149 	in_len += sge->length;
150 
151 	mbuf = mbuf->next;
152 	/* i/p segs */
153 	while (mbuf) {
154 		sge++;
155 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
156 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
157 		sge->length = mbuf->data_len;
158 		in_len += sge->length;
159 		mbuf = mbuf->next;
160 	}
161 	ip_fle->length = in_len;
162 	DPAA2_SET_FLE_FIN(sge);
163 
164 	/* In case of PDCP, the per-packet HFN is stored in the
165 	 * mbuf priv area after sym_op.
166 	 */
167 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
168 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
169 					sess->pdcp.hfn_ovd_offset);
170 		/* enable HFN override */
171 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
172 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
173 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
174 	}
175 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
176 
177 	return 0;
178 }
179 
180 static inline int
181 build_proto_compound_fd(dpaa2_sec_session *sess,
182 	       struct rte_crypto_op *op,
183 	       struct qbman_fd *fd, uint16_t bpid)
184 {
185 	struct rte_crypto_sym_op *sym_op = op->sym;
186 	struct ctxt_priv *priv = sess->ctxt;
187 	struct qbman_fle *fle, *ip_fle, *op_fle;
188 	struct sec_flow_context *flc;
189 	struct rte_mbuf *src_mbuf = sym_op->m_src;
190 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
191 	int retval;
192 
193 	if (!dst_mbuf)
194 		dst_mbuf = src_mbuf;
195 
196 	/* Save the shared descriptor */
197 	flc = &priv->flc_desc[0].flc;
198 
199 	/* we are using the first FLE entry to store Mbuf */
200 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
201 	if (retval) {
202 		DPAA2_SEC_DP_ERR("Memory alloc failed");
203 		return -ENOMEM;
204 	}
205 	memset(fle, 0, FLE_POOL_BUF_SIZE);
206 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
207 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
208 
209 	op_fle = fle + 1;
210 	ip_fle = fle + 2;
211 
212 	if (likely(bpid < MAX_BPID)) {
213 		DPAA2_SET_FD_BPID(fd, bpid);
214 		DPAA2_SET_FLE_BPID(op_fle, bpid);
215 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
216 	} else {
217 		DPAA2_SET_FD_IVP(fd);
218 		DPAA2_SET_FLE_IVP(op_fle);
219 		DPAA2_SET_FLE_IVP(ip_fle);
220 	}
221 
222 	/* Configure FD as a FRAME LIST */
223 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
224 	DPAA2_SET_FD_COMPOUND_FMT(fd);
225 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
226 
227 	/* Configure Output FLE with dst mbuf data  */
228 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
229 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
230 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
231 
232 	/* Configure Input FLE with src mbuf data */
233 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
234 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
235 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
236 
237 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
238 	DPAA2_SET_FLE_FIN(ip_fle);
239 
240 	/* In case of PDCP, the per-packet HFN is stored in the
241 	 * mbuf priv area after sym_op.
242 	 */
243 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
244 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
245 					sess->pdcp.hfn_ovd_offset);
246 		/* enable HFN override */
247 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
248 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
249 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
250 	}
251 
252 	return 0;
253 
254 }
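
/* Hedged usage sketch (not part of the driver): when a PDCP session is
 * created with hfn_ovd enabled, the application supplies the per-packet
 * HFN at hfn_ovd_offset bytes from the start of the crypto op; the two
 * builders above read it from exactly that location. Assuming the offset
 * chosen at session-setup time is passed in:
 *
 *   static inline void
 *   set_pkt_hfn(struct rte_crypto_op *op, uint16_t hfn_ovd_offset,
 *               uint32_t hfn)
 *   {
 *           *(uint32_t *)((uint8_t *)op + hfn_ovd_offset) = hfn;
 *   }
 */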
255 
256 static inline int
257 build_proto_fd(dpaa2_sec_session *sess,
258 	       struct rte_crypto_op *op,
259 	       struct qbman_fd *fd, uint16_t bpid)
260 {
261 	struct rte_crypto_sym_op *sym_op = op->sym;
262 	if (sym_op->m_dst)
263 		return build_proto_compound_fd(sess, op, fd, bpid);
264 
265 	struct ctxt_priv *priv = sess->ctxt;
266 	struct sec_flow_context *flc;
267 	struct rte_mbuf *mbuf = sym_op->m_src;
268 
269 	if (likely(bpid < MAX_BPID))
270 		DPAA2_SET_FD_BPID(fd, bpid);
271 	else
272 		DPAA2_SET_FD_IVP(fd);
273 
274 	/* Save the shared descriptor */
275 	flc = &priv->flc_desc[0].flc;
276 
277 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
278 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
279 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
280 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
281 
282 	/* stash op in buf_iova; original IOVA parked in digest.phys_addr */
283 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
284 	mbuf->buf_iova = (size_t)op;
285 
286 	return 0;
287 }
288 #endif
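
/* A sketch of the pointer round trip used by build_proto_fd() above and
 * sec_simple_fd_to_mbuf() below: buf_iova temporarily carries the op
 * pointer through hardware while the real IOVA is parked in
 * aead.digest.phys_addr:
 *
 *   enqueue:  digest.phys_addr = mbuf->buf_iova;  buf_iova = (size_t)op;
 *   dequeue:  op = (void *)(size_t)mbuf->buf_iova;
 *             buf_iova = digest.phys_addr;
 */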
289 
290 static inline int
291 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
292 		 struct rte_crypto_op *op,
293 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
294 {
295 	struct rte_crypto_sym_op *sym_op = op->sym;
296 	struct ctxt_priv *priv = sess->ctxt;
297 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
298 	struct sec_flow_context *flc;
299 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
300 	int icv_len = sess->digest_length;
301 	uint8_t *old_icv;
302 	struct rte_mbuf *mbuf;
303 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
304 			sess->iv.offset);
305 
306 	if (sym_op->m_dst)
307 		mbuf = sym_op->m_dst;
308 	else
309 		mbuf = sym_op->m_src;
310 
311 	/* first FLE entry used to store mbuf and session ctxt */
312 	fle = (struct qbman_fle *)rte_malloc(NULL,
313 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
314 			RTE_CACHE_LINE_SIZE);
315 	if (unlikely(!fle)) {
316 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
317 		return -ENOMEM;
318 	}
319 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
320 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
321 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
322 
323 	op_fle = fle + 1;
324 	ip_fle = fle + 2;
325 	sge = fle + 3;
326 
327 	/* Save the shared descriptor */
328 	flc = &priv->flc_desc[0].flc;
329 
330 	/* Configure FD as a FRAME LIST */
331 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
332 	DPAA2_SET_FD_COMPOUND_FMT(fd);
333 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
334 
335 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
336 		   "iv-len=%d data_off: 0x%x\n",
337 		   sym_op->aead.data.offset,
338 		   sym_op->aead.data.length,
339 		   sess->digest_length,
340 		   sess->iv.length,
341 		   sym_op->m_src->data_off);
342 
343 	/* Configure Output FLE with Scatter/Gather Entry */
344 	DPAA2_SET_FLE_SG_EXT(op_fle);
345 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
346 
347 	if (auth_only_len)
348 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
349 
350 	op_fle->length = (sess->dir == DIR_ENC) ?
351 			(sym_op->aead.data.length + icv_len) :
352 			sym_op->aead.data.length;
353 
354 	/* Configure Output SGE for Encap/Decap */
355 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
356 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
357 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
358 
359 	mbuf = mbuf->next;
360 	/* o/p segs */
361 	while (mbuf) {
362 		sge++;
363 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
364 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
365 		sge->length = mbuf->data_len;
366 		mbuf = mbuf->next;
367 	}
368 	sge->length -= icv_len;
369 
370 	if (sess->dir == DIR_ENC) {
371 		sge++;
372 		DPAA2_SET_FLE_ADDR(sge,
373 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
374 		sge->length = icv_len;
375 	}
376 	DPAA2_SET_FLE_FIN(sge);
377 
378 	sge++;
379 	mbuf = sym_op->m_src;
380 
381 	/* Configure Input FLE with Scatter/Gather Entry */
382 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
383 	DPAA2_SET_FLE_SG_EXT(ip_fle);
384 	DPAA2_SET_FLE_FIN(ip_fle);
385 	ip_fle->length = (sess->dir == DIR_ENC) ?
386 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
387 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
388 		 icv_len);
389 
390 	/* Configure Input SGE for Encap/Decap */
391 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
392 	sge->length = sess->iv.length;
393 
394 	sge++;
395 	if (auth_only_len) {
396 		DPAA2_SET_FLE_ADDR(sge,
397 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
398 		sge->length = auth_only_len;
399 		sge++;
400 	}
401 
402 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
403 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
404 				mbuf->data_off);
405 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
406 
407 	mbuf = mbuf->next;
408 	/* i/p segs */
409 	while (mbuf) {
410 		sge++;
411 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
412 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
413 		sge->length = mbuf->data_len;
414 		mbuf = mbuf->next;
415 	}
416 
417 	if (sess->dir == DIR_DEC) {
418 		sge++;
419 		old_icv = (uint8_t *)(sge + 1);
420 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
421 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
422 		sge->length = icv_len;
423 	}
424 
425 	DPAA2_SET_FLE_FIN(sge);
426 	if (auth_only_len) {
427 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
428 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
429 	}
430 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
431 
432 	return 0;
433 }
434 
435 static inline int
436 build_authenc_gcm_fd(dpaa2_sec_session *sess,
437 		     struct rte_crypto_op *op,
438 		     struct qbman_fd *fd, uint16_t bpid)
439 {
440 	struct rte_crypto_sym_op *sym_op = op->sym;
441 	struct ctxt_priv *priv = sess->ctxt;
442 	struct qbman_fle *fle, *sge;
443 	struct sec_flow_context *flc;
444 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
445 	int icv_len = sess->digest_length, retval;
446 	uint8_t *old_icv;
447 	struct rte_mbuf *dst;
448 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
449 			sess->iv.offset);
450 
451 	if (sym_op->m_dst)
452 		dst = sym_op->m_dst;
453 	else
454 		dst = sym_op->m_src;
455 
456 	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
457 	 * Currently we do not know which FLE has the mbuf stored.
458 	 * So while retrieving we can go back 1 FLE from the FD ADDR
459 	 * to get the MBUF Addr from the previous FLE.
460 	 * A better approach would be to use the inline Mbuf.
461 	 */
462 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
463 	if (retval) {
464 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
465 		return -ENOMEM;
466 	}
467 	memset(fle, 0, FLE_POOL_BUF_SIZE);
468 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
469 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
470 	fle = fle + 1;
471 	sge = fle + 2;
472 	if (likely(bpid < MAX_BPID)) {
473 		DPAA2_SET_FD_BPID(fd, bpid);
474 		DPAA2_SET_FLE_BPID(fle, bpid);
475 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
476 		DPAA2_SET_FLE_BPID(sge, bpid);
477 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
478 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
479 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
480 	} else {
481 		DPAA2_SET_FD_IVP(fd);
482 		DPAA2_SET_FLE_IVP(fle);
483 		DPAA2_SET_FLE_IVP((fle + 1));
484 		DPAA2_SET_FLE_IVP(sge);
485 		DPAA2_SET_FLE_IVP((sge + 1));
486 		DPAA2_SET_FLE_IVP((sge + 2));
487 		DPAA2_SET_FLE_IVP((sge + 3));
488 	}
489 
490 	/* Save the shared descriptor */
491 	flc = &priv->flc_desc[0].flc;
492 	/* Configure FD as a FRAME LIST */
493 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
494 	DPAA2_SET_FD_COMPOUND_FMT(fd);
495 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
496 
497 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
498 		   "iv-len=%d data_off: 0x%x\n",
499 		   sym_op->aead.data.offset,
500 		   sym_op->aead.data.length,
501 		   sess->digest_length,
502 		   sess->iv.length,
503 		   sym_op->m_src->data_off);
504 
505 	/* Configure Output FLE with Scatter/Gather Entry */
506 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
507 	if (auth_only_len)
508 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
509 	fle->length = (sess->dir == DIR_ENC) ?
510 			(sym_op->aead.data.length + icv_len) :
511 			sym_op->aead.data.length;
512 
513 	DPAA2_SET_FLE_SG_EXT(fle);
514 
515 	/* Configure Output SGE for Encap/Decap */
516 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
517 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
518 	sge->length = sym_op->aead.data.length;
519 
520 	if (sess->dir == DIR_ENC) {
521 		sge++;
522 		DPAA2_SET_FLE_ADDR(sge,
523 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
524 		sge->length = sess->digest_length;
525 	}
526 	DPAA2_SET_FLE_FIN(sge);
527 
528 	sge++;
529 	fle++;
530 
531 	/* Configure Input FLE with Scatter/Gather Entry */
532 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
533 	DPAA2_SET_FLE_SG_EXT(fle);
534 	DPAA2_SET_FLE_FIN(fle);
535 	fle->length = (sess->dir == DIR_ENC) ?
536 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
537 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
538 		 sess->digest_length);
539 
540 	/* Configure Input SGE for Encap/Decap */
541 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
542 	sge->length = sess->iv.length;
543 	sge++;
544 	if (auth_only_len) {
545 		DPAA2_SET_FLE_ADDR(sge,
546 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
547 		sge->length = auth_only_len;
548 		DPAA2_SET_FLE_BPID(sge, bpid);
549 		sge++;
550 	}
551 
552 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
553 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
554 				sym_op->m_src->data_off);
555 	sge->length = sym_op->aead.data.length;
556 	if (sess->dir == DIR_DEC) {
557 		sge++;
558 		old_icv = (uint8_t *)(sge + 1);
559 		memcpy(old_icv,	sym_op->aead.digest.data,
560 		       sess->digest_length);
561 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
562 		sge->length = sess->digest_length;
563 	}
564 	DPAA2_SET_FLE_FIN(sge);
565 
566 	if (auth_only_len) {
567 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
568 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
569 	}
570 
571 	DPAA2_SET_FD_LEN(fd, fle->length);
572 	return 0;
573 }
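
/* Hedged sketch of the op fields the two GCM builders above consume.
 * Offsets are fixed at session creation; IV_OFFSET is an assumed
 * application-chosen offset into the op private data area:
 *
 *   uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *   memcpy(iv, nonce, 12);                   // GCM IV, typically 12 bytes
 *   op->sym->aead.data.offset = 0;           // within m_src
 *   op->sym->aead.data.length = payload_len;
 *   op->sym->aead.digest.data = tag_va;      // ICV, digest_length bytes
 *   op->sym->aead.aad.data    = aad_va;      // auth-only data, if any
 */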
574 
575 static inline int
576 build_authenc_sg_fd(dpaa2_sec_session *sess,
577 		 struct rte_crypto_op *op,
578 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
579 {
580 	struct rte_crypto_sym_op *sym_op = op->sym;
581 	struct ctxt_priv *priv = sess->ctxt;
582 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
583 	struct sec_flow_context *flc;
584 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
585 				sym_op->auth.data.offset;
586 	uint16_t auth_tail_len = sym_op->auth.data.length -
587 				sym_op->cipher.data.length - auth_hdr_len;
588 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
589 	int icv_len = sess->digest_length;
590 	uint8_t *old_icv;
591 	struct rte_mbuf *mbuf;
592 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
593 			sess->iv.offset);
594 
595 	if (sym_op->m_dst)
596 		mbuf = sym_op->m_dst;
597 	else
598 		mbuf = sym_op->m_src;
599 
600 	/* first FLE entry used to store mbuf and session ctxt */
601 	fle = (struct qbman_fle *)rte_malloc(NULL,
602 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
603 			RTE_CACHE_LINE_SIZE);
604 	if (unlikely(!fle)) {
605 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
606 		return -ENOMEM;
607 	}
608 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
609 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
610 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
611 
612 	op_fle = fle + 1;
613 	ip_fle = fle + 2;
614 	sge = fle + 3;
615 
616 	/* Save the shared descriptor */
617 	flc = &priv->flc_desc[0].flc;
618 
619 	/* Configure FD as a FRAME LIST */
620 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
621 	DPAA2_SET_FD_COMPOUND_FMT(fd);
622 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
623 
624 	DPAA2_SEC_DP_DEBUG(
625 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
626 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
627 		sym_op->auth.data.offset,
628 		sym_op->auth.data.length,
629 		sess->digest_length,
630 		sym_op->cipher.data.offset,
631 		sym_op->cipher.data.length,
632 		sess->iv.length,
633 		sym_op->m_src->data_off);
634 
635 	/* Configure Output FLE with Scatter/Gather Entry */
636 	DPAA2_SET_FLE_SG_EXT(op_fle);
637 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
638 
639 	if (auth_only_len)
640 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
641 
642 	op_fle->length = (sess->dir == DIR_ENC) ?
643 			(sym_op->cipher.data.length + icv_len) :
644 			sym_op->cipher.data.length;
645 
646 	/* Configure Output SGE for Encap/Decap */
647 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
648 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
649 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
650 
651 	mbuf = mbuf->next;
652 	/* o/p segs */
653 	while (mbuf) {
654 		sge++;
655 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
656 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
657 		sge->length = mbuf->data_len;
658 		mbuf = mbuf->next;
659 	}
660 	sge->length -= icv_len;
661 
662 	if (sess->dir == DIR_ENC) {
663 		sge++;
664 		DPAA2_SET_FLE_ADDR(sge,
665 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
666 		sge->length = icv_len;
667 	}
668 	DPAA2_SET_FLE_FIN(sge);
669 
670 	sge++;
671 	mbuf = sym_op->m_src;
672 
673 	/* Configure Input FLE with Scatter/Gather Entry */
674 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
675 	DPAA2_SET_FLE_SG_EXT(ip_fle);
676 	DPAA2_SET_FLE_FIN(ip_fle);
677 	ip_fle->length = (sess->dir == DIR_ENC) ?
678 			(sym_op->auth.data.length + sess->iv.length) :
679 			(sym_op->auth.data.length + sess->iv.length +
680 			 icv_len);
681 
682 	/* Configure Input SGE for Encap/Decap */
683 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
684 	sge->length = sess->iv.length;
685 
686 	sge++;
687 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
688 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
689 				mbuf->data_off);
690 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
691 
692 	mbuf = mbuf->next;
693 	/* i/p segs */
694 	while (mbuf) {
695 		sge++;
696 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
697 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
698 		sge->length = mbuf->data_len;
699 		mbuf = mbuf->next;
700 	}
701 	sge->length -= icv_len;
702 
703 	if (sess->dir == DIR_DEC) {
704 		sge++;
705 		old_icv = (uint8_t *)(sge + 1);
706 		memcpy(old_icv,	sym_op->auth.digest.data,
707 		       icv_len);
708 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
709 		sge->length = icv_len;
710 	}
711 
712 	DPAA2_SET_FLE_FIN(sge);
713 	if (auth_only_len) {
714 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
715 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
716 	}
717 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
718 
719 	return 0;
720 }
721 
722 static inline int
723 build_authenc_fd(dpaa2_sec_session *sess,
724 		 struct rte_crypto_op *op,
725 		 struct qbman_fd *fd, uint16_t bpid)
726 {
727 	struct rte_crypto_sym_op *sym_op = op->sym;
728 	struct ctxt_priv *priv = sess->ctxt;
729 	struct qbman_fle *fle, *sge;
730 	struct sec_flow_context *flc;
731 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
732 				sym_op->auth.data.offset;
733 	uint16_t auth_tail_len = sym_op->auth.data.length -
734 				sym_op->cipher.data.length - auth_hdr_len;
735 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
736 
737 	int icv_len = sess->digest_length, retval;
738 	uint8_t *old_icv;
739 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
740 			sess->iv.offset);
741 	struct rte_mbuf *dst;
742 
743 	if (sym_op->m_dst)
744 		dst = sym_op->m_dst;
745 	else
746 		dst = sym_op->m_src;
747 
748 	/* we are using the first FLE entry to store Mbuf.
749 	 * Currently we do not know which FLE has the mbuf stored.
750 	 * So while retrieving we can go back 1 FLE from the FD ADDR
751 	 * to get the MBUF Addr from the previous FLE.
752 	 * A better approach would be to use the inline Mbuf.
753 	 */
754 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
755 	if (retval) {
756 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
757 		return -ENOMEM;
758 	}
759 	memset(fle, 0, FLE_POOL_BUF_SIZE);
760 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
761 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
762 	fle = fle + 1;
763 	sge = fle + 2;
764 	if (likely(bpid < MAX_BPID)) {
765 		DPAA2_SET_FD_BPID(fd, bpid);
766 		DPAA2_SET_FLE_BPID(fle, bpid);
767 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
768 		DPAA2_SET_FLE_BPID(sge, bpid);
769 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
770 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
771 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
772 	} else {
773 		DPAA2_SET_FD_IVP(fd);
774 		DPAA2_SET_FLE_IVP(fle);
775 		DPAA2_SET_FLE_IVP((fle + 1));
776 		DPAA2_SET_FLE_IVP(sge);
777 		DPAA2_SET_FLE_IVP((sge + 1));
778 		DPAA2_SET_FLE_IVP((sge + 2));
779 		DPAA2_SET_FLE_IVP((sge + 3));
780 	}
781 
782 	/* Save the shared descriptor */
783 	flc = &priv->flc_desc[0].flc;
784 	/* Configure FD as a FRAME LIST */
785 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
786 	DPAA2_SET_FD_COMPOUND_FMT(fd);
787 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
788 
789 	DPAA2_SEC_DP_DEBUG(
790 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
791 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
792 		sym_op->auth.data.offset,
793 		sym_op->auth.data.length,
794 		sess->digest_length,
795 		sym_op->cipher.data.offset,
796 		sym_op->cipher.data.length,
797 		sess->iv.length,
798 		sym_op->m_src->data_off);
799 
800 	/* Configure Output FLE with Scatter/Gather Entry */
801 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
802 	if (auth_only_len)
803 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
804 	fle->length = (sess->dir == DIR_ENC) ?
805 			(sym_op->cipher.data.length + icv_len) :
806 			sym_op->cipher.data.length;
807 
808 	DPAA2_SET_FLE_SG_EXT(fle);
809 
810 	/* Configure Output SGE for Encap/Decap */
811 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
812 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
813 				dst->data_off);
814 	sge->length = sym_op->cipher.data.length;
815 
816 	if (sess->dir == DIR_ENC) {
817 		sge++;
818 		DPAA2_SET_FLE_ADDR(sge,
819 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
820 		sge->length = sess->digest_length;
821 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
822 					sess->iv.length));
823 	}
824 	DPAA2_SET_FLE_FIN(sge);
825 
826 	sge++;
827 	fle++;
828 
829 	/* Configure Input FLE with Scatter/Gather Entry */
830 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
831 	DPAA2_SET_FLE_SG_EXT(fle);
832 	DPAA2_SET_FLE_FIN(fle);
833 	fle->length = (sess->dir == DIR_ENC) ?
834 			(sym_op->auth.data.length + sess->iv.length) :
835 			(sym_op->auth.data.length + sess->iv.length +
836 			 sess->digest_length);
837 
838 	/* Configure Input SGE for Encap/Decap */
839 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
840 	sge->length = sess->iv.length;
841 	sge++;
842 
843 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
844 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
845 				sym_op->m_src->data_off);
846 	sge->length = sym_op->auth.data.length;
847 	if (sess->dir == DIR_DEC) {
848 		sge++;
849 		old_icv = (uint8_t *)(sge + 1);
850 		memcpy(old_icv,	sym_op->auth.digest.data,
851 		       sess->digest_length);
852 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
853 		sge->length = sess->digest_length;
854 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
855 				 sess->digest_length +
856 				 sess->iv.length));
857 	}
858 	DPAA2_SET_FLE_FIN(sge);
859 	if (auth_only_len) {
860 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
861 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
862 	}
863 	return 0;
864 }
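
/* Worked example of the auth_only_len encoding used by the two AUTHENC
 * builders above: with auth.data = {offset 0, length 64} and
 * cipher.data = {offset 16, length 32},
 *
 *   auth_hdr_len  = 16 - 0           = 16   (authenticated header)
 *   auth_tail_len = 64 - 32 - 16     = 16   (authenticated trailer)
 *   auth_only_len = (16 << 16) | 16  = 0x00100010
 */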
865 
866 static inline int build_auth_sg_fd(
867 		dpaa2_sec_session *sess,
868 		struct rte_crypto_op *op,
869 		struct qbman_fd *fd,
870 		__rte_unused uint16_t bpid)
871 {
872 	struct rte_crypto_sym_op *sym_op = op->sym;
873 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
874 	struct sec_flow_context *flc;
875 	struct ctxt_priv *priv = sess->ctxt;
876 	int data_len, data_offset;
877 	uint8_t *old_digest;
878 	struct rte_mbuf *mbuf;
879 
880 	data_len = sym_op->auth.data.length;
881 	data_offset = sym_op->auth.data.offset;
882 
883 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
884 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
885 		if ((data_len & 7) || (data_offset & 7)) {
886 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
887 			return -ENOTSUP;
888 		}
889 
890 		data_len = data_len >> 3;
891 		data_offset = data_offset >> 3;
892 	}
893 
894 	mbuf = sym_op->m_src;
895 	fle = (struct qbman_fle *)rte_malloc(NULL,
896 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
897 			RTE_CACHE_LINE_SIZE);
898 	if (unlikely(!fle)) {
899 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
900 		return -ENOMEM;
901 	}
902 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
903 	/* first FLE entry used to store mbuf and session ctxt */
904 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
905 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
906 	op_fle = fle + 1;
907 	ip_fle = fle + 2;
908 	sge = fle + 3;
909 
910 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
911 	/* sg FD */
912 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
913 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
914 	DPAA2_SET_FD_COMPOUND_FMT(fd);
915 
916 	/* o/p fle */
917 	DPAA2_SET_FLE_ADDR(op_fle,
918 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
919 	op_fle->length = sess->digest_length;
920 
921 	/* i/p fle */
922 	DPAA2_SET_FLE_SG_EXT(ip_fle);
923 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
924 	ip_fle->length = data_len;
925 
926 	if (sess->iv.length) {
927 		uint8_t *iv_ptr;
928 
929 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
930 						   sess->iv.offset);
931 
932 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
933 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
934 			sge->length = 12;
935 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
936 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
937 			sge->length = 8;
938 		} else {
939 			sge->length = sess->iv.length;
940 		}
941 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
942 		ip_fle->length += sge->length;
943 		sge++;
944 	}
945 	/* i/p 1st seg */
946 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
947 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
948 
949 	if (data_len <= (mbuf->data_len - data_offset)) {
950 		sge->length = data_len;
951 		data_len = 0;
952 	} else {
953 		sge->length = mbuf->data_len - data_offset;
954 
955 		/* remaining i/p segs */
956 		while ((data_len = data_len - sge->length) &&
957 		       (mbuf = mbuf->next)) {
958 			sge++;
959 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
960 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
961 			if (data_len > mbuf->data_len)
962 				sge->length = mbuf->data_len;
963 			else
964 				sge->length = data_len;
965 		}
966 	}
967 
968 	if (sess->dir == DIR_DEC) {
969 		/* Digest verification case */
970 		sge++;
971 		old_digest = (uint8_t *)(sge + 1);
972 		rte_memcpy(old_digest, sym_op->auth.digest.data,
973 			   sess->digest_length);
974 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
975 		sge->length = sess->digest_length;
976 		ip_fle->length += sess->digest_length;
977 	}
978 	DPAA2_SET_FLE_FIN(sge);
979 	DPAA2_SET_FLE_FIN(ip_fle);
980 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
981 
982 	return 0;
983 }
984 
985 static inline int
986 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
987 	      struct qbman_fd *fd, uint16_t bpid)
988 {
989 	struct rte_crypto_sym_op *sym_op = op->sym;
990 	struct qbman_fle *fle, *sge;
991 	struct sec_flow_context *flc;
992 	struct ctxt_priv *priv = sess->ctxt;
993 	int data_len, data_offset;
994 	uint8_t *old_digest;
995 	int retval;
996 
997 	data_len = sym_op->auth.data.length;
998 	data_offset = sym_op->auth.data.offset;
999 
1000 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1001 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1002 		if ((data_len & 7) || (data_offset & 7)) {
1003 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1004 			return -ENOTSUP;
1005 		}
1006 
1007 		data_len = data_len >> 3;
1008 		data_offset = data_offset >> 3;
1009 	}
1010 
1011 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1012 	if (retval) {
1013 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1014 		return -ENOMEM;
1015 	}
1016 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1017 	/* TODO we are using the first FLE entry to store Mbuf.
1018 	 * Currently we do not know which FLE has the mbuf stored.
1019 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1020 	 * to get the MBUF Addr from the previous FLE.
1021 	 * A better approach would be to use the inline Mbuf.
1022 	 */
1023 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1024 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1025 	fle = fle + 1;
1026 	sge = fle + 2;
1027 
1028 	if (likely(bpid < MAX_BPID)) {
1029 		DPAA2_SET_FD_BPID(fd, bpid);
1030 		DPAA2_SET_FLE_BPID(fle, bpid);
1031 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1032 		DPAA2_SET_FLE_BPID(sge, bpid);
1033 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1034 	} else {
1035 		DPAA2_SET_FD_IVP(fd);
1036 		DPAA2_SET_FLE_IVP(fle);
1037 		DPAA2_SET_FLE_IVP((fle + 1));
1038 		DPAA2_SET_FLE_IVP(sge);
1039 		DPAA2_SET_FLE_IVP((sge + 1));
1040 	}
1041 
1042 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1043 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1044 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1045 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1046 
1047 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1048 	fle->length = sess->digest_length;
1049 	fle++;
1050 
1051 	/* Setting input FLE */
1052 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1053 	DPAA2_SET_FLE_SG_EXT(fle);
1054 	fle->length = data_len;
1055 
1056 	if (sess->iv.length) {
1057 		uint8_t *iv_ptr;
1058 
1059 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1060 						   sess->iv.offset);
1061 
1062 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1063 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1064 			sge->length = 12;
1065 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1066 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1067 			sge->length = 8;
1068 		} else {
1069 			sge->length = sess->iv.length;
1070 		}
1071 
1072 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1073 		fle->length = fle->length + sge->length;
1074 		sge++;
1075 	}
1076 
1077 	/* Setting data to authenticate */
1078 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1079 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1080 	sge->length = data_len;
1081 
1082 	if (sess->dir == DIR_DEC) {
1083 		sge++;
1084 		old_digest = (uint8_t *)(sge + 1);
1085 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1086 			   sess->digest_length);
1087 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1088 		sge->length = sess->digest_length;
1089 		fle->length = fle->length + sess->digest_length;
1090 	}
1091 
1092 	DPAA2_SET_FLE_FIN(sge);
1093 	DPAA2_SET_FLE_FIN(fle);
1094 	DPAA2_SET_FD_LEN(fd, fle->length);
1095 
1096 	return 0;
1097 }
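
/* Note: per the cryptodev API, auth data offset/length for SNOW 3G UIA2
 * and ZUC EIA3 are expressed in bits, which is why the two AUTH builders
 * above reject values that are not byte aligned and then shift right by
 * 3. A hedged example for a 128-byte buffer:
 *
 *   op->sym->auth.data.offset = 0;
 *   op->sym->auth.data.length = 128 << 3;    // 1024 bits
 *
 * The same convention applies to SNOW 3G UEA2 / ZUC EEA3 in the CIPHER
 * builders below.
 */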
1098 
1099 static int
1100 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1101 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1102 {
1103 	struct rte_crypto_sym_op *sym_op = op->sym;
1104 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1105 	int data_len, data_offset;
1106 	struct sec_flow_context *flc;
1107 	struct ctxt_priv *priv = sess->ctxt;
1108 	struct rte_mbuf *mbuf;
1109 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1110 			sess->iv.offset);
1111 
1112 	data_len = sym_op->cipher.data.length;
1113 	data_offset = sym_op->cipher.data.offset;
1114 
1115 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1116 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1117 		if ((data_len & 7) || (data_offset & 7)) {
1118 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1119 			return -ENOTSUP;
1120 		}
1121 
1122 		data_len = data_len >> 3;
1123 		data_offset = data_offset >> 3;
1124 	}
1125 
1126 	if (sym_op->m_dst)
1127 		mbuf = sym_op->m_dst;
1128 	else
1129 		mbuf = sym_op->m_src;
1130 
1131 	/* first FLE entry used to store mbuf and session ctxt */
1132 	fle = (struct qbman_fle *)rte_malloc(NULL,
1133 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1134 			RTE_CACHE_LINE_SIZE);
1135 	if (!fle) {
1136 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1137 		return -ENOMEM;
1138 	}
1139 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1141 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1142 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1143 
1144 	op_fle = fle + 1;
1145 	ip_fle = fle + 2;
1146 	sge = fle + 3;
1147 
1148 	flc = &priv->flc_desc[0].flc;
1149 
1150 	DPAA2_SEC_DP_DEBUG(
1151 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1152 		" data_off: 0x%x\n",
1153 		data_offset,
1154 		data_len,
1155 		sess->iv.length,
1156 		sym_op->m_src->data_off);
1157 
1158 	/* o/p fle */
1159 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1160 	op_fle->length = data_len;
1161 	DPAA2_SET_FLE_SG_EXT(op_fle);
1162 
1163 	/* o/p 1st seg */
1164 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1165 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1166 	sge->length = mbuf->data_len - data_offset;
1167 
1168 	mbuf = mbuf->next;
1169 	/* o/p segs */
1170 	while (mbuf) {
1171 		sge++;
1172 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1173 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1174 		sge->length = mbuf->data_len;
1175 		mbuf = mbuf->next;
1176 	}
1177 	DPAA2_SET_FLE_FIN(sge);
1178 
1179 	DPAA2_SEC_DP_DEBUG(
1180 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1181 		flc, fle, fle->addr_hi, fle->addr_lo,
1182 		fle->length);
1183 
1184 	/* i/p fle */
1185 	mbuf = sym_op->m_src;
1186 	sge++;
1187 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1188 	ip_fle->length = sess->iv.length + data_len;
1189 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1190 
1191 	/* i/p IV */
1192 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1193 	DPAA2_SET_FLE_OFFSET(sge, 0);
1194 	sge->length = sess->iv.length;
1195 
1196 	sge++;
1197 
1198 	/* i/p 1st seg */
1199 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1200 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1201 	sge->length = mbuf->data_len - data_offset;
1202 
1203 	mbuf = mbuf->next;
1204 	/* i/p segs */
1205 	while (mbuf) {
1206 		sge++;
1207 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1208 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1209 		sge->length = mbuf->data_len;
1210 		mbuf = mbuf->next;
1211 	}
1212 	DPAA2_SET_FLE_FIN(sge);
1213 	DPAA2_SET_FLE_FIN(ip_fle);
1214 
1215 	/* sg fd */
1216 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1217 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1218 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1219 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1220 
1221 	DPAA2_SEC_DP_DEBUG(
1222 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1223 		" off =%d, len =%d\n",
1224 		DPAA2_GET_FD_ADDR(fd),
1225 		DPAA2_GET_FD_BPID(fd),
1226 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1227 		DPAA2_GET_FD_OFFSET(fd),
1228 		DPAA2_GET_FD_LEN(fd));
1229 	return 0;
1230 }
1231 
1232 static int
1233 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1234 		struct qbman_fd *fd, uint16_t bpid)
1235 {
1236 	struct rte_crypto_sym_op *sym_op = op->sym;
1237 	struct qbman_fle *fle, *sge;
1238 	int retval, data_len, data_offset;
1239 	struct sec_flow_context *flc;
1240 	struct ctxt_priv *priv = sess->ctxt;
1241 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1242 			sess->iv.offset);
1243 	struct rte_mbuf *dst;
1244 
1245 	data_len = sym_op->cipher.data.length;
1246 	data_offset = sym_op->cipher.data.offset;
1247 
1248 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1249 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1250 		if ((data_len & 7) || (data_offset & 7)) {
1251 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1252 			return -ENOTSUP;
1253 		}
1254 
1255 		data_len = data_len >> 3;
1256 		data_offset = data_offset >> 3;
1257 	}
1258 
1259 	if (sym_op->m_dst)
1260 		dst = sym_op->m_dst;
1261 	else
1262 		dst = sym_op->m_src;
1263 
1264 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1265 	if (retval) {
1266 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1267 		return -ENOMEM;
1268 	}
1269 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1270 	/* TODO we are using the first FLE entry to store Mbuf.
1271 	 * Currently we do not know which FLE has the mbuf stored.
1272 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1273 	 * to get the MBUF Addr from the previous FLE.
1274 	 * A better approach would be to use the inline Mbuf.
1275 	 */
1276 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1277 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1278 	fle = fle + 1;
1279 	sge = fle + 2;
1280 
1281 	if (likely(bpid < MAX_BPID)) {
1282 		DPAA2_SET_FD_BPID(fd, bpid);
1283 		DPAA2_SET_FLE_BPID(fle, bpid);
1284 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1285 		DPAA2_SET_FLE_BPID(sge, bpid);
1286 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1287 	} else {
1288 		DPAA2_SET_FD_IVP(fd);
1289 		DPAA2_SET_FLE_IVP(fle);
1290 		DPAA2_SET_FLE_IVP((fle + 1));
1291 		DPAA2_SET_FLE_IVP(sge);
1292 		DPAA2_SET_FLE_IVP((sge + 1));
1293 	}
1294 
1295 	flc = &priv->flc_desc[0].flc;
1296 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1297 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1298 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1299 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1300 
1301 	DPAA2_SEC_DP_DEBUG(
1302 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1303 		" data_off: 0x%x\n",
1304 		data_offset,
1305 		data_len,
1306 		sess->iv.length,
1307 		sym_op->m_src->data_off);
1308 
1309 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1310 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1311 
1312 	fle->length = data_len + sess->iv.length;
1313 
1314 	DPAA2_SEC_DP_DEBUG(
1315 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1316 		flc, fle, fle->addr_hi, fle->addr_lo,
1317 		fle->length);
1318 
1319 	fle++;
1320 
1321 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1322 	fle->length = data_len + sess->iv.length;
1323 
1324 	DPAA2_SET_FLE_SG_EXT(fle);
1325 
1326 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1327 	sge->length = sess->iv.length;
1328 
1329 	sge++;
1330 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1331 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1332 
1333 	sge->length = data_len;
1334 	DPAA2_SET_FLE_FIN(sge);
1335 	DPAA2_SET_FLE_FIN(fle);
1336 
1337 	DPAA2_SEC_DP_DEBUG(
1338 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1339 		" off =%d, len =%d\n",
1340 		DPAA2_GET_FD_ADDR(fd),
1341 		DPAA2_GET_FD_BPID(fd),
1342 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1343 		DPAA2_GET_FD_OFFSET(fd),
1344 		DPAA2_GET_FD_LEN(fd));
1345 
1346 	return 0;
1347 }
1348 
1349 static inline int
1350 build_sec_fd(struct rte_crypto_op *op,
1351 	     struct qbman_fd *fd, uint16_t bpid)
1352 {
1353 	int ret = -1;
1354 	dpaa2_sec_session *sess;
1355 
1356 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1357 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1358 				op->sym->session, cryptodev_driver_id);
1359 #ifdef RTE_LIB_SECURITY
1360 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1361 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1362 				op->sym->sec_session);
1363 #endif
1364 	else
1365 		return -ENOTSUP;
1366 
1367 	if (!sess)
1368 		return -EINVAL;
1369 
1370 	/* Either of the buffers is segmented */
1371 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1372 		  ((op->sym->m_dst != NULL) &&
1373 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1374 		switch (sess->ctxt_type) {
1375 		case DPAA2_SEC_CIPHER:
1376 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1377 			break;
1378 		case DPAA2_SEC_AUTH:
1379 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1380 			break;
1381 		case DPAA2_SEC_AEAD:
1382 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1383 			break;
1384 		case DPAA2_SEC_CIPHER_HASH:
1385 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1386 			break;
1387 #ifdef RTE_LIB_SECURITY
1388 		case DPAA2_SEC_IPSEC:
1389 		case DPAA2_SEC_PDCP:
1390 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1391 			break;
1392 #endif
1393 		case DPAA2_SEC_HASH_CIPHER:
1394 		default:
1395 			DPAA2_SEC_ERR("error: Unsupported session");
1396 		}
1397 	} else {
1398 		switch (sess->ctxt_type) {
1399 		case DPAA2_SEC_CIPHER:
1400 			ret = build_cipher_fd(sess, op, fd, bpid);
1401 			break;
1402 		case DPAA2_SEC_AUTH:
1403 			ret = build_auth_fd(sess, op, fd, bpid);
1404 			break;
1405 		case DPAA2_SEC_AEAD:
1406 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1407 			break;
1408 		case DPAA2_SEC_CIPHER_HASH:
1409 			ret = build_authenc_fd(sess, op, fd, bpid);
1410 			break;
1411 #ifdef RTE_LIB_SECURITY
1412 		case DPAA2_SEC_IPSEC:
1413 			ret = build_proto_fd(sess, op, fd, bpid);
1414 			break;
1415 		case DPAA2_SEC_PDCP:
1416 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1417 			break;
1418 #endif
1419 		case DPAA2_SEC_HASH_CIPHER:
1420 		default:
1421 			DPAA2_SEC_ERR("error: Unsupported session");
1422 			ret = -ENOTSUP;
1423 		}
1424 	}
1425 	return ret;
1426 }
1427 
1428 static uint16_t
1429 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1430 			uint16_t nb_ops)
1431 {
1432 	/* Function to transmit the frames to a given device and VQ */
1433 	uint32_t loop;
1434 	int32_t ret;
1435 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1436 	uint32_t frames_to_send, retry_count;
1437 	struct qbman_eq_desc eqdesc;
1438 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1439 	struct qbman_swp *swp;
1440 	uint16_t num_tx = 0;
1441 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1442 	/* TODO: need to support multiple buffer pools */
1443 	uint16_t bpid;
1444 	struct rte_mempool *mb_pool;
1445 
1446 	if (unlikely(nb_ops == 0))
1447 		return 0;
1448 
1449 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1450 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1451 		return 0;
1452 	}
1453 	/* Prepare enqueue descriptor */
1454 	qbman_eq_desc_clear(&eqdesc);
1455 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1456 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1457 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1458 
1459 	if (!DPAA2_PER_LCORE_DPIO) {
1460 		ret = dpaa2_affine_qbman_swp();
1461 		if (ret) {
1462 			DPAA2_SEC_ERR(
1463 				"Failed to allocate IO portal, tid: %d\n",
1464 				rte_gettid());
1465 			return 0;
1466 		}
1467 	}
1468 	swp = DPAA2_PER_LCORE_PORTAL;
1469 
1470 	while (nb_ops) {
1471 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1472 			dpaa2_eqcr_size : nb_ops;
1473 
1474 		for (loop = 0; loop < frames_to_send; loop++) {
1475 			if ((*ops)->sym->m_src->seqn) {
1476 				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1477 
1478 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1479 				DPAA2_PER_LCORE_DQRR_SIZE--;
1480 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1481 				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1482 			}
1483 
1484 			/* Clear the unused FD fields before sending */
1485 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1486 			mb_pool = (*ops)->sym->m_src->pool;
1487 			bpid = mempool_to_bpid(mb_pool);
1488 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1489 			if (ret) {
1490 				DPAA2_SEC_ERR("error: Improper packet contents"
1491 					      " for crypto operation");
1492 				goto skip_tx;
1493 			}
1494 			ops++;
1495 		}
1496 
1497 		loop = 0;
1498 		retry_count = 0;
1499 		while (loop < frames_to_send) {
1500 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1501 							 &fd_arr[loop],
1502 							 &flags[loop],
1503 							 frames_to_send - loop);
1504 			if (unlikely(ret < 0)) {
1505 				retry_count++;
1506 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1507 					num_tx += loop;
1508 					nb_ops -= loop;
1509 					goto skip_tx;
1510 				}
1511 			} else {
1512 				loop += ret;
1513 				retry_count = 0;
1514 			}
1515 		}
1516 
1517 		num_tx += loop;
1518 		nb_ops -= loop;
1519 	}
1520 skip_tx:
1521 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1522 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1523 	return num_tx;
1524 }
1525 
1526 #ifdef RTE_LIB_SECURITY
1527 static inline struct rte_crypto_op *
1528 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1529 {
1530 	struct rte_crypto_op *op;
1531 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1532 	int16_t diff = 0;
1533 	dpaa2_sec_session *sess_priv __rte_unused;
1534 
1535 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1536 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1537 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1538 
1539 	diff = len - mbuf->pkt_len;
1540 	mbuf->pkt_len += diff;
1541 	mbuf->data_len += diff;
1542 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1543 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1544 	op->sym->aead.digest.phys_addr = 0L;
1545 
1546 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1547 				op->sym->sec_session);
1548 	if (sess_priv->dir == DIR_ENC)
1549 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1550 	else
1551 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1552 
1553 	return op;
1554 }
1555 #endif
1556 
1557 static inline struct rte_crypto_op *
1558 sec_fd_to_mbuf(const struct qbman_fd *fd)
1559 {
1560 	struct qbman_fle *fle;
1561 	struct rte_crypto_op *op;
1562 	struct ctxt_priv *priv;
1563 	struct rte_mbuf *dst, *src;
1564 
1565 #ifdef RTE_LIB_SECURITY
1566 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1567 		return sec_simple_fd_to_mbuf(fd);
1568 #endif
1569 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1570 
1571 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1572 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1573 
1574 	/* we are using the first FLE entry to store Mbuf.
1575 	 * Currently we do not know which FLE has the mbuf stored.
1576 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1577 	 * to get the MBUF Addr from the previous FLE.
1578 	 * A better approach would be to use the inline Mbuf.
1579 	 */
1580 
1581 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1582 		/* TODO complete it. */
1583 		DPAA2_SEC_ERR("error: non inline buffer");
1584 		return NULL;
1585 	}
1586 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1587 
1588 	/* Prefetch op */
1589 	src = op->sym->m_src;
1590 	rte_prefetch0(src);
1591 
1592 	if (op->sym->m_dst) {
1593 		dst = op->sym->m_dst;
1594 		rte_prefetch0(dst);
1595 	} else
1596 		dst = src;
1597 
1598 #ifdef RTE_LIB_SECURITY
1599 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1600 		uint16_t len = DPAA2_GET_FD_LEN(fd);
1601 		dst->pkt_len = len;
1602 		while (dst->next != NULL) {
1603 			len -= dst->data_len;
1604 			dst = dst->next;
1605 		}
1606 		dst->data_len = len;
1607 	}
1608 #endif
1609 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1610 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1611 		(void *)dst,
1612 		dst->buf_addr,
1613 		DPAA2_GET_FD_ADDR(fd),
1614 		DPAA2_GET_FD_BPID(fd),
1615 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1616 		DPAA2_GET_FD_OFFSET(fd),
1617 		DPAA2_GET_FD_LEN(fd));
1618 
1619 	/* free the fle memory */
1620 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1621 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1622 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1623 	} else
1624 		rte_free((void *)(fle-1));
1625 
1626 	return op;
1627 }
1628 
1629 static uint16_t
1630 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1631 			uint16_t nb_ops)
1632 {
1633 	/* Function responsible for receiving frames for a given device and VQ */
1634 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1635 	struct qbman_result *dq_storage;
1636 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1637 	int ret, num_rx = 0;
1638 	uint8_t is_last = 0, status;
1639 	struct qbman_swp *swp;
1640 	const struct qbman_fd *fd;
1641 	struct qbman_pull_desc pulldesc;
1642 
1643 	if (!DPAA2_PER_LCORE_DPIO) {
1644 		ret = dpaa2_affine_qbman_swp();
1645 		if (ret) {
1646 			DPAA2_SEC_ERR(
1647 				"Failed to allocate IO portal, tid: %d\n",
1648 				rte_gettid());
1649 			return 0;
1650 		}
1651 	}
1652 	swp = DPAA2_PER_LCORE_PORTAL;
1653 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1654 
1655 	qbman_pull_desc_clear(&pulldesc);
1656 	qbman_pull_desc_set_numframes(&pulldesc,
1657 				      (nb_ops > dpaa2_dqrr_size) ?
1658 				      dpaa2_dqrr_size : nb_ops);
1659 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1660 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1661 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1662 				    1);
1663 
1664 	/* Issue a volatile dequeue command. */
1665 	while (1) {
1666 		if (qbman_swp_pull(swp, &pulldesc)) {
1667 			DPAA2_SEC_WARN(
1668 				"SEC VDQ command is not issued : QBMAN busy");
1669 			/* Portal was busy, try again */
1670 			continue;
1671 		}
1672 		break;
1673 	}
1674 
1675 	/* Receive the packets till the Last Dequeue entry is found with
1676 	 * respect to the above issued PULL command.
1677 	 */
1678 	while (!is_last) {
1679 		/* Check if the previously issued command is completed.
1680 		 * The SWP also appears to be shared between the Ethernet
1681 		 * driver and the SEC driver.
1682 		 */
1683 		while (!qbman_check_command_complete(dq_storage))
1684 			;
1685 
1686 		/* Loop until the dq_storage is updated with
1687 		 * a new token by QBMAN
1688 		 */
1689 		while (!qbman_check_new_result(dq_storage))
1690 			;
1691 		/* Check whether the last pull command has expired and
1692 		 * set the condition for loop termination.
1693 		 */
1694 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1695 			is_last = 1;
1696 			/* Check for valid frame. */
1697 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1698 			if (unlikely(
1699 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1700 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1701 				continue;
1702 			}
1703 		}
1704 
1705 		fd = qbman_result_DQ_fd(dq_storage);
1706 		ops[num_rx] = sec_fd_to_mbuf(fd);
1707 
1708 		if (unlikely(fd->simple.frc)) {
1709 			/* TODO Parse SEC errors */
1710 			DPAA2_SEC_ERR("SEC returned Error - %x",
1711 				      fd->simple.frc);
1712 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1713 		} else {
1714 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1715 		}
1716 
1717 		num_rx++;
1718 		dq_storage++;
1719 	} /* End of Packet Rx loop */
1720 
1721 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1722 
1723 	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1724 	/* Return the total number of packets received to the DPAA2 app */
1725 	return num_rx;
1726 }
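
/* Hedged usage sketch: the two burst handlers above back the generic
 * cryptodev API, so an application drives them indirectly (dev_id and
 * qp_id are assumed to have been configured earlier):
 *
 *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *                                               ops, nb_ops);
 *   uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                               deq_ops, sent);
 */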
1727 
1728 /** Release queue pair */
1729 static int
1730 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1731 {
1732 	struct dpaa2_sec_qp *qp =
1733 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1734 
1735 	PMD_INIT_FUNC_TRACE();
1736 
1737 	if (qp->rx_vq.q_storage) {
1738 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1739 		rte_free(qp->rx_vq.q_storage);
1740 	}
1741 	rte_free(qp);
1742 
1743 	dev->data->queue_pairs[queue_pair_id] = NULL;
1744 
1745 	return 0;
1746 }
1747 
1748 /** Setup a queue pair */
1749 static int
1750 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1751 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1752 		__rte_unused int socket_id)
1753 {
1754 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1755 	struct dpaa2_sec_qp *qp;
1756 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1757 	struct dpseci_rx_queue_cfg cfg;
1758 	int32_t retcode;
1759 
1760 	PMD_INIT_FUNC_TRACE();
1761 
1762 	/* If the qp is already set up, there is nothing more to do. */
1763 	if (dev->data->queue_pairs[qp_id] != NULL) {
1764 		DPAA2_SEC_INFO("QP already setup");
1765 		return 0;
1766 	}
1767 
	DPAA2_SEC_DEBUG("dev = %p, queue = %d, conf = %p",
		    dev, qp_id, qp_conf);
1770 
1771 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1772 
1773 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1774 			RTE_CACHE_LINE_SIZE);
1775 	if (!qp) {
1776 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1777 		return -ENOMEM;
1778 	}
1779 
1780 	qp->rx_vq.crypto_data = dev->data;
1781 	qp->tx_vq.crypto_data = dev->data;
1782 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1783 		sizeof(struct queue_storage_info_t),
1784 		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
		return -ENOMEM;
	}
1789 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1790 
	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
		return -ENOMEM;
	}
1795 
1796 	dev->data->queue_pairs[qp_id] = qp;
1797 
1798 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1799 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1800 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1801 				      qp_id, &cfg);
1802 	return retcode;
1803 }
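
/*
 * Hedged usage sketch for the setup path above, as seen from an
 * application (not part of this driver; dev_id and the mempool names
 * are assumptions):
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	// reaches dpaa2_sec_queue_pair_setup() through the PMD ops
 *	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					     rte_socket_id());
 *
 * Note that this PMD ignores qp_conf and socket_id; both parameters
 * are tagged __rte_unused above.
 */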
1804 
/** Returns the size of the dpaa2_sec session structure */
1806 static unsigned int
1807 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1808 {
1809 	PMD_INIT_FUNC_TRACE();
1810 
1811 	return sizeof(dpaa2_sec_session);
1812 }
1813 
1814 static int
1815 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1816 		      struct rte_crypto_sym_xform *xform,
1817 		      dpaa2_sec_session *session)
1818 {
1819 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1820 	struct alginfo cipherdata;
1821 	int bufsize, ret = 0;
1822 	struct ctxt_priv *priv;
1823 	struct sec_flow_context *flc;
1824 
1825 	PMD_INIT_FUNC_TRACE();
1826 
1827 	/* For SEC CIPHER only one descriptor is required. */
1828 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1829 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1830 			RTE_CACHE_LINE_SIZE);
1831 	if (priv == NULL) {
1832 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1833 		return -ENOMEM;
1834 	}
1835 
1836 	priv->fle_pool = dev_priv->fle_pool;
1837 
1838 	flc = &priv->flc_desc[0].flc;
1839 
1840 	session->ctxt_type = DPAA2_SEC_CIPHER;
1841 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1842 			RTE_CACHE_LINE_SIZE);
1843 	if (session->cipher_key.data == NULL) {
1844 		DPAA2_SEC_ERR("No Memory for cipher key");
1845 		rte_free(priv);
1846 		return -ENOMEM;
1847 	}
1848 	session->cipher_key.length = xform->cipher.key.length;
1849 
1850 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1851 	       xform->cipher.key.length);
1852 	cipherdata.key = (size_t)session->cipher_key.data;
1853 	cipherdata.keylen = session->cipher_key.length;
1854 	cipherdata.key_enc_flags = 0;
1855 	cipherdata.key_type = RTA_DATA_IMM;
1856 
1857 	/* Set IV parameters */
1858 	session->iv.offset = xform->cipher.iv.offset;
1859 	session->iv.length = xform->cipher.iv.length;
1860 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1861 				DIR_ENC : DIR_DEC;
1862 
1863 	switch (xform->cipher.algo) {
1864 	case RTE_CRYPTO_CIPHER_AES_CBC:
1865 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1866 		cipherdata.algmode = OP_ALG_AAI_CBC;
1867 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1868 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1869 						SHR_NEVER, &cipherdata,
1870 						session->iv.length,
1871 						session->dir);
1872 		break;
1873 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1874 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1875 		cipherdata.algmode = OP_ALG_AAI_CBC;
1876 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1877 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1878 						SHR_NEVER, &cipherdata,
1879 						session->iv.length,
1880 						session->dir);
1881 		break;
1882 	case RTE_CRYPTO_CIPHER_DES_CBC:
1883 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
1884 		cipherdata.algmode = OP_ALG_AAI_CBC;
1885 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1886 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1887 						SHR_NEVER, &cipherdata,
1888 						session->iv.length,
1889 						session->dir);
1890 		break;
1891 	case RTE_CRYPTO_CIPHER_AES_CTR:
1892 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1893 		cipherdata.algmode = OP_ALG_AAI_CTR;
1894 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1895 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1896 						SHR_NEVER, &cipherdata,
1897 						session->iv.length,
1898 						session->dir);
1899 		break;
1900 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1901 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1902 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1903 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1904 					      &cipherdata,
1905 					      session->dir);
1906 		break;
1907 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1908 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1909 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1910 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1911 					      &cipherdata,
1912 					      session->dir);
1913 		break;
1914 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1915 	case RTE_CRYPTO_CIPHER_AES_F8:
1916 	case RTE_CRYPTO_CIPHER_AES_ECB:
1917 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1918 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1919 	case RTE_CRYPTO_CIPHER_AES_XTS:
1920 	case RTE_CRYPTO_CIPHER_ARC4:
1921 	case RTE_CRYPTO_CIPHER_NULL:
1922 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1923 			xform->cipher.algo);
1924 		ret = -ENOTSUP;
1925 		goto error_out;
1926 	default:
1927 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1928 			xform->cipher.algo);
1929 		ret = -ENOTSUP;
1930 		goto error_out;
1931 	}
1932 
1933 	if (bufsize < 0) {
1934 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1935 		ret = -EINVAL;
1936 		goto error_out;
1937 	}
1938 
1939 	flc->word1_sdl = (uint8_t)bufsize;
1940 	session->ctxt = priv;
1941 
1942 #ifdef CAAM_DESC_DEBUG
1943 	int i;
1944 	for (i = 0; i < bufsize; i++)
1945 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1946 #endif
1947 	return ret;
1948 
1949 error_out:
1950 	rte_free(session->cipher_key.data);
1951 	rte_free(priv);
1952 	return ret;
1953 }
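
/*
 * Illustrative xform that would reach dpaa2_sec_cipher_init() through
 * a cipher-only session (a sketch; the key material and iv_offset are
 * assumptions):
 *
 *	uint8_t aes_key[16] = { 0 };	// example key material
 *	struct rte_crypto_sym_xform xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			// IV is read from each op at this offset
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *
 * This selects the RTE_CRYPTO_CIPHER_AES_CBC case above, so a single
 * cnstr_shdsc_blkcipher() shared descriptor is built.
 */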
1954 
1955 static int
1956 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1957 		    struct rte_crypto_sym_xform *xform,
1958 		    dpaa2_sec_session *session)
1959 {
1960 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1961 	struct alginfo authdata;
1962 	int bufsize, ret = 0;
1963 	struct ctxt_priv *priv;
1964 	struct sec_flow_context *flc;
1965 
1966 	PMD_INIT_FUNC_TRACE();
1967 
1968 	/* For SEC AUTH three descriptors are required for various stages */
1969 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1970 			sizeof(struct ctxt_priv) + 3 *
1971 			sizeof(struct sec_flc_desc),
1972 			RTE_CACHE_LINE_SIZE);
1973 	if (priv == NULL) {
1974 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1975 		return -ENOMEM;
1976 	}
1977 
1978 	priv->fle_pool = dev_priv->fle_pool;
1979 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1980 
1981 	session->ctxt_type = DPAA2_SEC_AUTH;
1982 	session->auth_key.length = xform->auth.key.length;
1983 	if (xform->auth.key.length) {
1984 		session->auth_key.data = rte_zmalloc(NULL,
1985 			xform->auth.key.length,
1986 			RTE_CACHE_LINE_SIZE);
1987 		if (session->auth_key.data == NULL) {
1988 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1989 			rte_free(priv);
1990 			return -ENOMEM;
1991 		}
1992 		memcpy(session->auth_key.data, xform->auth.key.data,
1993 		       xform->auth.key.length);
1994 		authdata.key = (size_t)session->auth_key.data;
1995 		authdata.key_enc_flags = 0;
1996 		authdata.key_type = RTA_DATA_IMM;
1997 	}
1998 	authdata.keylen = session->auth_key.length;
1999 
2000 	session->digest_length = xform->auth.digest_length;
2001 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2002 				DIR_ENC : DIR_DEC;
2003 
2004 	switch (xform->auth.algo) {
2005 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2006 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2007 		authdata.algmode = OP_ALG_AAI_HMAC;
2008 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2009 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2010 					   1, 0, SHR_NEVER, &authdata,
2011 					   !session->dir,
2012 					   session->digest_length);
2013 		break;
2014 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2015 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2016 		authdata.algmode = OP_ALG_AAI_HMAC;
2017 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2018 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2019 					   1, 0, SHR_NEVER, &authdata,
2020 					   !session->dir,
2021 					   session->digest_length);
2022 		break;
2023 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2024 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2025 		authdata.algmode = OP_ALG_AAI_HMAC;
2026 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2027 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2028 					   1, 0, SHR_NEVER, &authdata,
2029 					   !session->dir,
2030 					   session->digest_length);
2031 		break;
2032 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2033 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2034 		authdata.algmode = OP_ALG_AAI_HMAC;
2035 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2036 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2037 					   1, 0, SHR_NEVER, &authdata,
2038 					   !session->dir,
2039 					   session->digest_length);
2040 		break;
2041 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2042 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2043 		authdata.algmode = OP_ALG_AAI_HMAC;
2044 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2045 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2046 					   1, 0, SHR_NEVER, &authdata,
2047 					   !session->dir,
2048 					   session->digest_length);
2049 		break;
2050 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2051 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2052 		authdata.algmode = OP_ALG_AAI_HMAC;
2053 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2054 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2055 					   1, 0, SHR_NEVER, &authdata,
2056 					   !session->dir,
2057 					   session->digest_length);
2058 		break;
2059 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2060 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2061 		authdata.algmode = OP_ALG_AAI_F9;
2062 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2063 		session->iv.offset = xform->auth.iv.offset;
2064 		session->iv.length = xform->auth.iv.length;
2065 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2066 					      1, 0, &authdata,
2067 					      !session->dir,
2068 					      session->digest_length);
2069 		break;
2070 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2071 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2072 		authdata.algmode = OP_ALG_AAI_F9;
2073 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2074 		session->iv.offset = xform->auth.iv.offset;
2075 		session->iv.length = xform->auth.iv.length;
2076 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2077 					   1, 0, &authdata,
2078 					   !session->dir,
2079 					   session->digest_length);
2080 		break;
2081 	case RTE_CRYPTO_AUTH_SHA1:
2082 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2083 		authdata.algmode = OP_ALG_AAI_HASH;
2084 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2085 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2086 					   1, 0, SHR_NEVER, &authdata,
2087 					   !session->dir,
2088 					   session->digest_length);
2089 		break;
2090 	case RTE_CRYPTO_AUTH_MD5:
2091 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2092 		authdata.algmode = OP_ALG_AAI_HASH;
2093 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2094 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2095 					   1, 0, SHR_NEVER, &authdata,
2096 					   !session->dir,
2097 					   session->digest_length);
2098 		break;
2099 	case RTE_CRYPTO_AUTH_SHA256:
2100 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2101 		authdata.algmode = OP_ALG_AAI_HASH;
2102 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2103 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2104 					   1, 0, SHR_NEVER, &authdata,
2105 					   !session->dir,
2106 					   session->digest_length);
2107 		break;
2108 	case RTE_CRYPTO_AUTH_SHA384:
2109 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2110 		authdata.algmode = OP_ALG_AAI_HASH;
2111 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2112 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2113 					   1, 0, SHR_NEVER, &authdata,
2114 					   !session->dir,
2115 					   session->digest_length);
2116 		break;
2117 	case RTE_CRYPTO_AUTH_SHA512:
2118 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2119 		authdata.algmode = OP_ALG_AAI_HASH;
2120 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2121 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2122 					   1, 0, SHR_NEVER, &authdata,
2123 					   !session->dir,
2124 					   session->digest_length);
2125 		break;
2126 	case RTE_CRYPTO_AUTH_SHA224:
2127 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2128 		authdata.algmode = OP_ALG_AAI_HASH;
2129 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2130 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2131 					   1, 0, SHR_NEVER, &authdata,
2132 					   !session->dir,
2133 					   session->digest_length);
2134 		break;
2135 	case RTE_CRYPTO_AUTH_AES_GMAC:
2136 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2137 	case RTE_CRYPTO_AUTH_AES_CMAC:
2138 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2139 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2140 	case RTE_CRYPTO_AUTH_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
2143 		ret = -ENOTSUP;
2144 		goto error_out;
2145 	default:
2146 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2147 			      xform->auth.algo);
2148 		ret = -ENOTSUP;
2149 		goto error_out;
2150 	}
2151 
2152 	if (bufsize < 0) {
2153 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2154 		ret = -EINVAL;
2155 		goto error_out;
2156 	}
2157 
2158 	flc->word1_sdl = (uint8_t)bufsize;
2159 	session->ctxt = priv;
2160 #ifdef CAAM_DESC_DEBUG
2161 	int i;
2162 	for (i = 0; i < bufsize; i++)
2163 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2164 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2165 #endif
2166 
2167 	return ret;
2168 
2169 error_out:
2170 	rte_free(session->auth_key.data);
2171 	rte_free(priv);
2172 	return ret;
2173 }
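
/*
 * Illustrative auth-only xform handled by dpaa2_sec_auth_init()
 * (a sketch; key material and digest size are assumptions):
 *
 *	uint8_t hmac_key[20] = { 0 };	// example key material
 *	struct rte_crypto_sym_xform xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = 20 },
 *			.digest_length = 20,
 *		},
 *	};
 *
 * This takes the RTE_CRYPTO_AUTH_SHA1_HMAC case above and builds the
 * shared descriptor with cnstr_shdsc_hmac().
 */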
2174 
2175 static int
2176 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2177 		    struct rte_crypto_sym_xform *xform,
2178 		    dpaa2_sec_session *session)
2179 {
2180 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2181 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2182 	struct alginfo aeaddata;
2183 	int bufsize;
2184 	struct ctxt_priv *priv;
2185 	struct sec_flow_context *flc;
2186 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2187 	int err, ret = 0;
2188 
2189 	PMD_INIT_FUNC_TRACE();
2190 
2191 	/* Set IV parameters */
2192 	session->iv.offset = aead_xform->iv.offset;
2193 	session->iv.length = aead_xform->iv.length;
2194 	session->ctxt_type = DPAA2_SEC_AEAD;
2195 
2196 	/* For SEC AEAD only one descriptor is required */
2197 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2198 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2199 			RTE_CACHE_LINE_SIZE);
2200 	if (priv == NULL) {
2201 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2202 		return -ENOMEM;
2203 	}
2204 
2205 	priv->fle_pool = dev_priv->fle_pool;
2206 	flc = &priv->flc_desc[0].flc;
2207 
2208 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2209 					       RTE_CACHE_LINE_SIZE);
2210 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2211 		DPAA2_SEC_ERR("No Memory for aead key");
2212 		rte_free(priv);
2213 		return -ENOMEM;
2214 	}
2215 	memcpy(session->aead_key.data, aead_xform->key.data,
2216 	       aead_xform->key.length);
2217 
2218 	session->digest_length = aead_xform->digest_length;
2219 	session->aead_key.length = aead_xform->key.length;
2220 	ctxt->auth_only_len = aead_xform->aad_length;
2221 
2222 	aeaddata.key = (size_t)session->aead_key.data;
2223 	aeaddata.keylen = session->aead_key.length;
2224 	aeaddata.key_enc_flags = 0;
2225 	aeaddata.key_type = RTA_DATA_IMM;
2226 
2227 	switch (aead_xform->algo) {
2228 	case RTE_CRYPTO_AEAD_AES_GCM:
2229 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2230 		aeaddata.algmode = OP_ALG_AAI_GCM;
2231 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2232 		break;
2233 	case RTE_CRYPTO_AEAD_AES_CCM:
2234 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2235 			      aead_xform->algo);
2236 		ret = -ENOTSUP;
2237 		goto error_out;
2238 	default:
2239 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2240 			      aead_xform->algo);
2241 		ret = -ENOTSUP;
2242 		goto error_out;
2243 	}
2244 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2245 				DIR_ENC : DIR_DEC;
2246 
2247 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2248 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2249 			       DESC_JOB_IO_LEN,
2250 			       (unsigned int *)priv->flc_desc[0].desc,
2251 			       &priv->flc_desc[0].desc[1], 1);
2252 
2253 	if (err < 0) {
2254 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2255 		ret = -EINVAL;
2256 		goto error_out;
2257 	}
2258 	if (priv->flc_desc[0].desc[1] & 1) {
2259 		aeaddata.key_type = RTA_DATA_IMM;
2260 	} else {
2261 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2262 		aeaddata.key_type = RTA_DATA_PTR;
2263 	}
2264 	priv->flc_desc[0].desc[0] = 0;
2265 	priv->flc_desc[0].desc[1] = 0;
2266 
2267 	if (session->dir == DIR_ENC)
2268 		bufsize = cnstr_shdsc_gcm_encap(
2269 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2270 				&aeaddata, session->iv.length,
2271 				session->digest_length);
2272 	else
2273 		bufsize = cnstr_shdsc_gcm_decap(
2274 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2275 				&aeaddata, session->iv.length,
2276 				session->digest_length);
2277 	if (bufsize < 0) {
2278 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2279 		ret = -EINVAL;
2280 		goto error_out;
2281 	}
2282 
2283 	flc->word1_sdl = (uint8_t)bufsize;
2284 	session->ctxt = priv;
2285 #ifdef CAAM_DESC_DEBUG
2286 	int i;
2287 	for (i = 0; i < bufsize; i++)
2288 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2289 			    i, priv->flc_desc[0].desc[i]);
2290 #endif
2291 	return ret;
2292 
2293 error_out:
2294 	rte_free(session->aead_key.data);
2295 	rte_free(priv);
2296 	return ret;
2297 }
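
/*
 * Illustrative AES-GCM AEAD xform consumed by dpaa2_sec_aead_init()
 * (a sketch; key material, iv_offset and AAD length are assumptions):
 *
 *	uint8_t gcm_key[16] = { 0 };	// example key material
 *	struct rte_crypto_sym_xform xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = gcm_key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 16,
 *		},
 *	};
 *
 * With the encrypt op, session->dir is DIR_ENC, so the descriptor is
 * built by cnstr_shdsc_gcm_encap() above.
 */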
2298 
2299 
2300 static int
2301 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2302 		    struct rte_crypto_sym_xform *xform,
2303 		    dpaa2_sec_session *session)
2304 {
2305 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2306 	struct alginfo authdata, cipherdata;
2307 	int bufsize;
2308 	struct ctxt_priv *priv;
2309 	struct sec_flow_context *flc;
2310 	struct rte_crypto_cipher_xform *cipher_xform;
2311 	struct rte_crypto_auth_xform *auth_xform;
2312 	int err, ret = 0;
2313 
2314 	PMD_INIT_FUNC_TRACE();
2315 
2316 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2317 		cipher_xform = &xform->cipher;
2318 		auth_xform = &xform->next->auth;
2319 		session->ctxt_type =
2320 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2321 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2322 	} else {
2323 		cipher_xform = &xform->next->cipher;
2324 		auth_xform = &xform->auth;
2325 		session->ctxt_type =
2326 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2327 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2328 	}
2329 
2330 	/* Set IV parameters */
2331 	session->iv.offset = cipher_xform->iv.offset;
2332 	session->iv.length = cipher_xform->iv.length;
2333 
	/* For SEC cipher-auth chaining only one descriptor is required */
2335 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2336 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2337 			RTE_CACHE_LINE_SIZE);
2338 	if (priv == NULL) {
2339 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2340 		return -ENOMEM;
2341 	}
2342 
2343 	priv->fle_pool = dev_priv->fle_pool;
2344 	flc = &priv->flc_desc[0].flc;
2345 
2346 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2347 					       RTE_CACHE_LINE_SIZE);
2348 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2349 		DPAA2_SEC_ERR("No Memory for cipher key");
2350 		rte_free(priv);
2351 		return -ENOMEM;
2352 	}
2353 	session->cipher_key.length = cipher_xform->key.length;
2354 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2355 					     RTE_CACHE_LINE_SIZE);
2356 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2357 		DPAA2_SEC_ERR("No Memory for auth key");
2358 		rte_free(session->cipher_key.data);
2359 		rte_free(priv);
2360 		return -ENOMEM;
2361 	}
2362 	session->auth_key.length = auth_xform->key.length;
2363 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2364 	       cipher_xform->key.length);
2365 	memcpy(session->auth_key.data, auth_xform->key.data,
2366 	       auth_xform->key.length);
2367 
2368 	authdata.key = (size_t)session->auth_key.data;
2369 	authdata.keylen = session->auth_key.length;
2370 	authdata.key_enc_flags = 0;
2371 	authdata.key_type = RTA_DATA_IMM;
2372 
2373 	session->digest_length = auth_xform->digest_length;
2374 
2375 	switch (auth_xform->algo) {
2376 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2377 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2378 		authdata.algmode = OP_ALG_AAI_HMAC;
2379 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2380 		break;
2381 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2382 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2383 		authdata.algmode = OP_ALG_AAI_HMAC;
2384 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2385 		break;
2386 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2387 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2388 		authdata.algmode = OP_ALG_AAI_HMAC;
2389 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2390 		break;
2391 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2392 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2393 		authdata.algmode = OP_ALG_AAI_HMAC;
2394 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2395 		break;
2396 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2397 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2398 		authdata.algmode = OP_ALG_AAI_HMAC;
2399 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2400 		break;
2401 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2402 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2403 		authdata.algmode = OP_ALG_AAI_HMAC;
2404 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2405 		break;
2406 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2407 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2408 	case RTE_CRYPTO_AUTH_NULL:
2409 	case RTE_CRYPTO_AUTH_SHA1:
2410 	case RTE_CRYPTO_AUTH_SHA256:
2411 	case RTE_CRYPTO_AUTH_SHA512:
2412 	case RTE_CRYPTO_AUTH_SHA224:
2413 	case RTE_CRYPTO_AUTH_SHA384:
2414 	case RTE_CRYPTO_AUTH_MD5:
2415 	case RTE_CRYPTO_AUTH_AES_GMAC:
2416 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2417 	case RTE_CRYPTO_AUTH_AES_CMAC:
2418 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2419 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2420 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2421 			      auth_xform->algo);
2422 		ret = -ENOTSUP;
2423 		goto error_out;
2424 	default:
2425 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2426 			      auth_xform->algo);
2427 		ret = -ENOTSUP;
2428 		goto error_out;
2429 	}
2430 	cipherdata.key = (size_t)session->cipher_key.data;
2431 	cipherdata.keylen = session->cipher_key.length;
2432 	cipherdata.key_enc_flags = 0;
2433 	cipherdata.key_type = RTA_DATA_IMM;
2434 
2435 	switch (cipher_xform->algo) {
2436 	case RTE_CRYPTO_CIPHER_AES_CBC:
2437 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2438 		cipherdata.algmode = OP_ALG_AAI_CBC;
2439 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2440 		break;
2441 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2442 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2443 		cipherdata.algmode = OP_ALG_AAI_CBC;
2444 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2445 		break;
2446 	case RTE_CRYPTO_CIPHER_DES_CBC:
2447 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2448 		cipherdata.algmode = OP_ALG_AAI_CBC;
2449 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2450 		break;
2451 	case RTE_CRYPTO_CIPHER_AES_CTR:
2452 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2453 		cipherdata.algmode = OP_ALG_AAI_CTR;
2454 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2455 		break;
2456 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2457 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2458 	case RTE_CRYPTO_CIPHER_NULL:
2459 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2460 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2461 	case RTE_CRYPTO_CIPHER_AES_ECB:
2462 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2463 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2464 			      cipher_xform->algo);
2465 		ret = -ENOTSUP;
2466 		goto error_out;
2467 	default:
2468 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2469 			      cipher_xform->algo);
2470 		ret = -ENOTSUP;
2471 		goto error_out;
2472 	}
2473 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2474 				DIR_ENC : DIR_DEC;
2475 
2476 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2477 	priv->flc_desc[0].desc[1] = authdata.keylen;
2478 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2479 			       DESC_JOB_IO_LEN,
2480 			       (unsigned int *)priv->flc_desc[0].desc,
2481 			       &priv->flc_desc[0].desc[2], 2);
2482 
2483 	if (err < 0) {
2484 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2485 		ret = -EINVAL;
2486 		goto error_out;
2487 	}
2488 	if (priv->flc_desc[0].desc[2] & 1) {
2489 		cipherdata.key_type = RTA_DATA_IMM;
2490 	} else {
2491 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2492 		cipherdata.key_type = RTA_DATA_PTR;
2493 	}
2494 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2495 		authdata.key_type = RTA_DATA_IMM;
2496 	} else {
2497 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2498 		authdata.key_type = RTA_DATA_PTR;
2499 	}
2500 	priv->flc_desc[0].desc[0] = 0;
2501 	priv->flc_desc[0].desc[1] = 0;
2502 	priv->flc_desc[0].desc[2] = 0;
2503 
2504 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2505 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2506 					      0, SHR_SERIAL,
2507 					      &cipherdata, &authdata,
2508 					      session->iv.length,
2509 					      session->digest_length,
2510 					      session->dir);
2511 		if (bufsize < 0) {
2512 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2513 			ret = -EINVAL;
2514 			goto error_out;
2515 		}
2516 	} else {
2517 		DPAA2_SEC_ERR("Hash before cipher not supported");
2518 		ret = -ENOTSUP;
2519 		goto error_out;
2520 	}
2521 
2522 	flc->word1_sdl = (uint8_t)bufsize;
2523 	session->ctxt = priv;
2524 #ifdef CAAM_DESC_DEBUG
2525 	int i;
2526 	for (i = 0; i < bufsize; i++)
2527 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2528 			    i, priv->flc_desc[0].desc[i]);
2529 #endif
2530 
2531 	return ret;
2532 
2533 error_out:
2534 	rte_free(session->cipher_key.data);
2535 	rte_free(session->auth_key.data);
2536 	rte_free(priv);
2537 	return ret;
2538 }
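
/*
 * Illustrative chained xform (cipher followed by auth) that lands in
 * dpaa2_sec_aead_chain_init() via dpaa2_sec_set_session_parameters();
 * a sketch only: aes_key, hmac_key and iv_offset are assumptions.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = 20 },
 *			.digest_length = 12,	// truncated, IPsec style
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.next = &auth,	// encrypt first, then authenticate
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *
 * This combination resolves to DPAA2_SEC_CIPHER_HASH and a
 * cnstr_shdsc_authenc() descriptor; the DPAA2_SEC_HASH_CIPHER order
 * is rejected above with "Hash before cipher not supported".
 */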
2539 
2540 static int
2541 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2542 			    struct rte_crypto_sym_xform *xform,	void *sess)
2543 {
2544 	dpaa2_sec_session *session = sess;
2545 	int ret;
2546 
2547 	PMD_INIT_FUNC_TRACE();
2548 
2549 	if (unlikely(sess == NULL)) {
2550 		DPAA2_SEC_ERR("Invalid session struct");
2551 		return -EINVAL;
2552 	}
2553 
2554 	memset(session, 0, sizeof(dpaa2_sec_session));
2555 	/* Default IV length = 0 */
2556 	session->iv.length = 0;
2557 
2558 	/* Cipher Only */
2559 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2560 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2561 
2562 	/* Authentication Only */
2563 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2564 		   xform->next == NULL) {
2565 		ret = dpaa2_sec_auth_init(dev, xform, session);
2566 
2567 	/* Cipher then Authenticate */
2568 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2569 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2570 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2571 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2572 			ret = dpaa2_sec_auth_init(dev, xform, session);
2573 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2574 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2575 		else
2576 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2577 	/* Authenticate then Cipher */
2578 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2579 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2580 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2581 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2582 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2583 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2584 			ret = dpaa2_sec_auth_init(dev, xform, session);
2585 		else
2586 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
	/* AEAD operation for AES-GCM-type algorithms */
2588 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2589 		   xform->next == NULL) {
2590 		ret = dpaa2_sec_aead_init(dev, xform, session);
2591 
2592 	} else {
2593 		DPAA2_SEC_ERR("Invalid crypto type");
2594 		return -EINVAL;
2595 	}
2596 
2597 	return ret;
2598 }
2599 
2600 #ifdef RTE_LIB_SECURITY
2601 static int
2602 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2603 			dpaa2_sec_session *session,
2604 			struct alginfo *aeaddata)
2605 {
2606 	PMD_INIT_FUNC_TRACE();
2607 
2608 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2609 					       RTE_CACHE_LINE_SIZE);
2610 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2611 		DPAA2_SEC_ERR("No Memory for aead key");
2612 		return -ENOMEM;
2613 	}
2614 	memcpy(session->aead_key.data, aead_xform->key.data,
2615 	       aead_xform->key.length);
2616 
2617 	session->digest_length = aead_xform->digest_length;
2618 	session->aead_key.length = aead_xform->key.length;
2619 
2620 	aeaddata->key = (size_t)session->aead_key.data;
2621 	aeaddata->keylen = session->aead_key.length;
2622 	aeaddata->key_enc_flags = 0;
2623 	aeaddata->key_type = RTA_DATA_IMM;
2624 
2625 	switch (aead_xform->algo) {
2626 	case RTE_CRYPTO_AEAD_AES_GCM:
2627 		switch (session->digest_length) {
2628 		case 8:
2629 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2630 			break;
2631 		case 12:
2632 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2633 			break;
2634 		case 16:
2635 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2636 			break;
2637 		default:
2638 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2639 				      session->digest_length);
2640 			return -EINVAL;
2641 		}
2642 		aeaddata->algmode = OP_ALG_AAI_GCM;
2643 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2644 		break;
2645 	case RTE_CRYPTO_AEAD_AES_CCM:
2646 		switch (session->digest_length) {
2647 		case 8:
2648 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2649 			break;
2650 		case 12:
2651 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2652 			break;
2653 		case 16:
2654 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2655 			break;
2656 		default:
2657 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2658 				      session->digest_length);
2659 			return -EINVAL;
2660 		}
2661 		aeaddata->algmode = OP_ALG_AAI_CCM;
2662 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2663 		break;
2664 	default:
2665 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2666 			      aead_xform->algo);
2667 		return -ENOTSUP;
2668 	}
2669 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2670 				DIR_ENC : DIR_DEC;
2671 
2672 	return 0;
2673 }
2674 
2675 static int
2676 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2677 	struct rte_crypto_auth_xform *auth_xform,
2678 	dpaa2_sec_session *session,
2679 	struct alginfo *cipherdata,
2680 	struct alginfo *authdata)
2681 {
2682 	if (cipher_xform) {
2683 		session->cipher_key.data = rte_zmalloc(NULL,
2684 						       cipher_xform->key.length,
2685 						       RTE_CACHE_LINE_SIZE);
2686 		if (session->cipher_key.data == NULL &&
2687 				cipher_xform->key.length > 0) {
2688 			DPAA2_SEC_ERR("No Memory for cipher key");
2689 			return -ENOMEM;
2690 		}
2691 
2692 		session->cipher_key.length = cipher_xform->key.length;
2693 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2694 				cipher_xform->key.length);
2695 		session->cipher_alg = cipher_xform->algo;
2696 	} else {
2697 		session->cipher_key.data = NULL;
2698 		session->cipher_key.length = 0;
2699 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2700 	}
2701 
2702 	if (auth_xform) {
2703 		session->auth_key.data = rte_zmalloc(NULL,
2704 						auth_xform->key.length,
2705 						RTE_CACHE_LINE_SIZE);
2706 		if (session->auth_key.data == NULL &&
2707 				auth_xform->key.length > 0) {
2708 			DPAA2_SEC_ERR("No Memory for auth key");
2709 			return -ENOMEM;
2710 		}
2711 		session->auth_key.length = auth_xform->key.length;
2712 		memcpy(session->auth_key.data, auth_xform->key.data,
2713 				auth_xform->key.length);
2714 		session->auth_alg = auth_xform->algo;
2715 		session->digest_length = auth_xform->digest_length;
2716 	} else {
2717 		session->auth_key.data = NULL;
2718 		session->auth_key.length = 0;
2719 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2720 	}
2721 
2722 	authdata->key = (size_t)session->auth_key.data;
2723 	authdata->keylen = session->auth_key.length;
2724 	authdata->key_enc_flags = 0;
2725 	authdata->key_type = RTA_DATA_IMM;
2726 	switch (session->auth_alg) {
2727 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2728 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2729 		authdata->algmode = OP_ALG_AAI_HMAC;
2730 		break;
2731 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2732 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2733 		authdata->algmode = OP_ALG_AAI_HMAC;
2734 		break;
2735 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2736 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2737 		authdata->algmode = OP_ALG_AAI_HMAC;
2738 		if (session->digest_length != 16)
			DPAA2_SEC_WARN(
			"Using a truncated sha256-hmac digest is non-standard; "
			"it will not work with lookaside protocol offload");
2742 		break;
2743 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2744 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2745 		authdata->algmode = OP_ALG_AAI_HMAC;
2746 		break;
2747 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2748 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2749 		authdata->algmode = OP_ALG_AAI_HMAC;
2750 		break;
2751 	case RTE_CRYPTO_AUTH_AES_CMAC:
2752 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2753 		break;
2754 	case RTE_CRYPTO_AUTH_NULL:
2755 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2756 		break;
2757 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2758 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2759 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2760 	case RTE_CRYPTO_AUTH_SHA1:
2761 	case RTE_CRYPTO_AUTH_SHA256:
2762 	case RTE_CRYPTO_AUTH_SHA512:
2763 	case RTE_CRYPTO_AUTH_SHA224:
2764 	case RTE_CRYPTO_AUTH_SHA384:
2765 	case RTE_CRYPTO_AUTH_MD5:
2766 	case RTE_CRYPTO_AUTH_AES_GMAC:
2767 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2768 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2769 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2770 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2771 			      session->auth_alg);
2772 		return -ENOTSUP;
2773 	default:
2774 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2775 			      session->auth_alg);
2776 		return -ENOTSUP;
2777 	}
2778 	cipherdata->key = (size_t)session->cipher_key.data;
2779 	cipherdata->keylen = session->cipher_key.length;
2780 	cipherdata->key_enc_flags = 0;
2781 	cipherdata->key_type = RTA_DATA_IMM;
2782 
2783 	switch (session->cipher_alg) {
2784 	case RTE_CRYPTO_CIPHER_AES_CBC:
2785 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2786 		cipherdata->algmode = OP_ALG_AAI_CBC;
2787 		break;
2788 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2789 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2790 		cipherdata->algmode = OP_ALG_AAI_CBC;
2791 		break;
2792 	case RTE_CRYPTO_CIPHER_DES_CBC:
2793 		cipherdata->algtype = OP_PCL_IPSEC_DES;
2794 		cipherdata->algmode = OP_ALG_AAI_CBC;
2795 		break;
2796 	case RTE_CRYPTO_CIPHER_AES_CTR:
2797 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2798 		cipherdata->algmode = OP_ALG_AAI_CTR;
2799 		break;
2800 	case RTE_CRYPTO_CIPHER_NULL:
2801 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2802 		break;
2803 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2804 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2805 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2806 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2807 	case RTE_CRYPTO_CIPHER_AES_ECB:
2808 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2809 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2810 			      session->cipher_alg);
2811 		return -ENOTSUP;
2812 	default:
2813 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2814 			      session->cipher_alg);
2815 		return -ENOTSUP;
2816 	}
2817 
2818 	return 0;
2819 }
2820 
2821 #ifdef RTE_LIBRTE_SECURITY_TEST
2822 static uint8_t aes_cbc_iv[] = {
2823 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2824 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2825 #endif
2826 
2827 static int
2828 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2829 			    struct rte_security_session_conf *conf,
2830 			    void *sess)
2831 {
2832 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2833 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2834 	struct rte_crypto_auth_xform *auth_xform = NULL;
2835 	struct rte_crypto_aead_xform *aead_xform = NULL;
2836 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2837 	struct ctxt_priv *priv;
2838 	struct alginfo authdata, cipherdata;
2839 	int bufsize;
2840 	struct sec_flow_context *flc;
2841 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2842 	int ret = -1;
2843 
2844 	PMD_INIT_FUNC_TRACE();
2845 
2846 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2847 				sizeof(struct ctxt_priv) +
2848 				sizeof(struct sec_flc_desc),
2849 				RTE_CACHE_LINE_SIZE);
2850 
2851 	if (priv == NULL) {
2852 		DPAA2_SEC_ERR("No memory for priv CTXT");
2853 		return -ENOMEM;
2854 	}
2855 
2856 	priv->fle_pool = dev_priv->fle_pool;
2857 	flc = &priv->flc_desc[0].flc;
2858 
2859 	memset(session, 0, sizeof(dpaa2_sec_session));
2860 
2861 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2862 		cipher_xform = &conf->crypto_xform->cipher;
2863 		if (conf->crypto_xform->next)
2864 			auth_xform = &conf->crypto_xform->next->auth;
2865 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2866 					session, &cipherdata, &authdata);
2867 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2868 		auth_xform = &conf->crypto_xform->auth;
2869 		if (conf->crypto_xform->next)
2870 			cipher_xform = &conf->crypto_xform->next->cipher;
2871 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2872 					session, &cipherdata, &authdata);
2873 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2874 		aead_xform = &conf->crypto_xform->aead;
2875 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2876 					session, &cipherdata);
2877 		authdata.keylen = 0;
2878 		authdata.algtype = 0;
2879 	} else {
2880 		DPAA2_SEC_ERR("XFORM not specified");
2881 		ret = -EINVAL;
2882 		goto out;
2883 	}
2884 	if (ret) {
2885 		DPAA2_SEC_ERR("Failed to process xform");
2886 		goto out;
2887 	}
2888 
2889 	session->ctxt_type = DPAA2_SEC_IPSEC;
2890 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2891 		uint8_t *hdr = NULL;
2892 		struct ip ip4_hdr;
2893 		struct rte_ipv6_hdr ip6_hdr;
2894 		struct ipsec_encap_pdb encap_pdb;
2895 
2896 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2897 		/* For Sec Proto only one descriptor is required. */
2898 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2899 
2900 		/* copy algo specific data to PDB */
2901 		switch (cipherdata.algtype) {
2902 		case OP_PCL_IPSEC_AES_CTR:
2903 			encap_pdb.ctr.ctr_initial = 0x00000001;
2904 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2905 			break;
2906 		case OP_PCL_IPSEC_AES_GCM8:
2907 		case OP_PCL_IPSEC_AES_GCM12:
2908 		case OP_PCL_IPSEC_AES_GCM16:
2909 			memcpy(encap_pdb.gcm.salt,
2910 				(uint8_t *)&(ipsec_xform->salt), 4);
2911 			break;
2912 		}
2913 
2914 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2915 			PDBOPTS_ESP_OIHI_PDB_INL |
2916 			PDBOPTS_ESP_IVSRC |
2917 			PDBHMO_ESP_ENCAP_DTTL |
2918 			PDBHMO_ESP_SNR;
2919 		if (ipsec_xform->options.esn)
2920 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2921 		encap_pdb.spi = ipsec_xform->spi;
2922 		session->dir = DIR_ENC;
2923 		if (ipsec_xform->tunnel.type ==
2924 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2925 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2926 			ip4_hdr.ip_v = IPVERSION;
2927 			ip4_hdr.ip_hl = 5;
2928 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2929 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2930 			ip4_hdr.ip_id = 0;
2931 			ip4_hdr.ip_off = 0;
2932 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2933 			ip4_hdr.ip_p = IPPROTO_ESP;
2934 			ip4_hdr.ip_sum = 0;
2935 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2936 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2937 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2938 					&ip4_hdr, sizeof(struct ip));
2939 			hdr = (uint8_t *)&ip4_hdr;
2940 		} else if (ipsec_xform->tunnel.type ==
2941 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2942 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2943 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2944 				((ipsec_xform->tunnel.ipv6.dscp <<
2945 					RTE_IPV6_HDR_TC_SHIFT) &
2946 					RTE_IPV6_HDR_TC_MASK) |
2947 				((ipsec_xform->tunnel.ipv6.flabel <<
2948 					RTE_IPV6_HDR_FL_SHIFT) &
2949 					RTE_IPV6_HDR_FL_MASK));
2950 			/* Payload length will be updated by HW */
2951 			ip6_hdr.payload_len = 0;
2952 			ip6_hdr.hop_limits =
2953 					ipsec_xform->tunnel.ipv6.hlimit;
2954 			ip6_hdr.proto = (ipsec_xform->proto ==
2955 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2956 					IPPROTO_ESP : IPPROTO_AH;
2957 			memcpy(&ip6_hdr.src_addr,
2958 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2959 			memcpy(&ip6_hdr.dst_addr,
2960 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2961 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2962 			hdr = (uint8_t *)&ip6_hdr;
2963 		}
2964 
2965 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2966 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2967 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
2968 				hdr, &cipherdata, &authdata);
2969 	} else if (ipsec_xform->direction ==
2970 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2971 		struct ipsec_decap_pdb decap_pdb;
2972 
2973 		flc->dhr = SEC_FLC_DHR_INBOUND;
2974 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2975 		/* copy algo specific data to PDB */
2976 		switch (cipherdata.algtype) {
2977 		case OP_PCL_IPSEC_AES_CTR:
2978 			decap_pdb.ctr.ctr_initial = 0x00000001;
2979 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2980 			break;
2981 		case OP_PCL_IPSEC_AES_GCM8:
2982 		case OP_PCL_IPSEC_AES_GCM12:
2983 		case OP_PCL_IPSEC_AES_GCM16:
2984 			memcpy(decap_pdb.gcm.salt,
2985 				(uint8_t *)&(ipsec_xform->salt), 4);
2986 			break;
2987 		}
2988 
2989 		decap_pdb.options = (ipsec_xform->tunnel.type ==
2990 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2991 				sizeof(struct ip) << 16 :
2992 				sizeof(struct rte_ipv6_hdr) << 16;
2993 		if (ipsec_xform->options.esn)
2994 			decap_pdb.options |= PDBOPTS_ESP_ESN;
2995 
2996 		if (ipsec_xform->replay_win_sz) {
2997 			uint32_t win_sz;
2998 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2999 
3000 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
				DPAA2_SEC_INFO("Max anti-replay window size = 128");
3002 				win_sz = 128;
3003 			}
3004 			switch (win_sz) {
3005 			case 1:
3006 			case 2:
3007 			case 4:
3008 			case 8:
3009 			case 16:
3010 			case 32:
3011 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3012 				break;
3013 			case 64:
3014 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3015 				break;
3016 			case 256:
3017 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3018 				break;
3019 			case 512:
3020 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3021 				break;
3022 			case 1024:
3023 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3024 				break;
3025 			case 128:
3026 			default:
3027 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3028 			}
3029 		}
3030 		session->dir = DIR_DEC;
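		/*
		 * Worked example: replay_win_sz = 100 is rounded up by
		 * rte_align32pow2() to 128, which maps to
		 * PDBOPTS_ESP_ARS128 in the switch above.
		 */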
3031 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3032 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3033 				SHR_WAIT : SHR_SERIAL,
3034 				&decap_pdb, &cipherdata, &authdata);
	} else {
		ret = -EINVAL;
		goto out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		ret = -EINVAL;
		goto out;
	}
3042 
3043 	flc->word1_sdl = (uint8_t)bufsize;
3044 
3045 	/* Enable the stashing control bit */
3046 	DPAA2_SET_FLC_RSC(flc);
3047 	flc->word2_rflc_31_0 = lower_32_bits(
3048 			(size_t)&(((struct dpaa2_sec_qp *)
3049 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3050 	flc->word3_rflc_63_32 = upper_32_bits(
3051 			(size_t)&(((struct dpaa2_sec_qp *)
3052 			dev->data->queue_pairs[0])->rx_vq));
3053 
3054 	/* Set EWS bit i.e. enable write-safe */
3055 	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3057 	DPAA2_SET_FLC_REUSE_BS(flc);
3058 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3059 	DPAA2_SET_FLC_REUSE_FF(flc);
3060 
3061 	session->ctxt = priv;
3062 
3063 	return 0;
3064 out:
3065 	rte_free(session->auth_key.data);
3066 	rte_free(session->cipher_key.data);
3067 	rte_free(priv);
3068 	return ret;
3069 }
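
/*
 * Hedged sketch of a security session config that exercises the egress
 * path above (illustrative only; the SPI, tunnel addresses and the
 * crypto_xform contents are assumptions):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = {
 *				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *				// plus .ipv4 src_ip/dst_ip/ttl/dscp
 *			},
 *		},
 *		.crypto_xform = &aead_xform,	// e.g. AES-GCM
 *	};
 *
 * Passing this through rte_security_session_create() reaches
 * dpaa2_sec_set_ipsec_session(), which builds the encap PDB plus the
 * prototype IPv4 header inlined in the shared descriptor.
 */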
3070 
3071 static int
3072 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3073 			   struct rte_security_session_conf *conf,
3074 			   void *sess)
3075 {
3076 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3077 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3078 	struct rte_crypto_auth_xform *auth_xform = NULL;
3079 	struct rte_crypto_cipher_xform *cipher_xform;
3080 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3081 	struct ctxt_priv *priv;
3082 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3083 	struct alginfo authdata, cipherdata;
3084 	struct alginfo *p_authdata = NULL;
3085 	int bufsize = -1;
3086 	struct sec_flow_context *flc;
3087 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3088 	int swap = true;
3089 #else
3090 	int swap = false;
3091 #endif
3092 
3093 	PMD_INIT_FUNC_TRACE();
3094 
3095 	memset(session, 0, sizeof(dpaa2_sec_session));
3096 
3097 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3098 				sizeof(struct ctxt_priv) +
3099 				sizeof(struct sec_flc_desc),
3100 				RTE_CACHE_LINE_SIZE);
3101 
3102 	if (priv == NULL) {
3103 		DPAA2_SEC_ERR("No memory for priv CTXT");
3104 		return -ENOMEM;
3105 	}
3106 
3107 	priv->fle_pool = dev_priv->fle_pool;
3108 	flc = &priv->flc_desc[0].flc;
3109 
	/* find xform types */
3111 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
3112 		cipher_xform = &xform->cipher;
3113 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
3114 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3115 		session->ext_params.aead_ctxt.auth_cipher_text = true;
3116 		cipher_xform = &xform->cipher;
3117 		auth_xform = &xform->next->auth;
3118 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
3119 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3120 		session->ext_params.aead_ctxt.auth_cipher_text = false;
3121 		cipher_xform = &xform->next->cipher;
3122 		auth_xform = &xform->auth;
3123 	} else {
3124 		DPAA2_SEC_ERR("Invalid crypto type");
3125 		return -EINVAL;
3126 	}
3127 
3128 	session->ctxt_type = DPAA2_SEC_PDCP;
3129 	if (cipher_xform) {
3130 		session->cipher_key.data = rte_zmalloc(NULL,
3131 					       cipher_xform->key.length,
3132 					       RTE_CACHE_LINE_SIZE);
3133 		if (session->cipher_key.data == NULL &&
3134 				cipher_xform->key.length > 0) {
3135 			DPAA2_SEC_ERR("No Memory for cipher key");
3136 			rte_free(priv);
3137 			return -ENOMEM;
3138 		}
3139 		session->cipher_key.length = cipher_xform->key.length;
3140 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3141 			cipher_xform->key.length);
3142 		session->dir =
3143 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3144 					DIR_ENC : DIR_DEC;
3145 		session->cipher_alg = cipher_xform->algo;
3146 	} else {
3147 		session->cipher_key.data = NULL;
3148 		session->cipher_key.length = 0;
3149 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3150 		session->dir = DIR_ENC;
3151 	}
3152 
3153 	session->pdcp.domain = pdcp_xform->domain;
3154 	session->pdcp.bearer = pdcp_xform->bearer;
3155 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3156 	session->pdcp.sn_size = pdcp_xform->sn_size;
3157 	session->pdcp.hfn = pdcp_xform->hfn;
3158 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3159 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* hfn ovd offset location is stored in the iv.offset value */
3161 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3162 
3163 	cipherdata.key = (size_t)session->cipher_key.data;
3164 	cipherdata.keylen = session->cipher_key.length;
3165 	cipherdata.key_enc_flags = 0;
3166 	cipherdata.key_type = RTA_DATA_IMM;
3167 
3168 	switch (session->cipher_alg) {
3169 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3170 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3171 		break;
3172 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3173 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3174 		break;
3175 	case RTE_CRYPTO_CIPHER_AES_CTR:
3176 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3177 		break;
3178 	case RTE_CRYPTO_CIPHER_NULL:
3179 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3180 		break;
3181 	default:
3182 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3183 			      session->cipher_alg);
3184 		goto out;
3185 	}
3186 
3187 	if (auth_xform) {
3188 		session->auth_key.data = rte_zmalloc(NULL,
3189 						     auth_xform->key.length,
3190 						     RTE_CACHE_LINE_SIZE);
3191 		if (!session->auth_key.data &&
3192 		    auth_xform->key.length > 0) {
3193 			DPAA2_SEC_ERR("No Memory for auth key");
3194 			rte_free(session->cipher_key.data);
3195 			rte_free(priv);
3196 			return -ENOMEM;
3197 		}
3198 		session->auth_key.length = auth_xform->key.length;
3199 		memcpy(session->auth_key.data, auth_xform->key.data,
3200 		       auth_xform->key.length);
3201 		session->auth_alg = auth_xform->algo;
3202 	} else {
3203 		session->auth_key.data = NULL;
3204 		session->auth_key.length = 0;
3205 		session->auth_alg = 0;
3206 	}
3207 	authdata.key = (size_t)session->auth_key.data;
3208 	authdata.keylen = session->auth_key.length;
3209 	authdata.key_enc_flags = 0;
3210 	authdata.key_type = RTA_DATA_IMM;
3211 
3212 	if (session->auth_alg) {
3213 		switch (session->auth_alg) {
3214 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3215 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3216 			break;
3217 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3218 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3219 			break;
3220 		case RTE_CRYPTO_AUTH_AES_CMAC:
3221 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3222 			break;
3223 		case RTE_CRYPTO_AUTH_NULL:
3224 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3225 			break;
3226 		default:
3227 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3228 				      session->auth_alg);
3229 			goto out;
3230 		}
3231 
3232 		p_authdata = &authdata;
3233 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3235 		goto out;
3236 	}
3237 
3238 	if (rta_inline_pdcp_query(authdata.algtype,
3239 				cipherdata.algtype,
3240 				session->pdcp.sn_size,
3241 				session->pdcp.hfn_ovd)) {
3242 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3243 		cipherdata.key_type = RTA_DATA_PTR;
3244 	}
3245 
3246 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3247 		if (session->dir == DIR_ENC)
3248 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3249 					priv->flc_desc[0].desc, 1, swap,
3250 					pdcp_xform->hfn,
3251 					session->pdcp.sn_size,
3252 					pdcp_xform->bearer,
3253 					pdcp_xform->pkt_dir,
3254 					pdcp_xform->hfn_threshold,
3255 					&cipherdata, &authdata,
3256 					0);
3257 		else if (session->dir == DIR_DEC)
3258 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3259 					priv->flc_desc[0].desc, 1, swap,
3260 					pdcp_xform->hfn,
3261 					session->pdcp.sn_size,
3262 					pdcp_xform->bearer,
3263 					pdcp_xform->pkt_dir,
3264 					pdcp_xform->hfn_threshold,
3265 					&cipherdata, &authdata,
3266 					0);
3267 	} else {
3268 		if (session->dir == DIR_ENC) {
3269 			if (pdcp_xform->sdap_enabled)
3270 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3271 					priv->flc_desc[0].desc, 1, swap,
3272 					session->pdcp.sn_size,
3273 					pdcp_xform->hfn,
3274 					pdcp_xform->bearer,
3275 					pdcp_xform->pkt_dir,
3276 					pdcp_xform->hfn_threshold,
3277 					&cipherdata, p_authdata, 0);
3278 			else
3279 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3280 					priv->flc_desc[0].desc, 1, swap,
3281 					session->pdcp.sn_size,
3282 					pdcp_xform->hfn,
3283 					pdcp_xform->bearer,
3284 					pdcp_xform->pkt_dir,
3285 					pdcp_xform->hfn_threshold,
3286 					&cipherdata, p_authdata, 0);
3287 		} else if (session->dir == DIR_DEC) {
3288 			if (pdcp_xform->sdap_enabled)
3289 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3290 					priv->flc_desc[0].desc, 1, swap,
3291 					session->pdcp.sn_size,
3292 					pdcp_xform->hfn,
3293 					pdcp_xform->bearer,
3294 					pdcp_xform->pkt_dir,
3295 					pdcp_xform->hfn_threshold,
3296 					&cipherdata, p_authdata, 0);
3297 			else
3298 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3299 					priv->flc_desc[0].desc, 1, swap,
3300 					session->pdcp.sn_size,
3301 					pdcp_xform->hfn,
3302 					pdcp_xform->bearer,
3303 					pdcp_xform->pkt_dir,
3304 					pdcp_xform->hfn_threshold,
3305 					&cipherdata, p_authdata, 0);
3306 		}
3307 	}
3308 
3309 	if (bufsize < 0) {
3310 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3311 		goto out;
3312 	}
3313 
3314 	/* Enable the stashing control bit */
3315 	DPAA2_SET_FLC_RSC(flc);
3316 	flc->word2_rflc_31_0 = lower_32_bits(
3317 			(size_t)&(((struct dpaa2_sec_qp *)
3318 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3319 	flc->word3_rflc_63_32 = upper_32_bits(
3320 			(size_t)&(((struct dpaa2_sec_qp *)
3321 			dev->data->queue_pairs[0])->rx_vq));
3322 
3323 	flc->word1_sdl = (uint8_t)bufsize;
3324 
	/* TODO - check the performance impact, or align as per the
	 * descriptor type, before enabling the EWS (write-safe) bit:
	 * DPAA2_SET_FLC_EWS(flc);
	 */
3330 
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3332 	DPAA2_SET_FLC_REUSE_BS(flc);
3333 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3334 	DPAA2_SET_FLC_REUSE_FF(flc);
3335 
3336 	session->ctxt = priv;
3337 
3338 	return 0;
3339 out:
3340 	rte_free(session->auth_key.data);
3341 	rte_free(session->cipher_key.data);
3342 	rte_free(priv);
3343 	return -EINVAL;
3344 }
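
/*
 * Hedged sketch of a PDCP session config handled by the function above
 * (illustrative only; bearer, HFN and SN-size values are assumptions):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.bearer = 1,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_xform,	// e.g. SNOW3G UEA2
 *	};
 *
 * Control-plane sessions (RTE_SECURITY_PDCP_MODE_CONTROL) must also
 * carry an integrity (auth) xform, as enforced above.
 */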
3345 
3346 static int
3347 dpaa2_sec_security_session_create(void *dev,
3348 				  struct rte_security_session_conf *conf,
3349 				  struct rte_security_session *sess,
3350 				  struct rte_mempool *mempool)
3351 {
3352 	void *sess_private_data;
3353 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3354 	int ret;
3355 
3356 	if (rte_mempool_get(mempool, &sess_private_data)) {
3357 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3358 		return -ENOMEM;
3359 	}
3360 
3361 	switch (conf->protocol) {
3362 	case RTE_SECURITY_PROTOCOL_IPSEC:
3363 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3364 				sess_private_data);
3365 		break;
3366 	case RTE_SECURITY_PROTOCOL_MACSEC:
3367 		return -ENOTSUP;
3368 	case RTE_SECURITY_PROTOCOL_PDCP:
3369 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3370 				sess_private_data);
3371 		break;
3372 	default:
3373 		return -EINVAL;
3374 	}
3375 	if (ret != 0) {
3376 		DPAA2_SEC_ERR("Failed to configure session parameters");
3377 		/* Return session to mempool */
3378 		rte_mempool_put(mempool, sess_private_data);
3379 		return ret;
3380 	}
3381 
3382 	set_sec_session_private_data(sess, sess_private_data);
3383 
3384 	return ret;
3385 }
3386 
3387 /** Clear the memory of session so it doesn't leave key material behind */
3388 static int
3389 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3390 		struct rte_security_session *sess)
3391 {
3392 	PMD_INIT_FUNC_TRACE();
3393 	void *sess_priv = get_sec_session_private_data(sess);
3394 
3395 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3396 
3397 	if (sess_priv) {
3398 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3399 
3400 		rte_free(s->ctxt);
3401 		rte_free(s->cipher_key.data);
3402 		rte_free(s->auth_key.data);
3403 		memset(s, 0, sizeof(dpaa2_sec_session));
3404 		set_sec_session_private_data(sess, NULL);
3405 		rte_mempool_put(sess_mp, sess_priv);
3406 	}
3407 	return 0;
3408 }
3409 #endif

3410 static int
3411 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3412 		struct rte_crypto_sym_xform *xform,
3413 		struct rte_cryptodev_sym_session *sess,
3414 		struct rte_mempool *mempool)
3415 {
3416 	void *sess_private_data;
3417 	int ret;
3418 
3419 	if (rte_mempool_get(mempool, &sess_private_data)) {
3420 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3421 		return -ENOMEM;
3422 	}
3423 
3424 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3425 	if (ret != 0) {
3426 		DPAA2_SEC_ERR("Failed to configure session parameters");
3427 		/* Return session to mempool */
3428 		rte_mempool_put(mempool, sess_private_data);
3429 		return ret;
3430 	}
3431 
3432 	set_sym_session_private_data(sess, dev->driver_id,
3433 		sess_private_data);
3434 
3435 	return 0;
3436 }
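
/* A minimal usage sketch (not part of the driver), using the two-step
 * symmetric session API of this DPDK era (create, then init per device);
 * dev_id and the mempool names are illustrative:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_mp) != 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *
 * rte_cryptodev_sym_session_init() lands in
 * dpaa2_sec_sym_session_configure() above via the dev_ops table.
 */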
3437 
3438 /** Clear the memory of the session so it doesn't leave key material behind */
3439 static void
3440 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3441 		struct rte_cryptodev_sym_session *sess)
3442 {
3443 	uint8_t index = dev->driver_id;
3444 	void *sess_priv = get_sym_session_private_data(sess, index);
3445 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

3446 	PMD_INIT_FUNC_TRACE();
3447 
3448 	if (sess_priv) {
3449 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3450 
3451 		rte_free(s->ctxt);
3452 		rte_free(s->cipher_key.data);
3453 		rte_free(s->auth_key.data);
3454 		memset(s, 0, sizeof(dpaa2_sec_session));
3455 		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
3456 	}
3457 }
3458 
3459 static int
3460 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3461 			struct rte_cryptodev_config *config __rte_unused)
3462 {
3463 	PMD_INIT_FUNC_TRACE();
3464 
3465 	return 0;
3466 }
3467 
3468 static int
3469 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3470 {
3471 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3472 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3473 	struct dpseci_attr attr;
3474 	struct dpaa2_queue *dpaa2_q;
3475 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3476 					dev->data->queue_pairs;
3477 	struct dpseci_rx_queue_attr rx_attr;
3478 	struct dpseci_tx_queue_attr tx_attr;
3479 	int ret, i;
3480 
3481 	PMD_INIT_FUNC_TRACE();
3482 
3483 	memset(&attr, 0, sizeof(struct dpseci_attr));
3484 
3485 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3486 	if (ret) {
3487 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3488 			      priv->hw_id);
3489 		goto get_attr_failure;
3490 	}
3491 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3492 	if (ret) {
3493 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3494 		goto get_attr_failure;
3495 	}
3496 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3497 		dpaa2_q = &qp[i]->rx_vq;
3498 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3499 				    &rx_attr);
3500 		dpaa2_q->fqid = rx_attr.fqid;
3501 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3502 	}
3503 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3504 		dpaa2_q = &qp[i]->tx_vq;
3505 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3506 				    &tx_attr);
3507 		dpaa2_q->fqid = tx_attr.fqid;
3508 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3509 	}
3510 
3511 	return 0;
3512 get_attr_failure:
3513 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3514 	return -1;
3515 }
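
/* A minimal bring-up sketch (not part of the driver) showing the calls
 * that reach dpaa2_sec_dev_start(); dev_id and the configs are
 * illustrative:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 1024,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, SOCKET_ID_ANY);
 *	rte_cryptodev_start(dev_id);
 */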
3516 
3517 static void
3518 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3519 {
3520 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3521 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3522 	int ret;
3523 
3524 	PMD_INIT_FUNC_TRACE();
3525 
3526 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3527 	if (ret) {
3528 		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3529 			      priv->hw_id);
3530 		return;
3531 	}
3532 
3533 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3534 	if (ret < 0) {
3535 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3536 		return;
3537 	}
3538 }
3539 
3540 static int
3541 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3542 {
3543 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3544 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3545 	int ret;
3546 
3547 	PMD_INIT_FUNC_TRACE();
3548 
3549 	/* This function is the reverse of dpaa2_sec_dev_init.
3550 	 * It does the following:
3551 	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools (dpbp).
3552 	 * 2. Close the DPSECI device.
3553 	 * 3. Free the allocated resources.
3554 	 */
3555 
3556 	/* Close the device at the underlying layer */
3557 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3558 	if (ret) {
3559 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3560 		return -1;
3561 	}
3562 
3563 	/* Free the allocated memory for the dpseci object */
3564 	priv->hw = NULL;
3565 	rte_free(dpseci);
3566 
3567 	return 0;
3568 }
3569 
3570 static void
3571 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3572 			struct rte_cryptodev_info *info)
3573 {
3574 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3575 
3576 	PMD_INIT_FUNC_TRACE();
3577 	if (info != NULL) {
3578 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3579 		info->feature_flags = dev->feature_flags;
3580 		info->capabilities = dpaa2_sec_capabilities;
3581 		/* No limit on the number of sessions */
3582 		info->sym.max_nb_sessions = 0;
3583 		info->driver_id = cryptodev_driver_id;
3584 	}
3585 }
3586 
3587 static void
3588 dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3589 			 struct rte_cryptodev_stats *stats)
3590 {
3591 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3592 	struct fsl_mc_io dpseci;
3593 	struct dpseci_sec_counters counters = {0};
3594 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3595 					dev->data->queue_pairs;
3596 	int ret, i;
3597 
3598 	PMD_INIT_FUNC_TRACE();
3599 	if (stats == NULL) {
3600 		DPAA2_SEC_ERR("Invalid stats pointer (NULL)");
3601 		return;
3602 	}
3603 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3604 		if (qp == NULL || qp[i] == NULL) {
3605 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3606 			continue;
3607 		}
3608 
3609 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3610 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3611 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3612 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3613 	}
3614 
3615 	/* If a secondary process accesses the stats, the MCP portal in
3616 	 * priv->hw may hold the primary process address. Use the secondary
3617 	 * process based MCP portal address for this object instead.
3618 	 */
3619 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3620 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3621 				      &counters);
3622 	if (ret) {
3623 		DPAA2_SEC_ERR("Reading SEC counters failed");
3624 	} else {
3625 		DPAA2_SEC_INFO("dpseci hardware stats:"
3626 			    "\n\tNum of Requests Dequeued = %" PRIu64
3627 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3628 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3629 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3630 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3631 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3632 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3633 			    counters.dequeued_requests,
3634 			    counters.ob_enc_requests,
3635 			    counters.ib_dec_requests,
3636 			    counters.ob_enc_bytes,
3637 			    counters.ob_prot_bytes,
3638 			    counters.ib_dec_bytes,
3639 			    counters.ib_valid_bytes);
3640 	}
3641 }
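
/* A minimal usage sketch (not part of the driver); dev_id is illustrative:
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count);
 */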
3642 
3643 static void
3644 dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3645 {
3646 	int i;
3647 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3648 				   (dev->data->queue_pairs);
3649 
3650 	PMD_INIT_FUNC_TRACE();
3651 
3652 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3653 		if (qp[i] == NULL) {
3654 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3655 			continue;
3656 		}
3657 		qp[i]->tx_vq.rx_pkts = 0;
3658 		qp[i]->tx_vq.tx_pkts = 0;
3659 		qp[i]->tx_vq.err_pkts = 0;
3660 		qp[i]->rx_vq.rx_pkts = 0;
3661 		qp[i]->rx_vq.tx_pkts = 0;
3662 		qp[i]->rx_vq.err_pkts = 0;
3663 	}
3664 }
3665 
3666 static void __rte_hot
3667 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3668 				 const struct qbman_fd *fd,
3669 				 const struct qbman_result *dq,
3670 				 struct dpaa2_queue *rxq,
3671 				 struct rte_event *ev)
3672 {
3673 	/* Prefetching mbuf */
3674 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3675 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3676 
3677 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3678 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3679 
3680 	ev->flow_id = rxq->ev.flow_id;
3681 	ev->sub_event_type = rxq->ev.sub_event_type;
3682 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3683 	ev->op = RTE_EVENT_OP_NEW;
3684 	ev->sched_type = rxq->ev.sched_type;
3685 	ev->queue_id = rxq->ev.queue_id;
3686 	ev->priority = rxq->ev.priority;
3687 	ev->event_ptr = sec_fd_to_mbuf(fd);
3688 
3689 	qbman_swp_dqrr_consume(swp, dq);
3690 }

3691 static void __rte_hot
3692 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3693 				 const struct qbman_fd *fd,
3694 				 const struct qbman_result *dq,
3695 				 struct dpaa2_queue *rxq,
3696 				 struct rte_event *ev)
3697 {
3698 	uint8_t dqrr_index;
3699 	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3700 	/* Prefetching mbuf */
3701 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3702 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3703 
3704 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3705 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3706 
3707 	ev->flow_id = rxq->ev.flow_id;
3708 	ev->sub_event_type = rxq->ev.sub_event_type;
3709 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3710 	ev->op = RTE_EVENT_OP_NEW;
3711 	ev->sched_type = rxq->ev.sched_type;
3712 	ev->queue_id = rxq->ev.queue_id;
3713 	ev->priority = rxq->ev.priority;
3714 
3715 	ev->event_ptr = sec_fd_to_mbuf(fd);
3716 	dqrr_index = qbman_get_dqrr_idx(dq);
3717 	crypto_op->sym->m_src->seqn = dqrr_index + 1;
3718 	DPAA2_PER_LCORE_DQRR_SIZE++;
3719 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3720 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3721 }
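
/* Note on the atomic path above (descriptive, based on DPAA2 queuing
 * semantics): the DQRR entry is deliberately not consumed here. Recording
 * its index in m_src->seqn and in the per-lcore DQRR_HELD mask keeps the
 * entry held, so hardware preserves atomic ordering for the flow until
 * the application finishes with the event and the held entry is consumed
 * later.
 */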
3722 
3723 int
3724 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3725 		int qp_id,
3726 		struct dpaa2_dpcon_dev *dpcon,
3727 		const struct rte_event *event)
3728 {
3729 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3730 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3731 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3732 	struct dpseci_rx_queue_cfg cfg;
3733 	uint8_t priority;
3734 	int ret;
3735 
3736 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3737 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3738 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3739 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3740 	else
3741 		return -EINVAL;
3742 
3743 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3744 		   (dpcon->num_priorities - 1);
3745 
3746 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3747 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3748 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3749 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3750 	cfg.dest_cfg.priority = priority;
3751 
3752 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3753 	cfg.user_ctx = (size_t)(qp);
3754 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3755 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3756 		cfg.order_preservation_en = 1;
3757 	}
3758 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3759 				  qp_id, &cfg);
3760 	if (ret) {
3761 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3762 		return ret;
3763 	}
3764 
3765 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3766 
3767 	return 0;
3768 }
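
/* A minimal usage sketch (not part of the driver): this attach hook is
 * reached via the DPAA2 event device when a crypto queue pair is bound to
 * the event crypto adapter. Names are illustrative and the call below is
 * the DPDK 20.11-era adapter API:
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_qid,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						qp_id, &ev);
 */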
3769 
3770 int
3771 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3772 			int qp_id)
3773 {
3774 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3775 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3776 	struct dpseci_rx_queue_cfg cfg;
3777 	int ret;
3778 
3779 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3780 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3781 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3782 
3783 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3784 				  qp_id, &cfg);
3785 	if (ret)
3786 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3787 
3788 	return ret;
3789 }
3790 
3791 static struct rte_cryptodev_ops crypto_ops = {
3792 	.dev_configure	      = dpaa2_sec_dev_configure,
3793 	.dev_start	      = dpaa2_sec_dev_start,
3794 	.dev_stop	      = dpaa2_sec_dev_stop,
3795 	.dev_close	      = dpaa2_sec_dev_close,
3796 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3797 	.stats_get	      = dpaa2_sec_stats_get,
3798 	.stats_reset	      = dpaa2_sec_stats_reset,
3799 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3800 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3801 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3802 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3803 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3804 };
3805 
3806 #ifdef RTE_LIB_SECURITY
3807 static const struct rte_security_capability *
3808 dpaa2_sec_capabilities_get(void *device __rte_unused)
3809 {
3810 	return dpaa2_sec_security_cap;
3811 }
3812 
3813 static const struct rte_security_ops dpaa2_sec_security_ops = {
3814 	.session_create = dpaa2_sec_security_session_create,
3815 	.session_update = NULL,
3816 	.session_stats_get = NULL,
3817 	.session_destroy = dpaa2_sec_security_session_destroy,
3818 	.set_pkt_metadata = NULL,
3819 	.capabilities_get = dpaa2_sec_capabilities_get
3820 };
3821 #endif
3822 
3823 static int
3824 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3825 {
3826 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3827 
3828 	rte_free(dev->security_ctx);
3829 
3830 	rte_mempool_free(internals->fle_pool);
3831 
3832 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3833 		       dev->data->name, rte_socket_id());
3834 
3835 	return 0;
3836 }
3837 
3838 static int
3839 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3840 {
3841 	struct dpaa2_sec_dev_private *internals;
3842 	struct rte_device *dev = cryptodev->device;
3843 	struct rte_dpaa2_device *dpaa2_dev;
3844 #ifdef RTE_LIB_SECURITY
3845 	struct rte_security_ctx *security_instance;
3846 #endif
3847 	struct fsl_mc_io *dpseci;
3848 	uint16_t token;
3849 	struct dpseci_attr attr;
3850 	int retcode, hw_id;
3851 	char str[30];
3852 
3853 	PMD_INIT_FUNC_TRACE();
3854 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3855 	hw_id = dpaa2_dev->object_id;
3856 
3857 	cryptodev->driver_id = cryptodev_driver_id;
3858 	cryptodev->dev_ops = &crypto_ops;
3859 
3860 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3861 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3862 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3863 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3864 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3865 			RTE_CRYPTODEV_FF_SECURITY |
3866 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3867 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3868 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3869 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3870 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3871 
3872 	internals = cryptodev->data->dev_private;
3873 
3874 	/*
3875 	 * For secondary processes, we don't initialise any further as primary
3876 	 * has already done this work. Only check we don't need a different
3877 	 * RX function
3878 	 */
3879 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3880 		DPAA2_SEC_DEBUG("Device already init by primary process");
3881 		return 0;
3882 	}
3883 #ifdef RTE_LIB_SECURITY
3884 	/* Initialize security_ctx only for primary process*/
3885 	security_instance = rte_malloc("rte_security_instances_ops",
3886 				sizeof(struct rte_security_ctx), 0);
3887 	if (security_instance == NULL)
3888 		return -ENOMEM;
3889 	security_instance->device = (void *)cryptodev;
3890 	security_instance->ops = &dpaa2_sec_security_ops;
3891 	security_instance->sess_cnt = 0;
3892 	cryptodev->security_ctx = security_instance;
3893 #endif
3894 	/*Open the rte device via MC and save the handle for further use*/
3895 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3896 				sizeof(struct fsl_mc_io), 0);
3897 	if (!dpseci) {
3898 		DPAA2_SEC_ERR(
3899 			"Error allocating memory for the dpseci object");
3900 		return -ENOMEM;
3901 	}
3902 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3903 
3904 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3905 	if (retcode != 0) {
3906 		DPAA2_SEC_ERR("Cannot open the dpseci device: Error = %x",
3907 			      retcode);
3908 		goto init_error;
3909 	}
3910 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3911 	if (retcode != 0) {
3912 		DPAA2_SEC_ERR(
3913 			     "Cannot get dpseci device attributes: Error = %x",
3914 			     retcode);
3915 		goto init_error;
3916 	}
3917 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3918 			"dpsec-%u", hw_id);
3919 
3920 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3921 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3922 	internals->hw = dpseci;
3923 	internals->token = token;
3924 
3925 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3926 			getpid(), cryptodev->data->dev_id);
3927 	internals->fle_pool = rte_mempool_create((const char *)str,
3928 			FLE_POOL_NUM_BUFS,
3929 			FLE_POOL_BUF_SIZE,
3930 			FLE_POOL_CACHE_SIZE, 0,
3931 			NULL, NULL, NULL, NULL,
3932 			SOCKET_ID_ANY, 0);
3933 	if (!internals->fle_pool) {
3934 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3935 		goto init_error;
3936 	}
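
	/* The pool name above embeds the PID and the cryptodev ID, so that
	 * multiple DPSECI objects and multiple processes each create a
	 * distinct FLE pool instead of colliding on a single name.
	 */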
3937 
3938 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3939 	return 0;
3940 
3941 init_error:
3942 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3943 
3944 	/* dpaa2_sec_uninit(crypto_dev_name); */
3945 	return -EFAULT;
3946 }
3947 
3948 static int
3949 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3950 			  struct rte_dpaa2_device *dpaa2_dev)
3951 {
3952 	struct rte_cryptodev *cryptodev;
3953 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3954 
3955 	int retval;
3956 
3957 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3958 			dpaa2_dev->object_id);
3959 
3960 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3961 	if (cryptodev == NULL)
3962 		return -ENOMEM;
3963 
3964 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3965 		cryptodev->data->dev_private = rte_zmalloc_socket(
3966 					"cryptodev private structure",
3967 					sizeof(struct dpaa2_sec_dev_private),
3968 					RTE_CACHE_LINE_SIZE,
3969 					rte_socket_id());
3970 
3971 		if (cryptodev->data->dev_private == NULL)
3972 			rte_panic("Cannot allocate memzone for private "
3973 				  "device data");
3974 	}
3975 
3976 	dpaa2_dev->cryptodev = cryptodev;
3977 	cryptodev->device = &dpaa2_dev->device;
3978 
3979 	/* init user callbacks */
3980 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3981 
3982 	if (dpaa2_svr_family == SVR_LX2160A)
3983 		rta_set_sec_era(RTA_SEC_ERA_10);
3984 	else
3985 		rta_set_sec_era(RTA_SEC_ERA_8);
3986 
3987 	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
3988 
3989 	/* Invoke PMD device initialization function */
3990 	retval = dpaa2_sec_dev_init(cryptodev);
3991 	if (retval == 0)
3992 		return 0;
3993 
3994 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3995 		rte_free(cryptodev->data->dev_private);
3996 
3997 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3998 
3999 	return -ENXIO;
4000 }
4001 
4002 static int
4003 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4004 {
4005 	struct rte_cryptodev *cryptodev;
4006 	int ret;
4007 
4008 	cryptodev = dpaa2_dev->cryptodev;
4009 	if (cryptodev == NULL)
4010 		return -ENODEV;
4011 
4012 	ret = dpaa2_sec_uninit(cryptodev);
4013 	if (ret)
4014 		return ret;
4015 
4016 	return rte_cryptodev_pmd_destroy(cryptodev);
4017 }
4018 
4019 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4020 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4021 	.drv_type = DPAA2_CRYPTO,
4022 	.driver = {
4023 		.name = "DPAA2 SEC PMD"
4024 	},
4025 	.probe = cryptodev_dpaa2_sec_probe,
4026 	.remove = cryptodev_dpaa2_sec_remove,
4027 };
4028 
4029 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4030 
4031 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4032 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4033 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4034 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4035