/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>
#include <desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

static uint8_t cryptodev_driver_id;

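/*
 * Layout note (summarizing the helpers below): the compound frame
 * descriptors built in this file use the first FLE as bookkeeping space for
 * the crypto op pointer and the session context, fle + 1 as the output
 * frame-list entry, fle + 2 as the input frame-list entry, and fle + 3
 * onwards as scatter-gather entries when SG format is needed. On dequeue,
 * sec_fd_to_mbuf() walks back one FLE from the FD address to recover the op.
 */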
#ifdef RTE_LIB_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, the per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, the per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Park the mbuf IOVA in digest.phys_addr and stash the op pointer
	 * in its place; sec_simple_fd_to_mbuf() restores it on dequeue.
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

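/*
 * GCM (AEAD) FD helpers: per the code below, the output frame list carries
 * the payload (plus the newly computed ICV when encrypting), while the input
 * frame list carries IV, optional AAD and payload (plus the received ICV,
 * copied to a scratch area, when decrypting).
 */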
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

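/*
 * Chained cipher+auth (authenc) helpers. As computed below, auth_only_len
 * packs the authenticate-only regions around the cipher region into one
 * 32-bit word: the trailing length in the upper 16 bits and the header
 * length in the lower 16 bits, passed to the SEC descriptor through the
 * internal JD fields.
 */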
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

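/*
 * Auth-only helpers: the output frame list is just the digest buffer, and
 * the input frame list is the (optional, algorithm-specific) IV followed by
 * the data to authenticate, plus a copy of the received digest when
 * verifying (DIR_DEC).
 */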
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* For SNOW3G and ZUC, length/offset are given in bits; they must be
	 * byte aligned and are converted to bytes here.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* For SNOW3G and ZUC, length/offset are given in bits; they must be
	 * byte aligned and are converted to bytes here.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

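/*
 * Cipher-only helpers: the output frame list points at the destination data
 * area and the input frame list carries the IV followed by the source data.
 * For SNOW3G/ZUC, cipher length/offset arrive in bits and are converted to
 * bytes after the byte-alignment check below.
 */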
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

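/*
 * Dispatch: pick the scatter-gather FD builder when either mbuf chain is
 * segmented, otherwise use the contiguous single-buffer variant.
 */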
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -ENOTSUP;

	if (!sess)
		return -EINVAL;

	/* Either of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
		  ((op->sym->m_dst != NULL) &&
		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO: support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if (*dpaa2_seqn((*ops)->sym->m_src)) {
				uint8_t dqrr_index =
					*dpaa2_seqn((*ops)->sym->m_src) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn((*ops)->sym->m_src) =
					DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

#ifdef RTE_LIB_SECURITY
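/*
 * Reverse of build_proto_fd(): for simple (non-compound) FDs, the op pointer
 * was stashed in the mbuf's buf_iova on enqueue, so recover the inline mbuf
 * from the FD buffer address, pull the op back out and restore buf_iova from
 * digest.phys_addr. The data offset is then adjusted by the data head room
 * (SEC_FLC_DHR_*) applied for the protocol direction.
 */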
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIB_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIB_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the FLE memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * The SWP may be shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired, and
		 * set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
				      fd->simple.frc);
			dpaa2_qp->rx_vq.err_pkts += 1;
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
				dpaa2_qp->rx_vq.err_pkts);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

1752 /** Setup a queue pair */
1753 static int
1754 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1755 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1756 		__rte_unused int socket_id)
1757 {
1758 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1759 	struct dpaa2_sec_qp *qp;
1760 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1761 	struct dpseci_rx_queue_cfg cfg;
1762 	int32_t retcode;
1763 
1764 	PMD_INIT_FUNC_TRACE();
1765 
1766 	/* If qp is already in use free ring memory and qp metadata. */
1767 	if (dev->data->queue_pairs[qp_id] != NULL) {
1768 		DPAA2_SEC_INFO("QP already setup");
1769 		return 0;
1770 	}
1771 
1772 	DPAA2_SEC_DEBUG("dev=%p, queue=%d, conf=%p",
1773 		    dev, qp_id, qp_conf);
1774 
1775 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1776 
1777 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1778 			RTE_CACHE_LINE_SIZE);
1779 	if (!qp) {
1780 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1781 		return -ENOMEM;
1782 	}
1783 
1784 	qp->rx_vq.crypto_data = dev->data;
1785 	qp->tx_vq.crypto_data = dev->data;
1786 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1787 		sizeof(struct queue_storage_info_t),
1788 		RTE_CACHE_LINE_SIZE);
1789 	if (!qp->rx_vq.q_storage) {
1790 		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
1791 		return -ENOMEM;
1792 	}
1793 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1794 
1795 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1796 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
1797 		return -ENOMEM;
1798 	}
1799 
1800 	dev->data->queue_pairs[qp_id] = qp;
1801 
1802 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1803 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1804 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1805 				      qp_id, &cfg);
1806 	return retcode;
1807 }
1808 
1809 /** Returns the size of the dpaa2_sec session structure */
1810 static unsigned int
1811 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1812 {
1813 	PMD_INIT_FUNC_TRACE();
1814 
1815 	return sizeof(dpaa2_sec_session);
1816 }
1817 
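/*
 * The session initializers below share a common pattern: allocate a
 * ctxt_priv holding the flow context(s), copy the key material into
 * the session, build a CAAM shared descriptor with the matching RTA
 * cnstr_shdsc_*() constructor, and record the descriptor length in
 * flc->word1_sdl so the hardware knows how many words to fetch.
 */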
1818 static int
1819 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1820 		      struct rte_crypto_sym_xform *xform,
1821 		      dpaa2_sec_session *session)
1822 {
1823 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1824 	struct alginfo cipherdata;
1825 	int bufsize, ret = 0;
1826 	struct ctxt_priv *priv;
1827 	struct sec_flow_context *flc;
1828 
1829 	PMD_INIT_FUNC_TRACE();
1830 
1831 	/* For SEC CIPHER only one descriptor is required. */
1832 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1833 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1834 			RTE_CACHE_LINE_SIZE);
1835 	if (priv == NULL) {
1836 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1837 		return -ENOMEM;
1838 	}
1839 
1840 	priv->fle_pool = dev_priv->fle_pool;
1841 
1842 	flc = &priv->flc_desc[0].flc;
1843 
1844 	session->ctxt_type = DPAA2_SEC_CIPHER;
1845 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1846 			RTE_CACHE_LINE_SIZE);
1847 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1848 		DPAA2_SEC_ERR("No Memory for cipher key");
1849 		rte_free(priv);
1850 		return -ENOMEM;
1851 	}
1852 	session->cipher_key.length = xform->cipher.key.length;
1853 
1854 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1855 	       xform->cipher.key.length);
1856 	cipherdata.key = (size_t)session->cipher_key.data;
1857 	cipherdata.keylen = session->cipher_key.length;
1858 	cipherdata.key_enc_flags = 0;
1859 	cipherdata.key_type = RTA_DATA_IMM;
1860 
1861 	/* Set IV parameters */
1862 	session->iv.offset = xform->cipher.iv.offset;
1863 	session->iv.length = xform->cipher.iv.length;
1864 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1865 				DIR_ENC : DIR_DEC;
1866 
1867 	switch (xform->cipher.algo) {
1868 	case RTE_CRYPTO_CIPHER_AES_CBC:
1869 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1870 		cipherdata.algmode = OP_ALG_AAI_CBC;
1871 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1872 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1873 						SHR_NEVER, &cipherdata,
1874 						session->iv.length,
1875 						session->dir);
1876 		break;
1877 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1878 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1879 		cipherdata.algmode = OP_ALG_AAI_CBC;
1880 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1881 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1882 						SHR_NEVER, &cipherdata,
1883 						session->iv.length,
1884 						session->dir);
1885 		break;
1886 	case RTE_CRYPTO_CIPHER_DES_CBC:
1887 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
1888 		cipherdata.algmode = OP_ALG_AAI_CBC;
1889 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1890 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1891 						SHR_NEVER, &cipherdata,
1892 						session->iv.length,
1893 						session->dir);
1894 		break;
1895 	case RTE_CRYPTO_CIPHER_AES_CTR:
1896 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1897 		cipherdata.algmode = OP_ALG_AAI_CTR;
1898 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1899 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1900 						SHR_NEVER, &cipherdata,
1901 						session->iv.length,
1902 						session->dir);
1903 		break;
1904 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1905 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1906 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1907 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1908 					      &cipherdata,
1909 					      session->dir);
1910 		break;
1911 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1912 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1913 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1914 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1915 					      &cipherdata,
1916 					      session->dir);
1917 		break;
1918 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1919 	case RTE_CRYPTO_CIPHER_AES_F8:
1920 	case RTE_CRYPTO_CIPHER_AES_ECB:
1921 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1922 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1923 	case RTE_CRYPTO_CIPHER_AES_XTS:
1924 	case RTE_CRYPTO_CIPHER_ARC4:
1925 	case RTE_CRYPTO_CIPHER_NULL:
1926 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1927 			xform->cipher.algo);
1928 		ret = -ENOTSUP;
1929 		goto error_out;
1930 	default:
1931 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1932 			xform->cipher.algo);
1933 		ret = -ENOTSUP;
1934 		goto error_out;
1935 	}
1936 
1937 	if (bufsize < 0) {
1938 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1939 		ret = -EINVAL;
1940 		goto error_out;
1941 	}
1942 
1943 	flc->word1_sdl = (uint8_t)bufsize;
1944 	session->ctxt = priv;
1945 
1946 #ifdef CAAM_DESC_DEBUG
1947 	int i;
1948 	for (i = 0; i < bufsize; i++)
1949 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1950 #endif
1951 	return ret;
1952 
1953 error_out:
1954 	rte_free(session->cipher_key.data);
1955 	rte_free(priv);
1956 	return ret;
1957 }
1958 
1959 static int
1960 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1961 		    struct rte_crypto_sym_xform *xform,
1962 		    dpaa2_sec_session *session)
1963 {
1964 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1965 	struct alginfo authdata;
1966 	int bufsize, ret = 0;
1967 	struct ctxt_priv *priv;
1968 	struct sec_flow_context *flc;
1969 
1970 	PMD_INIT_FUNC_TRACE();
1971 
1972 	/* For SEC AUTH three descriptors are required for various stages */
1973 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1974 			sizeof(struct ctxt_priv) + 3 *
1975 			sizeof(struct sec_flc_desc),
1976 			RTE_CACHE_LINE_SIZE);
1977 	if (priv == NULL) {
1978 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1979 		return -ENOMEM;
1980 	}
1981 
1982 	priv->fle_pool = dev_priv->fle_pool;
1983 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1984 
1985 	session->ctxt_type = DPAA2_SEC_AUTH;
1986 	session->auth_key.length = xform->auth.key.length;
1987 	if (xform->auth.key.length) {
1988 		session->auth_key.data = rte_zmalloc(NULL,
1989 			xform->auth.key.length,
1990 			RTE_CACHE_LINE_SIZE);
1991 		if (session->auth_key.data == NULL) {
1992 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1993 			rte_free(priv);
1994 			return -ENOMEM;
1995 		}
1996 		memcpy(session->auth_key.data, xform->auth.key.data,
1997 		       xform->auth.key.length);
1998 		authdata.key = (size_t)session->auth_key.data;
1999 		authdata.key_enc_flags = 0;
2000 		authdata.key_type = RTA_DATA_IMM;
2001 	}
2002 	authdata.keylen = session->auth_key.length;
2003 
2004 	session->digest_length = xform->auth.digest_length;
2005 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2006 				DIR_ENC : DIR_DEC;
2007 
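	/*
	 * Note: the hash/HMAC descriptor constructors take a do_icv flag
	 * rather than a direction, so !session->dir is passed below: it
	 * appears to map DIR_DEC to "verify the ICV" and DIR_ENC to
	 * "generate it", given the driver's DIR_ENC/DIR_DEC encoding.
	 */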
2008 	switch (xform->auth.algo) {
2009 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2010 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2011 		authdata.algmode = OP_ALG_AAI_HMAC;
2012 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2013 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2014 					   1, 0, SHR_NEVER, &authdata,
2015 					   !session->dir,
2016 					   session->digest_length);
2017 		break;
2018 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2019 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2020 		authdata.algmode = OP_ALG_AAI_HMAC;
2021 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2022 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2023 					   1, 0, SHR_NEVER, &authdata,
2024 					   !session->dir,
2025 					   session->digest_length);
2026 		break;
2027 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2028 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2029 		authdata.algmode = OP_ALG_AAI_HMAC;
2030 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2031 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2032 					   1, 0, SHR_NEVER, &authdata,
2033 					   !session->dir,
2034 					   session->digest_length);
2035 		break;
2036 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2037 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2038 		authdata.algmode = OP_ALG_AAI_HMAC;
2039 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2040 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2041 					   1, 0, SHR_NEVER, &authdata,
2042 					   !session->dir,
2043 					   session->digest_length);
2044 		break;
2045 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2046 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2047 		authdata.algmode = OP_ALG_AAI_HMAC;
2048 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2049 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2050 					   1, 0, SHR_NEVER, &authdata,
2051 					   !session->dir,
2052 					   session->digest_length);
2053 		break;
2054 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2055 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2056 		authdata.algmode = OP_ALG_AAI_HMAC;
2057 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2058 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2059 					   1, 0, SHR_NEVER, &authdata,
2060 					   !session->dir,
2061 					   session->digest_length);
2062 		break;
2063 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2064 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2065 		authdata.algmode = OP_ALG_AAI_F9;
2066 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2067 		session->iv.offset = xform->auth.iv.offset;
2068 		session->iv.length = xform->auth.iv.length;
2069 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2070 					      1, 0, &authdata,
2071 					      !session->dir,
2072 					      session->digest_length);
2073 		break;
2074 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2075 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2076 		authdata.algmode = OP_ALG_AAI_F9;
2077 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2078 		session->iv.offset = xform->auth.iv.offset;
2079 		session->iv.length = xform->auth.iv.length;
2080 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2081 					   1, 0, &authdata,
2082 					   !session->dir,
2083 					   session->digest_length);
2084 		break;
2085 	case RTE_CRYPTO_AUTH_SHA1:
2086 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2087 		authdata.algmode = OP_ALG_AAI_HASH;
2088 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2089 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2090 					   1, 0, SHR_NEVER, &authdata,
2091 					   !session->dir,
2092 					   session->digest_length);
2093 		break;
2094 	case RTE_CRYPTO_AUTH_MD5:
2095 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2096 		authdata.algmode = OP_ALG_AAI_HASH;
2097 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2098 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2099 					   1, 0, SHR_NEVER, &authdata,
2100 					   !session->dir,
2101 					   session->digest_length);
2102 		break;
2103 	case RTE_CRYPTO_AUTH_SHA256:
2104 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2105 		authdata.algmode = OP_ALG_AAI_HASH;
2106 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2107 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2108 					   1, 0, SHR_NEVER, &authdata,
2109 					   !session->dir,
2110 					   session->digest_length);
2111 		break;
2112 	case RTE_CRYPTO_AUTH_SHA384:
2113 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2114 		authdata.algmode = OP_ALG_AAI_HASH;
2115 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2116 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2117 					   1, 0, SHR_NEVER, &authdata,
2118 					   !session->dir,
2119 					   session->digest_length);
2120 		break;
2121 	case RTE_CRYPTO_AUTH_SHA512:
2122 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2123 		authdata.algmode = OP_ALG_AAI_HASH;
2124 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2125 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2126 					   1, 0, SHR_NEVER, &authdata,
2127 					   !session->dir,
2128 					   session->digest_length);
2129 		break;
2130 	case RTE_CRYPTO_AUTH_SHA224:
2131 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2132 		authdata.algmode = OP_ALG_AAI_HASH;
2133 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2134 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2135 					   1, 0, SHR_NEVER, &authdata,
2136 					   !session->dir,
2137 					   session->digest_length);
2138 		break;
2139 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2140 		authdata.algtype = OP_ALG_ALGSEL_AES;
2141 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2142 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2143 		bufsize = cnstr_shdsc_aes_mac(
2144 					priv->flc_desc[DESC_INITFINAL].desc,
2145 					1, 0, SHR_NEVER, &authdata,
2146 					!session->dir,
2147 					session->digest_length);
2148 		break;
2149 	case RTE_CRYPTO_AUTH_AES_CMAC:
2150 		authdata.algtype = OP_ALG_ALGSEL_AES;
2151 		authdata.algmode = OP_ALG_AAI_CMAC;
2152 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2153 		bufsize = cnstr_shdsc_aes_mac(
2154 					   priv->flc_desc[DESC_INITFINAL].desc,
2155 					   1, 0, SHR_NEVER, &authdata,
2156 					   !session->dir,
2157 					   session->digest_length);
2158 		break;
2159 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2160 	case RTE_CRYPTO_AUTH_AES_GMAC:
2161 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2162 	case RTE_CRYPTO_AUTH_NULL:
2163 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2164 			      xform->auth.algo);
2165 		ret = -ENOTSUP;
2166 		goto error_out;
2167 	default:
2168 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2169 			      xform->auth.algo);
2170 		ret = -ENOTSUP;
2171 		goto error_out;
2172 	}
2173 
2174 	if (bufsize < 0) {
2175 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2176 		ret = -EINVAL;
2177 		goto error_out;
2178 	}
2179 
2180 	flc->word1_sdl = (uint8_t)bufsize;
2181 	session->ctxt = priv;
2182 #ifdef CAAM_DESC_DEBUG
2183 	int i;
2184 	for (i = 0; i < bufsize; i++)
2185 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2186 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2187 #endif
2188 
2189 	return ret;
2190 
2191 error_out:
2192 	rte_free(session->auth_key.data);
2193 	rte_free(priv);
2194 	return ret;
2195 }
2196 
2197 static int
2198 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2199 		    struct rte_crypto_sym_xform *xform,
2200 		    dpaa2_sec_session *session)
2201 {
2202 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2203 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2204 	struct alginfo aeaddata;
2205 	int bufsize;
2206 	struct ctxt_priv *priv;
2207 	struct sec_flow_context *flc;
2208 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2209 	int err, ret = 0;
2210 
2211 	PMD_INIT_FUNC_TRACE();
2212 
2213 	/* Set IV parameters */
2214 	session->iv.offset = aead_xform->iv.offset;
2215 	session->iv.length = aead_xform->iv.length;
2216 	session->ctxt_type = DPAA2_SEC_AEAD;
2217 
2218 	/* For SEC AEAD only one descriptor is required */
2219 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2220 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2221 			RTE_CACHE_LINE_SIZE);
2222 	if (priv == NULL) {
2223 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2224 		return -ENOMEM;
2225 	}
2226 
2227 	priv->fle_pool = dev_priv->fle_pool;
2228 	flc = &priv->flc_desc[0].flc;
2229 
2230 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2231 					       RTE_CACHE_LINE_SIZE);
2232 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2233 		DPAA2_SEC_ERR("No Memory for aead key");
2234 		rte_free(priv);
2235 		return -ENOMEM;
2236 	}
2237 	memcpy(session->aead_key.data, aead_xform->key.data,
2238 	       aead_xform->key.length);
2239 
2240 	session->digest_length = aead_xform->digest_length;
2241 	session->aead_key.length = aead_xform->key.length;
2242 	ctxt->auth_only_len = aead_xform->aad_length;
2243 
2244 	aeaddata.key = (size_t)session->aead_key.data;
2245 	aeaddata.keylen = session->aead_key.length;
2246 	aeaddata.key_enc_flags = 0;
2247 	aeaddata.key_type = RTA_DATA_IMM;
2248 
2249 	switch (aead_xform->algo) {
2250 	case RTE_CRYPTO_AEAD_AES_GCM:
2251 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2252 		aeaddata.algmode = OP_ALG_AAI_GCM;
2253 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2254 		break;
2255 	case RTE_CRYPTO_AEAD_AES_CCM:
2256 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2257 			      aead_xform->algo);
2258 		ret = -ENOTSUP;
2259 		goto error_out;
2260 	default:
2261 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2262 			      aead_xform->algo);
2263 		ret = -ENOTSUP;
2264 		goto error_out;
2265 	}
2266 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2267 				DIR_ENC : DIR_DEC;
2268 
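	/*
	 * Decide whether the key can live inline in the shared
	 * descriptor: the key length is stashed temporarily in desc[0],
	 * and rta_inline_query() reports in desc[1] (bit 0) whether it
	 * fits within the descriptor space budget after accounting for
	 * the job descriptor overhead. A key that does not fit is
	 * referenced by IOVA pointer instead.
	 */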
2269 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2270 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2271 			       DESC_JOB_IO_LEN,
2272 			       (unsigned int *)priv->flc_desc[0].desc,
2273 			       &priv->flc_desc[0].desc[1], 1);
2274 
2275 	if (err < 0) {
2276 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2277 		ret = -EINVAL;
2278 		goto error_out;
2279 	}
2280 	if (priv->flc_desc[0].desc[1] & 1) {
2281 		aeaddata.key_type = RTA_DATA_IMM;
2282 	} else {
2283 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2284 		aeaddata.key_type = RTA_DATA_PTR;
2285 	}
2286 	priv->flc_desc[0].desc[0] = 0;
2287 	priv->flc_desc[0].desc[1] = 0;
2288 
2289 	if (session->dir == DIR_ENC)
2290 		bufsize = cnstr_shdsc_gcm_encap(
2291 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2292 				&aeaddata, session->iv.length,
2293 				session->digest_length);
2294 	else
2295 		bufsize = cnstr_shdsc_gcm_decap(
2296 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2297 				&aeaddata, session->iv.length,
2298 				session->digest_length);
2299 	if (bufsize < 0) {
2300 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2301 		ret = -EINVAL;
2302 		goto error_out;
2303 	}
2304 
2305 	flc->word1_sdl = (uint8_t)bufsize;
2306 	session->ctxt = priv;
2307 #ifdef CAAM_DESC_DEBUG
2308 	int i;
2309 	for (i = 0; i < bufsize; i++)
2310 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2311 			    i, priv->flc_desc[0].desc[i]);
2312 #endif
2313 	return ret;
2314 
2315 error_out:
2316 	rte_free(session->aead_key.data);
2317 	rte_free(priv);
2318 	return ret;
2319 }
2320 
2321 
2322 static int
2323 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2324 		    struct rte_crypto_sym_xform *xform,
2325 		    dpaa2_sec_session *session)
2326 {
2327 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2328 	struct alginfo authdata, cipherdata;
2329 	int bufsize;
2330 	struct ctxt_priv *priv;
2331 	struct sec_flow_context *flc;
2332 	struct rte_crypto_cipher_xform *cipher_xform;
2333 	struct rte_crypto_auth_xform *auth_xform;
2334 	int err, ret = 0;
2335 
2336 	PMD_INIT_FUNC_TRACE();
2337 
2338 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2339 		cipher_xform = &xform->cipher;
2340 		auth_xform = &xform->next->auth;
2341 		session->ctxt_type =
2342 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2343 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2344 	} else {
2345 		cipher_xform = &xform->next->cipher;
2346 		auth_xform = &xform->auth;
2347 		session->ctxt_type =
2348 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2349 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2350 	}
2351 
2352 	/* Set IV parameters */
2353 	session->iv.offset = cipher_xform->iv.offset;
2354 	session->iv.length = cipher_xform->iv.length;
2355 
2356 	/* For SEC cipher + auth chaining, only one descriptor is required */
2357 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2358 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2359 			RTE_CACHE_LINE_SIZE);
2360 	if (priv == NULL) {
2361 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2362 		return -ENOMEM;
2363 	}
2364 
2365 	priv->fle_pool = dev_priv->fle_pool;
2366 	flc = &priv->flc_desc[0].flc;
2367 
2368 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2369 					       RTE_CACHE_LINE_SIZE);
2370 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2371 		DPAA2_SEC_ERR("No Memory for cipher key");
2372 		rte_free(priv);
2373 		return -ENOMEM;
2374 	}
2375 	session->cipher_key.length = cipher_xform->key.length;
2376 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2377 					     RTE_CACHE_LINE_SIZE);
2378 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2379 		DPAA2_SEC_ERR("No Memory for auth key");
2380 		rte_free(session->cipher_key.data);
2381 		rte_free(priv);
2382 		return -ENOMEM;
2383 	}
2384 	session->auth_key.length = auth_xform->key.length;
2385 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2386 	       cipher_xform->key.length);
2387 	memcpy(session->auth_key.data, auth_xform->key.data,
2388 	       auth_xform->key.length);
2389 
2390 	authdata.key = (size_t)session->auth_key.data;
2391 	authdata.keylen = session->auth_key.length;
2392 	authdata.key_enc_flags = 0;
2393 	authdata.key_type = RTA_DATA_IMM;
2394 
2395 	session->digest_length = auth_xform->digest_length;
2396 
2397 	switch (auth_xform->algo) {
2398 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2399 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2400 		authdata.algmode = OP_ALG_AAI_HMAC;
2401 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2402 		break;
2403 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2404 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2405 		authdata.algmode = OP_ALG_AAI_HMAC;
2406 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2407 		break;
2408 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2409 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2410 		authdata.algmode = OP_ALG_AAI_HMAC;
2411 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2412 		break;
2413 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2414 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2415 		authdata.algmode = OP_ALG_AAI_HMAC;
2416 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2417 		break;
2418 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2419 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2420 		authdata.algmode = OP_ALG_AAI_HMAC;
2421 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2422 		break;
2423 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2424 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2425 		authdata.algmode = OP_ALG_AAI_HMAC;
2426 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2427 		break;
2428 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2429 		authdata.algtype = OP_ALG_ALGSEL_AES;
2430 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2431 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2432 		break;
2433 	case RTE_CRYPTO_AUTH_AES_CMAC:
2434 		authdata.algtype = OP_ALG_ALGSEL_AES;
2435 		authdata.algmode = OP_ALG_AAI_CMAC;
2436 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2437 		break;
2438 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2439 	case RTE_CRYPTO_AUTH_AES_GMAC:
2440 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2441 	case RTE_CRYPTO_AUTH_NULL:
2442 	case RTE_CRYPTO_AUTH_SHA1:
2443 	case RTE_CRYPTO_AUTH_SHA256:
2444 	case RTE_CRYPTO_AUTH_SHA512:
2445 	case RTE_CRYPTO_AUTH_SHA224:
2446 	case RTE_CRYPTO_AUTH_SHA384:
2447 	case RTE_CRYPTO_AUTH_MD5:
2448 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2449 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2450 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2451 			      auth_xform->algo);
2452 		ret = -ENOTSUP;
2453 		goto error_out;
2454 	default:
2455 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2456 			      auth_xform->algo);
2457 		ret = -ENOTSUP;
2458 		goto error_out;
2459 	}
2460 	cipherdata.key = (size_t)session->cipher_key.data;
2461 	cipherdata.keylen = session->cipher_key.length;
2462 	cipherdata.key_enc_flags = 0;
2463 	cipherdata.key_type = RTA_DATA_IMM;
2464 
2465 	switch (cipher_xform->algo) {
2466 	case RTE_CRYPTO_CIPHER_AES_CBC:
2467 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2468 		cipherdata.algmode = OP_ALG_AAI_CBC;
2469 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2470 		break;
2471 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2472 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2473 		cipherdata.algmode = OP_ALG_AAI_CBC;
2474 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2475 		break;
2476 	case RTE_CRYPTO_CIPHER_DES_CBC:
2477 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2478 		cipherdata.algmode = OP_ALG_AAI_CBC;
2479 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2480 		break;
2481 	case RTE_CRYPTO_CIPHER_AES_CTR:
2482 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2483 		cipherdata.algmode = OP_ALG_AAI_CTR;
2484 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2485 		break;
2486 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2487 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2488 	case RTE_CRYPTO_CIPHER_NULL:
2489 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2490 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2491 	case RTE_CRYPTO_CIPHER_AES_ECB:
2492 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2493 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2494 			      cipher_xform->algo);
2495 		ret = -ENOTSUP;
2496 		goto error_out;
2497 	default:
2498 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2499 			      cipher_xform->algo);
2500 		ret = -ENOTSUP;
2501 		goto error_out;
2502 	}
2503 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2504 				DIR_ENC : DIR_DEC;
2505 
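	/*
	 * Same inline-vs-pointer decision as in the single-key case,
	 * but for two keys at once: desc[2] receives a bitmask in which
	 * bit 0 covers the cipher key and bit 1 covers the auth key.
	 */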
2506 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2507 	priv->flc_desc[0].desc[1] = authdata.keylen;
2508 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2509 			       DESC_JOB_IO_LEN,
2510 			       (unsigned int *)priv->flc_desc[0].desc,
2511 			       &priv->flc_desc[0].desc[2], 2);
2512 
2513 	if (err < 0) {
2514 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2515 		ret = -EINVAL;
2516 		goto error_out;
2517 	}
2518 	if (priv->flc_desc[0].desc[2] & 1) {
2519 		cipherdata.key_type = RTA_DATA_IMM;
2520 	} else {
2521 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2522 		cipherdata.key_type = RTA_DATA_PTR;
2523 	}
2524 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2525 		authdata.key_type = RTA_DATA_IMM;
2526 	} else {
2527 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2528 		authdata.key_type = RTA_DATA_PTR;
2529 	}
2530 	priv->flc_desc[0].desc[0] = 0;
2531 	priv->flc_desc[0].desc[1] = 0;
2532 	priv->flc_desc[0].desc[2] = 0;
2533 
2534 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2535 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2536 					      0, SHR_SERIAL,
2537 					      &cipherdata, &authdata,
2538 					      session->iv.length,
2539 					      session->digest_length,
2540 					      session->dir);
2541 		if (bufsize < 0) {
2542 			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2543 			ret = -EINVAL;
2544 			goto error_out;
2545 		}
2546 	} else {
2547 		DPAA2_SEC_ERR("Hash before cipher not supported");
2548 		ret = -ENOTSUP;
2549 		goto error_out;
2550 	}
2551 
2552 	flc->word1_sdl = (uint8_t)bufsize;
2553 	session->ctxt = priv;
2554 #ifdef CAAM_DESC_DEBUG
2555 	int i;
2556 	for (i = 0; i < bufsize; i++)
2557 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2558 			    i, priv->flc_desc[0].desc[i]);
2559 #endif
2560 
2561 	return ret;
2562 
2563 error_out:
2564 	rte_free(session->cipher_key.data);
2565 	rte_free(session->auth_key.data);
2566 	rte_free(priv);
2567 	return ret;
2568 }
2569 
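/*
 * The dispatcher below walks the xform chain and picks the matching
 * session initializer. An application might build a cipher-then-auth
 * chain like this (a sketch; key and IV fields are omitted):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			  .op = RTE_CRYPTO_AUTH_OP_GENERATE, },
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT, },
 *	};
 */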
2570 static int
2571 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2572 			    struct rte_crypto_sym_xform *xform,	void *sess)
2573 {
2574 	dpaa2_sec_session *session = sess;
2575 	int ret;
2576 
2577 	PMD_INIT_FUNC_TRACE();
2578 
2579 	if (unlikely(sess == NULL)) {
2580 		DPAA2_SEC_ERR("Invalid session struct");
2581 		return -EINVAL;
2582 	}
2583 
2584 	memset(session, 0, sizeof(dpaa2_sec_session));
2585 	/* Default IV length = 0 */
2586 	session->iv.length = 0;
2587 
2588 	/* Cipher Only */
2589 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2590 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2591 
2592 	/* Authentication Only */
2593 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2594 		   xform->next == NULL) {
2595 		ret = dpaa2_sec_auth_init(dev, xform, session);
2596 
2597 	/* Cipher then Authenticate */
2598 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2599 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2600 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2601 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2602 			ret = dpaa2_sec_auth_init(dev, xform, session);
2603 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2604 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2605 		else
2606 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2607 	/* Authenticate then Cipher */
2608 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2609 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2610 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2611 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2612 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2613 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2614 			ret = dpaa2_sec_auth_init(dev, xform, session);
2615 		else
2616 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2617 	/* AEAD operation for AES-GCM style algorithms */
2618 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2619 		   xform->next == NULL) {
2620 		ret = dpaa2_sec_aead_init(dev, xform, session);
2621 
2622 	} else {
2623 		DPAA2_SEC_ERR("Invalid crypto type");
2624 		return -EINVAL;
2625 	}
2626 
2627 	return ret;
2628 }
2629 
2630 #ifdef RTE_LIB_SECURITY
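/*
 * For IPsec lookaside offload the AEAD algorithm selector also encodes
 * the ICV length: OP_PCL_IPSEC_AES_GCM8/12/16 (and the CCM variants)
 * correspond to 8-, 12- and 16-byte digests, hence the switch on
 * session->digest_length below.
 */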
2631 static int
2632 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2633 			dpaa2_sec_session *session,
2634 			struct alginfo *aeaddata)
2635 {
2636 	PMD_INIT_FUNC_TRACE();
2637 
2638 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2639 					       RTE_CACHE_LINE_SIZE);
2640 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2641 		DPAA2_SEC_ERR("No Memory for aead key");
2642 		return -ENOMEM;
2643 	}
2644 	memcpy(session->aead_key.data, aead_xform->key.data,
2645 	       aead_xform->key.length);
2646 
2647 	session->digest_length = aead_xform->digest_length;
2648 	session->aead_key.length = aead_xform->key.length;
2649 
2650 	aeaddata->key = (size_t)session->aead_key.data;
2651 	aeaddata->keylen = session->aead_key.length;
2652 	aeaddata->key_enc_flags = 0;
2653 	aeaddata->key_type = RTA_DATA_IMM;
2654 
2655 	switch (aead_xform->algo) {
2656 	case RTE_CRYPTO_AEAD_AES_GCM:
2657 		switch (session->digest_length) {
2658 		case 8:
2659 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2660 			break;
2661 		case 12:
2662 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2663 			break;
2664 		case 16:
2665 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2666 			break;
2667 		default:
2668 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2669 				      session->digest_length);
2670 			return -EINVAL;
2671 		}
2672 		aeaddata->algmode = OP_ALG_AAI_GCM;
2673 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2674 		break;
2675 	case RTE_CRYPTO_AEAD_AES_CCM:
2676 		switch (session->digest_length) {
2677 		case 8:
2678 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2679 			break;
2680 		case 12:
2681 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2682 			break;
2683 		case 16:
2684 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2685 			break;
2686 		default:
2687 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2688 				      session->digest_length);
2689 			return -EINVAL;
2690 		}
2691 		aeaddata->algmode = OP_ALG_AAI_CCM;
2692 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2693 		break;
2694 	default:
2695 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2696 			      aead_xform->algo);
2697 		return -ENOTSUP;
2698 	}
2699 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2700 				DIR_ENC : DIR_DEC;
2701 
2702 	return 0;
2703 }
2704 
2705 static int
2706 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2707 	struct rte_crypto_auth_xform *auth_xform,
2708 	dpaa2_sec_session *session,
2709 	struct alginfo *cipherdata,
2710 	struct alginfo *authdata)
2711 {
2712 	if (cipher_xform) {
2713 		session->cipher_key.data = rte_zmalloc(NULL,
2714 						       cipher_xform->key.length,
2715 						       RTE_CACHE_LINE_SIZE);
2716 		if (session->cipher_key.data == NULL &&
2717 				cipher_xform->key.length > 0) {
2718 			DPAA2_SEC_ERR("No Memory for cipher key");
2719 			return -ENOMEM;
2720 		}
2721 
2722 		session->cipher_key.length = cipher_xform->key.length;
2723 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2724 				cipher_xform->key.length);
2725 		session->cipher_alg = cipher_xform->algo;
2726 	} else {
2727 		session->cipher_key.data = NULL;
2728 		session->cipher_key.length = 0;
2729 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2730 	}
2731 
2732 	if (auth_xform) {
2733 		session->auth_key.data = rte_zmalloc(NULL,
2734 						auth_xform->key.length,
2735 						RTE_CACHE_LINE_SIZE);
2736 		if (session->auth_key.data == NULL &&
2737 				auth_xform->key.length > 0) {
2738 			DPAA2_SEC_ERR("No Memory for auth key");
2739 			return -ENOMEM;
2740 		}
2741 		session->auth_key.length = auth_xform->key.length;
2742 		memcpy(session->auth_key.data, auth_xform->key.data,
2743 				auth_xform->key.length);
2744 		session->auth_alg = auth_xform->algo;
2745 		session->digest_length = auth_xform->digest_length;
2746 	} else {
2747 		session->auth_key.data = NULL;
2748 		session->auth_key.length = 0;
2749 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2750 	}
2751 
2752 	authdata->key = (size_t)session->auth_key.data;
2753 	authdata->keylen = session->auth_key.length;
2754 	authdata->key_enc_flags = 0;
2755 	authdata->key_type = RTA_DATA_IMM;
2756 	switch (session->auth_alg) {
2757 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2758 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2759 		authdata->algmode = OP_ALG_AAI_HMAC;
2760 		break;
2761 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2762 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2763 		authdata->algmode = OP_ALG_AAI_HMAC;
2764 		break;
2765 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2766 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2767 		authdata->algmode = OP_ALG_AAI_HMAC;
2768 		if (session->digest_length != 16)
2769 			DPAA2_SEC_WARN(
2770 			"Using a non-standard truncated length with sha256-hmac; "
2771 			"it will not work with lookaside protocol offload");
2772 		break;
2773 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2774 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2775 		authdata->algmode = OP_ALG_AAI_HMAC;
2776 		break;
2777 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2778 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2779 		authdata->algmode = OP_ALG_AAI_HMAC;
2780 		break;
2781 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2782 		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2783 		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
2784 		break;
2785 	case RTE_CRYPTO_AUTH_AES_CMAC:
2786 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2787 		authdata->algmode = OP_ALG_AAI_CMAC;
2788 		break;
2789 	case RTE_CRYPTO_AUTH_NULL:
2790 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2791 		break;
2792 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2793 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2794 	case RTE_CRYPTO_AUTH_SHA1:
2795 	case RTE_CRYPTO_AUTH_SHA256:
2796 	case RTE_CRYPTO_AUTH_SHA512:
2797 	case RTE_CRYPTO_AUTH_SHA224:
2798 	case RTE_CRYPTO_AUTH_SHA384:
2799 	case RTE_CRYPTO_AUTH_MD5:
2800 	case RTE_CRYPTO_AUTH_AES_GMAC:
2801 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2802 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2803 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2804 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2805 			      session->auth_alg);
2806 		return -ENOTSUP;
2807 	default:
2808 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2809 			      session->auth_alg);
2810 		return -ENOTSUP;
2811 	}
2812 	cipherdata->key = (size_t)session->cipher_key.data;
2813 	cipherdata->keylen = session->cipher_key.length;
2814 	cipherdata->key_enc_flags = 0;
2815 	cipherdata->key_type = RTA_DATA_IMM;
2816 
2817 	switch (session->cipher_alg) {
2818 	case RTE_CRYPTO_CIPHER_AES_CBC:
2819 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2820 		cipherdata->algmode = OP_ALG_AAI_CBC;
2821 		break;
2822 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2823 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2824 		cipherdata->algmode = OP_ALG_AAI_CBC;
2825 		break;
2826 	case RTE_CRYPTO_CIPHER_DES_CBC:
2827 		cipherdata->algtype = OP_PCL_IPSEC_DES;
2828 		cipherdata->algmode = OP_ALG_AAI_CBC;
2829 		break;
2830 	case RTE_CRYPTO_CIPHER_AES_CTR:
2831 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2832 		cipherdata->algmode = OP_ALG_AAI_CTR;
2833 		break;
2834 	case RTE_CRYPTO_CIPHER_NULL:
2835 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2836 		break;
2837 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2838 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2839 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2840 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2841 	case RTE_CRYPTO_CIPHER_AES_ECB:
2842 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2843 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2844 			      session->cipher_alg);
2845 		return -ENOTSUP;
2846 	default:
2847 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2848 			      session->cipher_alg);
2849 		return -ENOTSUP;
2850 	}
2851 
2852 	return 0;
2853 }
2854 
2855 static int
2856 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2857 			    struct rte_security_session_conf *conf,
2858 			    void *sess)
2859 {
2860 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2861 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2862 	struct rte_crypto_auth_xform *auth_xform = NULL;
2863 	struct rte_crypto_aead_xform *aead_xform = NULL;
2864 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2865 	struct ctxt_priv *priv;
2866 	struct alginfo authdata, cipherdata;
2867 	int bufsize;
2868 	struct sec_flow_context *flc;
2869 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2870 	int ret = -1;
2871 
2872 	PMD_INIT_FUNC_TRACE();
2873 
2874 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2875 				sizeof(struct ctxt_priv) +
2876 				sizeof(struct sec_flc_desc),
2877 				RTE_CACHE_LINE_SIZE);
2878 
2879 	if (priv == NULL) {
2880 		DPAA2_SEC_ERR("No memory for priv CTXT");
2881 		return -ENOMEM;
2882 	}
2883 
2884 	priv->fle_pool = dev_priv->fle_pool;
2885 	flc = &priv->flc_desc[0].flc;
2886 
2887 	memset(session, 0, sizeof(dpaa2_sec_session));
2888 
2889 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2890 		cipher_xform = &conf->crypto_xform->cipher;
2891 		if (conf->crypto_xform->next)
2892 			auth_xform = &conf->crypto_xform->next->auth;
2893 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2894 					session, &cipherdata, &authdata);
2895 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2896 		auth_xform = &conf->crypto_xform->auth;
2897 		if (conf->crypto_xform->next)
2898 			cipher_xform = &conf->crypto_xform->next->cipher;
2899 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2900 					session, &cipherdata, &authdata);
2901 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2902 		aead_xform = &conf->crypto_xform->aead;
2903 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2904 					session, &cipherdata);
2905 		authdata.keylen = 0;
2906 		authdata.algtype = 0;
2907 	} else {
2908 		DPAA2_SEC_ERR("XFORM not specified");
2909 		ret = -EINVAL;
2910 		goto out;
2911 	}
2912 	if (ret) {
2913 		DPAA2_SEC_ERR("Failed to process xform");
2914 		goto out;
2915 	}
2916 
2917 	session->ctxt_type = DPAA2_SEC_IPSEC;
2918 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2919 		uint8_t *hdr = NULL;
2920 		struct ip ip4_hdr;
2921 		struct rte_ipv6_hdr ip6_hdr;
2922 		struct ipsec_encap_pdb encap_pdb;
2923 
2924 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2925 		/* For Sec Proto only one descriptor is required. */
2926 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2927 
2928 		/* copy algo specific data to PDB */
2929 		switch (cipherdata.algtype) {
2930 		case OP_PCL_IPSEC_AES_CTR:
2931 			encap_pdb.ctr.ctr_initial = 0x00000001;
2932 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2933 			break;
2934 		case OP_PCL_IPSEC_AES_GCM8:
2935 		case OP_PCL_IPSEC_AES_GCM12:
2936 		case OP_PCL_IPSEC_AES_GCM16:
2937 			memcpy(encap_pdb.gcm.salt,
2938 				(uint8_t *)&(ipsec_xform->salt), 4);
2939 			break;
2940 		}
2941 
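		/*
		 * Encap PDB option bits, on a best-effort reading of the
		 * flib flags: outer IP header supplied inline in the PDB,
		 * IV generated by the SEC-internal source, TTL decremented
		 * on encap, and sequence-number rollover permitted.
		 */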
2942 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2943 			PDBOPTS_ESP_OIHI_PDB_INL |
2944 			PDBOPTS_ESP_IVSRC |
2945 			PDBHMO_ESP_ENCAP_DTTL |
2946 			PDBHMO_ESP_SNR;
2947 		if (ipsec_xform->options.esn)
2948 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2949 		encap_pdb.spi = ipsec_xform->spi;
2950 		session->dir = DIR_ENC;
2951 		if (ipsec_xform->tunnel.type ==
2952 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2953 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2954 			ip4_hdr.ip_v = IPVERSION;
2955 			ip4_hdr.ip_hl = 5;
2956 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2957 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2958 			ip4_hdr.ip_id = 0;
2959 			ip4_hdr.ip_off = 0;
2960 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2961 			ip4_hdr.ip_p = IPPROTO_ESP;
2962 			ip4_hdr.ip_sum = 0;
2963 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2964 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2965 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2966 					&ip4_hdr, sizeof(struct ip));
2967 			hdr = (uint8_t *)&ip4_hdr;
2968 		} else if (ipsec_xform->tunnel.type ==
2969 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2970 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2971 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2972 				((ipsec_xform->tunnel.ipv6.dscp <<
2973 					RTE_IPV6_HDR_TC_SHIFT) &
2974 					RTE_IPV6_HDR_TC_MASK) |
2975 				((ipsec_xform->tunnel.ipv6.flabel <<
2976 					RTE_IPV6_HDR_FL_SHIFT) &
2977 					RTE_IPV6_HDR_FL_MASK));
2978 			/* Payload length will be updated by HW */
2979 			ip6_hdr.payload_len = 0;
2980 			ip6_hdr.hop_limits =
2981 					ipsec_xform->tunnel.ipv6.hlimit;
2982 			ip6_hdr.proto = (ipsec_xform->proto ==
2983 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2984 					IPPROTO_ESP : IPPROTO_AH;
2985 			memcpy(&ip6_hdr.src_addr,
2986 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2987 			memcpy(&ip6_hdr.dst_addr,
2988 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2989 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2990 			hdr = (uint8_t *)&ip6_hdr;
2991 		}
2992 
2993 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2994 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2995 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
2996 				hdr, &cipherdata, &authdata);
2997 	} else if (ipsec_xform->direction ==
2998 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2999 		struct ipsec_decap_pdb decap_pdb;
3000 
3001 		flc->dhr = SEC_FLC_DHR_INBOUND;
3002 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3003 		/* copy algo specific data to PDB */
3004 		switch (cipherdata.algtype) {
3005 		case OP_PCL_IPSEC_AES_CTR:
3006 			decap_pdb.ctr.ctr_initial = 0x00000001;
3007 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3008 			break;
3009 		case OP_PCL_IPSEC_AES_GCM8:
3010 		case OP_PCL_IPSEC_AES_GCM12:
3011 		case OP_PCL_IPSEC_AES_GCM16:
3012 			memcpy(decap_pdb.gcm.salt,
3013 				(uint8_t *)&(ipsec_xform->salt), 4);
3014 			break;
3015 		}
3016 
3017 		decap_pdb.options = (ipsec_xform->tunnel.type ==
3018 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3019 				sizeof(struct ip) << 16 :
3020 				sizeof(struct rte_ipv6_hdr) << 16;
3021 		if (ipsec_xform->options.esn)
3022 			decap_pdb.options |= PDBOPTS_ESP_ESN;
3023 
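		/*
		 * SEC supports a fixed set of anti-replay window sizes:
		 * round the requested window up to a power of two, clamp
		 * it to 128 on pre-era-10 hardware, and map it onto the
		 * nearest supported ARS option below.
		 */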
3024 		if (ipsec_xform->replay_win_sz) {
3025 			uint32_t win_sz;
3026 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3027 
3028 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3029 				DPAA2_SEC_INFO("Max anti-replay window size is 128");
3030 				win_sz = 128;
3031 			}
3032 			switch (win_sz) {
3033 			case 1:
3034 			case 2:
3035 			case 4:
3036 			case 8:
3037 			case 16:
3038 			case 32:
3039 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3040 				break;
3041 			case 64:
3042 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3043 				break;
3044 			case 256:
3045 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3046 				break;
3047 			case 512:
3048 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3049 				break;
3050 			case 1024:
3051 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3052 				break;
3053 			case 128:
3054 			default:
3055 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3056 			}
3057 		}
3058 		session->dir = DIR_DEC;
3059 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3060 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3061 				SHR_WAIT : SHR_SERIAL,
3062 				&decap_pdb, &cipherdata, &authdata);
3063 	} else {
		ret = -EINVAL;
3064 		goto out;
	}
3065 
3066 	if (bufsize < 0) {
3067 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		ret = -EINVAL;
3068 		goto out;
3069 	}
3070 
3071 	flc->word1_sdl = (uint8_t)bufsize;
3072 
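	/*
	 * Program the flow-context RFLC words with the address of the
	 * rx_vq so that each dequeued frame carries its queue context
	 * back to the driver; the OR-ed 0x14 appears to be stash-control
	 * bits rather than part of the address.
	 */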
3073 	/* Enable the stashing control bit */
3074 	DPAA2_SET_FLC_RSC(flc);
3075 	flc->word2_rflc_31_0 = lower_32_bits(
3076 			(size_t)&(((struct dpaa2_sec_qp *)
3077 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3078 	flc->word3_rflc_63_32 = upper_32_bits(
3079 			(size_t)&(((struct dpaa2_sec_qp *)
3080 			dev->data->queue_pairs[0])->rx_vq));
3081 
3082 	/* Set EWS bit i.e. enable write-safe */
3083 	DPAA2_SET_FLC_EWS(flc);
3084 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3085 	DPAA2_SET_FLC_REUSE_BS(flc);
3086 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3087 	DPAA2_SET_FLC_REUSE_FF(flc);
3088 
3089 	session->ctxt = priv;
3090 
3091 	return 0;
3092 out:
3093 	rte_free(session->auth_key.data);
3094 	rte_free(session->cipher_key.data);
3095 	rte_free(priv);
3096 	return ret;
3097 }
3098 
3099 static int
3100 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3101 			   struct rte_security_session_conf *conf,
3102 			   void *sess)
3103 {
3104 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3105 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3106 	struct rte_crypto_auth_xform *auth_xform = NULL;
3107 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3108 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3109 	struct ctxt_priv *priv;
3110 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3111 	struct alginfo authdata, cipherdata;
3112 	struct alginfo *p_authdata = NULL;
3113 	int bufsize = -1;
3114 	struct sec_flow_context *flc;
3115 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3116 	int swap = true;
3117 #else
3118 	int swap = false;
3119 #endif
3120 
3121 	PMD_INIT_FUNC_TRACE();
3122 
3123 	memset(session, 0, sizeof(dpaa2_sec_session));
3124 
3125 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3126 				sizeof(struct ctxt_priv) +
3127 				sizeof(struct sec_flc_desc),
3128 				RTE_CACHE_LINE_SIZE);
3129 
3130 	if (priv == NULL) {
3131 		DPAA2_SEC_ERR("No memory for priv CTXT");
3132 		return -ENOMEM;
3133 	}
3134 
3135 	priv->fle_pool = dev_priv->fle_pool;
3136 	flc = &priv->flc_desc[0].flc;
3137 
3138 	/* find xform types */
3139 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3140 		cipher_xform = &xform->cipher;
3141 		if (xform->next != NULL) {
3142 			session->ext_params.aead_ctxt.auth_cipher_text = true;
3143 			auth_xform = &xform->next->auth;
3144 		}
3145 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3146 		auth_xform = &xform->auth;
3147 		if (xform->next != NULL) {
3148 			session->ext_params.aead_ctxt.auth_cipher_text = false;
3149 			cipher_xform = &xform->next->cipher;
3150 		}
3151 	} else {
3152 		DPAA2_SEC_ERR("Invalid crypto type");
3153 		return -EINVAL;
3154 	}
3155 
3156 	session->ctxt_type = DPAA2_SEC_PDCP;
3157 	if (cipher_xform) {
3158 		session->cipher_key.data = rte_zmalloc(NULL,
3159 					       cipher_xform->key.length,
3160 					       RTE_CACHE_LINE_SIZE);
3161 		if (session->cipher_key.data == NULL &&
3162 				cipher_xform->key.length > 0) {
3163 			DPAA2_SEC_ERR("No Memory for cipher key");
3164 			rte_free(priv);
3165 			return -ENOMEM;
3166 		}
3167 		session->cipher_key.length = cipher_xform->key.length;
3168 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3169 			cipher_xform->key.length);
3170 		session->dir =
3171 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3172 					DIR_ENC : DIR_DEC;
3173 		session->cipher_alg = cipher_xform->algo;
3174 	} else {
3175 		session->cipher_key.data = NULL;
3176 		session->cipher_key.length = 0;
3177 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3178 		session->dir = DIR_ENC;
3179 	}
3180 
3181 	session->pdcp.domain = pdcp_xform->domain;
3182 	session->pdcp.bearer = pdcp_xform->bearer;
3183 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3184 	session->pdcp.sn_size = pdcp_xform->sn_size;
3185 	session->pdcp.hfn = pdcp_xform->hfn;
3186 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3187 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3188 	/* The HFN override offset location is stored in the iv.offset value */
3189 	if (cipher_xform)
3190 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3191 
3192 	cipherdata.key = (size_t)session->cipher_key.data;
3193 	cipherdata.keylen = session->cipher_key.length;
3194 	cipherdata.key_enc_flags = 0;
3195 	cipherdata.key_type = RTA_DATA_IMM;
3196 
3197 	switch (session->cipher_alg) {
3198 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3199 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3200 		break;
3201 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3202 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3203 		break;
3204 	case RTE_CRYPTO_CIPHER_AES_CTR:
3205 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3206 		break;
3207 	case RTE_CRYPTO_CIPHER_NULL:
3208 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3209 		break;
3210 	default:
3211 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3212 			      session->cipher_alg);
3213 		goto out;
3214 	}
3215 
3216 	if (auth_xform) {
3217 		session->auth_key.data = rte_zmalloc(NULL,
3218 						     auth_xform->key.length,
3219 						     RTE_CACHE_LINE_SIZE);
3220 		if (!session->auth_key.data &&
3221 		    auth_xform->key.length > 0) {
3222 			DPAA2_SEC_ERR("No Memory for auth key");
3223 			rte_free(session->cipher_key.data);
3224 			rte_free(priv);
3225 			return -ENOMEM;
3226 		}
3227 		session->auth_key.length = auth_xform->key.length;
3228 		memcpy(session->auth_key.data, auth_xform->key.data,
3229 		       auth_xform->key.length);
3230 		session->auth_alg = auth_xform->algo;
3231 	} else {
3232 		session->auth_key.data = NULL;
3233 		session->auth_key.length = 0;
3234 		session->auth_alg = 0;
3235 	}
3236 	authdata.key = (size_t)session->auth_key.data;
3237 	authdata.keylen = session->auth_key.length;
3238 	authdata.key_enc_flags = 0;
3239 	authdata.key_type = RTA_DATA_IMM;
3240 
3241 	if (session->auth_alg) {
3242 		switch (session->auth_alg) {
3243 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3244 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3245 			break;
3246 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3247 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3248 			break;
3249 		case RTE_CRYPTO_AUTH_AES_CMAC:
3250 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3251 			break;
3252 		case RTE_CRYPTO_AUTH_NULL:
3253 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3254 			break;
3255 		default:
3256 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3257 				      session->auth_alg);
3258 			goto out;
3259 		}
3260 
3261 		p_authdata = &authdata;
3262 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3263 		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3264 		goto out;
3265 	}
3266 
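	/*
	 * PDCP descriptors are large, so the keys may not all fit
	 * inline. The rta_inline_pdcp*_query() helpers report which keys
	 * have to be demoted to pointer references: in the SDAP case a
	 * result of 1 moves the cipher key to an IOVA pointer and 2
	 * moves the auth key as well, while the non-SDAP query covers
	 * only the cipher key.
	 */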
3267 	if (pdcp_xform->sdap_enabled) {
3268 		int nb_keys_to_inline =
3269 			rta_inline_pdcp_sdap_query(authdata.algtype,
3270 					cipherdata.algtype,
3271 					session->pdcp.sn_size,
3272 					session->pdcp.hfn_ovd);
3273 		if (nb_keys_to_inline >= 1) {
3274 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3275 			cipherdata.key_type = RTA_DATA_PTR;
3276 		}
3277 		if (nb_keys_to_inline >= 2) {
3278 			authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3279 			authdata.key_type = RTA_DATA_PTR;
3280 		}
3281 	} else {
3282 		if (rta_inline_pdcp_query(authdata.algtype,
3283 					cipherdata.algtype,
3284 					session->pdcp.sn_size,
3285 					session->pdcp.hfn_ovd)) {
3286 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3287 			cipherdata.key_type = RTA_DATA_PTR;
3288 		}
3289 	}
3290 
3291 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3292 		if (session->dir == DIR_ENC)
3293 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3294 					priv->flc_desc[0].desc, 1, swap,
3295 					pdcp_xform->hfn,
3296 					session->pdcp.sn_size,
3297 					pdcp_xform->bearer,
3298 					pdcp_xform->pkt_dir,
3299 					pdcp_xform->hfn_threshold,
3300 					&cipherdata, &authdata,
3301 					0);
3302 		else if (session->dir == DIR_DEC)
3303 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3304 					priv->flc_desc[0].desc, 1, swap,
3305 					pdcp_xform->hfn,
3306 					session->pdcp.sn_size,
3307 					pdcp_xform->bearer,
3308 					pdcp_xform->pkt_dir,
3309 					pdcp_xform->hfn_threshold,
3310 					&cipherdata, &authdata,
3311 					0);
3312 
3313 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3314 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3315 						     1, swap, &authdata);
3316 	} else {
3317 		if (session->dir == DIR_ENC) {
3318 			if (pdcp_xform->sdap_enabled)
3319 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3320 					priv->flc_desc[0].desc, 1, swap,
3321 					session->pdcp.sn_size,
3322 					pdcp_xform->hfn,
3323 					pdcp_xform->bearer,
3324 					pdcp_xform->pkt_dir,
3325 					pdcp_xform->hfn_threshold,
3326 					&cipherdata, p_authdata, 0);
3327 			else
3328 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3329 					priv->flc_desc[0].desc, 1, swap,
3330 					session->pdcp.sn_size,
3331 					pdcp_xform->hfn,
3332 					pdcp_xform->bearer,
3333 					pdcp_xform->pkt_dir,
3334 					pdcp_xform->hfn_threshold,
3335 					&cipherdata, p_authdata, 0);
3336 		} else if (session->dir == DIR_DEC) {
3337 			if (pdcp_xform->sdap_enabled)
3338 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3339 					priv->flc_desc[0].desc, 1, swap,
3340 					session->pdcp.sn_size,
3341 					pdcp_xform->hfn,
3342 					pdcp_xform->bearer,
3343 					pdcp_xform->pkt_dir,
3344 					pdcp_xform->hfn_threshold,
3345 					&cipherdata, p_authdata, 0);
3346 			else
3347 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3348 					priv->flc_desc[0].desc, 1, swap,
3349 					session->pdcp.sn_size,
3350 					pdcp_xform->hfn,
3351 					pdcp_xform->bearer,
3352 					pdcp_xform->pkt_dir,
3353 					pdcp_xform->hfn_threshold,
3354 					&cipherdata, p_authdata, 0);
3355 		}
3356 	}
3357 
3358 	if (bufsize < 0) {
3359 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
3360 		goto out;
3361 	}
3362 
3363 	/* Enable the stashing control bit */
3364 	DPAA2_SET_FLC_RSC(flc);
3365 	flc->word2_rflc_31_0 = lower_32_bits(
3366 			(size_t)&(((struct dpaa2_sec_qp *)
3367 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3368 	flc->word3_rflc_63_32 = upper_32_bits(
3369 			(size_t)&(((struct dpaa2_sec_qp *)
3370 			dev->data->queue_pairs[0])->rx_vq));
3371 
3372 	flc->word1_sdl = (uint8_t)bufsize;
3373 
3374 	/* TODO - check the performance impact, or make this conditional
3375 	 * on the descriptor type:
3376 	 * set the EWS bit (enable write-safe) via
3377 	 * DPAA2_SET_FLC_EWS(flc);
3378 	 */
3379 
3380 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3381 	DPAA2_SET_FLC_REUSE_BS(flc);
3382 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3383 	DPAA2_SET_FLC_REUSE_FF(flc);
3384 
3385 	session->ctxt = priv;
3386 
3387 	return 0;
3388 out:
3389 	rte_free(session->auth_key.data);
3390 	rte_free(session->cipher_key.data);
3391 	rte_free(priv);
3392 	return -EINVAL;
3393 }
3394 
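/*
 * Illustrative rte_security usage (a sketch; dev_id, conf and mempool
 * names are hypothetical, and the 4-argument session_create() is
 * assumed to match this DPDK generation):
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp,
 *					    sess_priv_mp);
 */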
3395 static int
3396 dpaa2_sec_security_session_create(void *dev,
3397 				  struct rte_security_session_conf *conf,
3398 				  struct rte_security_session *sess,
3399 				  struct rte_mempool *mempool)
3400 {
3401 	void *sess_private_data;
3402 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3403 	int ret;
3404 
3405 	if (rte_mempool_get(mempool, &sess_private_data)) {
3406 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3407 		return -ENOMEM;
3408 	}
3409 
3410 	switch (conf->protocol) {
3411 	case RTE_SECURITY_PROTOCOL_IPSEC:
3412 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3413 				sess_private_data);
3414 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* MACsec is not supported; return the object to the pool */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
3417 	case RTE_SECURITY_PROTOCOL_PDCP:
3418 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3419 				sess_private_data);
3420 		break;
	default:
		/* Unknown protocol; return the object to the pool */
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
3423 	}
3424 	if (ret != 0) {
3425 		DPAA2_SEC_ERR("Failed to configure session parameters");
3426 		/* Return session to mempool */
3427 		rte_mempool_put(mempool, sess_private_data);
3428 		return ret;
3429 	}
3430 
3431 	set_sec_session_private_data(sess, sess_private_data);
3432 
3433 	return ret;
3434 }
3435 
/** Clear the session memory so it doesn't leave key material behind */
3437 static int
3438 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3439 		struct rte_security_session *sess)
3440 {
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
3445 
3446 	if (sess_priv) {
3447 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3448 
3449 		rte_free(s->ctxt);
3450 		rte_free(s->cipher_key.data);
3451 		rte_free(s->auth_key.data);
3452 		memset(s, 0, sizeof(dpaa2_sec_session));
3453 		set_sec_session_private_data(sess, NULL);
3454 		rte_mempool_put(sess_mp, sess_priv);
3455 	}
3456 	return 0;
3457 }
3458 #endif
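
/** Configure a symmetric crypto session from the given transform chain */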
3459 static int
3460 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3461 		struct rte_crypto_sym_xform *xform,
3462 		struct rte_cryptodev_sym_session *sess,
3463 		struct rte_mempool *mempool)
3464 {
3465 	void *sess_private_data;
3466 	int ret;
3467 
3468 	if (rte_mempool_get(mempool, &sess_private_data)) {
3469 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3470 		return -ENOMEM;
3471 	}
3472 
3473 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3474 	if (ret != 0) {
3475 		DPAA2_SEC_ERR("Failed to configure session parameters");
3476 		/* Return session to mempool */
3477 		rte_mempool_put(mempool, sess_private_data);
3478 		return ret;
3479 	}
3480 
3481 	set_sym_session_private_data(sess, dev->driver_id,
3482 		sess_private_data);
3483 
3484 	return 0;
3485 }
3486 
/** Clear the session memory so it doesn't leave key material behind */
3488 static void
3489 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3490 		struct rte_cryptodev_sym_session *sess)
3491 {
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
3496 
	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
3505 	}
3506 }
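
/*
 * Illustrative sketch (not part of this driver): the configure/clear ops
 * above are reached through the generic cryptodev session API, e.g.:
 *
 *	struct rte_cryptodev_sym_session *sess;
 *	int ret;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	ret = rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp);
 *	...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */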
3507 
3508 static int
3509 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3510 			struct rte_cryptodev_config *config __rte_unused)
3511 {
3512 	PMD_INIT_FUNC_TRACE();
3513 
3514 	return 0;
3515 }
3516 
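/**
 * Start the device: enable the DPSECI object through the MC and cache the
 * Rx/Tx frame-queue IDs of each configured queue pair.
 */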
3517 static int
3518 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3519 {
3520 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3521 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3522 	struct dpseci_attr attr;
3523 	struct dpaa2_queue *dpaa2_q;
3524 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3525 					dev->data->queue_pairs;
3526 	struct dpseci_rx_queue_attr rx_attr;
3527 	struct dpseci_tx_queue_attr tx_attr;
3528 	int ret, i;
3529 
3530 	PMD_INIT_FUNC_TRACE();
3531 
3532 	memset(&attr, 0, sizeof(struct dpseci_attr));
3533 
3534 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3535 	if (ret) {
3536 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3537 			      priv->hw_id);
3538 		goto get_attr_failure;
3539 	}
3540 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3541 	if (ret) {
3542 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3543 		goto get_attr_failure;
3544 	}
3545 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3546 		dpaa2_q = &qp[i]->rx_vq;
3547 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3548 				    &rx_attr);
3549 		dpaa2_q->fqid = rx_attr.fqid;
3550 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3551 	}
3552 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3553 		dpaa2_q = &qp[i]->tx_vq;
3554 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3555 				    &tx_attr);
3556 		dpaa2_q->fqid = tx_attr.fqid;
3557 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3558 	}
3559 
3560 	return 0;
3561 get_attr_failure:
3562 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3563 	return -1;
3564 }
3565 
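/** Stop the device: disable and then reset the DPSECI object via the MC */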
3566 static void
3567 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3568 {
3569 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3570 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3571 	int ret;
3572 
3573 	PMD_INIT_FUNC_TRACE();
3574 
3575 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3576 	if (ret) {
		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
			      priv->hw_id);
3579 		return;
3580 	}
3581 
3582 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3583 	if (ret < 0) {
		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3585 		return;
3586 	}
3587 }
3588 
3589 static int
3590 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3591 {
3592 	PMD_INIT_FUNC_TRACE();
3593 
3594 	return 0;
3595 }
3596 
3597 static void
3598 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3599 			struct rte_cryptodev_info *info)
3600 {
3601 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3602 
3603 	PMD_INIT_FUNC_TRACE();
3604 	if (info != NULL) {
3605 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3606 		info->feature_flags = dev->feature_flags;
3607 		info->capabilities = dpaa2_sec_capabilities;
		/* No limit on the number of sessions */
		info->sym.max_nb_sessions = 0;
3610 		info->driver_id = cryptodev_driver_id;
3611 	}
3612 }
3613 
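/**
 * Aggregate the software counters of every initialised queue pair, then
 * read and log the hardware SEC counters over the MC portal.
 */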
static void
dpaa2_sec_stats_get(struct rte_cryptodev *dev,
		    struct rte_cryptodev_stats *stats)
3617 {
3618 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3619 	struct fsl_mc_io dpseci;
3620 	struct dpseci_sec_counters counters = {0};
3621 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3622 					dev->data->queue_pairs;
3623 	int ret, i;
3624 
3625 	PMD_INIT_FUNC_TRACE();
3626 	if (stats == NULL) {
3627 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3628 		return;
3629 	}
3630 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3631 		if (qp == NULL || qp[i] == NULL) {
3632 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3633 			continue;
3634 		}
3635 
3636 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3637 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3638 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3639 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3640 	}
3641 
	/* If a secondary process accesses the stats, the MCP portal in
	 * priv->hw may hold the primary process address. Use the secondary
	 * process's MCP portal address for this object instead.
	 */
3646 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3647 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3648 				      &counters);
3649 	if (ret) {
		DPAA2_SEC_ERR("Reading SEC counters failed");
3651 	} else {
3652 		DPAA2_SEC_INFO("dpseci hardware stats:"
3653 			    "\n\tNum of Requests Dequeued = %" PRIu64
3654 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3655 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3656 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3657 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3658 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3659 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3660 			    counters.dequeued_requests,
3661 			    counters.ob_enc_requests,
3662 			    counters.ib_dec_requests,
3663 			    counters.ob_enc_bytes,
3664 			    counters.ob_prot_bytes,
3665 			    counters.ib_dec_bytes,
3666 			    counters.ib_valid_bytes);
3667 	}
3668 }
3669 
static void
dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3672 {
3673 	int i;
3674 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3675 				   (dev->data->queue_pairs);
3676 
3677 	PMD_INIT_FUNC_TRACE();
3678 
3679 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3680 		if (qp[i] == NULL) {
3681 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3682 			continue;
3683 		}
3684 		qp[i]->tx_vq.rx_pkts = 0;
3685 		qp[i]->tx_vq.tx_pkts = 0;
3686 		qp[i]->tx_vq.err_pkts = 0;
3687 		qp[i]->rx_vq.rx_pkts = 0;
3688 		qp[i]->rx_vq.tx_pkts = 0;
3689 		qp[i]->rx_vq.err_pkts = 0;
3690 	}
3691 }
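
/*
 * Illustrative sketch (not part of this driver): both stats ops above are
 * normally reached through the generic cryptodev API, e.g.:
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */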
3692 
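/**
 * Rx callback for RTE_SCHED_TYPE_PARALLEL: convert the dequeued FD into an
 * rte_event carrying the crypto op and consume the DQRR entry right away.
 */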
3693 static void __rte_hot
3694 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3695 				 const struct qbman_fd *fd,
3696 				 const struct qbman_result *dq,
3697 				 struct dpaa2_queue *rxq,
3698 				 struct rte_event *ev)
3699 {
3700 	/* Prefetching mbuf */
3701 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3702 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3703 
3704 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3705 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3706 
3707 	ev->flow_id = rxq->ev.flow_id;
3708 	ev->sub_event_type = rxq->ev.sub_event_type;
3709 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3710 	ev->op = RTE_EVENT_OP_NEW;
3711 	ev->sched_type = rxq->ev.sched_type;
3712 	ev->queue_id = rxq->ev.queue_id;
3713 	ev->priority = rxq->ev.priority;
3714 	ev->event_ptr = sec_fd_to_mbuf(fd);
3715 
3716 	qbman_swp_dqrr_consume(swp, dq);
3717 }
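
/**
 * Rx callback for RTE_SCHED_TYPE_ATOMIC: convert the FD into an rte_event
 * and hold the DQRR entry in per-lcore state until the application
 * releases the atomic context.
 */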
3718 static void
3719 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3720 				 const struct qbman_fd *fd,
3721 				 const struct qbman_result *dq,
3722 				 struct dpaa2_queue *rxq,
3723 				 struct rte_event *ev)
3724 {
3725 	uint8_t dqrr_index;
	struct rte_crypto_op *crypto_op;
3727 	/* Prefetching mbuf */
3728 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3729 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3730 
3731 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3732 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3733 
3734 	ev->flow_id = rxq->ev.flow_id;
3735 	ev->sub_event_type = rxq->ev.sub_event_type;
3736 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3737 	ev->op = RTE_EVENT_OP_NEW;
3738 	ev->sched_type = rxq->ev.sched_type;
3739 	ev->queue_id = rxq->ev.queue_id;
3740 	ev->priority = rxq->ev.priority;
3741 
	ev->event_ptr = sec_fd_to_mbuf(fd);
	/* Read the op only after event_ptr has been recovered from the FD */
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3743 	dqrr_index = qbman_get_dqrr_idx(dq);
3744 	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
3745 	DPAA2_PER_LCORE_DQRR_SIZE++;
3746 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3747 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3748 }
3749 
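/**
 * Attach a queue pair's Rx queue to a DPCON object so completions are
 * delivered through the event device. Order preservation is enabled for
 * atomic scheduling. Note that the priority scaling below divides by
 * event->priority and therefore assumes a non-zero event priority.
 */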
3750 int
3751 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3752 		int qp_id,
3753 		struct dpaa2_dpcon_dev *dpcon,
3754 		const struct rte_event *event)
3755 {
3756 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3757 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3758 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3759 	struct dpseci_rx_queue_cfg cfg;
3760 	uint8_t priority;
3761 	int ret;
3762 
3763 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3764 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3765 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3766 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3767 	else
3768 		return -EINVAL;
3769 
3770 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3771 		   (dpcon->num_priorities - 1);
3772 
3773 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3774 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3775 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3776 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3777 	cfg.dest_cfg.priority = priority;
3778 
3779 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3780 	cfg.user_ctx = (size_t)(qp);
3781 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3782 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3783 		cfg.order_preservation_en = 1;
3784 	}
3785 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3786 				  qp_id, &cfg);
3787 	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n",
			ret);
3789 		return ret;
3790 	}
3791 
3792 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3793 
3794 	return 0;
3795 }
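
/*
 * Illustrative sketch (not part of this driver) of how an event adapter is
 * expected to use the attach/detach hooks; names other than the two hooks
 * are hypothetical:
 *
 *	struct rte_event ev = {
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.queue_id = ev_qid,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	ret = dpaa2_sec_eventq_attach(cryptodev, qp_id, dpcon, &ev);
 *	...
 *	ret = dpaa2_sec_eventq_detach(cryptodev, qp_id);
 */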
3796 
3797 int
3798 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3799 			int qp_id)
3800 {
3801 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3802 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3803 	struct dpseci_rx_queue_cfg cfg;
3804 	int ret;
3805 
3806 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3807 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3808 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3809 
3810 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3811 				  qp_id, &cfg);
3812 	if (ret)
		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n",
			ret);
3814 
3815 	return ret;
3816 }
3817 
3818 static struct rte_cryptodev_ops crypto_ops = {
3819 	.dev_configure	      = dpaa2_sec_dev_configure,
3820 	.dev_start	      = dpaa2_sec_dev_start,
3821 	.dev_stop	      = dpaa2_sec_dev_stop,
3822 	.dev_close	      = dpaa2_sec_dev_close,
3823 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3824 	.stats_get	      = dpaa2_sec_stats_get,
3825 	.stats_reset	      = dpaa2_sec_stats_reset,
3826 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3827 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3828 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3829 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3830 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3831 };
3832 
3833 #ifdef RTE_LIB_SECURITY
3834 static const struct rte_security_capability *
3835 dpaa2_sec_capabilities_get(void *device __rte_unused)
3836 {
3837 	return dpaa2_sec_security_cap;
3838 }
3839 
3840 static const struct rte_security_ops dpaa2_sec_security_ops = {
3841 	.session_create = dpaa2_sec_security_session_create,
3842 	.session_update = NULL,
3843 	.session_stats_get = NULL,
3844 	.session_destroy = dpaa2_sec_security_session_destroy,
3845 	.set_pkt_metadata = NULL,
3846 	.capabilities_get = dpaa2_sec_capabilities_get
3847 };
3848 #endif
3849 
3850 static int
3851 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3852 {
3853 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3854 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3855 	int ret;
3856 
3857 	PMD_INIT_FUNC_TRACE();
3858 
	/* This function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources (buffer pools,
	 *    dpbp_id).
	 * 2. Close the DPSECI device.
	 * 3. Free the allocated resources.
	 */
3865 
	/* Close the device at the underlying layer */
3867 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3868 	if (ret) {
3869 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3870 		return -1;
3871 	}
3872 
	/* Free the allocated memory for the device private data and dpseci */
3874 	priv->hw = NULL;
3875 	rte_free(dpseci);
3876 	rte_free(dev->security_ctx);
3877 	rte_mempool_free(priv->fle_pool);
3878 
3879 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3880 		       dev->data->name, rte_socket_id());
3881 
3882 	return 0;
3883 }
3884 
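/**
 * Per-device initialisation: wire up the ops and feature flags and, in the
 * primary process only, create the security context, open the DPSECI
 * object through the MC, and create the per-device FLE mempool.
 */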
3885 static int
3886 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3887 {
3888 	struct dpaa2_sec_dev_private *internals;
3889 	struct rte_device *dev = cryptodev->device;
3890 	struct rte_dpaa2_device *dpaa2_dev;
3891 #ifdef RTE_LIB_SECURITY
3892 	struct rte_security_ctx *security_instance;
3893 #endif
3894 	struct fsl_mc_io *dpseci;
3895 	uint16_t token;
3896 	struct dpseci_attr attr;
3897 	int retcode, hw_id;
3898 	char str[30];
3899 
3900 	PMD_INIT_FUNC_TRACE();
3901 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3902 	hw_id = dpaa2_dev->object_id;
3903 
3904 	cryptodev->driver_id = cryptodev_driver_id;
3905 	cryptodev->dev_ops = &crypto_ops;
3906 
3907 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3908 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3909 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3910 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3911 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3912 			RTE_CRYPTODEV_FF_SECURITY |
3913 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3914 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3915 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3916 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3917 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3918 
3919 	internals = cryptodev->data->dev_private;
3920 
	/*
	 * For secondary processes, we don't initialise any further, as the
	 * primary process has already done this work.
	 */
3926 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3927 		DPAA2_SEC_DEBUG("Device already init by primary process");
3928 		return 0;
3929 	}
3930 #ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for the primary process */
3932 	security_instance = rte_malloc("rte_security_instances_ops",
3933 				sizeof(struct rte_security_ctx), 0);
3934 	if (security_instance == NULL)
3935 		return -ENOMEM;
3936 	security_instance->device = (void *)cryptodev;
3937 	security_instance->ops = &dpaa2_sec_security_ops;
3938 	security_instance->sess_cnt = 0;
3939 	cryptodev->security_ctx = security_instance;
3940 #endif
	/* Open the rte device via MC and save the handle for further use */
3942 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3943 				sizeof(struct fsl_mc_io), 0);
3944 	if (!dpseci) {
3945 		DPAA2_SEC_ERR(
3946 			"Error in allocating the memory for dpsec object");
3947 		return -ENOMEM;
3948 	}
3949 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3950 
3951 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3952 	if (retcode != 0) {
3953 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3954 			      retcode);
3955 		goto init_error;
3956 	}
3957 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3958 	if (retcode != 0) {
		DPAA2_SEC_ERR(
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
3962 		goto init_error;
3963 	}
3964 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3965 			"dpsec-%u", hw_id);
3966 
3967 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3968 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3969 	internals->hw = dpseci;
3970 	internals->token = token;
3971 
3972 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3973 			getpid(), cryptodev->data->dev_id);
3974 	internals->fle_pool = rte_mempool_create((const char *)str,
3975 			FLE_POOL_NUM_BUFS,
3976 			FLE_POOL_BUF_SIZE,
3977 			FLE_POOL_CACHE_SIZE, 0,
3978 			NULL, NULL, NULL, NULL,
3979 			SOCKET_ID_ANY, 0);
3980 	if (!internals->fle_pool) {
3981 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3982 		goto init_error;
3983 	}
3984 
3985 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3986 	return 0;
3987 
3988 init_error:
3989 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3990 
3991 	/* dpaa2_sec_uninit(crypto_dev_name); */
3992 	return -EFAULT;
3993 }
3994 
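/** Probe hook: allocate a cryptodev for the DPSECI object and initialise it */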
3995 static int
3996 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3997 			  struct rte_dpaa2_device *dpaa2_dev)
3998 {
3999 	struct rte_cryptodev *cryptodev;
4000 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4001 
4002 	int retval;
4003 
4004 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4005 			dpaa2_dev->object_id);
4006 
4007 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4008 	if (cryptodev == NULL)
4009 		return -ENOMEM;
4010 
4011 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4012 		cryptodev->data->dev_private = rte_zmalloc_socket(
4013 					"cryptodev private structure",
4014 					sizeof(struct dpaa2_sec_dev_private),
4015 					RTE_CACHE_LINE_SIZE,
4016 					rte_socket_id());
4017 
4018 		if (cryptodev->data->dev_private == NULL)
4019 			rte_panic("Cannot allocate memzone for private "
4020 				  "device data");
4021 	}
4022 
4023 	dpaa2_dev->cryptodev = cryptodev;
4024 	cryptodev->device = &dpaa2_dev->device;
4025 
4026 	/* init user callbacks */
4027 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
4028 
4029 	if (dpaa2_svr_family == SVR_LX2160A)
4030 		rta_set_sec_era(RTA_SEC_ERA_10);
4031 	else
4032 		rta_set_sec_era(RTA_SEC_ERA_8);
4033 
	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
4035 
4036 	/* Invoke PMD device initialization function */
4037 	retval = dpaa2_sec_dev_init(cryptodev);
4038 	if (retval == 0)
4039 		return 0;
4040 
4041 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4042 		rte_free(cryptodev->data->dev_private);
4043 
4044 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4045 
4046 	return -ENXIO;
4047 }
4048 
4049 static int
4050 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4051 {
4052 	struct rte_cryptodev *cryptodev;
4053 	int ret;
4054 
4055 	cryptodev = dpaa2_dev->cryptodev;
4056 	if (cryptodev == NULL)
4057 		return -ENODEV;
4058 
4059 	ret = dpaa2_sec_uninit(cryptodev);
4060 	if (ret)
4061 		return ret;
4062 
4063 	return rte_cryptodev_pmd_destroy(cryptodev);
4064 }
4065 
4066 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4067 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4068 	.drv_type = DPAA2_CRYPTO,
4069 	.driver = {
4070 		.name = "DPAA2 SEC PMD"
4071 	},
4072 	.probe = cryptodev_dpaa2_sec_probe,
4073 	.remove = cryptodev_dpaa2_sec_remove,
4074 };
4075 
4076 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4077 
4078 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4079 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4080 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4081 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4082