xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35 
36 /* Required types */
37 typedef uint64_t	dma_addr_t;
38 
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
43 
44 /* A minimum job descriptor consists of a one-word job descriptor HEADER
45  * and a pointer to the shared descriptor.
46  */
47 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
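/* For illustration: assuming the usual RTA definitions (CAAM_CMD_SZ is a
 * 4-byte command word and CAAM_PTR_SZ is sizeof(dma_addr_t), i.e. 8 bytes
 * here), the minimum job descriptor is 12 bytes.
 */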
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52 
53 #define NO_PREFETCH 0
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS	32000
56 #define FLE_POOL_BUF_SIZE	256
57 #define FLE_POOL_CACHE_SIZE	512
58 #define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
59 #define SEC_FLC_DHR_OUTBOUND	-114
60 #define SEC_FLC_DHR_INBOUND	0
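/*
 * A sketch of the compound frame layout built by the build_*_fd helpers
 * below (derived from the code in this file):
 *
 *   fle[0] : scratch entry holding the rte_crypto_op pointer and the
 *            session ctxt; read back on dequeue via (fle - 1)
 *   fle[1] : output frame list entry (op_fle); the FD address points here
 *   fle[2] : input frame list entry (ip_fle)
 *   fle[3+]: scatter/gather entries when the SG_EXT bit is set
 *
 * FLE_SG_MEM_SIZE() accordingly reserves the 256-byte base area plus,
 * apparently, one 32-byte entry per extra segment.
 */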
61 
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
63 
64 static uint8_t cryptodev_driver_id;
65 
66 int dpaa2_logtype_sec;
67 
68 static inline int
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70 			   struct rte_crypto_op *op,
71 			   struct qbman_fd *fd, uint16_t bpid)
72 {
73 	struct rte_crypto_sym_op *sym_op = op->sym;
74 	struct ctxt_priv *priv = sess->ctxt;
75 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76 	struct sec_flow_context *flc;
77 	struct rte_mbuf *mbuf;
78 	uint32_t in_len = 0, out_len = 0;
79 
80 	if (sym_op->m_dst)
81 		mbuf = sym_op->m_dst;
82 	else
83 		mbuf = sym_op->m_src;
84 
85 	/* first FLE entry used to store mbuf and session ctxt */
86 	fle = (struct qbman_fle *)rte_malloc(NULL,
87 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
88 			RTE_CACHE_LINE_SIZE);
89 	if (unlikely(!fle)) {
90 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
91 		return -1;
92 	}
93 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
94 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96 
97 	/* Save the shared descriptor */
98 	flc = &priv->flc_desc[0].flc;
99 
100 	op_fle = fle + 1;
101 	ip_fle = fle + 2;
102 	sge = fle + 3;
103 
104 	if (likely(bpid < MAX_BPID)) {
105 		DPAA2_SET_FD_BPID(fd, bpid);
106 		DPAA2_SET_FLE_BPID(op_fle, bpid);
107 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
108 	} else {
109 		DPAA2_SET_FD_IVP(fd);
110 		DPAA2_SET_FLE_IVP(op_fle);
111 		DPAA2_SET_FLE_IVP(ip_fle);
112 	}
113 
114 	/* Configure FD as a FRAME LIST */
115 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116 	DPAA2_SET_FD_COMPOUND_FMT(fd);
117 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
118 
119 	/* Configure Output FLE with Scatter/Gather Entry */
120 	DPAA2_SET_FLE_SG_EXT(op_fle);
121 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
122 
123 	/* Configure Output SGE for Encap/Decap */
124 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
126 	/* o/p segs */
127 	while (mbuf->next) {
128 		sge->length = mbuf->data_len;
129 		out_len += sge->length;
130 		sge++;
131 		mbuf = mbuf->next;
132 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
134 	}
135 	/* use buf_len for the last buffer so that extra data can be added */
136 	sge->length = mbuf->buf_len - mbuf->data_off;
137 	out_len += sge->length;
138 
139 	DPAA2_SET_FLE_FIN(sge);
140 	op_fle->length = out_len;
141 
142 	sge++;
143 	mbuf = sym_op->m_src;
144 
145 	/* Configure Input FLE with Scatter/Gather Entry */
146 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147 	DPAA2_SET_FLE_SG_EXT(ip_fle);
148 	DPAA2_SET_FLE_FIN(ip_fle);
149 
150 	/* Configure input SGE for Encap/Decap */
151 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153 	sge->length = mbuf->data_len;
154 	in_len += sge->length;
155 
156 	mbuf = mbuf->next;
157 	/* i/p segs */
158 	while (mbuf) {
159 		sge++;
160 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162 		sge->length = mbuf->data_len;
163 		in_len += sge->length;
164 		mbuf = mbuf->next;
165 	}
166 	ip_fle->length = in_len;
167 	DPAA2_SET_FLE_FIN(sge);
168 
169 	/* In case of PDCP, per packet HFN is stored in
170 	 * mbuf priv after sym_op.
171 	 */
172 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
174 		/* enable HFN override */
175 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
176 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
177 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
178 	}
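	/* A hypothetical application-side counterpart (not part of this
	 * driver): with hfn_ovd enabled in the PDCP xform, the application
	 * would have written the per-packet HFN into the op private area,
	 * e.g.:
	 *
	 *   *(uint32_t *)((uint8_t *)op + sess->pdcp.hfn_ovd_offset) = hfn;
	 */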
179 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
180 
181 	return 0;
182 }
183 
184 static inline int
185 build_proto_compound_fd(dpaa2_sec_session *sess,
186 	       struct rte_crypto_op *op,
187 	       struct qbman_fd *fd, uint16_t bpid)
188 {
189 	struct rte_crypto_sym_op *sym_op = op->sym;
190 	struct ctxt_priv *priv = sess->ctxt;
191 	struct qbman_fle *fle, *ip_fle, *op_fle;
192 	struct sec_flow_context *flc;
193 	struct rte_mbuf *src_mbuf = sym_op->m_src;
194 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
195 	int retval;
196 
197 	if (!dst_mbuf)
198 		dst_mbuf = src_mbuf;
199 
200 	/* Save the shared descriptor */
201 	flc = &priv->flc_desc[0].flc;
202 
203 	/* we are using the first FLE entry to store Mbuf */
204 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
205 	if (retval) {
206 		DPAA2_SEC_DP_ERR("Memory alloc failed");
207 		return -1;
208 	}
209 	memset(fle, 0, FLE_POOL_BUF_SIZE);
210 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
211 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
212 
213 	op_fle = fle + 1;
214 	ip_fle = fle + 2;
215 
216 	if (likely(bpid < MAX_BPID)) {
217 		DPAA2_SET_FD_BPID(fd, bpid);
218 		DPAA2_SET_FLE_BPID(op_fle, bpid);
219 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
220 	} else {
221 		DPAA2_SET_FD_IVP(fd);
222 		DPAA2_SET_FLE_IVP(op_fle);
223 		DPAA2_SET_FLE_IVP(ip_fle);
224 	}
225 
226 	/* Configure FD as a FRAME LIST */
227 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
228 	DPAA2_SET_FD_COMPOUND_FMT(fd);
229 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
230 
231 	/* Configure Output FLE with dst mbuf data  */
232 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
233 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
234 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
235 
236 	/* Configure Input FLE with src mbuf data */
237 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
238 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
239 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
240 
241 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
242 	DPAA2_SET_FLE_FIN(ip_fle);
243 
244 	/* In case of PDCP, per packet HFN is stored in
245 	 * mbuf priv after sym_op.
246 	 */
247 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
248 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
249 		/* enable HFN override */
250 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
251 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
252 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
253 	}
254 
255 	return 0;
256 
257 }
258 
259 static inline int
260 build_proto_fd(dpaa2_sec_session *sess,
261 	       struct rte_crypto_op *op,
262 	       struct qbman_fd *fd, uint16_t bpid)
263 {
264 	struct rte_crypto_sym_op *sym_op = op->sym;
265 	if (sym_op->m_dst)
266 		return build_proto_compound_fd(sess, op, fd, bpid);
267 
268 	struct ctxt_priv *priv = sess->ctxt;
269 	struct sec_flow_context *flc;
270 	struct rte_mbuf *mbuf = sym_op->m_src;
271 
272 	if (likely(bpid < MAX_BPID))
273 		DPAA2_SET_FD_BPID(fd, bpid);
274 	else
275 		DPAA2_SET_FD_IVP(fd);
276 
277 	/* Save the shared descriptor */
278 	flc = &priv->flc_desc[0].flc;
279 
280 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
281 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
282 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
283 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
284 
285 	/* save physical address of mbuf */
286 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
287 	mbuf->buf_iova = (size_t)op;
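	/* Note: the op pointer is stashed in buf_iova and the original IOVA
	 * is parked in aead.digest.phys_addr; sec_simple_fd_to_mbuf()
	 * reverses this on the dequeue path.
	 */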
288 
289 	return 0;
290 }
291 
292 static inline int
293 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
294 		 struct rte_crypto_op *op,
295 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
296 {
297 	struct rte_crypto_sym_op *sym_op = op->sym;
298 	struct ctxt_priv *priv = sess->ctxt;
299 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
300 	struct sec_flow_context *flc;
301 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
302 	int icv_len = sess->digest_length;
303 	uint8_t *old_icv;
304 	struct rte_mbuf *mbuf;
305 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
306 			sess->iv.offset);
307 
308 	if (sym_op->m_dst)
309 		mbuf = sym_op->m_dst;
310 	else
311 		mbuf = sym_op->m_src;
312 
313 	/* first FLE entry used to store mbuf and session ctxt */
314 	fle = (struct qbman_fle *)rte_malloc(NULL,
315 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
316 			RTE_CACHE_LINE_SIZE);
317 	if (unlikely(!fle)) {
318 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
319 		return -1;
320 	}
321 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
322 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
323 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
324 
325 	op_fle = fle + 1;
326 	ip_fle = fle + 2;
327 	sge = fle + 3;
328 
329 	/* Save the shared descriptor */
330 	flc = &priv->flc_desc[0].flc;
331 
332 	/* Configure FD as a FRAME LIST */
333 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
334 	DPAA2_SET_FD_COMPOUND_FMT(fd);
335 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
336 
337 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
338 		   "iv-len=%d data_off: 0x%x\n",
339 		   sym_op->aead.data.offset,
340 		   sym_op->aead.data.length,
341 		   sess->digest_length,
342 		   sess->iv.length,
343 		   sym_op->m_src->data_off);
344 
345 	/* Configure Output FLE with Scatter/Gather Entry */
346 	DPAA2_SET_FLE_SG_EXT(op_fle);
347 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
348 
349 	if (auth_only_len)
350 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
351 
352 	op_fle->length = (sess->dir == DIR_ENC) ?
353 			(sym_op->aead.data.length + icv_len + auth_only_len) :
354 			sym_op->aead.data.length + auth_only_len;
355 
356 	/* Configure Output SGE for Encap/Decap */
357 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
358 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
359 			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
360 	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
361 
362 	mbuf = mbuf->next;
363 	/* o/p segs */
364 	while (mbuf) {
365 		sge++;
366 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
367 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
368 		sge->length = mbuf->data_len;
369 		mbuf = mbuf->next;
370 	}
371 	sge->length -= icv_len;
372 
373 	if (sess->dir == DIR_ENC) {
374 		sge++;
375 		DPAA2_SET_FLE_ADDR(sge,
376 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
377 		sge->length = icv_len;
378 	}
379 	DPAA2_SET_FLE_FIN(sge);
380 
381 	sge++;
382 	mbuf = sym_op->m_src;
383 
384 	/* Configure Input FLE with Scatter/Gather Entry */
385 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
386 	DPAA2_SET_FLE_SG_EXT(ip_fle);
387 	DPAA2_SET_FLE_FIN(ip_fle);
388 	ip_fle->length = (sess->dir == DIR_ENC) ?
389 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
390 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
391 		 icv_len);
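	/* Worked example with illustrative numbers: encrypting 64 bytes of
	 * payload with a 12-byte IV and 16 bytes of AAD gives an input FLE
	 * length of 64 + 12 + 16 = 92 bytes; decryption also consumes the
	 * 16-byte ICV, giving 108 bytes.
	 */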
392 
393 	/* Configure Input SGE for Encap/Decap */
394 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
395 	sge->length = sess->iv.length;
396 
397 	sge++;
398 	if (auth_only_len) {
399 		DPAA2_SET_FLE_ADDR(sge,
400 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
401 		sge->length = auth_only_len;
402 		sge++;
403 	}
404 
405 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
406 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
407 				mbuf->data_off);
408 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
409 
410 	mbuf = mbuf->next;
411 	/* i/p segs */
412 	while (mbuf) {
413 		sge++;
414 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
415 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
416 		sge->length = mbuf->data_len;
417 		mbuf = mbuf->next;
418 	}
419 
420 	if (sess->dir == DIR_DEC) {
421 		sge++;
422 		old_icv = (uint8_t *)(sge + 1);
423 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
424 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
425 		sge->length = icv_len;
426 	}
427 
428 	DPAA2_SET_FLE_FIN(sge);
429 	if (auth_only_len) {
430 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
431 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
432 	}
433 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
434 
435 	return 0;
436 }
437 
438 static inline int
439 build_authenc_gcm_fd(dpaa2_sec_session *sess,
440 		     struct rte_crypto_op *op,
441 		     struct qbman_fd *fd, uint16_t bpid)
442 {
443 	struct rte_crypto_sym_op *sym_op = op->sym;
444 	struct ctxt_priv *priv = sess->ctxt;
445 	struct qbman_fle *fle, *sge;
446 	struct sec_flow_context *flc;
447 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
448 	int icv_len = sess->digest_length, retval;
449 	uint8_t *old_icv;
450 	struct rte_mbuf *dst;
451 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
452 			sess->iv.offset);
453 
454 	if (sym_op->m_dst)
455 		dst = sym_op->m_dst;
456 	else
457 		dst = sym_op->m_src;
458 
459 	/* TODO: we are using the first FLE entry to store the mbuf and
460 	 * session ctxt. Currently we do not know which FLE holds the mbuf,
461 	 * so on retrieval we go back one FLE from the FD address to get the
462 	 * mbuf address from the previous FLE.
463 	 * A better approach could use the inline mbuf.
464 	 */
465 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
466 	if (retval) {
467 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
468 		return -1;
469 	}
470 	memset(fle, 0, FLE_POOL_BUF_SIZE);
471 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
472 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
473 	fle = fle + 1;
474 	sge = fle + 2;
475 	if (likely(bpid < MAX_BPID)) {
476 		DPAA2_SET_FD_BPID(fd, bpid);
477 		DPAA2_SET_FLE_BPID(fle, bpid);
478 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
479 		DPAA2_SET_FLE_BPID(sge, bpid);
480 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
481 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
482 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
483 	} else {
484 		DPAA2_SET_FD_IVP(fd);
485 		DPAA2_SET_FLE_IVP(fle);
486 		DPAA2_SET_FLE_IVP((fle + 1));
487 		DPAA2_SET_FLE_IVP(sge);
488 		DPAA2_SET_FLE_IVP((sge + 1));
489 		DPAA2_SET_FLE_IVP((sge + 2));
490 		DPAA2_SET_FLE_IVP((sge + 3));
491 	}
492 
493 	/* Save the shared descriptor */
494 	flc = &priv->flc_desc[0].flc;
495 	/* Configure FD as a FRAME LIST */
496 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
497 	DPAA2_SET_FD_COMPOUND_FMT(fd);
498 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
499 
500 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
501 		   "iv-len=%d data_off: 0x%x\n",
502 		   sym_op->aead.data.offset,
503 		   sym_op->aead.data.length,
504 		   sess->digest_length,
505 		   sess->iv.length,
506 		   sym_op->m_src->data_off);
507 
508 	/* Configure Output FLE with Scatter/Gather Entry */
509 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
510 	if (auth_only_len)
511 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
512 	fle->length = (sess->dir == DIR_ENC) ?
513 			(sym_op->aead.data.length + icv_len + auth_only_len) :
514 			sym_op->aead.data.length + auth_only_len;
515 
516 	DPAA2_SET_FLE_SG_EXT(fle);
517 
518 	/* Configure Output SGE for Encap/Decap */
519 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
520 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
521 			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
522 	sge->length = sym_op->aead.data.length + auth_only_len;
523 
524 	if (sess->dir == DIR_ENC) {
525 		sge++;
526 		DPAA2_SET_FLE_ADDR(sge,
527 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
528 		sge->length = sess->digest_length;
529 		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
530 					sess->iv.length + auth_only_len));
531 	}
532 	DPAA2_SET_FLE_FIN(sge);
533 
534 	sge++;
535 	fle++;
536 
537 	/* Configure Input FLE with Scatter/Gather Entry */
538 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
539 	DPAA2_SET_FLE_SG_EXT(fle);
540 	DPAA2_SET_FLE_FIN(fle);
541 	fle->length = (sess->dir == DIR_ENC) ?
542 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
543 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
544 		 sess->digest_length);
545 
546 	/* Configure Input SGE for Encap/Decap */
547 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
548 	sge->length = sess->iv.length;
549 	sge++;
550 	if (auth_only_len) {
551 		DPAA2_SET_FLE_ADDR(sge,
552 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
553 		sge->length = auth_only_len;
554 		DPAA2_SET_FLE_BPID(sge, bpid);
555 		sge++;
556 	}
557 
558 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
559 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
560 				sym_op->m_src->data_off);
561 	sge->length = sym_op->aead.data.length;
562 	if (sess->dir == DIR_DEC) {
563 		sge++;
564 		old_icv = (uint8_t *)(sge + 1);
565 		memcpy(old_icv,	sym_op->aead.digest.data,
566 		       sess->digest_length);
567 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
568 		sge->length = sess->digest_length;
569 		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
570 				 sess->digest_length +
571 				 sess->iv.length +
572 				 auth_only_len));
573 	}
574 	DPAA2_SET_FLE_FIN(sge);
575 
576 	if (auth_only_len) {
577 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
578 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
579 	}
580 
581 	return 0;
582 }
583 
584 static inline int
585 build_authenc_sg_fd(dpaa2_sec_session *sess,
586 		 struct rte_crypto_op *op,
587 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
588 {
589 	struct rte_crypto_sym_op *sym_op = op->sym;
590 	struct ctxt_priv *priv = sess->ctxt;
591 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
592 	struct sec_flow_context *flc;
593 	uint32_t auth_only_len = sym_op->auth.data.length -
594 				sym_op->cipher.data.length;
595 	int icv_len = sess->digest_length;
596 	uint8_t *old_icv;
597 	struct rte_mbuf *mbuf;
598 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
599 			sess->iv.offset);
600 
601 	if (sym_op->m_dst)
602 		mbuf = sym_op->m_dst;
603 	else
604 		mbuf = sym_op->m_src;
605 
606 	/* first FLE entry used to store mbuf and session ctxt */
607 	fle = (struct qbman_fle *)rte_malloc(NULL,
608 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
609 			RTE_CACHE_LINE_SIZE);
610 	if (unlikely(!fle)) {
611 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
612 		return -1;
613 	}
614 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
615 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
616 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
617 
618 	op_fle = fle + 1;
619 	ip_fle = fle + 2;
620 	sge = fle + 3;
621 
622 	/* Save the shared descriptor */
623 	flc = &priv->flc_desc[0].flc;
624 
625 	/* Configure FD as a FRAME LIST */
626 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
627 	DPAA2_SET_FD_COMPOUND_FMT(fd);
628 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
629 
630 	DPAA2_SEC_DP_DEBUG(
631 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
632 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
633 		sym_op->auth.data.offset,
634 		sym_op->auth.data.length,
635 		sess->digest_length,
636 		sym_op->cipher.data.offset,
637 		sym_op->cipher.data.length,
638 		sess->iv.length,
639 		sym_op->m_src->data_off);
640 
641 	/* Configure Output FLE with Scatter/Gather Entry */
642 	DPAA2_SET_FLE_SG_EXT(op_fle);
643 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
644 
645 	if (auth_only_len)
646 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
647 
648 	op_fle->length = (sess->dir == DIR_ENC) ?
649 			(sym_op->cipher.data.length + icv_len) :
650 			sym_op->cipher.data.length;
651 
652 	/* Configure Output SGE for Encap/Decap */
653 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
654 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
655 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
656 
657 	mbuf = mbuf->next;
658 	/* o/p segs */
659 	while (mbuf) {
660 		sge++;
661 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
662 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
663 		sge->length = mbuf->data_len;
664 		mbuf = mbuf->next;
665 	}
666 	sge->length -= icv_len;
667 
668 	if (sess->dir == DIR_ENC) {
669 		sge++;
670 		DPAA2_SET_FLE_ADDR(sge,
671 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
672 		sge->length = icv_len;
673 	}
674 	DPAA2_SET_FLE_FIN(sge);
675 
676 	sge++;
677 	mbuf = sym_op->m_src;
678 
679 	/* Configure Input FLE with Scatter/Gather Entry */
680 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
681 	DPAA2_SET_FLE_SG_EXT(ip_fle);
682 	DPAA2_SET_FLE_FIN(ip_fle);
683 	ip_fle->length = (sess->dir == DIR_ENC) ?
684 			(sym_op->auth.data.length + sess->iv.length) :
685 			(sym_op->auth.data.length + sess->iv.length +
686 			 icv_len);
687 
688 	/* Configure Input SGE for Encap/Decap */
689 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
690 	sge->length = sess->iv.length;
691 
692 	sge++;
693 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
694 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
695 				mbuf->data_off);
696 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
697 
698 	mbuf = mbuf->next;
699 	/* i/p segs */
700 	while (mbuf) {
701 		sge++;
702 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
703 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
704 		sge->length = mbuf->data_len;
705 		mbuf = mbuf->next;
706 	}
707 	sge->length -= icv_len;
708 
709 	if (sess->dir == DIR_DEC) {
710 		sge++;
711 		old_icv = (uint8_t *)(sge + 1);
712 		memcpy(old_icv,	sym_op->auth.digest.data,
713 		       icv_len);
714 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
715 		sge->length = icv_len;
716 	}
717 
718 	DPAA2_SET_FLE_FIN(sge);
719 	if (auth_only_len) {
720 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
721 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
722 	}
723 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
724 
725 	return 0;
726 }
727 
728 static inline int
729 build_authenc_fd(dpaa2_sec_session *sess,
730 		 struct rte_crypto_op *op,
731 		 struct qbman_fd *fd, uint16_t bpid)
732 {
733 	struct rte_crypto_sym_op *sym_op = op->sym;
734 	struct ctxt_priv *priv = sess->ctxt;
735 	struct qbman_fle *fle, *sge;
736 	struct sec_flow_context *flc;
737 	uint32_t auth_only_len = sym_op->auth.data.length -
738 				sym_op->cipher.data.length;
739 	int icv_len = sess->digest_length, retval;
740 	uint8_t *old_icv;
741 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
742 			sess->iv.offset);
743 	struct rte_mbuf *dst;
744 
745 	if (sym_op->m_dst)
746 		dst = sym_op->m_dst;
747 	else
748 		dst = sym_op->m_src;
749 
750 	/* We are using the first FLE entry to store the mbuf.
751 	 * Currently we do not know which FLE holds the mbuf, so on
752 	 * retrieval we go back one FLE from the FD address to get the
753 	 * mbuf address from the previous FLE.
754 	 * A better approach could use the inline mbuf.
755 	 */
756 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
757 	if (retval) {
758 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
759 		return -1;
760 	}
761 	memset(fle, 0, FLE_POOL_BUF_SIZE);
762 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
763 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
764 	fle = fle + 1;
765 	sge = fle + 2;
766 	if (likely(bpid < MAX_BPID)) {
767 		DPAA2_SET_FD_BPID(fd, bpid);
768 		DPAA2_SET_FLE_BPID(fle, bpid);
769 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
770 		DPAA2_SET_FLE_BPID(sge, bpid);
771 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
772 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
773 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
774 	} else {
775 		DPAA2_SET_FD_IVP(fd);
776 		DPAA2_SET_FLE_IVP(fle);
777 		DPAA2_SET_FLE_IVP((fle + 1));
778 		DPAA2_SET_FLE_IVP(sge);
779 		DPAA2_SET_FLE_IVP((sge + 1));
780 		DPAA2_SET_FLE_IVP((sge + 2));
781 		DPAA2_SET_FLE_IVP((sge + 3));
782 	}
783 
784 	/* Save the shared descriptor */
785 	flc = &priv->flc_desc[0].flc;
786 	/* Configure FD as a FRAME LIST */
787 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
788 	DPAA2_SET_FD_COMPOUND_FMT(fd);
789 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
790 
791 	DPAA2_SEC_DP_DEBUG(
792 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
793 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
794 		sym_op->auth.data.offset,
795 		sym_op->auth.data.length,
796 		sess->digest_length,
797 		sym_op->cipher.data.offset,
798 		sym_op->cipher.data.length,
799 		sess->iv.length,
800 		sym_op->m_src->data_off);
801 
802 	/* Configure Output FLE with Scatter/Gather Entry */
803 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
804 	if (auth_only_len)
805 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
806 	fle->length = (sess->dir == DIR_ENC) ?
807 			(sym_op->cipher.data.length + icv_len) :
808 			sym_op->cipher.data.length;
809 
810 	DPAA2_SET_FLE_SG_EXT(fle);
811 
812 	/* Configure Output SGE for Encap/Decap */
813 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
814 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
815 				dst->data_off);
816 	sge->length = sym_op->cipher.data.length;
817 
818 	if (sess->dir == DIR_ENC) {
819 		sge++;
820 		DPAA2_SET_FLE_ADDR(sge,
821 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
822 		sge->length = sess->digest_length;
823 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
824 					sess->iv.length));
825 	}
826 	DPAA2_SET_FLE_FIN(sge);
827 
828 	sge++;
829 	fle++;
830 
831 	/* Configure Input FLE with Scatter/Gather Entry */
832 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
833 	DPAA2_SET_FLE_SG_EXT(fle);
834 	DPAA2_SET_FLE_FIN(fle);
835 	fle->length = (sess->dir == DIR_ENC) ?
836 			(sym_op->auth.data.length + sess->iv.length) :
837 			(sym_op->auth.data.length + sess->iv.length +
838 			 sess->digest_length);
839 
840 	/* Configure Input SGE for Encap/Decap */
841 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
842 	sge->length = sess->iv.length;
843 	sge++;
844 
845 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
846 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
847 				sym_op->m_src->data_off);
848 	sge->length = sym_op->auth.data.length;
849 	if (sess->dir == DIR_DEC) {
850 		sge++;
851 		old_icv = (uint8_t *)(sge + 1);
852 		memcpy(old_icv,	sym_op->auth.digest.data,
853 		       sess->digest_length);
854 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
855 		sge->length = sess->digest_length;
856 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
857 				 sess->digest_length +
858 				 sess->iv.length));
859 	}
860 	DPAA2_SET_FLE_FIN(sge);
861 	if (auth_only_len) {
862 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
863 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
864 	}
865 	return 0;
866 }
867 
868 static inline int build_auth_sg_fd(
869 		dpaa2_sec_session *sess,
870 		struct rte_crypto_op *op,
871 		struct qbman_fd *fd,
872 		__rte_unused uint16_t bpid)
873 {
874 	struct rte_crypto_sym_op *sym_op = op->sym;
875 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
876 	struct sec_flow_context *flc;
877 	struct ctxt_priv *priv = sess->ctxt;
878 	int data_len, data_offset;
879 	uint8_t *old_digest;
880 	struct rte_mbuf *mbuf;
881 
882 	data_len = sym_op->auth.data.length;
883 	data_offset = sym_op->auth.data.offset;
884 
885 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
886 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
887 		if ((data_len & 7) || (data_offset & 7)) {
888 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
889 			return -1;
890 		}
891 
892 		data_len = data_len >> 3;
893 		data_offset = data_offset >> 3;
894 	}
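	/* e.g. a 128-bit SNOW/ZUC auth region becomes data_len = 16 bytes;
	 * these algorithms express lengths and offsets in bits.
	 */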
895 
896 	mbuf = sym_op->m_src;
897 	fle = (struct qbman_fle *)rte_malloc(NULL,
898 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
899 			RTE_CACHE_LINE_SIZE);
900 	if (unlikely(!fle)) {
901 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
902 		return -1;
903 	}
904 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
905 	/* first FLE entry used to store mbuf and session ctxt */
906 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
907 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
908 	op_fle = fle + 1;
909 	ip_fle = fle + 2;
910 	sge = fle + 3;
911 
912 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
913 	/* sg FD */
914 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
915 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
916 	DPAA2_SET_FD_COMPOUND_FMT(fd);
917 
918 	/* o/p fle */
919 	DPAA2_SET_FLE_ADDR(op_fle,
920 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
921 	op_fle->length = sess->digest_length;
922 
923 	/* i/p fle */
924 	DPAA2_SET_FLE_SG_EXT(ip_fle);
925 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
926 	ip_fle->length = data_len;
927 
928 	if (sess->iv.length) {
929 		uint8_t *iv_ptr;
930 
931 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
932 						   sess->iv.offset);
933 
934 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
935 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
936 			sge->length = 12;
937 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
938 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
939 			sge->length = 8;
940 		} else {
941 			sge->length = sess->iv.length;
942 		}
943 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
944 		ip_fle->length += sge->length;
945 		sge++;
946 	}
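	/* The conv_to_snow_f9_iv()/conv_to_zuc_eia_iv() helpers repack the
	 * generic IV into the 12-byte SNOW f9 / 8-byte ZUC EIA3 layout that
	 * the SEC descriptor consumes.
	 */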
947 	/* i/p 1st seg */
948 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
949 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
950 
951 	if (data_len <= (mbuf->data_len - data_offset)) {
952 		sge->length = data_len;
953 		data_len = 0;
954 	} else {
955 		sge->length = mbuf->data_len - data_offset;
956 
957 		/* remaining i/p segs */
958 		while ((data_len = data_len - sge->length) &&
959 		       (mbuf = mbuf->next)) {
960 			sge++;
961 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
962 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
963 			if (data_len > mbuf->data_len)
964 				sge->length = mbuf->data_len;
965 			else
966 				sge->length = data_len;
967 		}
968 	}
969 
970 	if (sess->dir == DIR_DEC) {
971 		/* Digest verification case */
972 		sge++;
973 		old_digest = (uint8_t *)(sge + 1);
974 		rte_memcpy(old_digest, sym_op->auth.digest.data,
975 			   sess->digest_length);
976 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
977 		sge->length = sess->digest_length;
978 		ip_fle->length += sess->digest_length;
979 	}
980 	DPAA2_SET_FLE_FIN(sge);
981 	DPAA2_SET_FLE_FIN(ip_fle);
982 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
983 
984 	return 0;
985 }
986 
987 static inline int
988 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
989 	      struct qbman_fd *fd, uint16_t bpid)
990 {
991 	struct rte_crypto_sym_op *sym_op = op->sym;
992 	struct qbman_fle *fle, *sge;
993 	struct sec_flow_context *flc;
994 	struct ctxt_priv *priv = sess->ctxt;
995 	int data_len, data_offset;
996 	uint8_t *old_digest;
997 	int retval;
998 
999 	data_len = sym_op->auth.data.length;
1000 	data_offset = sym_op->auth.data.offset;
1001 
1002 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1003 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1004 		if ((data_len & 7) || (data_offset & 7)) {
1005 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1006 			return -1;
1007 		}
1008 
1009 		data_len = data_len >> 3;
1010 		data_offset = data_offset >> 3;
1011 	}
1012 
1013 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1014 	if (retval) {
1015 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1016 		return -1;
1017 	}
1018 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1019 	/* TODO: we are using the first FLE entry to store the mbuf.
1020 	 * Currently we do not know which FLE holds the mbuf, so on
1021 	 * retrieval we go back one FLE from the FD address to get the
1022 	 * mbuf address from the previous FLE.
1023 	 * A better approach could use the inline mbuf.
1024 	 */
1025 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1026 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1027 	fle = fle + 1;
1028 	sge = fle + 2;
1029 
1030 	if (likely(bpid < MAX_BPID)) {
1031 		DPAA2_SET_FD_BPID(fd, bpid);
1032 		DPAA2_SET_FLE_BPID(fle, bpid);
1033 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1034 		DPAA2_SET_FLE_BPID(sge, bpid);
1035 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1036 	} else {
1037 		DPAA2_SET_FD_IVP(fd);
1038 		DPAA2_SET_FLE_IVP(fle);
1039 		DPAA2_SET_FLE_IVP((fle + 1));
1040 		DPAA2_SET_FLE_IVP(sge);
1041 		DPAA2_SET_FLE_IVP((sge + 1));
1042 	}
1043 
1044 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1045 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1046 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1047 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1048 
1049 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1050 	fle->length = sess->digest_length;
1051 	fle++;
1052 
1053 	/* Setting input FLE */
1054 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1055 	DPAA2_SET_FLE_SG_EXT(fle);
1056 	fle->length = data_len;
1057 
1058 	if (sess->iv.length) {
1059 		uint8_t *iv_ptr;
1060 
1061 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1062 						   sess->iv.offset);
1063 
1064 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1065 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1066 			sge->length = 12;
1067 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1068 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1069 			sge->length = 8;
1070 		} else {
1071 			sge->length = sess->iv.length;
1072 		}
1073 
1074 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1075 		fle->length = fle->length + sge->length;
1076 		sge++;
1077 	}
1078 
1079 	/* Setting data to authenticate */
1080 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1081 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1082 	sge->length = data_len;
1083 
1084 	if (sess->dir == DIR_DEC) {
1085 		sge++;
1086 		old_digest = (uint8_t *)(sge + 1);
1087 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1088 			   sess->digest_length);
1089 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1090 		sge->length = sess->digest_length;
1091 		fle->length = fle->length + sess->digest_length;
1092 	}
1093 
1094 	DPAA2_SET_FLE_FIN(sge);
1095 	DPAA2_SET_FLE_FIN(fle);
1096 	DPAA2_SET_FD_LEN(fd, fle->length);
1097 
1098 	return 0;
1099 }
1100 
1101 static int
1102 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1103 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1104 {
1105 	struct rte_crypto_sym_op *sym_op = op->sym;
1106 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1107 	int data_len, data_offset;
1108 	struct sec_flow_context *flc;
1109 	struct ctxt_priv *priv = sess->ctxt;
1110 	struct rte_mbuf *mbuf;
1111 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1112 			sess->iv.offset);
1113 
1114 	data_len = sym_op->cipher.data.length;
1115 	data_offset = sym_op->cipher.data.offset;
1116 
1117 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1118 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1119 		if ((data_len & 7) || (data_offset & 7)) {
1120 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1121 			return -1;
1122 		}
1123 
1124 		data_len = data_len >> 3;
1125 		data_offset = data_offset >> 3;
1126 	}
1127 
1128 	if (sym_op->m_dst)
1129 		mbuf = sym_op->m_dst;
1130 	else
1131 		mbuf = sym_op->m_src;
1132 
1133 	/* first FLE entry used to store mbuf and session ctxt */
1134 	fle = (struct qbman_fle *)rte_malloc(NULL,
1135 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1136 			RTE_CACHE_LINE_SIZE);
1137 	if (!fle) {
1138 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1139 		return -1;
1140 	}
1141 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1143 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1144 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1145 
1146 	op_fle = fle + 1;
1147 	ip_fle = fle + 2;
1148 	sge = fle + 3;
1149 
1150 	flc = &priv->flc_desc[0].flc;
1151 
1152 	DPAA2_SEC_DP_DEBUG(
1153 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1154 		" data_off: 0x%x\n",
1155 		data_offset,
1156 		data_len,
1157 		sess->iv.length,
1158 		sym_op->m_src->data_off);
1159 
1160 	/* o/p fle */
1161 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1162 	op_fle->length = data_len;
1163 	DPAA2_SET_FLE_SG_EXT(op_fle);
1164 
1165 	/* o/p 1st seg */
1166 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1167 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1168 	sge->length = mbuf->data_len - data_offset;
1169 
1170 	mbuf = mbuf->next;
1171 	/* o/p segs */
1172 	while (mbuf) {
1173 		sge++;
1174 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1175 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1176 		sge->length = mbuf->data_len;
1177 		mbuf = mbuf->next;
1178 	}
1179 	DPAA2_SET_FLE_FIN(sge);
1180 
1181 	DPAA2_SEC_DP_DEBUG(
1182 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1183 		flc, fle, fle->addr_hi, fle->addr_lo,
1184 		fle->length);
1185 
1186 	/* i/p fle */
1187 	mbuf = sym_op->m_src;
1188 	sge++;
1189 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1190 	ip_fle->length = sess->iv.length + data_len;
1191 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1192 
1193 	/* i/p IV */
1194 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1195 	DPAA2_SET_FLE_OFFSET(sge, 0);
1196 	sge->length = sess->iv.length;
1197 
1198 	sge++;
1199 
1200 	/* i/p 1st seg */
1201 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1202 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1203 	sge->length = mbuf->data_len - data_offset;
1204 
1205 	mbuf = mbuf->next;
1206 	/* i/p segs */
1207 	while (mbuf) {
1208 		sge++;
1209 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1210 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1211 		sge->length = mbuf->data_len;
1212 		mbuf = mbuf->next;
1213 	}
1214 	DPAA2_SET_FLE_FIN(sge);
1215 	DPAA2_SET_FLE_FIN(ip_fle);
1216 
1217 	/* sg fd */
1218 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1219 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1220 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1221 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1222 
1223 	DPAA2_SEC_DP_DEBUG(
1224 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1225 		" off =%d, len =%d\n",
1226 		DPAA2_GET_FD_ADDR(fd),
1227 		DPAA2_GET_FD_BPID(fd),
1228 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1229 		DPAA2_GET_FD_OFFSET(fd),
1230 		DPAA2_GET_FD_LEN(fd));
1231 	return 0;
1232 }
1233 
1234 static int
1235 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1236 		struct qbman_fd *fd, uint16_t bpid)
1237 {
1238 	struct rte_crypto_sym_op *sym_op = op->sym;
1239 	struct qbman_fle *fle, *sge;
1240 	int retval, data_len, data_offset;
1241 	struct sec_flow_context *flc;
1242 	struct ctxt_priv *priv = sess->ctxt;
1243 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1244 			sess->iv.offset);
1245 	struct rte_mbuf *dst;
1246 
1247 	data_len = sym_op->cipher.data.length;
1248 	data_offset = sym_op->cipher.data.offset;
1249 
1250 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1251 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1252 		if ((data_len & 7) || (data_offset & 7)) {
1253 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1254 			return -1;
1255 		}
1256 
1257 		data_len = data_len >> 3;
1258 		data_offset = data_offset >> 3;
1259 	}
1260 
1261 	if (sym_op->m_dst)
1262 		dst = sym_op->m_dst;
1263 	else
1264 		dst = sym_op->m_src;
1265 
1266 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1267 	if (retval) {
1268 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1269 		return -1;
1270 	}
1271 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1272 	/* TODO: we are using the first FLE entry to store the mbuf.
1273 	 * Currently we do not know which FLE holds the mbuf, so on
1274 	 * retrieval we go back one FLE from the FD address to get the
1275 	 * mbuf address from the previous FLE.
1276 	 * A better approach could use the inline mbuf.
1277 	 */
1278 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1279 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1280 	fle = fle + 1;
1281 	sge = fle + 2;
1282 
1283 	if (likely(bpid < MAX_BPID)) {
1284 		DPAA2_SET_FD_BPID(fd, bpid);
1285 		DPAA2_SET_FLE_BPID(fle, bpid);
1286 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1287 		DPAA2_SET_FLE_BPID(sge, bpid);
1288 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1289 	} else {
1290 		DPAA2_SET_FD_IVP(fd);
1291 		DPAA2_SET_FLE_IVP(fle);
1292 		DPAA2_SET_FLE_IVP((fle + 1));
1293 		DPAA2_SET_FLE_IVP(sge);
1294 		DPAA2_SET_FLE_IVP((sge + 1));
1295 	}
1296 
1297 	flc = &priv->flc_desc[0].flc;
1298 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1299 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1300 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1301 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1302 
1303 	DPAA2_SEC_DP_DEBUG(
1304 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1305 		" data_off: 0x%x\n",
1306 		data_offset,
1307 		data_len,
1308 		sess->iv.length,
1309 		sym_op->m_src->data_off);
1310 
1311 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1312 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1313 
1314 	fle->length = data_len + sess->iv.length;
1315 
1316 	DPAA2_SEC_DP_DEBUG(
1317 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1318 		flc, fle, fle->addr_hi, fle->addr_lo,
1319 		fle->length);
1320 
1321 	fle++;
1322 
1323 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1324 	fle->length = data_len + sess->iv.length;
1325 
1326 	DPAA2_SET_FLE_SG_EXT(fle);
1327 
1328 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1329 	sge->length = sess->iv.length;
1330 
1331 	sge++;
1332 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1333 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1334 
1335 	sge->length = data_len;
1336 	DPAA2_SET_FLE_FIN(sge);
1337 	DPAA2_SET_FLE_FIN(fle);
1338 
1339 	DPAA2_SEC_DP_DEBUG(
1340 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1341 		" off =%d, len =%d\n",
1342 		DPAA2_GET_FD_ADDR(fd),
1343 		DPAA2_GET_FD_BPID(fd),
1344 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1345 		DPAA2_GET_FD_OFFSET(fd),
1346 		DPAA2_GET_FD_LEN(fd));
1347 
1348 	return 0;
1349 }
1350 
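/* Dispatch helper: build the FD for a crypto op, using the scatter-gather
 * builders when either mbuf is segmented and the contiguous builders
 * otherwise, keyed on the session ctxt type.
 */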
1351 static inline int
1352 build_sec_fd(struct rte_crypto_op *op,
1353 	     struct qbman_fd *fd, uint16_t bpid)
1354 {
1355 	int ret = -1;
1356 	dpaa2_sec_session *sess;
1357 
1358 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1359 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1360 				op->sym->session, cryptodev_driver_id);
1361 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1362 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1363 				op->sym->sec_session);
1364 	else
1365 		return -1;
1366 
1367 	/* Check if any of the buffers is segmented */
1368 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1369 		  ((op->sym->m_dst != NULL) &&
1370 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1371 		switch (sess->ctxt_type) {
1372 		case DPAA2_SEC_CIPHER:
1373 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1374 			break;
1375 		case DPAA2_SEC_AUTH:
1376 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1377 			break;
1378 		case DPAA2_SEC_AEAD:
1379 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1380 			break;
1381 		case DPAA2_SEC_CIPHER_HASH:
1382 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1383 			break;
1384 		case DPAA2_SEC_IPSEC:
1385 		case DPAA2_SEC_PDCP:
1386 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1387 			break;
1388 		case DPAA2_SEC_HASH_CIPHER:
1389 		default:
1390 			DPAA2_SEC_ERR("error: Unsupported session");
1391 		}
1392 	} else {
1393 		switch (sess->ctxt_type) {
1394 		case DPAA2_SEC_CIPHER:
1395 			ret = build_cipher_fd(sess, op, fd, bpid);
1396 			break;
1397 		case DPAA2_SEC_AUTH:
1398 			ret = build_auth_fd(sess, op, fd, bpid);
1399 			break;
1400 		case DPAA2_SEC_AEAD:
1401 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1402 			break;
1403 		case DPAA2_SEC_CIPHER_HASH:
1404 			ret = build_authenc_fd(sess, op, fd, bpid);
1405 			break;
1406 		case DPAA2_SEC_IPSEC:
1407 			ret = build_proto_fd(sess, op, fd, bpid);
1408 			break;
1409 		case DPAA2_SEC_PDCP:
1410 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1411 			break;
1412 		case DPAA2_SEC_HASH_CIPHER:
1413 		default:
1414 			DPAA2_SEC_ERR("error: Unsupported session");
1415 		}
1416 	}
1417 	return ret;
1418 }
1419 
1420 static uint16_t
1421 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1422 			uint16_t nb_ops)
1423 {
1424 	/* Function to transmit the frames to the given device and VQ */
1425 	uint32_t loop;
1426 	int32_t ret;
1427 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1428 	uint32_t frames_to_send;
1429 	struct qbman_eq_desc eqdesc;
1430 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1431 	struct qbman_swp *swp;
1432 	uint16_t num_tx = 0;
1433 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1434 	/*todo - need to support multiple buffer pools */
1435 	uint16_t bpid;
1436 	struct rte_mempool *mb_pool;
1437 
1438 	if (unlikely(nb_ops == 0))
1439 		return 0;
1440 
1441 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1442 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1443 		return 0;
1444 	}
1445 	/*Prepare enqueue descriptor*/
1446 	qbman_eq_desc_clear(&eqdesc);
1447 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1448 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1449 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1450 
1451 	if (!DPAA2_PER_LCORE_DPIO) {
1452 		ret = dpaa2_affine_qbman_swp();
1453 		if (ret) {
1454 			DPAA2_SEC_ERR("Failure in affining portal");
1455 			return 0;
1456 		}
1457 	}
1458 	swp = DPAA2_PER_LCORE_PORTAL;
1459 
1460 	while (nb_ops) {
1461 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1462 			dpaa2_eqcr_size : nb_ops;
1463 
1464 		for (loop = 0; loop < frames_to_send; loop++) {
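			/* If the op arrived via an ordered event queue, the
			 * DCA (discrete consumption acknowledgement) flag set
			 * below consumes the held DQRR entry on enqueue,
			 * releasing it in order.
			 */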
1465 			if ((*ops)->sym->m_src->seqn) {
1466 				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1467 
1468 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1469 				DPAA2_PER_LCORE_DQRR_SIZE--;
1470 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1471 				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1472 			}
1473 
1474 			/*Clear the unused FD fields before sending*/
1475 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1476 			mb_pool = (*ops)->sym->m_src->pool;
1477 			bpid = mempool_to_bpid(mb_pool);
1478 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1479 			if (ret) {
1480 				DPAA2_SEC_ERR("error: Improper packet contents"
1481 					      " for crypto operation");
1482 				goto skip_tx;
1483 			}
1484 			ops++;
1485 		}
1486 		loop = 0;
1487 		while (loop < frames_to_send) {
1488 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1489 							&fd_arr[loop],
1490 							&flags[loop],
1491 							frames_to_send - loop);
1492 		}
1493 
1494 		num_tx += frames_to_send;
1495 		nb_ops -= frames_to_send;
1496 	}
1497 skip_tx:
1498 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1499 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1500 	return num_tx;
1501 }
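/*
 * Illustrative application-side usage of this PMD through the generic
 * cryptodev API (a sketch assuming an already configured device and queue
 * pair; dev_id, qp_id, ops and nb are placeholders):
 *
 *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *   ...
 *   uint16_t recvd = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb);
 */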
1502 
1503 static inline struct rte_crypto_op *
1504 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1505 {
1506 	struct rte_crypto_op *op;
1507 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1508 	uint16_t diff = 0;
1509 	dpaa2_sec_session *sess_priv;
1510 
1511 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1512 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1513 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1514 
1515 	diff = len - mbuf->pkt_len;
1516 	mbuf->pkt_len += diff;
1517 	mbuf->data_len += diff;
1518 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1519 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1520 	op->sym->aead.digest.phys_addr = 0L;
1521 
1522 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1523 				op->sym->sec_session);
1524 	if (sess_priv->dir == DIR_ENC)
1525 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1526 	else
1527 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1528 
1529 	return op;
1530 }
1531 
1532 static inline struct rte_crypto_op *
1533 sec_fd_to_mbuf(const struct qbman_fd *fd)
1534 {
1535 	struct qbman_fle *fle;
1536 	struct rte_crypto_op *op;
1537 	struct ctxt_priv *priv;
1538 	struct rte_mbuf *dst, *src;
1539 
1540 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1541 		return sec_simple_fd_to_mbuf(fd);
1542 
1543 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1544 
1545 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1546 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1547 
1548 	/* We are using the first FLE entry to store the mbuf.
1549 	 * Currently we do not know which FLE holds the mbuf, so on
1550 	 * retrieval we go back one FLE from the FD address to get the
1551 	 * mbuf address from the previous FLE.
1552 	 * A better approach could use the inline mbuf.
1553 	 */
1554 
1555 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1556 		/* TODO complete it. */
1557 		DPAA2_SEC_ERR("error: non inline buffer");
1558 		return NULL;
1559 	}
1560 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1561 
1562 	/* Prefetch op */
1563 	src = op->sym->m_src;
1564 	rte_prefetch0(src);
1565 
1566 	if (op->sym->m_dst) {
1567 		dst = op->sym->m_dst;
1568 		rte_prefetch0(dst);
1569 	} else
1570 		dst = src;
1571 
1572 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1573 		dpaa2_sec_session *sess = (dpaa2_sec_session *)
1574 			get_sec_session_private_data(op->sym->sec_session);
1575 		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
1576 				sess->ctxt_type == DPAA2_SEC_PDCP) {
1577 			uint16_t len = DPAA2_GET_FD_LEN(fd);
1578 			dst->pkt_len = len;
1579 			while (dst->next != NULL) {
1580 				len -= dst->data_len;
1581 				dst = dst->next;
1582 			}
1583 			dst->data_len = len;
1584 		}
1585 	}
1586 
1587 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1588 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1589 		(void *)dst,
1590 		dst->buf_addr,
1591 		DPAA2_GET_FD_ADDR(fd),
1592 		DPAA2_GET_FD_BPID(fd),
1593 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1594 		DPAA2_GET_FD_OFFSET(fd),
1595 		DPAA2_GET_FD_LEN(fd));
1596 
1597 	/* free the fle memory */
1598 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1599 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1600 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1601 	} else
1602 		rte_free((void *)(fle-1));
1603 
1604 	return op;
1605 }
1606 
1607 static uint16_t
1608 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1609 			uint16_t nb_ops)
1610 {
1611 	/* Function responsible for receiving frames for a given device and VQ */
1612 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1613 	struct qbman_result *dq_storage;
1614 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1615 	int ret, num_rx = 0;
1616 	uint8_t is_last = 0, status;
1617 	struct qbman_swp *swp;
1618 	const struct qbman_fd *fd;
1619 	struct qbman_pull_desc pulldesc;
1620 
1621 	if (!DPAA2_PER_LCORE_DPIO) {
1622 		ret = dpaa2_affine_qbman_swp();
1623 		if (ret) {
1624 			DPAA2_SEC_ERR("Failure in affining portal");
1625 			return 0;
1626 		}
1627 	}
1628 	swp = DPAA2_PER_LCORE_PORTAL;
1629 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1630 
1631 	qbman_pull_desc_clear(&pulldesc);
1632 	qbman_pull_desc_set_numframes(&pulldesc,
1633 				      (nb_ops > dpaa2_dqrr_size) ?
1634 				      dpaa2_dqrr_size : nb_ops);
1635 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1636 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1637 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1638 				    1);
1639 
1640 	/*Issue a volatile dequeue command. */
1641 	while (1) {
1642 		if (qbman_swp_pull(swp, &pulldesc)) {
1643 			DPAA2_SEC_WARN(
1644 				"SEC VDQ command is not issued : QBMAN busy");
1645 			/* Portal was busy, try again */
1646 			continue;
1647 		}
1648 		break;
1649 	}
1650 
1651 	/* Receive packets until the Last Dequeue entry is found with
1652 	 * respect to the above issued PULL command.
1653 	 */
1654 	while (!is_last) {
1655 		/* Check if the previously issued command is completed.
1656 		 * Note that the SWP is shared between the Ethernet driver
1657 		 * and the SEC driver.
1658 		 */
1659 		while (!qbman_check_command_complete(dq_storage))
1660 			;
1661 
1662 		/* Loop until the dq_storage is updated with
1663 		 * new token by QBMAN
1664 		 */
1665 		while (!qbman_check_new_result(dq_storage))
1666 			;
1667 		/* Check whether the last pull command has expired and
1668 		 * set the condition for loop termination
1669 		 */
1670 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1671 			is_last = 1;
1672 			/* Check for valid frame. */
1673 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1674 			if (unlikely(
1675 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1676 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1677 				continue;
1678 			}
1679 		}
1680 
1681 		fd = qbman_result_DQ_fd(dq_storage);
1682 		ops[num_rx] = sec_fd_to_mbuf(fd);
1683 
1684 		if (unlikely(fd->simple.frc)) {
1685 			/* TODO Parse SEC errors */
1686 			DPAA2_SEC_ERR("SEC returned Error - %x",
1687 				      fd->simple.frc);
1688 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1689 		} else {
1690 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1691 		}
1692 
1693 		num_rx++;
1694 		dq_storage++;
1695 	} /* End of Packet Rx loop */
1696 
1697 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1698 
1699 	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1700 	/*Return the total number of packets received to DPAA2 app*/
1701 	return num_rx;
1702 }
1703 
1704 /** Release queue pair */
1705 static int
1706 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1707 {
1708 	struct dpaa2_sec_qp *qp =
1709 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1710 
1711 	PMD_INIT_FUNC_TRACE();
1712 
1713 	if (qp->rx_vq.q_storage) {
1714 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1715 		rte_free(qp->rx_vq.q_storage);
1716 	}
1717 	rte_free(qp);
1718 
1719 	dev->data->queue_pairs[queue_pair_id] = NULL;
1720 
1721 	return 0;
1722 }
1723 
1724 /** Setup a queue pair */
1725 static int
1726 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1727 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1728 		__rte_unused int socket_id)
1729 {
1730 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1731 	struct dpaa2_sec_qp *qp;
1732 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1733 	struct dpseci_rx_queue_cfg cfg;
1734 	int32_t retcode;
1735 
1736 	PMD_INIT_FUNC_TRACE();
1737 
1738 	/* If the qp is already set up, nothing more to be done. */
1739 	if (dev->data->queue_pairs[qp_id] != NULL) {
1740 		DPAA2_SEC_INFO("QP already setup");
1741 		return 0;
1742 	}
1743 
1744 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1745 		    dev, qp_id, qp_conf);
1746 
1747 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1748 
1749 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1750 			RTE_CACHE_LINE_SIZE);
1751 	if (!qp) {
1752 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1753 		return -1;
1754 	}
1755 
1756 	qp->rx_vq.crypto_data = dev->data;
1757 	qp->tx_vq.crypto_data = dev->data;
1758 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1759 		sizeof(struct queue_storage_info_t),
1760 		RTE_CACHE_LINE_SIZE);
1761 	if (!qp->rx_vq.q_storage) {
1762 		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);	/* do not leak the qp on error */
1763 		return -1;
1764 	}
1765 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1766 
1767 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1768 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
1769 		return -1;
1770 	}
1771 
1772 	dev->data->queue_pairs[qp_id] = qp;
1773 
1774 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1775 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1776 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1777 				      qp_id, &cfg);
1778 	return retcode;
1779 }
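
/*
 * Illustrative sketch (hypothetical application code): configuring the
 * device and its queue pairs through the ops above. The descriptor
 * count is a placeholder; this PMD ignores qp_conf, and the exact
 * rte_cryptodev_qp_conf fields vary across DPDK versions.
 */
static __rte_unused int
example_dev_setup(uint8_t dev_id, uint16_t nb_qps, int socket_id)
{
	struct rte_cryptodev_config dev_conf = {
		.socket_id = socket_id,
		.nb_queue_pairs = nb_qps,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
	};
	uint16_t i;
	int ret;

	ret = rte_cryptodev_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;

	for (i = 0; i < nb_qps; i++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, i, &qp_conf,
						     socket_id);
		if (ret < 0)
			return ret;
	}

	return rte_cryptodev_start(dev_id);
}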
1780 
1781 /** Return the number of allocated queue pairs */
1782 static uint32_t
1783 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1784 {
1785 	PMD_INIT_FUNC_TRACE();
1786 
1787 	return dev->data->nb_queue_pairs;
1788 }
1789 
1790 /** Returns the size of the DPAA2 SEC session structure */
1791 static unsigned int
1792 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1793 {
1794 	PMD_INIT_FUNC_TRACE();
1795 
1796 	return sizeof(dpaa2_sec_session);
1797 }
1798 
1799 static int
1800 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1801 		      struct rte_crypto_sym_xform *xform,
1802 		      dpaa2_sec_session *session)
1803 {
1804 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1805 	struct alginfo cipherdata;
1806 	int bufsize;
1807 	struct ctxt_priv *priv;
1808 	struct sec_flow_context *flc;
1809 
1810 	PMD_INIT_FUNC_TRACE();
1811 
1812 	/* For SEC CIPHER only one descriptor is required. */
1813 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1814 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1815 			RTE_CACHE_LINE_SIZE);
1816 	if (priv == NULL) {
1817 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1818 		return -1;
1819 	}
1820 
1821 	priv->fle_pool = dev_priv->fle_pool;
1822 
1823 	flc = &priv->flc_desc[0].flc;
1824 
1825 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1826 			RTE_CACHE_LINE_SIZE);
1827 	if (session->cipher_key.data == NULL) {
1828 		DPAA2_SEC_ERR("No Memory for cipher key");
1829 		rte_free(priv);
1830 		return -1;
1831 	}
1832 	session->cipher_key.length = xform->cipher.key.length;
1833 
1834 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1835 	       xform->cipher.key.length);
1836 	cipherdata.key = (size_t)session->cipher_key.data;
1837 	cipherdata.keylen = session->cipher_key.length;
1838 	cipherdata.key_enc_flags = 0;
1839 	cipherdata.key_type = RTA_DATA_IMM;
1840 
1841 	/* Set IV parameters */
1842 	session->iv.offset = xform->cipher.iv.offset;
1843 	session->iv.length = xform->cipher.iv.length;
1844 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1845 				DIR_ENC : DIR_DEC;
1846 
1847 	switch (xform->cipher.algo) {
1848 	case RTE_CRYPTO_CIPHER_AES_CBC:
1849 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1850 		cipherdata.algmode = OP_ALG_AAI_CBC;
1851 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1852 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1853 						SHR_NEVER, &cipherdata, NULL,
1854 						session->iv.length,
1855 						session->dir);
1856 		break;
1857 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1858 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1859 		cipherdata.algmode = OP_ALG_AAI_CBC;
1860 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1861 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1862 						SHR_NEVER, &cipherdata, NULL,
1863 						session->iv.length,
1864 						session->dir);
1865 		break;
1866 	case RTE_CRYPTO_CIPHER_AES_CTR:
1867 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1868 		cipherdata.algmode = OP_ALG_AAI_CTR;
1869 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1870 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1871 						SHR_NEVER, &cipherdata, NULL,
1872 						session->iv.length,
1873 						session->dir);
1874 		break;
1875 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1876 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1877 		cipherdata.algmode = OP_ALG_AAI_CTR;
1878 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
1879 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1880 						SHR_NEVER, &cipherdata, NULL,
1881 						session->iv.length,
1882 						session->dir);
1883 		break;
1884 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1885 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1886 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1887 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1888 					      &cipherdata,
1889 					      session->dir);
1890 		break;
1891 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1892 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1893 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1894 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1895 					      &cipherdata,
1896 					      session->dir);
1897 		break;
1898 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1899 	case RTE_CRYPTO_CIPHER_AES_F8:
1900 	case RTE_CRYPTO_CIPHER_AES_ECB:
1901 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1902 	case RTE_CRYPTO_CIPHER_AES_XTS:
1903 	case RTE_CRYPTO_CIPHER_ARC4:
1904 	case RTE_CRYPTO_CIPHER_NULL:
1905 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1906 			xform->cipher.algo);
1907 		goto error_out;
1908 	default:
1909 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1910 			xform->cipher.algo);
1911 		goto error_out;
1912 	}
1913 
1914 	if (bufsize < 0) {
1915 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1916 		goto error_out;
1917 	}
1918 
1919 	flc->word1_sdl = (uint8_t)bufsize;
1920 	session->ctxt = priv;
1921 
1922 #ifdef CAAM_DESC_DEBUG
1923 	int i;
1924 	for (i = 0; i < bufsize; i++)
1925 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1926 #endif
1927 	return 0;
1928 
1929 error_out:
1930 	rte_free(session->cipher_key.data);
1931 	rte_free(priv);
1932 	return -1;
1933 }
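
/*
 * Illustrative sketch: a cipher-only transform of the shape consumed by
 * dpaa2_sec_cipher_init() above. Key data and the IV offset into the
 * crypto op are placeholders supplied by the caller.
 */
static __rte_unused void
example_fill_cipher_xform(struct rte_crypto_sym_xform *xform,
			  uint8_t *key, uint16_t iv_offset)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next = NULL;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.key.data = key;
	xform->cipher.key.length = 16;		/* AES-128 */
	/* The IV lives in the crypto op private area at this offset */
	xform->cipher.iv.offset = iv_offset;
	xform->cipher.iv.length = 16;		/* AES block size */
}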
1934 
1935 static int
1936 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1937 		    struct rte_crypto_sym_xform *xform,
1938 		    dpaa2_sec_session *session)
1939 {
1940 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1941 	struct alginfo authdata;
1942 	int bufsize;
1943 	struct ctxt_priv *priv;
1944 	struct sec_flow_context *flc;
1945 
1946 	PMD_INIT_FUNC_TRACE();
1947 
1948 	/* For SEC AUTH three descriptors are required for various stages */
1949 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1950 			sizeof(struct ctxt_priv) + 3 *
1951 			sizeof(struct sec_flc_desc),
1952 			RTE_CACHE_LINE_SIZE);
1953 	if (priv == NULL) {
1954 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1955 		return -1;
1956 	}
1957 
1958 	priv->fle_pool = dev_priv->fle_pool;
1959 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1960 
1961 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1962 			RTE_CACHE_LINE_SIZE);
1963 	if (session->auth_key.data == NULL) {
1964 		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1965 		rte_free(priv);
1966 		return -1;
1967 	}
1968 	session->auth_key.length = xform->auth.key.length;
1969 
1970 	memcpy(session->auth_key.data, xform->auth.key.data,
1971 	       xform->auth.key.length);
1972 	authdata.key = (size_t)session->auth_key.data;
1973 	authdata.keylen = session->auth_key.length;
1974 	authdata.key_enc_flags = 0;
1975 	authdata.key_type = RTA_DATA_IMM;
1976 
1977 	session->digest_length = xform->auth.digest_length;
1978 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1979 				DIR_ENC : DIR_DEC;
1980 
1981 	switch (xform->auth.algo) {
1982 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1983 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1984 		authdata.algmode = OP_ALG_AAI_HMAC;
1985 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1986 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1987 					   1, 0, SHR_NEVER, &authdata,
1988 					   !session->dir,
1989 					   session->digest_length);
1990 		break;
1991 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1992 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1993 		authdata.algmode = OP_ALG_AAI_HMAC;
1994 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1995 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1996 					   1, 0, SHR_NEVER, &authdata,
1997 					   !session->dir,
1998 					   session->digest_length);
1999 		break;
2000 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2001 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2002 		authdata.algmode = OP_ALG_AAI_HMAC;
2003 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2004 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2005 					   1, 0, SHR_NEVER, &authdata,
2006 					   !session->dir,
2007 					   session->digest_length);
2008 		break;
2009 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2010 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2011 		authdata.algmode = OP_ALG_AAI_HMAC;
2012 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2013 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2014 					   1, 0, SHR_NEVER, &authdata,
2015 					   !session->dir,
2016 					   session->digest_length);
2017 		break;
2018 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2019 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2020 		authdata.algmode = OP_ALG_AAI_HMAC;
2021 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2022 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2023 					   1, 0, SHR_NEVER, &authdata,
2024 					   !session->dir,
2025 					   session->digest_length);
2026 		break;
2027 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2028 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2029 		authdata.algmode = OP_ALG_AAI_HMAC;
2030 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2031 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2032 					   1, 0, SHR_NEVER, &authdata,
2033 					   !session->dir,
2034 					   session->digest_length);
2035 		break;
2036 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2037 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2038 		authdata.algmode = OP_ALG_AAI_F9;
2039 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2040 		session->iv.offset = xform->auth.iv.offset;
2041 		session->iv.length = xform->auth.iv.length;
2042 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2043 					      1, 0, &authdata,
2044 					      !session->dir,
2045 					      session->digest_length);
2046 		break;
2047 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2048 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2049 		authdata.algmode = OP_ALG_AAI_F9;
2050 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2051 		session->iv.offset = xform->auth.iv.offset;
2052 		session->iv.length = xform->auth.iv.length;
2053 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2054 					   1, 0, &authdata,
2055 					   !session->dir,
2056 					   session->digest_length);
2057 		break;
2058 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2059 	case RTE_CRYPTO_AUTH_NULL:
2060 	case RTE_CRYPTO_AUTH_SHA1:
2061 	case RTE_CRYPTO_AUTH_SHA256:
2062 	case RTE_CRYPTO_AUTH_SHA512:
2063 	case RTE_CRYPTO_AUTH_SHA224:
2064 	case RTE_CRYPTO_AUTH_SHA384:
2065 	case RTE_CRYPTO_AUTH_MD5:
2066 	case RTE_CRYPTO_AUTH_AES_GMAC:
2067 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2068 	case RTE_CRYPTO_AUTH_AES_CMAC:
2069 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2070 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2071 			      xform->auth.algo);
2072 		goto error_out;
2073 	default:
2074 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2075 			      xform->auth.algo);
2076 		goto error_out;
2077 	}
2078 
2079 	if (bufsize < 0) {
2080 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2081 		goto error_out;
2082 	}
2083 
2084 	flc->word1_sdl = (uint8_t)bufsize;
2085 	session->ctxt = priv;
2086 #ifdef CAAM_DESC_DEBUG
2087 	int i;
2088 	for (i = 0; i < bufsize; i++)
2089 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2090 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2091 #endif
2092 
2093 	return 0;
2094 
2095 error_out:
2096 	rte_free(session->auth_key.data);
2097 	rte_free(priv);
2098 	return -1;
2099 }
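
/*
 * Illustrative sketch: an auth-only (digest generation) transform of
 * the shape consumed by dpaa2_sec_auth_init() above. The HMAC-SHA1 key
 * and digest sizes are placeholders.
 */
static __rte_unused void
example_fill_auth_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.key.data = key;
	xform->auth.key.length = 20;		/* HMAC-SHA1 key */
	xform->auth.digest_length = 20;		/* full SHA-1 digest */
}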
2100 
2101 static int
2102 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2103 		    struct rte_crypto_sym_xform *xform,
2104 		    dpaa2_sec_session *session)
2105 {
2106 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2107 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2108 	struct alginfo aeaddata;
2109 	int bufsize;
2110 	struct ctxt_priv *priv;
2111 	struct sec_flow_context *flc;
2112 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2113 	int err;
2114 
2115 	PMD_INIT_FUNC_TRACE();
2116 
2117 	/* Set IV parameters */
2118 	session->iv.offset = aead_xform->iv.offset;
2119 	session->iv.length = aead_xform->iv.length;
2120 	session->ctxt_type = DPAA2_SEC_AEAD;
2121 
2122 	/* For SEC AEAD only one descriptor is required */
2123 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2124 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2125 			RTE_CACHE_LINE_SIZE);
2126 	if (priv == NULL) {
2127 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2128 		return -1;
2129 	}
2130 
2131 	priv->fle_pool = dev_priv->fle_pool;
2132 	flc = &priv->flc_desc[0].flc;
2133 
2134 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2135 					       RTE_CACHE_LINE_SIZE);
2136 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2137 		DPAA2_SEC_ERR("No Memory for aead key");
2138 		rte_free(priv);
2139 		return -1;
2140 	}
2141 	memcpy(session->aead_key.data, aead_xform->key.data,
2142 	       aead_xform->key.length);
2143 
2144 	session->digest_length = aead_xform->digest_length;
2145 	session->aead_key.length = aead_xform->key.length;
2146 	ctxt->auth_only_len = aead_xform->aad_length;
2147 
2148 	aeaddata.key = (size_t)session->aead_key.data;
2149 	aeaddata.keylen = session->aead_key.length;
2150 	aeaddata.key_enc_flags = 0;
2151 	aeaddata.key_type = RTA_DATA_IMM;
2152 
2153 	switch (aead_xform->algo) {
2154 	case RTE_CRYPTO_AEAD_AES_GCM:
2155 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2156 		aeaddata.algmode = OP_ALG_AAI_GCM;
2157 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2158 		break;
2159 	case RTE_CRYPTO_AEAD_AES_CCM:
2160 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2161 			      aead_xform->algo);
2162 		goto error_out;
2163 	default:
2164 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2165 			      aead_xform->algo);
2166 		goto error_out;
2167 	}
2168 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2169 				DIR_ENC : DIR_DEC;
2170 
2171 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2172 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2173 			       MIN_JOB_DESC_SIZE,
2174 			       (unsigned int *)priv->flc_desc[0].desc,
2175 			       &priv->flc_desc[0].desc[1], 1);
2176 
2177 	if (err < 0) {
2178 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2179 		goto error_out;
2180 	}
2181 	if (priv->flc_desc[0].desc[1] & 1) {
2182 		aeaddata.key_type = RTA_DATA_IMM;
2183 	} else {
2184 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2185 		aeaddata.key_type = RTA_DATA_PTR;
2186 	}
2187 	priv->flc_desc[0].desc[0] = 0;
2188 	priv->flc_desc[0].desc[1] = 0;
2189 
2190 	if (session->dir == DIR_ENC)
2191 		bufsize = cnstr_shdsc_gcm_encap(
2192 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2193 				&aeaddata, session->iv.length,
2194 				session->digest_length);
2195 	else
2196 		bufsize = cnstr_shdsc_gcm_decap(
2197 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2198 				&aeaddata, session->iv.length,
2199 				session->digest_length);
2200 	if (bufsize < 0) {
2201 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2202 		goto error_out;
2203 	}
2204 
2205 	flc->word1_sdl = (uint8_t)bufsize;
2206 	session->ctxt = priv;
2207 #ifdef CAAM_DESC_DEBUG
2208 	int i;
2209 	for (i = 0; i < bufsize; i++)
2210 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2211 			    i, priv->flc_desc[0].desc[i]);
2212 #endif
2213 	return 0;
2214 
2215 error_out:
2216 	rte_free(session->aead_key.data);
2217 	rte_free(priv);
2218 	return -1;
2219 }
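
/*
 * Illustrative sketch: an AES-GCM AEAD transform of the shape consumed
 * by dpaa2_sec_aead_init() above. Key, AAD and tag sizes are
 * placeholders.
 */
static __rte_unused void
example_fill_gcm_xform(struct rte_crypto_sym_xform *xform,
		       uint8_t *key, uint16_t iv_offset)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;		/* AES-128 */
	xform->aead.iv.offset = iv_offset;
	xform->aead.iv.length = 12;		/* GCM nonce */
	xform->aead.digest_length = 16;		/* GCM tag */
	xform->aead.aad_length = 8;		/* placeholder AAD size */
}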
2220 
2221 
2222 static int
2223 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2224 		    struct rte_crypto_sym_xform *xform,
2225 		    dpaa2_sec_session *session)
2226 {
2227 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2228 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2229 	struct alginfo authdata, cipherdata;
2230 	int bufsize;
2231 	struct ctxt_priv *priv;
2232 	struct sec_flow_context *flc;
2233 	struct rte_crypto_cipher_xform *cipher_xform;
2234 	struct rte_crypto_auth_xform *auth_xform;
2235 	int err;
2236 
2237 	PMD_INIT_FUNC_TRACE();
2238 
2239 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2240 		cipher_xform = &xform->cipher;
2241 		auth_xform = &xform->next->auth;
2242 		session->ctxt_type =
2243 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2244 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2245 	} else {
2246 		cipher_xform = &xform->next->cipher;
2247 		auth_xform = &xform->auth;
2248 		session->ctxt_type =
2249 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2250 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2251 	}
2252 
2253 	/* Set IV parameters */
2254 	session->iv.offset = cipher_xform->iv.offset;
2255 	session->iv.length = cipher_xform->iv.length;
2256 
2257 	/* For SEC cipher-then-auth chaining only one descriptor is required */
2258 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2259 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2260 			RTE_CACHE_LINE_SIZE);
2261 	if (priv == NULL) {
2262 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2263 		return -1;
2264 	}
2265 
2266 	priv->fle_pool = dev_priv->fle_pool;
2267 	flc = &priv->flc_desc[0].flc;
2268 
2269 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2270 					       RTE_CACHE_LINE_SIZE);
2271 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2272 		DPAA2_SEC_ERR("No Memory for cipher key");
2273 		rte_free(priv);
2274 		return -1;
2275 	}
2276 	session->cipher_key.length = cipher_xform->key.length;
2277 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2278 					     RTE_CACHE_LINE_SIZE);
2279 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2280 		DPAA2_SEC_ERR("No Memory for auth key");
2281 		rte_free(session->cipher_key.data);
2282 		rte_free(priv);
2283 		return -1;
2284 	}
2285 	session->auth_key.length = auth_xform->key.length;
2286 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2287 	       cipher_xform->key.length);
2288 	memcpy(session->auth_key.data, auth_xform->key.data,
2289 	       auth_xform->key.length);
2290 
2291 	authdata.key = (size_t)session->auth_key.data;
2292 	authdata.keylen = session->auth_key.length;
2293 	authdata.key_enc_flags = 0;
2294 	authdata.key_type = RTA_DATA_IMM;
2295 
2296 	session->digest_length = auth_xform->digest_length;
2297 
2298 	switch (auth_xform->algo) {
2299 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2300 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2301 		authdata.algmode = OP_ALG_AAI_HMAC;
2302 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2303 		break;
2304 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2305 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2306 		authdata.algmode = OP_ALG_AAI_HMAC;
2307 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2308 		break;
2309 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2310 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2311 		authdata.algmode = OP_ALG_AAI_HMAC;
2312 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2313 		break;
2314 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2315 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2316 		authdata.algmode = OP_ALG_AAI_HMAC;
2317 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2318 		break;
2319 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2320 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2321 		authdata.algmode = OP_ALG_AAI_HMAC;
2322 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2323 		break;
2324 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2325 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2326 		authdata.algmode = OP_ALG_AAI_HMAC;
2327 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2328 		break;
2329 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2330 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2331 	case RTE_CRYPTO_AUTH_NULL:
2332 	case RTE_CRYPTO_AUTH_SHA1:
2333 	case RTE_CRYPTO_AUTH_SHA256:
2334 	case RTE_CRYPTO_AUTH_SHA512:
2335 	case RTE_CRYPTO_AUTH_SHA224:
2336 	case RTE_CRYPTO_AUTH_SHA384:
2337 	case RTE_CRYPTO_AUTH_MD5:
2338 	case RTE_CRYPTO_AUTH_AES_GMAC:
2339 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2340 	case RTE_CRYPTO_AUTH_AES_CMAC:
2341 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2342 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2343 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2344 			      auth_xform->algo);
2345 		goto error_out;
2346 	default:
2347 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2348 			      auth_xform->algo);
2349 		goto error_out;
2350 	}
2351 	cipherdata.key = (size_t)session->cipher_key.data;
2352 	cipherdata.keylen = session->cipher_key.length;
2353 	cipherdata.key_enc_flags = 0;
2354 	cipherdata.key_type = RTA_DATA_IMM;
2355 
2356 	switch (cipher_xform->algo) {
2357 	case RTE_CRYPTO_CIPHER_AES_CBC:
2358 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2359 		cipherdata.algmode = OP_ALG_AAI_CBC;
2360 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2361 		break;
2362 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2363 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2364 		cipherdata.algmode = OP_ALG_AAI_CBC;
2365 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2366 		break;
2367 	case RTE_CRYPTO_CIPHER_AES_CTR:
2368 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2369 		cipherdata.algmode = OP_ALG_AAI_CTR;
2370 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2371 		break;
2372 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2373 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2374 	case RTE_CRYPTO_CIPHER_NULL:
2375 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2376 	case RTE_CRYPTO_CIPHER_AES_ECB:
2377 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2378 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2379 			      cipher_xform->algo);
2380 		goto error_out;
2381 	default:
2382 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2383 			      cipher_xform->algo);
2384 		goto error_out;
2385 	}
2386 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2387 				DIR_ENC : DIR_DEC;
2388 
2389 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2390 	priv->flc_desc[0].desc[1] = authdata.keylen;
2391 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2392 			       MIN_JOB_DESC_SIZE,
2393 			       (unsigned int *)priv->flc_desc[0].desc,
2394 			       &priv->flc_desc[0].desc[2], 2);
2395 
2396 	if (err < 0) {
2397 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2398 		goto error_out;
2399 	}
2400 	if (priv->flc_desc[0].desc[2] & 1) {
2401 		cipherdata.key_type = RTA_DATA_IMM;
2402 	} else {
2403 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2404 		cipherdata.key_type = RTA_DATA_PTR;
2405 	}
2406 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2407 		authdata.key_type = RTA_DATA_IMM;
2408 	} else {
2409 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2410 		authdata.key_type = RTA_DATA_PTR;
2411 	}
2412 	priv->flc_desc[0].desc[0] = 0;
2413 	priv->flc_desc[0].desc[1] = 0;
2414 	priv->flc_desc[0].desc[2] = 0;
2415 
2416 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2417 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2418 					      0, SHR_SERIAL,
2419 					      &cipherdata, &authdata,
2420 					      session->iv.length,
2421 					      ctxt->auth_only_len,
2422 					      session->digest_length,
2423 					      session->dir);
2424 		if (bufsize < 0) {
2425 			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2426 			goto error_out;
2427 		}
2428 	} else {
2429 		DPAA2_SEC_ERR("Hash before cipher not supported");
2430 		goto error_out;
2431 	}
2432 
2433 	flc->word1_sdl = (uint8_t)bufsize;
2434 	session->ctxt = priv;
2435 #ifdef CAAM_DESC_DEBUG
2436 	int i;
2437 	for (i = 0; i < bufsize; i++)
2438 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2439 			    i, priv->flc_desc[0].desc[i]);
2440 #endif
2441 
2442 	return 0;
2443 
2444 error_out:
2445 	rte_free(session->cipher_key.data);
2446 	rte_free(session->auth_key.data);
2447 	rte_free(priv);
2448 	return -1;
2449 }
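
/*
 * Illustrative sketch: a cipher-then-auth chain (encrypt and generate
 * digest) of the shape dispatched to dpaa2_sec_aead_chain_init().
 * Keys, lengths and the IV offset are placeholders.
 */
static __rte_unused void
example_fill_chain_xforms(struct rte_crypto_sym_xform *cipher,
			  struct rte_crypto_sym_xform *auth,
			  uint8_t *cipher_key, uint8_t *auth_key,
			  uint16_t iv_offset)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.key.data = cipher_key;
	cipher->cipher.key.length = 16;
	cipher->cipher.iv.offset = iv_offset;
	cipher->cipher.iv.length = 16;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.key.data = auth_key;
	auth->auth.key.length = 20;
	auth->auth.digest_length = 12;		/* truncated HMAC-SHA1 */

	/* CIPHER followed by AUTH selects the cipher-hash path above */
	cipher->next = auth;
	auth->next = NULL;
}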
2450 
2451 static int
2452 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2453 			    struct rte_crypto_sym_xform *xform,	void *sess)
2454 {
2455 	dpaa2_sec_session *session = sess;
2456 	int ret;
2457 
2458 	PMD_INIT_FUNC_TRACE();
2459 
2460 	if (unlikely(sess == NULL)) {
2461 		DPAA2_SEC_ERR("Invalid session struct");
2462 		return -1;
2463 	}
2464 
2465 	memset(session, 0, sizeof(dpaa2_sec_session));
2466 	/* Default IV length = 0 */
2467 	session->iv.length = 0;
2468 
2469 	/* Cipher Only */
2470 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2471 		session->ctxt_type = DPAA2_SEC_CIPHER;
2472 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2473 
2474 	/* Authentication Only */
2475 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2476 		   xform->next == NULL) {
2477 		session->ctxt_type = DPAA2_SEC_AUTH;
2478 		ret = dpaa2_sec_auth_init(dev, xform, session);
2479 
2480 	/* Cipher then Authenticate */
2481 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2482 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2483 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2484 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2485 
2486 	/* Authenticate then Cipher */
2487 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2488 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2489 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2490 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2491 
2492 	/* AEAD operation for AES-GCM kind of Algorithms */
2493 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2494 		   xform->next == NULL) {
2495 		ret = dpaa2_sec_aead_init(dev, xform, session);
2496 
2497 	} else {
2498 		DPAA2_SEC_ERR("Invalid crypto type");
2499 		return -EINVAL;
2500 	}
2501 
2502 	return ret;
2503 }
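
/*
 * Illustrative sketch: creating a symmetric session that reaches
 * dpaa2_sec_sym_session_configure() below via the generic cryptodev
 * layer. Both mempools are assumed to be created by the caller.
 */
static __rte_unused struct rte_cryptodev_sym_session *
example_create_sym_session(uint8_t dev_id,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_mempool *sess_mp,
			   struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Allocates driver private data from sess_priv_mp and invokes
	 * the PMD's sym_session_configure op.
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
					   sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}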
2504 
2505 static int
2506 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2507 			dpaa2_sec_session *session,
2508 			struct alginfo *aeaddata)
2509 {
2510 	PMD_INIT_FUNC_TRACE();
2511 
2512 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2513 					       RTE_CACHE_LINE_SIZE);
2514 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2515 		DPAA2_SEC_ERR("No Memory for aead key");
2516 		return -1;
2517 	}
2518 	memcpy(session->aead_key.data, aead_xform->key.data,
2519 	       aead_xform->key.length);
2520 
2521 	session->digest_length = aead_xform->digest_length;
2522 	session->aead_key.length = aead_xform->key.length;
2523 
2524 	aeaddata->key = (size_t)session->aead_key.data;
2525 	aeaddata->keylen = session->aead_key.length;
2526 	aeaddata->key_enc_flags = 0;
2527 	aeaddata->key_type = RTA_DATA_IMM;
2528 
2529 	switch (aead_xform->algo) {
2530 	case RTE_CRYPTO_AEAD_AES_GCM:
2531 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2532 		aeaddata->algmode = OP_ALG_AAI_GCM;
2533 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2534 		break;
2535 	case RTE_CRYPTO_AEAD_AES_CCM:
2536 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2537 		aeaddata->algmode = OP_ALG_AAI_CCM;
2538 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2539 		break;
2540 	default:
2541 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2542 			      aead_xform->algo);
2543 		return -1;
2544 	}
2545 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2546 				DIR_ENC : DIR_DEC;
2547 
2548 	return 0;
2549 }
2550 
2551 static int
2552 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2553 	struct rte_crypto_auth_xform *auth_xform,
2554 	dpaa2_sec_session *session,
2555 	struct alginfo *cipherdata,
2556 	struct alginfo *authdata)
2557 {
2558 	if (cipher_xform) {
2559 		session->cipher_key.data = rte_zmalloc(NULL,
2560 						       cipher_xform->key.length,
2561 						       RTE_CACHE_LINE_SIZE);
2562 		if (session->cipher_key.data == NULL &&
2563 				cipher_xform->key.length > 0) {
2564 			DPAA2_SEC_ERR("No Memory for cipher key");
2565 			return -ENOMEM;
2566 		}
2567 
2568 		session->cipher_key.length = cipher_xform->key.length;
2569 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2570 				cipher_xform->key.length);
2571 		session->cipher_alg = cipher_xform->algo;
2572 	} else {
2573 		session->cipher_key.data = NULL;
2574 		session->cipher_key.length = 0;
2575 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2576 	}
2577 
2578 	if (auth_xform) {
2579 		session->auth_key.data = rte_zmalloc(NULL,
2580 						auth_xform->key.length,
2581 						RTE_CACHE_LINE_SIZE);
2582 		if (session->auth_key.data == NULL &&
2583 				auth_xform->key.length > 0) {
2584 			DPAA2_SEC_ERR("No Memory for auth key");
2585 			return -ENOMEM;
2586 		}
2587 		session->auth_key.length = auth_xform->key.length;
2588 		memcpy(session->auth_key.data, auth_xform->key.data,
2589 				auth_xform->key.length);
2590 		session->auth_alg = auth_xform->algo;
2591 	} else {
2592 		session->auth_key.data = NULL;
2593 		session->auth_key.length = 0;
2594 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2595 	}
2596 
2597 	authdata->key = (size_t)session->auth_key.data;
2598 	authdata->keylen = session->auth_key.length;
2599 	authdata->key_enc_flags = 0;
2600 	authdata->key_type = RTA_DATA_IMM;
2601 	switch (session->auth_alg) {
2602 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2603 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2604 		authdata->algmode = OP_ALG_AAI_HMAC;
2605 		break;
2606 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2607 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2608 		authdata->algmode = OP_ALG_AAI_HMAC;
2609 		break;
2610 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2611 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2612 		authdata->algmode = OP_ALG_AAI_HMAC;
2613 		break;
2614 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2615 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2616 		authdata->algmode = OP_ALG_AAI_HMAC;
2617 		break;
2618 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2619 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2620 		authdata->algmode = OP_ALG_AAI_HMAC;
2621 		break;
2622 	case RTE_CRYPTO_AUTH_AES_CMAC:
2623 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2624 		break;
2625 	case RTE_CRYPTO_AUTH_NULL:
2626 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2627 		break;
2628 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2629 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2630 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2631 	case RTE_CRYPTO_AUTH_SHA1:
2632 	case RTE_CRYPTO_AUTH_SHA256:
2633 	case RTE_CRYPTO_AUTH_SHA512:
2634 	case RTE_CRYPTO_AUTH_SHA224:
2635 	case RTE_CRYPTO_AUTH_SHA384:
2636 	case RTE_CRYPTO_AUTH_MD5:
2637 	case RTE_CRYPTO_AUTH_AES_GMAC:
2638 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2639 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2640 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2641 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2642 			      session->auth_alg);
2643 		return -1;
2644 	default:
2645 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2646 			      session->auth_alg);
2647 		return -1;
2648 	}
2649 	cipherdata->key = (size_t)session->cipher_key.data;
2650 	cipherdata->keylen = session->cipher_key.length;
2651 	cipherdata->key_enc_flags = 0;
2652 	cipherdata->key_type = RTA_DATA_IMM;
2653 
2654 	switch (session->cipher_alg) {
2655 	case RTE_CRYPTO_CIPHER_AES_CBC:
2656 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2657 		cipherdata->algmode = OP_ALG_AAI_CBC;
2658 		break;
2659 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2660 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2661 		cipherdata->algmode = OP_ALG_AAI_CBC;
2662 		break;
2663 	case RTE_CRYPTO_CIPHER_AES_CTR:
2664 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2665 		cipherdata->algmode = OP_ALG_AAI_CTR;
2666 		break;
2667 	case RTE_CRYPTO_CIPHER_NULL:
2668 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2669 		break;
2670 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2671 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2672 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2673 	case RTE_CRYPTO_CIPHER_AES_ECB:
2674 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2675 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2676 			      session->cipher_alg);
2677 		return -1;
2678 	default:
2679 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2680 			      session->cipher_alg);
2681 		return -1;
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 #ifdef RTE_LIBRTE_SECURITY_TEST
2688 static uint8_t aes_cbc_iv[] = {
2689 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2690 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2691 #endif
2692 
2693 static int
2694 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2695 			    struct rte_security_session_conf *conf,
2696 			    void *sess)
2697 {
2698 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2699 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2700 	struct rte_crypto_auth_xform *auth_xform = NULL;
2701 	struct rte_crypto_aead_xform *aead_xform = NULL;
2702 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2703 	struct ctxt_priv *priv;
2704 	struct ipsec_encap_pdb encap_pdb;
2705 	struct ipsec_decap_pdb decap_pdb;
2706 	struct alginfo authdata, cipherdata;
2707 	int bufsize;
2708 	struct sec_flow_context *flc;
2709 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2710 	int ret = -1;
2711 
2712 	PMD_INIT_FUNC_TRACE();
2713 
2714 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2715 				sizeof(struct ctxt_priv) +
2716 				sizeof(struct sec_flc_desc),
2717 				RTE_CACHE_LINE_SIZE);
2718 
2719 	if (priv == NULL) {
2720 		DPAA2_SEC_ERR("No memory for priv CTXT");
2721 		return -ENOMEM;
2722 	}
2723 
2724 	priv->fle_pool = dev_priv->fle_pool;
2725 	flc = &priv->flc_desc[0].flc;
2726 
2727 	memset(session, 0, sizeof(dpaa2_sec_session));
2728 
2729 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2730 		cipher_xform = &conf->crypto_xform->cipher;
2731 		if (conf->crypto_xform->next)
2732 			auth_xform = &conf->crypto_xform->next->auth;
2733 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2734 					session, &cipherdata, &authdata);
2735 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2736 		auth_xform = &conf->crypto_xform->auth;
2737 		if (conf->crypto_xform->next)
2738 			cipher_xform = &conf->crypto_xform->next->cipher;
2739 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2740 					session, &cipherdata, &authdata);
2741 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2742 		aead_xform = &conf->crypto_xform->aead;
2743 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2744 					session, &cipherdata);
2745 	} else {
2746 		DPAA2_SEC_ERR("XFORM not specified");
2747 		ret = -EINVAL;
2748 		goto out;
2749 	}
2750 	if (ret) {
2751 		DPAA2_SEC_ERR("Failed to process xform");
2752 		goto out;
2753 	}
2754 
2755 	session->ctxt_type = DPAA2_SEC_IPSEC;
2756 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2757 		uint8_t *hdr = NULL;
2758 		struct ip ip4_hdr;
2759 		struct rte_ipv6_hdr ip6_hdr;
2760 
2761 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2762 		/* For Sec Proto only one descriptor is required. */
2763 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2764 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2765 			PDBOPTS_ESP_OIHI_PDB_INL |
2766 			PDBOPTS_ESP_IVSRC |
2767 			PDBHMO_ESP_ENCAP_DTTL |
2768 			PDBHMO_ESP_SNR;
2769 		if (ipsec_xform->options.esn)
2770 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2771 		encap_pdb.spi = ipsec_xform->spi;
2772 		session->dir = DIR_ENC;
2773 		if (ipsec_xform->tunnel.type ==
2774 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2775 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2776 			ip4_hdr.ip_v = IPVERSION;
2777 			ip4_hdr.ip_hl = 5;
2778 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2779 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2780 			ip4_hdr.ip_id = 0;
2781 			ip4_hdr.ip_off = 0;
2782 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2783 			ip4_hdr.ip_p = IPPROTO_ESP;
2784 			ip4_hdr.ip_sum = 0;
2785 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2786 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2787 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2788 					&ip4_hdr, sizeof(struct ip));
2789 			hdr = (uint8_t *)&ip4_hdr;
2790 		} else if (ipsec_xform->tunnel.type ==
2791 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2792 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2793 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2794 				((ipsec_xform->tunnel.ipv6.dscp <<
2795 					RTE_IPV6_HDR_TC_SHIFT) &
2796 					RTE_IPV6_HDR_TC_MASK) |
2797 				((ipsec_xform->tunnel.ipv6.flabel <<
2798 					RTE_IPV6_HDR_FL_SHIFT) &
2799 					RTE_IPV6_HDR_FL_MASK));
2800 			/* Payload length will be updated by HW */
2801 			ip6_hdr.payload_len = 0;
2802 			ip6_hdr.hop_limits =
2803 					ipsec_xform->tunnel.ipv6.hlimit;
2804 			ip6_hdr.proto = (ipsec_xform->proto ==
2805 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2806 					IPPROTO_ESP : IPPROTO_AH;
2807 			memcpy(&ip6_hdr.src_addr,
2808 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2809 			memcpy(&ip6_hdr.dst_addr,
2810 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2811 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2812 			hdr = (uint8_t *)&ip6_hdr;
2813 		}
2814 
2815 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2816 				1, 0, SHR_SERIAL, &encap_pdb,
2817 				hdr, &cipherdata, &authdata);
2818 	} else if (ipsec_xform->direction ==
2819 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2820 		flc->dhr = SEC_FLC_DHR_INBOUND;
2821 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2822 		decap_pdb.options = (ipsec_xform->tunnel.type ==
2823 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2824 				sizeof(struct ip) << 16 :
2825 				sizeof(struct rte_ipv6_hdr) << 16;
2826 		/* OR in ESN after the tunnel options so it is not clobbered */
2827 		if (ipsec_xform->options.esn)
2828 			decap_pdb.options |= PDBOPTS_ESP_ESN;
2829 		session->dir = DIR_DEC;
2830 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2831 				1, 0, SHR_SERIAL,
2832 				&decap_pdb, &cipherdata, &authdata);
2833 	} else {
2834 		ret = -EINVAL;
		goto out;
	}
2835 
2836 	if (bufsize < 0) {
2837 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		ret = -EINVAL;	/* do not report success on a failed build */
2838 		goto out;
2839 	}
2840 
2841 	flc->word1_sdl = (uint8_t)bufsize;
2842 
2843 	/* Enable the stashing control bit */
2844 	DPAA2_SET_FLC_RSC(flc);
2845 	flc->word2_rflc_31_0 = lower_32_bits(
2846 			(size_t)&(((struct dpaa2_sec_qp *)
2847 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2848 	flc->word3_rflc_63_32 = upper_32_bits(
2849 			(size_t)&(((struct dpaa2_sec_qp *)
2850 			dev->data->queue_pairs[0])->rx_vq));
2851 
2852 	/* Set EWS bit i.e. enable write-safe */
2853 	DPAA2_SET_FLC_EWS(flc);
2854 	/* Set BS = 1 i.e reuse input buffers as output buffers */
2855 	DPAA2_SET_FLC_REUSE_BS(flc);
2856 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2857 	DPAA2_SET_FLC_REUSE_FF(flc);
2858 
2859 	session->ctxt = priv;
2860 
2861 	return 0;
2862 out:
2863 	rte_free(session->auth_key.data);
2864 	rte_free(session->cipher_key.data);
2865 	rte_free(priv);
2866 	return ret;
2867 }
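
/*
 * Illustrative sketch: a minimal egress IPsec tunnel session conf of
 * the shape consumed by dpaa2_sec_set_ipsec_session(). The SPI,
 * tunnel parameters and crypto_xform are placeholders; the exact
 * rte_security_ipsec_xform fields vary across DPDK versions.
 */
static __rte_unused struct rte_security_session *
example_create_ipsec_session(uint8_t dev_id,
			     struct rte_crypto_sym_xform *crypto_xform,
			     struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
			rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,		/* placeholder */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4.ttl = 64,
			},
		},
		.crypto_xform = crypto_xform,
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}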
2868 
2869 static int
2870 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2871 			   struct rte_security_session_conf *conf,
2872 			   void *sess)
2873 {
2874 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2875 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2876 	struct rte_crypto_auth_xform *auth_xform = NULL;
2877 	struct rte_crypto_cipher_xform *cipher_xform;
2878 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2879 	struct ctxt_priv *priv;
2880 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2881 	struct alginfo authdata, cipherdata;
2882 	struct alginfo *p_authdata = NULL;
2883 	int bufsize = -1;
2884 	struct sec_flow_context *flc;
2885 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2886 	int swap = true;
2887 #else
2888 	int swap = false;
2889 #endif
2890 
2891 	PMD_INIT_FUNC_TRACE();
2892 
2893 	memset(session, 0, sizeof(dpaa2_sec_session));
2894 
2895 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2896 				sizeof(struct ctxt_priv) +
2897 				sizeof(struct sec_flc_desc),
2898 				RTE_CACHE_LINE_SIZE);
2899 
2900 	if (priv == NULL) {
2901 		DPAA2_SEC_ERR("No memory for priv CTXT");
2902 		return -ENOMEM;
2903 	}
2904 
2905 	priv->fle_pool = dev_priv->fle_pool;
2906 	flc = &priv->flc_desc[0].flc;
2907 
2908 	/* find xfrm types */
2909 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2910 		cipher_xform = &xform->cipher;
2911 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2912 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2913 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2914 		cipher_xform = &xform->cipher;
2915 		auth_xform = &xform->next->auth;
2916 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2917 		   xform->next != NULL &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2918 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2919 		cipher_xform = &xform->next->cipher;
2920 		auth_xform = &xform->auth;
2921 	} else {
2922 		DPAA2_SEC_ERR("Invalid crypto type");
		rte_free(priv);	/* do not leak the context on error */
2923 		return -EINVAL;
2924 	}
2925 
2926 	session->ctxt_type = DPAA2_SEC_PDCP;
2927 	if (cipher_xform) {
2928 		session->cipher_key.data = rte_zmalloc(NULL,
2929 					       cipher_xform->key.length,
2930 					       RTE_CACHE_LINE_SIZE);
2931 		if (session->cipher_key.data == NULL &&
2932 				cipher_xform->key.length > 0) {
2933 			DPAA2_SEC_ERR("No Memory for cipher key");
2934 			rte_free(priv);
2935 			return -ENOMEM;
2936 		}
2937 		session->cipher_key.length = cipher_xform->key.length;
2938 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2939 			cipher_xform->key.length);
2940 		session->dir =
2941 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2942 					DIR_ENC : DIR_DEC;
2943 		session->cipher_alg = cipher_xform->algo;
2944 	} else {
2945 		session->cipher_key.data = NULL;
2946 		session->cipher_key.length = 0;
2947 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2948 		session->dir = DIR_ENC;
2949 	}
2950 
2951 	session->pdcp.domain = pdcp_xform->domain;
2952 	session->pdcp.bearer = pdcp_xform->bearer;
2953 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2954 	session->pdcp.sn_size = pdcp_xform->sn_size;
2955 	session->pdcp.hfn = pdcp_xform->hfn;
2956 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2957 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2958 	/* hfn ovd offset location is stored in the iv.offset value */
2959 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2960 
2961 	cipherdata.key = (size_t)session->cipher_key.data;
2962 	cipherdata.keylen = session->cipher_key.length;
2963 	cipherdata.key_enc_flags = 0;
2964 	cipherdata.key_type = RTA_DATA_IMM;
2965 
2966 	switch (session->cipher_alg) {
2967 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2968 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2969 		break;
2970 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2971 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2972 		break;
2973 	case RTE_CRYPTO_CIPHER_AES_CTR:
2974 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2975 		break;
2976 	case RTE_CRYPTO_CIPHER_NULL:
2977 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2978 		break;
2979 	default:
2980 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2981 			      session->cipher_alg);
2982 		goto out;
2983 	}
2984 
2985 	if (auth_xform) {
2986 		session->auth_key.data = rte_zmalloc(NULL,
2987 						     auth_xform->key.length,
2988 						     RTE_CACHE_LINE_SIZE);
2989 		if (!session->auth_key.data &&
2990 		    auth_xform->key.length > 0) {
2991 			DPAA2_SEC_ERR("No Memory for auth key");
2992 			rte_free(session->cipher_key.data);
2993 			rte_free(priv);
2994 			return -ENOMEM;
2995 		}
2996 		session->auth_key.length = auth_xform->key.length;
2997 		memcpy(session->auth_key.data, auth_xform->key.data,
2998 		       auth_xform->key.length);
2999 		session->auth_alg = auth_xform->algo;
3000 	} else {
3001 		session->auth_key.data = NULL;
3002 		session->auth_key.length = 0;
3003 		session->auth_alg = 0;
3004 	}
3005 	authdata.key = (size_t)session->auth_key.data;
3006 	authdata.keylen = session->auth_key.length;
3007 	authdata.key_enc_flags = 0;
3008 	authdata.key_type = RTA_DATA_IMM;
3009 
3010 	if (session->auth_alg) {
3011 		switch (session->auth_alg) {
3012 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3013 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3014 			break;
3015 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3016 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3017 			break;
3018 		case RTE_CRYPTO_AUTH_AES_CMAC:
3019 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3020 			break;
3021 		case RTE_CRYPTO_AUTH_NULL:
3022 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3023 			break;
3024 		default:
3025 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3026 				      session->auth_alg);
3027 			goto out;
3028 		}
3029 
3030 		p_authdata = &authdata;
3031 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3032 		DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3033 		goto out;
3034 	}
3035 
3036 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3037 		if (session->dir == DIR_ENC)
3038 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3039 					priv->flc_desc[0].desc, 1, swap,
3040 					pdcp_xform->hfn,
3041 					session->pdcp.sn_size,
3042 					pdcp_xform->bearer,
3043 					pdcp_xform->pkt_dir,
3044 					pdcp_xform->hfn_threshold,
3045 					&cipherdata, &authdata,
3046 					0);
3047 		else if (session->dir == DIR_DEC)
3048 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3049 					priv->flc_desc[0].desc, 1, swap,
3050 					pdcp_xform->hfn,
3051 					session->pdcp.sn_size,
3052 					pdcp_xform->bearer,
3053 					pdcp_xform->pkt_dir,
3054 					pdcp_xform->hfn_threshold,
3055 					&cipherdata, &authdata,
3056 					0);
3057 	} else {
3058 		if (session->dir == DIR_ENC)
3059 			bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3060 					priv->flc_desc[0].desc, 1, swap,
3061 					session->pdcp.sn_size,
3062 					pdcp_xform->hfn,
3063 					pdcp_xform->bearer,
3064 					pdcp_xform->pkt_dir,
3065 					pdcp_xform->hfn_threshold,
3066 					&cipherdata, p_authdata, 0);
3067 		else if (session->dir == DIR_DEC)
3068 			bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3069 					priv->flc_desc[0].desc, 1, swap,
3070 					session->pdcp.sn_size,
3071 					pdcp_xform->hfn,
3072 					pdcp_xform->bearer,
3073 					pdcp_xform->pkt_dir,
3074 					pdcp_xform->hfn_threshold,
3075 					&cipherdata, p_authdata, 0);
3076 	}
3077 
3078 	if (bufsize < 0) {
3079 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
3080 		goto out;
3081 	}
3082 
3083 	/* Enable the stashing control bit */
3084 	DPAA2_SET_FLC_RSC(flc);
3085 	flc->word2_rflc_31_0 = lower_32_bits(
3086 			(size_t)&(((struct dpaa2_sec_qp *)
3087 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3088 	flc->word3_rflc_63_32 = upper_32_bits(
3089 			(size_t)&(((struct dpaa2_sec_qp *)
3090 			dev->data->queue_pairs[0])->rx_vq));
3091 
3092 	flc->word1_sdl = (uint8_t)bufsize;
3093 
3094 	/* TODO - check the perf impact or
3095 	 * align as per descriptor type
3096 	 * Set EWS bit i.e. enable write-safe
3097 	 * DPAA2_SET_FLC_EWS(flc);
3098 	 */
3099 
3100 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3101 	DPAA2_SET_FLC_REUSE_BS(flc);
3102 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3103 	DPAA2_SET_FLC_REUSE_FF(flc);
3104 
3105 	session->ctxt = priv;
3106 
3107 	return 0;
3108 out:
3109 	rte_free(session->auth_key.data);
3110 	rte_free(session->cipher_key.data);
3111 	rte_free(priv);
3112 	return -1;
3113 }
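
/*
 * Illustrative sketch: a PDCP data-plane session conf of the shape
 * consumed by dpaa2_sec_set_pdcp_session(). Bearer, HFN and the
 * crypto_xform are placeholders.
 */
static __rte_unused struct rte_security_session *
example_create_pdcp_session(uint8_t dev_id,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
			rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.bearer = 0x3,		/* placeholder */
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
			.hfn = 0x1,
			.hfn_threshold = 0xfffff,
		},
		.crypto_xform = crypto_xform,
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}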
3114 
3115 static int
3116 dpaa2_sec_security_session_create(void *dev,
3117 				  struct rte_security_session_conf *conf,
3118 				  struct rte_security_session *sess,
3119 				  struct rte_mempool *mempool)
3120 {
3121 	void *sess_private_data;
3122 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3123 	int ret;
3124 
3125 	if (rte_mempool_get(mempool, &sess_private_data)) {
3126 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3127 		return -ENOMEM;
3128 	}
3129 
3130 	switch (conf->protocol) {
3131 	case RTE_SECURITY_PROTOCOL_IPSEC:
3132 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3133 				sess_private_data);
3134 		break;
3135 	case RTE_SECURITY_PROTOCOL_MACSEC:
3136 		return -ENOTSUP;
3137 	case RTE_SECURITY_PROTOCOL_PDCP:
3138 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3139 				sess_private_data);
3140 		break;
3141 	default:
3142 		return -EINVAL;
3143 	}
3144 	if (ret != 0) {
3145 		DPAA2_SEC_ERR("Failed to configure session parameters");
3146 		/* Return session to mempool */
3147 		rte_mempool_put(mempool, sess_private_data);
3148 		return ret;
3149 	}
3150 
3151 	set_sec_session_private_data(sess, sess_private_data);
3152 
3153 	return ret;
3154 }
3155 
3156 /** Clear the memory of session so it doesn't leave key material behind */
3157 static int
3158 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3159 		struct rte_security_session *sess)
3160 {
3161 	PMD_INIT_FUNC_TRACE();
3162 	void *sess_priv = get_sec_session_private_data(sess);
3163 
3164 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3165 
3166 	if (sess_priv) {
3167 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3168 
3169 		rte_free(s->ctxt);
3170 		rte_free(s->cipher_key.data);
3171 		rte_free(s->auth_key.data);
3172 		memset(s, 0, sizeof(dpaa2_sec_session));
3173 		set_sec_session_private_data(sess, NULL);
3174 		rte_mempool_put(sess_mp, sess_priv);
3175 	}
3176 	return 0;
3177 }
3178 
3179 static int
3180 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3181 		struct rte_crypto_sym_xform *xform,
3182 		struct rte_cryptodev_sym_session *sess,
3183 		struct rte_mempool *mempool)
3184 {
3185 	void *sess_private_data;
3186 	int ret;
3187 
3188 	if (rte_mempool_get(mempool, &sess_private_data)) {
3189 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3190 		return -ENOMEM;
3191 	}
3192 
3193 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3194 	if (ret != 0) {
3195 		DPAA2_SEC_ERR("Failed to configure session parameters");
3196 		/* Return session to mempool */
3197 		rte_mempool_put(mempool, sess_private_data);
3198 		return ret;
3199 	}
3200 
3201 	set_sym_session_private_data(sess, dev->driver_id,
3202 		sess_private_data);
3203 
3204 	return 0;
3205 }
3206 
3207 /** Clear the memory of session so it doesn't leave key material behind */
3208 static void
3209 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3210 		struct rte_cryptodev_sym_session *sess)
3211 {
3212 	PMD_INIT_FUNC_TRACE();
3213 	uint8_t index = dev->driver_id;
3214 	void *sess_priv = get_sym_session_private_data(sess, index);
3215 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3216 
3217 	if (sess_priv) {
3218 		rte_free(s->ctxt);
3219 		rte_free(s->cipher_key.data);
3220 		rte_free(s->auth_key.data);
3221 		memset(s, 0, sizeof(dpaa2_sec_session));
3222 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3223 		set_sym_session_private_data(sess, index, NULL);
3224 		rte_mempool_put(sess_mp, sess_priv);
3225 	}
3226 }
3227 
3228 static int
3229 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3230 			struct rte_cryptodev_config *config __rte_unused)
3231 {
3232 	PMD_INIT_FUNC_TRACE();
3233 
3234 	return 0;
3235 }
3236 
3237 static int
3238 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3239 {
3240 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3241 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3242 	struct dpseci_attr attr;
3243 	struct dpaa2_queue *dpaa2_q;
3244 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3245 					dev->data->queue_pairs;
3246 	struct dpseci_rx_queue_attr rx_attr;
3247 	struct dpseci_tx_queue_attr tx_attr;
3248 	int ret, i;
3249 
3250 	PMD_INIT_FUNC_TRACE();
3251 
3252 	memset(&attr, 0, sizeof(struct dpseci_attr));
3253 
3254 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3255 	if (ret) {
3256 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3257 			      priv->hw_id);
3258 		goto get_attr_failure;
3259 	}
3260 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3261 	if (ret) {
3262 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3263 		goto get_attr_failure;
3264 	}
3265 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3266 		dpaa2_q = &qp[i]->rx_vq;
3267 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3268 				    &rx_attr);
3269 		dpaa2_q->fqid = rx_attr.fqid;
3270 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3271 	}
3272 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3273 		dpaa2_q = &qp[i]->tx_vq;
3274 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3275 				    &tx_attr);
3276 		dpaa2_q->fqid = tx_attr.fqid;
3277 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3278 	}
3279 
3280 	return 0;
3281 get_attr_failure:
3282 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3283 	return -1;
3284 }
3285 
3286 static void
3287 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3288 {
3289 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3290 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3291 	int ret;
3292 
3293 	PMD_INIT_FUNC_TRACE();
3294 
3295 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3296 	if (ret) {
3297 		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3298 			     priv->hw_id);
3299 		return;
3300 	}
3301 
3302 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3303 	if (ret < 0) {
3304 		DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3305 		return;
3306 	}
3307 }
3308 
3309 static int
3310 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3311 {
3312 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3313 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3314 	int ret;
3315 
3316 	PMD_INIT_FUNC_TRACE();
3317 
3318 	/* Function is reverse of dpaa2_sec_dev_init.
3319 	 * It does the following:
3320 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3321 	 * 2. Close the DPSECI device
3322 	 * 3. Free the allocated resources.
3323 	 */
3324 
3325 	/* Close the device at the underlying layer */
3326 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3327 	if (ret) {
3328 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3329 		return -1;
3330 	}
3331 
3332 	/* Free the allocated dpseci object */
3333 	priv->hw = NULL;
3334 	rte_free(dpseci);
3335 
3336 	return 0;
3337 }
3338 
3339 static void
3340 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3341 			struct rte_cryptodev_info *info)
3342 {
3343 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3344 
3345 	PMD_INIT_FUNC_TRACE();
3346 	if (info != NULL) {
3347 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3348 		info->feature_flags = dev->feature_flags;
3349 		info->capabilities = dpaa2_sec_capabilities;
3350 		/* No limit on the number of sessions */
3351 		info->sym.max_nb_sessions = 0;
3352 		info->driver_id = cryptodev_driver_id;
3353 	}
3354 }
3355 
3356 static
3357 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3358 			 struct rte_cryptodev_stats *stats)
3359 {
3360 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3361 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3362 	struct dpseci_sec_counters counters = {0};
3363 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3364 					dev->data->queue_pairs;
3365 	int ret, i;
3366 
3367 	PMD_INIT_FUNC_TRACE();
3368 	if (stats == NULL) {
3369 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3370 		return;
3371 	}
3372 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3373 		if (qp[i] == NULL) {
3374 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3375 			continue;
3376 		}
3377 
3378 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3379 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3380 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3381 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3382 	}
3383 
3384 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3385 				      &counters);
3386 	if (ret) {
3387 		DPAA2_SEC_ERR("SEC counters failed");
3388 	} else {
3389 		DPAA2_SEC_INFO("dpseci hardware stats:"
3390 			    "\n\tNum of Requests Dequeued = %" PRIu64
3391 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3392 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3393 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3394 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3395 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3396 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3397 			    counters.dequeued_requests,
3398 			    counters.ob_enc_requests,
3399 			    counters.ib_dec_requests,
3400 			    counters.ob_enc_bytes,
3401 			    counters.ob_prot_bytes,
3402 			    counters.ib_dec_bytes,
3403 			    counters.ib_valid_bytes);
3404 	}
3405 }
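
/*
 * Illustrative sketch: reading and clearing the counters exposed by the
 * stats ops above through the generic cryptodev API.
 */
static __rte_unused void
example_dump_and_reset_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		DPAA2_SEC_INFO("enqueued %" PRIu64 ", dequeued %" PRIu64,
			       stats.enqueued_count, stats.dequeued_count);

	rte_cryptodev_stats_reset(dev_id);
}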
3406 
3407 static
3408 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3409 {
3410 	int i;
3411 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3412 				   (dev->data->queue_pairs);
3413 
3414 	PMD_INIT_FUNC_TRACE();
3415 
3416 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3417 		if (qp[i] == NULL) {
3418 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3419 			continue;
3420 		}
3421 		qp[i]->tx_vq.rx_pkts = 0;
3422 		qp[i]->tx_vq.tx_pkts = 0;
3423 		qp[i]->tx_vq.err_pkts = 0;
3424 		qp[i]->rx_vq.rx_pkts = 0;
3425 		qp[i]->rx_vq.tx_pkts = 0;
3426 		qp[i]->rx_vq.err_pkts = 0;
3427 	}
3428 }
3429 
3430 static void __attribute__((hot))
3431 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3432 				 const struct qbman_fd *fd,
3433 				 const struct qbman_result *dq,
3434 				 struct dpaa2_queue *rxq,
3435 				 struct rte_event *ev)
3436 {
3437 	/* Prefetching mbuf */
3438 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3439 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3440 
3441 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3442 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3443 
3444 	ev->flow_id = rxq->ev.flow_id;
3445 	ev->sub_event_type = rxq->ev.sub_event_type;
3446 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3447 	ev->op = RTE_EVENT_OP_NEW;
3448 	ev->sched_type = rxq->ev.sched_type;
3449 	ev->queue_id = rxq->ev.queue_id;
3450 	ev->priority = rxq->ev.priority;
3451 	ev->event_ptr = sec_fd_to_mbuf(fd);
3452 
3453 	qbman_swp_dqrr_consume(swp, dq);
3454 }
3455 static void
3456 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3457 				 const struct qbman_fd *fd,
3458 				 const struct qbman_result *dq,
3459 				 struct dpaa2_queue *rxq,
3460 				 struct rte_event *ev)
3461 {
3462 	uint8_t dqrr_index;
3463 	struct rte_crypto_op *crypto_op;
3464 	/* Prefetching mbuf */
3465 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3466 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3467 
3468 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3469 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3470 
3471 	ev->flow_id = rxq->ev.flow_id;
3472 	ev->sub_event_type = rxq->ev.sub_event_type;
3473 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3474 	ev->op = RTE_EVENT_OP_NEW;
3475 	ev->sched_type = rxq->ev.sched_type;
3476 	ev->queue_id = rxq->ev.queue_id;
3477 	ev->priority = rxq->ev.priority;
3478 
3479 	ev->event_ptr = sec_fd_to_mbuf(fd);
3480 	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3481 	dqrr_index = qbman_get_dqrr_idx(dq);
3482 	crypto_op->sym->m_src->seqn = dqrr_index + 1;
3483 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3484 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3485 }
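
/*
 * Note on the atomic handler above: unlike the parallel path, the DQRR
 * entry is deliberately not consumed here.  Its index is parked in the
 * mbuf seqn field and in the per-lcore DQRR bookkeeping, so the entry
 * is released only when the application enqueues or releases the event,
 * which is what preserves atomic ordering end to end.
 */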
3486 
3487 int
3488 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3489 		int qp_id,
3490 		uint16_t dpcon_id,
3491 		const struct rte_event *event)
3492 {
3493 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3494 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3495 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3496 	struct dpseci_rx_queue_cfg cfg;
3497 	int ret;
3498 
3499 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3500 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3501 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3502 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3503 	else
3504 		return -EINVAL;
3505 
3506 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3507 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3508 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3509 	cfg.dest_cfg.dest_id = dpcon_id;
3510 	cfg.dest_cfg.priority = event->priority;
3511 
3512 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3513 	cfg.user_ctx = (size_t)(qp);
3514 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3515 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3516 		cfg.order_preservation_en = 1;
3517 	}
3518 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3519 				  qp_id, &cfg);
3520 	if (ret) {
3521 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
3522 		return ret;
3523 	}
3524 
3525 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3526 
3527 	return 0;
3528 }
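
/*
 * Editorial sketch (hypothetical "cdev", "qp_id" and "dpcon_id"): the
 * attach/detach pair here is normally driven by the eventdev crypto
 * adapter; the event template decides parallel vs. atomic dispatch.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *
 *	if (dpaa2_sec_eventq_attach(cdev, qp_id, dpcon_id, &ev) != 0)
 *		printf("event queue attach failed\n");
 */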
3529 
3530 int
3531 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3532 			int qp_id)
3533 {
3534 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3535 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3536 	struct dpseci_rx_queue_cfg cfg;
3537 	int ret;
3538 
3539 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3540 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3541 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3542 
3543 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3544 				  qp_id, &cfg);
3545 	if (ret)
3546 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
3547 
3548 	return ret;
3549 }
3550 
3551 static struct rte_cryptodev_ops crypto_ops = {
3552 	.dev_configure	      = dpaa2_sec_dev_configure,
3553 	.dev_start	      = dpaa2_sec_dev_start,
3554 	.dev_stop	      = dpaa2_sec_dev_stop,
3555 	.dev_close	      = dpaa2_sec_dev_close,
3556 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3557 	.stats_get	      = dpaa2_sec_stats_get,
3558 	.stats_reset	      = dpaa2_sec_stats_reset,
3559 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3560 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3561 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
3562 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3563 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3564 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3565 };
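
/*
 * Editorial sketch: these ops back the generic cryptodev setup sequence
 * an application runs, roughly as below ("dev_id" is hypothetical, and
 * queue-pair setup arguments vary across DPDK releases):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	... rte_cryptodev_queue_pair_setup(dev_id, 0, ...) per queue ...
 *	rte_cryptodev_start(dev_id);
 */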
3566 
3567 static const struct rte_security_capability *
3568 dpaa2_sec_capabilities_get(void *device __rte_unused)
3569 {
3570 	return dpaa2_sec_security_cap;
3571 }
3572 
3573 static const struct rte_security_ops dpaa2_sec_security_ops = {
3574 	.session_create = dpaa2_sec_security_session_create,
3575 	.session_update = NULL,
3576 	.session_stats_get = NULL,
3577 	.session_destroy = dpaa2_sec_security_session_destroy,
3578 	.set_pkt_metadata = NULL,
3579 	.capabilities_get = dpaa2_sec_capabilities_get
3580 };
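
/*
 * Editorial sketch: the security ctx wired up in dev_init below is how
 * applications discover this PMD's protocol-offload (e.g. IPsec)
 * capabilities; "dev_id" is hypothetical.
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	const struct rte_security_capability *caps =
 *		rte_security_capabilities_get(ctx);
 */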
3581 
3582 static int
3583 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3584 {
3585 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3586 
3587 	rte_free(dev->security_ctx);
3588 
3589 	rte_mempool_free(internals->fle_pool);
3590 
3591 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3592 		       dev->data->name, rte_socket_id());
3593 
3594 	return 0;
3595 }
3596 
3597 static int
3598 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3599 {
3600 	struct dpaa2_sec_dev_private *internals;
3601 	struct rte_device *dev = cryptodev->device;
3602 	struct rte_dpaa2_device *dpaa2_dev;
3603 	struct rte_security_ctx *security_instance;
3604 	struct fsl_mc_io *dpseci;
3605 	uint16_t token;
3606 	struct dpseci_attr attr;
3607 	int retcode, hw_id;
3608 	char str[30];
3609 
3610 	PMD_INIT_FUNC_TRACE();
3611 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3612 	if (dpaa2_dev == NULL) {
3613 		DPAA2_SEC_ERR("DPAA2 SEC device not found");
3614 		return -1;
3615 	}
3616 	hw_id = dpaa2_dev->object_id;
3617 
3618 	cryptodev->driver_id = cryptodev_driver_id;
3619 	cryptodev->dev_ops = &crypto_ops;
3620 
3621 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3622 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3623 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3624 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3625 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3626 			RTE_CRYPTODEV_FF_SECURITY |
3627 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3628 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3629 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3630 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3631 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3632 
3633 	internals = cryptodev->data->dev_private;
3634 
3635 	/*
3636 	 * For secondary processes, we don't initialise any further, as the
3637 	 * primary process has already done this work; the shared device
3638 	 * state is reachable through cryptodev->data.
3639 	 */
3640 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3641 		DPAA2_SEC_DEBUG("Device already init by primary process");
3642 		return 0;
3643 	}
3644 
3645 	/* Initialize security_ctx only for primary process */
3646 	security_instance = rte_malloc("rte_security_instances_ops",
3647 				sizeof(struct rte_security_ctx), 0);
3648 	if (security_instance == NULL)
3649 		return -ENOMEM;
3650 	security_instance->device = (void *)cryptodev;
3651 	security_instance->ops = &dpaa2_sec_security_ops;
3652 	security_instance->sess_cnt = 0;
3653 	cryptodev->security_ctx = security_instance;
3654 
3655 	/* Open the DPSECI device via MC and save the handle for further use */
3656 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3657 				sizeof(struct fsl_mc_io), 0);
3658 	if (!dpseci) {
3659 		DPAA2_SEC_ERR(
3660 			"Error allocating memory for the dpseci object");
3661 		return -1;
3662 	}
3663 	dpseci->regs = rte_mcp_ptr_list[0];
3664 
3665 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3666 	if (retcode != 0) {
3667 		DPAA2_SEC_ERR("Cannot open the dpseci device: Error = %x",
3668 			      retcode);
3669 		goto init_error;
3670 	}
3671 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3672 	if (retcode != 0) {
3673 		DPAA2_SEC_ERR(
3674 			     "Cannot get dpseci device attributes: Error = %x",
3675 			     retcode);
3676 		goto init_error;
3677 	}
3678 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3679 			"dpsec-%u", hw_id);
3680 
3681 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3682 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3683 	internals->hw = dpseci;
3684 	internals->token = token;
3685 
3686 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3687 			getpid(), cryptodev->data->dev_id);
3688 	internals->fle_pool = rte_mempool_create((const char *)str,
3689 			FLE_POOL_NUM_BUFS,
3690 			FLE_POOL_BUF_SIZE,
3691 			FLE_POOL_CACHE_SIZE, 0,
3692 			NULL, NULL, NULL, NULL,
3693 			SOCKET_ID_ANY, 0);
3694 	if (!internals->fle_pool) {
3695 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3696 		goto init_error;
3697 	}
3698 
3699 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3700 	return 0;
3701 
3702 init_error:
3703 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3704 
3706 	return -EFAULT;
3707 }
3708 
3709 static int
3710 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3711 			  struct rte_dpaa2_device *dpaa2_dev)
3712 {
3713 	struct rte_cryptodev *cryptodev;
3714 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3715 
3716 	int retval;
3717 
3718 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3719 			dpaa2_dev->object_id);
3720 
3721 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3722 	if (cryptodev == NULL)
3723 		return -ENOMEM;
3724 
3725 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3726 		cryptodev->data->dev_private = rte_zmalloc_socket(
3727 					"cryptodev private structure",
3728 					sizeof(struct dpaa2_sec_dev_private),
3729 					RTE_CACHE_LINE_SIZE,
3730 					rte_socket_id());
3731 
3732 		if (cryptodev->data->dev_private == NULL)
3733 			rte_panic("Cannot allocate memory for private "
3734 				  "device data");
3735 	}
3736 
3737 	dpaa2_dev->cryptodev = cryptodev;
3738 	cryptodev->device = &dpaa2_dev->device;
3739 
3740 	/* init user callbacks */
3741 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3742 
3743 	if (dpaa2_svr_family == SVR_LX2160A)
3744 		rta_set_sec_era(RTA_SEC_ERA_10);
3745 
3746 	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
3747 
3748 	/* Invoke PMD device initialization function */
3749 	retval = dpaa2_sec_dev_init(cryptodev);
3750 	if (retval == 0)
3751 		return 0;
3752 
3753 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3754 		rte_free(cryptodev->data->dev_private);
3755 
3756 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3757 
3758 	return -ENXIO;
3759 }
3760 
3761 static int
3762 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3763 {
3764 	struct rte_cryptodev *cryptodev;
3765 	int ret;
3766 
3767 	cryptodev = dpaa2_dev->cryptodev;
3768 	if (cryptodev == NULL)
3769 		return -ENODEV;
3770 
3771 	ret = dpaa2_sec_uninit(cryptodev);
3772 	if (ret)
3773 		return ret;
3774 
3775 	return rte_cryptodev_pmd_destroy(cryptodev);
3776 }
3777 
3778 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3779 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3780 	.drv_type = DPAA2_CRYPTO,
3781 	.driver = {
3782 		.name = "DPAA2 SEC PMD"
3783 	},
3784 	.probe = cryptodev_dpaa2_sec_probe,
3785 	.remove = cryptodev_dpaa2_sec_remove,
3786 };
3787 
3788 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3789 
3790 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3791 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3792 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3793 
3794 RTE_INIT(dpaa2_sec_init_log)
3795 {
3796 	/* Crypto PMD level logs */
3797 	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3798 	if (dpaa2_logtype_sec >= 0)
3799 		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3800 }
3801