1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35 
36 /* Required types */
37 typedef uint64_t	dma_addr_t;
38 
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
43 
44 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
45  * a pointer to the shared descriptor
46  */
47 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52 
53 #define NO_PREFETCH 0
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS	32000
56 #define FLE_POOL_BUF_SIZE	256
57 #define FLE_POOL_CACHE_SIZE	512
58 #define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
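/*
 * Data head room (DHR) adjustment applied to mbuf->data_off when a simple
 * (protocol offload) FD is dequeued: outbound frames have their data pointer
 * pulled back to expose the prepended tunnel headers, inbound frames need no
 * adjustment. See sec_simple_fd_to_mbuf().
 */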
59 #define SEC_FLC_DHR_OUTBOUND	-114
60 #define SEC_FLC_DHR_INBOUND	0
61 
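/* SEC hardware era assumed by the RTA descriptor-construction library. */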
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
63 
64 static uint8_t cryptodev_driver_id;
65 
66 int dpaa2_logtype_sec;
67 
68 static inline int
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70 			   struct rte_crypto_op *op,
71 			   struct qbman_fd *fd, uint16_t bpid)
72 {
73 	struct rte_crypto_sym_op *sym_op = op->sym;
74 	struct ctxt_priv *priv = sess->ctxt;
75 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76 	struct sec_flow_context *flc;
77 	struct rte_mbuf *mbuf;
78 	uint32_t in_len = 0, out_len = 0;
79 
80 	if (sym_op->m_dst)
81 		mbuf = sym_op->m_dst;
82 	else
83 		mbuf = sym_op->m_src;
84 
85 	/* first FLE entry used to store mbuf and session ctxt */
86 	fle = (struct qbman_fle *)rte_malloc(NULL,
87 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
88 			RTE_CACHE_LINE_SIZE);
89 	if (unlikely(!fle)) {
90 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
91 		return -1;
92 	}
93 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
94 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96 
97 	/* Save the shared descriptor */
98 	flc = &priv->flc_desc[0].flc;
99 
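	/*
	 * Compound FD layout: fle[0] holds bookkeeping (the op pointer and
	 * session ctxt), fle[1] is the output frame-list entry, fle[2] the
	 * input frame-list entry, and fle[3..] the scatter/gather entries.
	 */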
100 	op_fle = fle + 1;
101 	ip_fle = fle + 2;
102 	sge = fle + 3;
103 
104 	if (likely(bpid < MAX_BPID)) {
105 		DPAA2_SET_FD_BPID(fd, bpid);
106 		DPAA2_SET_FLE_BPID(op_fle, bpid);
107 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
108 	} else {
109 		DPAA2_SET_FD_IVP(fd);
110 		DPAA2_SET_FLE_IVP(op_fle);
111 		DPAA2_SET_FLE_IVP(ip_fle);
112 	}
113 
114 	/* Configure FD as a FRAME LIST */
115 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116 	DPAA2_SET_FD_COMPOUND_FMT(fd);
117 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
118 
119 	/* Configure Output FLE with Scatter/Gather Entry */
120 	DPAA2_SET_FLE_SG_EXT(op_fle);
121 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
122 
123 	/* Configure Output SGE for Encap/Decap */
124 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
126 	/* o/p segs */
127 	while (mbuf->next) {
128 		sge->length = mbuf->data_len;
129 		out_len += sge->length;
130 		sge++;
131 		mbuf = mbuf->next;
132 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
134 	}
135 	/* use buf_len for the last buffer so that extra data can be added */
136 	sge->length = mbuf->buf_len - mbuf->data_off;
137 	out_len += sge->length;
138 
139 	DPAA2_SET_FLE_FIN(sge);
140 	op_fle->length = out_len;
141 
142 	sge++;
143 	mbuf = sym_op->m_src;
144 
145 	/* Configure Input FLE with Scatter/Gather Entry */
146 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147 	DPAA2_SET_FLE_SG_EXT(ip_fle);
148 	DPAA2_SET_FLE_FIN(ip_fle);
149 
150 	/* Configure input SGE for Encap/Decap */
151 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153 	sge->length = mbuf->data_len;
154 	in_len += sge->length;
155 
156 	mbuf = mbuf->next;
157 	/* i/p segs */
158 	while (mbuf) {
159 		sge++;
160 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162 		sge->length = mbuf->data_len;
163 		in_len += sge->length;
164 		mbuf = mbuf->next;
165 	}
166 	ip_fle->length = in_len;
167 	DPAA2_SET_FLE_FIN(sge);
168 
169 	/* In case of PDCP, the per-packet HFN is stored in the
170 	 * mbuf private area, after sym_op.
171 	 */
172 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173 		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
174 	/* enable HFN override */
175 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
176 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
177 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
178 	}
179 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
180 
181 	return 0;
182 }
183 
184 static inline int
185 build_proto_compound_fd(dpaa2_sec_session *sess,
186 	       struct rte_crypto_op *op,
187 	       struct qbman_fd *fd, uint16_t bpid)
188 {
189 	struct rte_crypto_sym_op *sym_op = op->sym;
190 	struct ctxt_priv *priv = sess->ctxt;
191 	struct qbman_fle *fle, *ip_fle, *op_fle;
192 	struct sec_flow_context *flc;
193 	struct rte_mbuf *src_mbuf = sym_op->m_src;
194 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
195 	int retval;
196 
197 	if (!dst_mbuf)
198 		dst_mbuf = src_mbuf;
199 
200 	/* Save the shared descriptor */
201 	flc = &priv->flc_desc[0].flc;
202 
203 	/* we are using the first FLE entry to store Mbuf */
204 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
205 	if (retval) {
206 		DPAA2_SEC_DP_ERR("Memory alloc failed");
207 		return -1;
208 	}
209 	memset(fle, 0, FLE_POOL_BUF_SIZE);
210 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
211 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
212 
213 	op_fle = fle + 1;
214 	ip_fle = fle + 2;
215 
216 	if (likely(bpid < MAX_BPID)) {
217 		DPAA2_SET_FD_BPID(fd, bpid);
218 		DPAA2_SET_FLE_BPID(op_fle, bpid);
219 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
220 	} else {
221 		DPAA2_SET_FD_IVP(fd);
222 		DPAA2_SET_FLE_IVP(op_fle);
223 		DPAA2_SET_FLE_IVP(ip_fle);
224 	}
225 
226 	/* Configure FD as a FRAME LIST */
227 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
228 	DPAA2_SET_FD_COMPOUND_FMT(fd);
229 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
230 
231 	/* Configure Output FLE with dst mbuf data  */
232 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
233 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
234 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
235 
236 	/* Configure Input FLE with src mbuf data */
237 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
238 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
239 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
240 
241 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
242 	DPAA2_SET_FLE_FIN(ip_fle);
243 
244 	/* In case of PDCP, the per-packet HFN is stored in the
245 	 * mbuf private area, after sym_op.
246 	 */
247 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
248 		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
249 	/* enable HFN override */
250 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
251 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
252 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
253 	}
254 
255 	return 0;
256 
257 }
258 
259 static inline int
260 build_proto_fd(dpaa2_sec_session *sess,
261 	       struct rte_crypto_op *op,
262 	       struct qbman_fd *fd, uint16_t bpid)
263 {
264 	struct rte_crypto_sym_op *sym_op = op->sym;
265 	if (sym_op->m_dst)
266 		return build_proto_compound_fd(sess, op, fd, bpid);
267 
268 	struct ctxt_priv *priv = sess->ctxt;
269 	struct sec_flow_context *flc;
270 	struct rte_mbuf *mbuf = sym_op->m_src;
271 
272 	if (likely(bpid < MAX_BPID))
273 		DPAA2_SET_FD_BPID(fd, bpid);
274 	else
275 		DPAA2_SET_FD_IVP(fd);
276 
277 	/* Save the shared descriptor */
278 	flc = &priv->flc_desc[0].flc;
279 
280 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
281 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
282 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
283 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
284 
285 	/* Stash the op pointer in the mbuf's buf_iova and park the original
	 * buf_iova in the (otherwise unused) digest phys_addr field;
	 * sec_simple_fd_to_mbuf() restores both on dequeue.
	 */
286 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
287 	mbuf->buf_iova = (size_t)op;
288 
289 	return 0;
290 }
291 
292 static inline int
293 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
294 		 struct rte_crypto_op *op,
295 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
296 {
297 	struct rte_crypto_sym_op *sym_op = op->sym;
298 	struct ctxt_priv *priv = sess->ctxt;
299 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
300 	struct sec_flow_context *flc;
301 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
302 	int icv_len = sess->digest_length;
303 	uint8_t *old_icv;
304 	struct rte_mbuf *mbuf;
305 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
306 			sess->iv.offset);
307 
308 	if (sym_op->m_dst)
309 		mbuf = sym_op->m_dst;
310 	else
311 		mbuf = sym_op->m_src;
312 
313 	/* first FLE entry used to store mbuf and session ctxt */
314 	fle = (struct qbman_fle *)rte_malloc(NULL,
315 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
316 			RTE_CACHE_LINE_SIZE);
317 	if (unlikely(!fle)) {
318 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
319 		return -1;
320 	}
321 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
322 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
323 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
324 
325 	op_fle = fle + 1;
326 	ip_fle = fle + 2;
327 	sge = fle + 3;
328 
329 	/* Save the shared descriptor */
330 	flc = &priv->flc_desc[0].flc;
331 
332 	/* Configure FD as a FRAME LIST */
333 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
334 	DPAA2_SET_FD_COMPOUND_FMT(fd);
335 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
336 
337 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
338 		   "iv-len=%d data_off: 0x%x\n",
339 		   sym_op->aead.data.offset,
340 		   sym_op->aead.data.length,
341 		   sess->digest_length,
342 		   sess->iv.length,
343 		   sym_op->m_src->data_off);
344 
345 	/* Configure Output FLE with Scatter/Gather Entry */
346 	DPAA2_SET_FLE_SG_EXT(op_fle);
347 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
348 
349 	if (auth_only_len)
350 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
351 
352 	op_fle->length = (sess->dir == DIR_ENC) ?
353 			(sym_op->aead.data.length + icv_len) :
354 			sym_op->aead.data.length;
355 
356 	/* Configure Output SGE for Encap/Decap */
357 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
358 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
359 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
360 
361 	mbuf = mbuf->next;
362 	/* o/p segs */
363 	while (mbuf) {
364 		sge++;
365 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
366 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
367 		sge->length = mbuf->data_len;
368 		mbuf = mbuf->next;
369 	}
370 	sge->length -= icv_len;
371 
372 	if (sess->dir == DIR_ENC) {
373 		sge++;
374 		DPAA2_SET_FLE_ADDR(sge,
375 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
376 		sge->length = icv_len;
377 	}
378 	DPAA2_SET_FLE_FIN(sge);
379 
380 	sge++;
381 	mbuf = sym_op->m_src;
382 
383 	/* Configure Input FLE with Scatter/Gather Entry */
384 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
385 	DPAA2_SET_FLE_SG_EXT(ip_fle);
386 	DPAA2_SET_FLE_FIN(ip_fle);
387 	ip_fle->length = (sess->dir == DIR_ENC) ?
388 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
389 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
390 		 icv_len);
391 
392 	/* Configure Input SGE for Encap/Decap */
393 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
394 	sge->length = sess->iv.length;
395 
396 	sge++;
397 	if (auth_only_len) {
398 		DPAA2_SET_FLE_ADDR(sge,
399 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
400 		sge->length = auth_only_len;
401 		sge++;
402 	}
403 
404 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
405 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
406 				mbuf->data_off);
407 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
408 
409 	mbuf = mbuf->next;
410 	/* i/p segs */
411 	while (mbuf) {
412 		sge++;
413 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
414 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
415 		sge->length = mbuf->data_len;
416 		mbuf = mbuf->next;
417 	}
418 
419 	if (sess->dir == DIR_DEC) {
420 		sge++;
421 		old_icv = (uint8_t *)(sge + 1);
422 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
423 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
424 		sge->length = icv_len;
425 	}
426 
427 	DPAA2_SET_FLE_FIN(sge);
428 	if (auth_only_len) {
429 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
430 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
431 	}
432 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
433 
434 	return 0;
435 }
436 
437 static inline int
438 build_authenc_gcm_fd(dpaa2_sec_session *sess,
439 		     struct rte_crypto_op *op,
440 		     struct qbman_fd *fd, uint16_t bpid)
441 {
442 	struct rte_crypto_sym_op *sym_op = op->sym;
443 	struct ctxt_priv *priv = sess->ctxt;
444 	struct qbman_fle *fle, *sge;
445 	struct sec_flow_context *flc;
446 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
447 	int icv_len = sess->digest_length, retval;
448 	uint8_t *old_icv;
449 	struct rte_mbuf *dst;
450 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
451 			sess->iv.offset);
452 
453 	if (sym_op->m_dst)
454 		dst = sym_op->m_dst;
455 	else
456 		dst = sym_op->m_src;
457 
458 	/* TODO: we are using the first FLE entry to store the mbuf and
459 	 * session ctxt. Currently we do not know which FLE has the mbuf
460 	 * stored, so on retrieval we go back 1 FLE from the FD ADDR to get
461 	 * the mbuf address from the previous FLE.
462 	 * A better approach could use the inline mbuf.
463 	 */
464 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
465 	if (retval) {
466 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
467 		return -1;
468 	}
469 	memset(fle, 0, FLE_POOL_BUF_SIZE);
470 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
471 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
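	/*
	 * Skip past the bookkeeping FLE: fle[1]/fle[2] become the output
	 * and input frame-list entries, the S/G entries start at fle[3].
	 */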
472 	fle = fle + 1;
473 	sge = fle + 2;
474 	if (likely(bpid < MAX_BPID)) {
475 		DPAA2_SET_FD_BPID(fd, bpid);
476 		DPAA2_SET_FLE_BPID(fle, bpid);
477 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
478 		DPAA2_SET_FLE_BPID(sge, bpid);
479 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
480 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
481 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
482 	} else {
483 		DPAA2_SET_FD_IVP(fd);
484 		DPAA2_SET_FLE_IVP(fle);
485 		DPAA2_SET_FLE_IVP((fle + 1));
486 		DPAA2_SET_FLE_IVP(sge);
487 		DPAA2_SET_FLE_IVP((sge + 1));
488 		DPAA2_SET_FLE_IVP((sge + 2));
489 		DPAA2_SET_FLE_IVP((sge + 3));
490 	}
491 
492 	/* Save the shared descriptor */
493 	flc = &priv->flc_desc[0].flc;
494 	/* Configure FD as a FRAME LIST */
495 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
496 	DPAA2_SET_FD_COMPOUND_FMT(fd);
497 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
498 
499 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
500 		   "iv-len=%d data_off: 0x%x\n",
501 		   sym_op->aead.data.offset,
502 		   sym_op->aead.data.length,
503 		   sess->digest_length,
504 		   sess->iv.length,
505 		   sym_op->m_src->data_off);
506 
507 	/* Configure Output FLE with Scatter/Gather Entry */
508 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
509 	if (auth_only_len)
510 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
511 	fle->length = (sess->dir == DIR_ENC) ?
512 			(sym_op->aead.data.length + icv_len) :
513 			sym_op->aead.data.length;
514 
515 	DPAA2_SET_FLE_SG_EXT(fle);
516 
517 	/* Configure Output SGE for Encap/Decap */
518 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
519 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
520 	sge->length = sym_op->aead.data.length;
521 
522 	if (sess->dir == DIR_ENC) {
523 		sge++;
524 		DPAA2_SET_FLE_ADDR(sge,
525 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
526 		sge->length = sess->digest_length;
527 	}
528 	DPAA2_SET_FLE_FIN(sge);
529 
530 	sge++;
531 	fle++;
532 
533 	/* Configure Input FLE with Scatter/Gather Entry */
534 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
535 	DPAA2_SET_FLE_SG_EXT(fle);
536 	DPAA2_SET_FLE_FIN(fle);
537 	fle->length = (sess->dir == DIR_ENC) ?
538 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
539 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
540 		 sess->digest_length);
541 
542 	/* Configure Input SGE for Encap/Decap */
543 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
544 	sge->length = sess->iv.length;
545 	sge++;
546 	if (auth_only_len) {
547 		DPAA2_SET_FLE_ADDR(sge,
548 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
549 		sge->length = auth_only_len;
550 		DPAA2_SET_FLE_BPID(sge, bpid);
551 		sge++;
552 	}
553 
554 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
555 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
556 				sym_op->m_src->data_off);
557 	sge->length = sym_op->aead.data.length;
558 	if (sess->dir == DIR_DEC) {
559 		sge++;
560 		old_icv = (uint8_t *)(sge + 1);
561 		memcpy(old_icv,	sym_op->aead.digest.data,
562 		       sess->digest_length);
563 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
564 		sge->length = sess->digest_length;
565 	}
566 	DPAA2_SET_FLE_FIN(sge);
567 
568 	if (auth_only_len) {
569 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
570 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
571 	}
572 
573 	DPAA2_SET_FD_LEN(fd, fle->length);
574 	return 0;
575 }
576 
577 static inline int
578 build_authenc_sg_fd(dpaa2_sec_session *sess,
579 		 struct rte_crypto_op *op,
580 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
581 {
582 	struct rte_crypto_sym_op *sym_op = op->sym;
583 	struct ctxt_priv *priv = sess->ctxt;
584 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
585 	struct sec_flow_context *flc;
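	/*
	 * auth_only_len packs the authenticate-only regions surrounding the
	 * cipher region: header length (before the cipher data) in the low
	 * 16 bits, tail length (after it) in the high 16 bits.
	 */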
586 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
587 				sym_op->auth.data.offset;
588 	uint16_t auth_tail_len = sym_op->auth.data.length -
589 				sym_op->cipher.data.length - auth_hdr_len;
590 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
591 	int icv_len = sess->digest_length;
592 	uint8_t *old_icv;
593 	struct rte_mbuf *mbuf;
594 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
595 			sess->iv.offset);
596 
597 	if (sym_op->m_dst)
598 		mbuf = sym_op->m_dst;
599 	else
600 		mbuf = sym_op->m_src;
601 
602 	/* first FLE entry used to store mbuf and session ctxt */
603 	fle = (struct qbman_fle *)rte_malloc(NULL,
604 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
605 			RTE_CACHE_LINE_SIZE);
606 	if (unlikely(!fle)) {
607 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
608 		return -1;
609 	}
610 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
611 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
612 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
613 
614 	op_fle = fle + 1;
615 	ip_fle = fle + 2;
616 	sge = fle + 3;
617 
618 	/* Save the shared descriptor */
619 	flc = &priv->flc_desc[0].flc;
620 
621 	/* Configure FD as a FRAME LIST */
622 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
623 	DPAA2_SET_FD_COMPOUND_FMT(fd);
624 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
625 
626 	DPAA2_SEC_DP_DEBUG(
627 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
628 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
629 		sym_op->auth.data.offset,
630 		sym_op->auth.data.length,
631 		sess->digest_length,
632 		sym_op->cipher.data.offset,
633 		sym_op->cipher.data.length,
634 		sess->iv.length,
635 		sym_op->m_src->data_off);
636 
637 	/* Configure Output FLE with Scatter/Gather Entry */
638 	DPAA2_SET_FLE_SG_EXT(op_fle);
639 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
640 
641 	if (auth_only_len)
642 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
643 
644 	op_fle->length = (sess->dir == DIR_ENC) ?
645 			(sym_op->cipher.data.length + icv_len) :
646 			sym_op->cipher.data.length;
647 
648 	/* Configure Output SGE for Encap/Decap */
649 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
650 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
651 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
652 
653 	mbuf = mbuf->next;
654 	/* o/p segs */
655 	while (mbuf) {
656 		sge++;
657 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
658 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
659 		sge->length = mbuf->data_len;
660 		mbuf = mbuf->next;
661 	}
662 	sge->length -= icv_len;
663 
664 	if (sess->dir == DIR_ENC) {
665 		sge++;
666 		DPAA2_SET_FLE_ADDR(sge,
667 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
668 		sge->length = icv_len;
669 	}
670 	DPAA2_SET_FLE_FIN(sge);
671 
672 	sge++;
673 	mbuf = sym_op->m_src;
674 
675 	/* Configure Input FLE with Scatter/Gather Entry */
676 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
677 	DPAA2_SET_FLE_SG_EXT(ip_fle);
678 	DPAA2_SET_FLE_FIN(ip_fle);
679 	ip_fle->length = (sess->dir == DIR_ENC) ?
680 			(sym_op->auth.data.length + sess->iv.length) :
681 			(sym_op->auth.data.length + sess->iv.length +
682 			 icv_len);
683 
684 	/* Configure Input SGE for Encap/Decap */
685 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
686 	sge->length = sess->iv.length;
687 
688 	sge++;
689 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
690 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
691 				mbuf->data_off);
692 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
693 
694 	mbuf = mbuf->next;
695 	/* i/p segs */
696 	while (mbuf) {
697 		sge++;
698 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
699 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
700 		sge->length = mbuf->data_len;
701 		mbuf = mbuf->next;
702 	}
703 	sge->length -= icv_len;
704 
705 	if (sess->dir == DIR_DEC) {
706 		sge++;
707 		old_icv = (uint8_t *)(sge + 1);
708 		memcpy(old_icv,	sym_op->auth.digest.data,
709 		       icv_len);
710 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
711 		sge->length = icv_len;
712 	}
713 
714 	DPAA2_SET_FLE_FIN(sge);
715 	if (auth_only_len) {
716 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
717 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
718 	}
719 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
720 
721 	return 0;
722 }
723 
724 static inline int
725 build_authenc_fd(dpaa2_sec_session *sess,
726 		 struct rte_crypto_op *op,
727 		 struct qbman_fd *fd, uint16_t bpid)
728 {
729 	struct rte_crypto_sym_op *sym_op = op->sym;
730 	struct ctxt_priv *priv = sess->ctxt;
731 	struct qbman_fle *fle, *sge;
732 	struct sec_flow_context *flc;
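	/*
	 * Authenticate-only lengths around the cipher region: header in the
	 * low 16 bits, tail in the high 16 bits.
	 */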
733 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
734 				sym_op->auth.data.offset;
735 	uint16_t auth_tail_len = sym_op->auth.data.length -
736 				sym_op->cipher.data.length - auth_hdr_len;
737 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
738 
739 	int icv_len = sess->digest_length, retval;
740 	uint8_t *old_icv;
741 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
742 			sess->iv.offset);
743 	struct rte_mbuf *dst;
744 
745 	if (sym_op->m_dst)
746 		dst = sym_op->m_dst;
747 	else
748 		dst = sym_op->m_src;
749 
750 	/* We are using the first FLE entry to store the mbuf.
751 	 * Currently we do not know which FLE has the mbuf stored, so on
752 	 * retrieval we go back 1 FLE from the FD ADDR to get the mbuf
753 	 * address from the previous FLE.
754 	 * A better approach could use the inline mbuf.
755 	 */
756 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
757 	if (retval) {
758 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
759 		return -1;
760 	}
761 	memset(fle, 0, FLE_POOL_BUF_SIZE);
762 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
763 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
764 	fle = fle + 1;
765 	sge = fle + 2;
766 	if (likely(bpid < MAX_BPID)) {
767 		DPAA2_SET_FD_BPID(fd, bpid);
768 		DPAA2_SET_FLE_BPID(fle, bpid);
769 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
770 		DPAA2_SET_FLE_BPID(sge, bpid);
771 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
772 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
773 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
774 	} else {
775 		DPAA2_SET_FD_IVP(fd);
776 		DPAA2_SET_FLE_IVP(fle);
777 		DPAA2_SET_FLE_IVP((fle + 1));
778 		DPAA2_SET_FLE_IVP(sge);
779 		DPAA2_SET_FLE_IVP((sge + 1));
780 		DPAA2_SET_FLE_IVP((sge + 2));
781 		DPAA2_SET_FLE_IVP((sge + 3));
782 	}
783 
784 	/* Save the shared descriptor */
785 	flc = &priv->flc_desc[0].flc;
786 	/* Configure FD as a FRAME LIST */
787 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
788 	DPAA2_SET_FD_COMPOUND_FMT(fd);
789 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
790 
791 	DPAA2_SEC_DP_DEBUG(
792 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
793 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
794 		sym_op->auth.data.offset,
795 		sym_op->auth.data.length,
796 		sess->digest_length,
797 		sym_op->cipher.data.offset,
798 		sym_op->cipher.data.length,
799 		sess->iv.length,
800 		sym_op->m_src->data_off);
801 
802 	/* Configure Output FLE with Scatter/Gather Entry */
803 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
804 	if (auth_only_len)
805 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
806 	fle->length = (sess->dir == DIR_ENC) ?
807 			(sym_op->cipher.data.length + icv_len) :
808 			sym_op->cipher.data.length;
809 
810 	DPAA2_SET_FLE_SG_EXT(fle);
811 
812 	/* Configure Output SGE for Encap/Decap */
813 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
814 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
815 				dst->data_off);
816 	sge->length = sym_op->cipher.data.length;
817 
818 	if (sess->dir == DIR_ENC) {
819 		sge++;
820 		DPAA2_SET_FLE_ADDR(sge,
821 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
822 		sge->length = sess->digest_length;
823 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
824 					sess->iv.length));
825 	}
826 	DPAA2_SET_FLE_FIN(sge);
827 
828 	sge++;
829 	fle++;
830 
831 	/* Configure Input FLE with Scatter/Gather Entry */
832 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
833 	DPAA2_SET_FLE_SG_EXT(fle);
834 	DPAA2_SET_FLE_FIN(fle);
835 	fle->length = (sess->dir == DIR_ENC) ?
836 			(sym_op->auth.data.length + sess->iv.length) :
837 			(sym_op->auth.data.length + sess->iv.length +
838 			 sess->digest_length);
839 
840 	/* Configure Input SGE for Encap/Decap */
841 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
842 	sge->length = sess->iv.length;
843 	sge++;
844 
845 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
846 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
847 				sym_op->m_src->data_off);
848 	sge->length = sym_op->auth.data.length;
849 	if (sess->dir == DIR_DEC) {
850 		sge++;
851 		old_icv = (uint8_t *)(sge + 1);
852 		memcpy(old_icv,	sym_op->auth.digest.data,
853 		       sess->digest_length);
854 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
855 		sge->length = sess->digest_length;
856 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
857 				 sess->digest_length +
858 				 sess->iv.length));
859 	}
860 	DPAA2_SET_FLE_FIN(sge);
861 	if (auth_only_len) {
862 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
863 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
864 	}
865 	return 0;
866 }
867 
868 static inline int build_auth_sg_fd(
869 		dpaa2_sec_session *sess,
870 		struct rte_crypto_op *op,
871 		struct qbman_fd *fd,
872 		__rte_unused uint16_t bpid)
873 {
874 	struct rte_crypto_sym_op *sym_op = op->sym;
875 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
876 	struct sec_flow_context *flc;
877 	struct ctxt_priv *priv = sess->ctxt;
878 	int data_len, data_offset;
879 	uint8_t *old_digest;
880 	struct rte_mbuf *mbuf;
881 
882 	data_len = sym_op->auth.data.length;
883 	data_offset = sym_op->auth.data.offset;
884 
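	/*
	 * SNOW 3G UIA2 and ZUC EIA3 express the auth offset/length in bits;
	 * the SEC descriptor expects bytes, so require byte alignment and
	 * convert.
	 */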
885 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
886 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
887 		if ((data_len & 7) || (data_offset & 7)) {
888 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
889 			return -1;
890 		}
891 
892 		data_len = data_len >> 3;
893 		data_offset = data_offset >> 3;
894 	}
895 
896 	mbuf = sym_op->m_src;
897 	fle = (struct qbman_fle *)rte_malloc(NULL,
898 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
899 			RTE_CACHE_LINE_SIZE);
900 	if (unlikely(!fle)) {
901 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
902 		return -1;
903 	}
904 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
905 	/* first FLE entry used to store mbuf and session ctxt */
906 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
907 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
908 	op_fle = fle + 1;
909 	ip_fle = fle + 2;
910 	sge = fle + 3;
911 
912 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
913 	/* sg FD */
914 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
915 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
916 	DPAA2_SET_FD_COMPOUND_FMT(fd);
917 
918 	/* o/p fle */
919 	DPAA2_SET_FLE_ADDR(op_fle,
920 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
921 	op_fle->length = sess->digest_length;
922 
923 	/* i/p fle */
924 	DPAA2_SET_FLE_SG_EXT(ip_fle);
925 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
926 	ip_fle->length = data_len;
927 
928 	if (sess->iv.length) {
929 		uint8_t *iv_ptr;
930 
931 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
932 						   sess->iv.offset);
933 
934 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
935 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
936 			sge->length = 12;
937 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
938 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
939 			sge->length = 8;
940 		} else {
941 			sge->length = sess->iv.length;
942 		}
943 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
944 		ip_fle->length += sge->length;
945 		sge++;
946 	}
947 	/* i/p 1st seg */
948 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
949 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
950 
951 	if (data_len <= (mbuf->data_len - data_offset)) {
952 		sge->length = data_len;
953 		data_len = 0;
954 	} else {
955 		sge->length = mbuf->data_len - data_offset;
956 
957 		/* remaining i/p segs */
958 		while ((data_len = data_len - sge->length) &&
959 		       (mbuf = mbuf->next)) {
960 			sge++;
961 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
962 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
963 			if (data_len > mbuf->data_len)
964 				sge->length = mbuf->data_len;
965 			else
966 				sge->length = data_len;
967 		}
968 	}
969 
970 	if (sess->dir == DIR_DEC) {
971 		/* Digest verification case */
972 		sge++;
973 		old_digest = (uint8_t *)(sge + 1);
974 		rte_memcpy(old_digest, sym_op->auth.digest.data,
975 			   sess->digest_length);
976 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
977 		sge->length = sess->digest_length;
978 		ip_fle->length += sess->digest_length;
979 	}
980 	DPAA2_SET_FLE_FIN(sge);
981 	DPAA2_SET_FLE_FIN(ip_fle);
982 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
983 
984 	return 0;
985 }
986 
987 static inline int
988 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
989 	      struct qbman_fd *fd, uint16_t bpid)
990 {
991 	struct rte_crypto_sym_op *sym_op = op->sym;
992 	struct qbman_fle *fle, *sge;
993 	struct sec_flow_context *flc;
994 	struct ctxt_priv *priv = sess->ctxt;
995 	int data_len, data_offset;
996 	uint8_t *old_digest;
997 	int retval;
998 
999 	data_len = sym_op->auth.data.length;
1000 	data_offset = sym_op->auth.data.offset;
1001 
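	/* Bit-expressed lengths (SNOW 3G/ZUC): require byte alignment and
	 * convert to bytes.
	 */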
1002 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1003 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1004 		if ((data_len & 7) || (data_offset & 7)) {
1005 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1006 			return -1;
1007 		}
1008 
1009 		data_len = data_len >> 3;
1010 		data_offset = data_offset >> 3;
1011 	}
1012 
1013 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1014 	if (retval) {
1015 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1016 		return -1;
1017 	}
1018 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1019 	/* TODO: we are using the first FLE entry to store the mbuf.
1020 	 * Currently we do not know which FLE has the mbuf stored, so on
1021 	 * retrieval we go back 1 FLE from the FD ADDR to get the mbuf
1022 	 * address from the previous FLE.
1023 	 * A better approach could use the inline mbuf.
1024 	 */
1025 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1026 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1027 	fle = fle + 1;
1028 	sge = fle + 2;
1029 
1030 	if (likely(bpid < MAX_BPID)) {
1031 		DPAA2_SET_FD_BPID(fd, bpid);
1032 		DPAA2_SET_FLE_BPID(fle, bpid);
1033 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1034 		DPAA2_SET_FLE_BPID(sge, bpid);
1035 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1036 	} else {
1037 		DPAA2_SET_FD_IVP(fd);
1038 		DPAA2_SET_FLE_IVP(fle);
1039 		DPAA2_SET_FLE_IVP((fle + 1));
1040 		DPAA2_SET_FLE_IVP(sge);
1041 		DPAA2_SET_FLE_IVP((sge + 1));
1042 	}
1043 
1044 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1045 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1046 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1047 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1048 
1049 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1050 	fle->length = sess->digest_length;
1051 	fle++;
1052 
1053 	/* Setting input FLE */
1054 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1055 	DPAA2_SET_FLE_SG_EXT(fle);
1056 	fle->length = data_len;
1057 
1058 	if (sess->iv.length) {
1059 		uint8_t *iv_ptr;
1060 
1061 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1062 						   sess->iv.offset);
1063 
1064 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1065 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1066 			sge->length = 12;
1067 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1068 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1069 			sge->length = 8;
1070 		} else {
1071 			sge->length = sess->iv.length;
1072 		}
1073 
1074 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1075 		fle->length = fle->length + sge->length;
1076 		sge++;
1077 	}
1078 
1079 	/* Setting data to authenticate */
1080 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1081 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1082 	sge->length = data_len;
1083 
1084 	if (sess->dir == DIR_DEC) {
1085 		sge++;
1086 		old_digest = (uint8_t *)(sge + 1);
1087 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1088 			   sess->digest_length);
1089 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1090 		sge->length = sess->digest_length;
1091 		fle->length = fle->length + sess->digest_length;
1092 	}
1093 
1094 	DPAA2_SET_FLE_FIN(sge);
1095 	DPAA2_SET_FLE_FIN(fle);
1096 	DPAA2_SET_FD_LEN(fd, fle->length);
1097 
1098 	return 0;
1099 }
1100 
1101 static int
1102 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1103 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1104 {
1105 	struct rte_crypto_sym_op *sym_op = op->sym;
1106 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1107 	int data_len, data_offset;
1108 	struct sec_flow_context *flc;
1109 	struct ctxt_priv *priv = sess->ctxt;
1110 	struct rte_mbuf *mbuf;
1111 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1112 			sess->iv.offset);
1113 
1114 	data_len = sym_op->cipher.data.length;
1115 	data_offset = sym_op->cipher.data.offset;
1116 
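	/*
	 * SNOW 3G UEA2 and ZUC EEA3 express the cipher offset/length in bits;
	 * require byte alignment and convert to bytes.
	 */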
1117 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1118 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1119 		if ((data_len & 7) || (data_offset & 7)) {
1120 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1121 			return -1;
1122 		}
1123 
1124 		data_len = data_len >> 3;
1125 		data_offset = data_offset >> 3;
1126 	}
1127 
1128 	if (sym_op->m_dst)
1129 		mbuf = sym_op->m_dst;
1130 	else
1131 		mbuf = sym_op->m_src;
1132 
1133 	/* first FLE entry used to store mbuf and session ctxt */
1134 	fle = (struct qbman_fle *)rte_malloc(NULL,
1135 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1136 			RTE_CACHE_LINE_SIZE);
1137 	if (!fle) {
1138 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1139 		return -1;
1140 	}
1141 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1143 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1144 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1145 
1146 	op_fle = fle + 1;
1147 	ip_fle = fle + 2;
1148 	sge = fle + 3;
1149 
1150 	flc = &priv->flc_desc[0].flc;
1151 
1152 	DPAA2_SEC_DP_DEBUG(
1153 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1154 		" data_off: 0x%x\n",
1155 		data_offset,
1156 		data_len,
1157 		sess->iv.length,
1158 		sym_op->m_src->data_off);
1159 
1160 	/* o/p fle */
1161 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1162 	op_fle->length = data_len;
1163 	DPAA2_SET_FLE_SG_EXT(op_fle);
1164 
1165 	/* o/p 1st seg */
1166 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1167 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1168 	sge->length = mbuf->data_len - data_offset;
1169 
1170 	mbuf = mbuf->next;
1171 	/* o/p segs */
1172 	while (mbuf) {
1173 		sge++;
1174 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1175 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1176 		sge->length = mbuf->data_len;
1177 		mbuf = mbuf->next;
1178 	}
1179 	DPAA2_SET_FLE_FIN(sge);
1180 
1181 	DPAA2_SEC_DP_DEBUG(
1182 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1183 		flc, fle, fle->addr_hi, fle->addr_lo,
1184 		fle->length);
1185 
1186 	/* i/p fle */
1187 	mbuf = sym_op->m_src;
1188 	sge++;
1189 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1190 	ip_fle->length = sess->iv.length + data_len;
1191 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1192 
1193 	/* i/p IV */
1194 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1195 	DPAA2_SET_FLE_OFFSET(sge, 0);
1196 	sge->length = sess->iv.length;
1197 
1198 	sge++;
1199 
1200 	/* i/p 1st seg */
1201 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1202 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1203 	sge->length = mbuf->data_len - data_offset;
1204 
1205 	mbuf = mbuf->next;
1206 	/* i/p segs */
1207 	while (mbuf) {
1208 		sge++;
1209 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1210 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1211 		sge->length = mbuf->data_len;
1212 		mbuf = mbuf->next;
1213 	}
1214 	DPAA2_SET_FLE_FIN(sge);
1215 	DPAA2_SET_FLE_FIN(ip_fle);
1216 
1217 	/* sg fd */
1218 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1219 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1220 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1221 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1222 
1223 	DPAA2_SEC_DP_DEBUG(
1224 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1225 		" off =%d, len =%d\n",
1226 		DPAA2_GET_FD_ADDR(fd),
1227 		DPAA2_GET_FD_BPID(fd),
1228 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1229 		DPAA2_GET_FD_OFFSET(fd),
1230 		DPAA2_GET_FD_LEN(fd));
1231 	return 0;
1232 }
1233 
1234 static int
1235 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1236 		struct qbman_fd *fd, uint16_t bpid)
1237 {
1238 	struct rte_crypto_sym_op *sym_op = op->sym;
1239 	struct qbman_fle *fle, *sge;
1240 	int retval, data_len, data_offset;
1241 	struct sec_flow_context *flc;
1242 	struct ctxt_priv *priv = sess->ctxt;
1243 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1244 			sess->iv.offset);
1245 	struct rte_mbuf *dst;
1246 
1247 	data_len = sym_op->cipher.data.length;
1248 	data_offset = sym_op->cipher.data.offset;
1249 
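	/* Bit-expressed lengths (SNOW 3G/ZUC): require byte alignment and
	 * convert to bytes.
	 */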
1250 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1251 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1252 		if ((data_len & 7) || (data_offset & 7)) {
1253 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1254 			return -1;
1255 		}
1256 
1257 		data_len = data_len >> 3;
1258 		data_offset = data_offset >> 3;
1259 	}
1260 
1261 	if (sym_op->m_dst)
1262 		dst = sym_op->m_dst;
1263 	else
1264 		dst = sym_op->m_src;
1265 
1266 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1267 	if (retval) {
1268 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1269 		return -1;
1270 	}
1271 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1272 	/* TODO: we are using the first FLE entry to store the mbuf.
1273 	 * Currently we do not know which FLE has the mbuf stored, so on
1274 	 * retrieval we go back 1 FLE from the FD ADDR to get the mbuf
1275 	 * address from the previous FLE.
1276 	 * A better approach could use the inline mbuf.
1277 	 */
1278 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1279 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1280 	fle = fle + 1;
1281 	sge = fle + 2;
1282 
1283 	if (likely(bpid < MAX_BPID)) {
1284 		DPAA2_SET_FD_BPID(fd, bpid);
1285 		DPAA2_SET_FLE_BPID(fle, bpid);
1286 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1287 		DPAA2_SET_FLE_BPID(sge, bpid);
1288 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1289 	} else {
1290 		DPAA2_SET_FD_IVP(fd);
1291 		DPAA2_SET_FLE_IVP(fle);
1292 		DPAA2_SET_FLE_IVP((fle + 1));
1293 		DPAA2_SET_FLE_IVP(sge);
1294 		DPAA2_SET_FLE_IVP((sge + 1));
1295 	}
1296 
1297 	flc = &priv->flc_desc[0].flc;
1298 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1299 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1300 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1301 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1302 
1303 	DPAA2_SEC_DP_DEBUG(
1304 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1305 		" data_off: 0x%x\n",
1306 		data_offset,
1307 		data_len,
1308 		sess->iv.length,
1309 		sym_op->m_src->data_off);
1310 
1311 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1312 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1313 
1314 	fle->length = data_len + sess->iv.length;
1315 
1316 	DPAA2_SEC_DP_DEBUG(
1317 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1318 		flc, fle, fle->addr_hi, fle->addr_lo,
1319 		fle->length);
1320 
1321 	fle++;
1322 
1323 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1324 	fle->length = data_len + sess->iv.length;
1325 
1326 	DPAA2_SET_FLE_SG_EXT(fle);
1327 
1328 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1329 	sge->length = sess->iv.length;
1330 
1331 	sge++;
1332 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1333 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1334 
1335 	sge->length = data_len;
1336 	DPAA2_SET_FLE_FIN(sge);
1337 	DPAA2_SET_FLE_FIN(fle);
1338 
1339 	DPAA2_SEC_DP_DEBUG(
1340 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1341 		" off =%d, len =%d\n",
1342 		DPAA2_GET_FD_ADDR(fd),
1343 		DPAA2_GET_FD_BPID(fd),
1344 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1345 		DPAA2_GET_FD_OFFSET(fd),
1346 		DPAA2_GET_FD_LEN(fd));
1347 
1348 	return 0;
1349 }
1350 
1351 static inline int
1352 build_sec_fd(struct rte_crypto_op *op,
1353 	     struct qbman_fd *fd, uint16_t bpid)
1354 {
1355 	int ret = -1;
1356 	dpaa2_sec_session *sess;
1357 
1358 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1359 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1360 				op->sym->session, cryptodev_driver_id);
1361 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1362 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1363 				op->sym->sec_session);
1364 	else
1365 		return -1;
1366 
1367 	/* Any of the buffers is segmented */
1368 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1369 		  ((op->sym->m_dst != NULL) &&
1370 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1371 		switch (sess->ctxt_type) {
1372 		case DPAA2_SEC_CIPHER:
1373 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1374 			break;
1375 		case DPAA2_SEC_AUTH:
1376 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1377 			break;
1378 		case DPAA2_SEC_AEAD:
1379 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1380 			break;
1381 		case DPAA2_SEC_CIPHER_HASH:
1382 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1383 			break;
1384 		case DPAA2_SEC_IPSEC:
1385 		case DPAA2_SEC_PDCP:
1386 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1387 			break;
1388 		case DPAA2_SEC_HASH_CIPHER:
1389 		default:
1390 			DPAA2_SEC_ERR("error: Unsupported session");
1391 		}
1392 	} else {
1393 		switch (sess->ctxt_type) {
1394 		case DPAA2_SEC_CIPHER:
1395 			ret = build_cipher_fd(sess, op, fd, bpid);
1396 			break;
1397 		case DPAA2_SEC_AUTH:
1398 			ret = build_auth_fd(sess, op, fd, bpid);
1399 			break;
1400 		case DPAA2_SEC_AEAD:
1401 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1402 			break;
1403 		case DPAA2_SEC_CIPHER_HASH:
1404 			ret = build_authenc_fd(sess, op, fd, bpid);
1405 			break;
1406 		case DPAA2_SEC_IPSEC:
1407 			ret = build_proto_fd(sess, op, fd, bpid);
1408 			break;
1409 		case DPAA2_SEC_PDCP:
1410 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1411 			break;
1412 		case DPAA2_SEC_HASH_CIPHER:
1413 		default:
1414 			DPAA2_SEC_ERR("error: Unsupported session");
1415 		}
1416 	}
1417 	return ret;
1418 }
1419 
1420 static uint16_t
1421 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1422 			uint16_t nb_ops)
1423 {
1424 	/* Function to transmit the frames to given device and VQ*/
1425 	uint32_t loop;
1426 	int32_t ret;
1427 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1428 	uint32_t frames_to_send;
1429 	struct qbman_eq_desc eqdesc;
1430 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1431 	struct qbman_swp *swp;
1432 	uint16_t num_tx = 0;
1433 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1434 	/*todo - need to support multiple buffer pools */
1435 	uint16_t bpid;
1436 	struct rte_mempool *mb_pool;
1437 
1438 	if (unlikely(nb_ops == 0))
1439 		return 0;
1440 
1441 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1442 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1443 		return 0;
1444 	}
1445 	/*Prepare enqueue descriptor*/
1446 	qbman_eq_desc_clear(&eqdesc);
1447 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1448 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1449 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1450 
1451 	if (!DPAA2_PER_LCORE_DPIO) {
1452 		ret = dpaa2_affine_qbman_swp();
1453 		if (ret) {
1454 			DPAA2_SEC_ERR("Failure in affining portal");
1455 			return 0;
1456 		}
1457 	}
1458 	swp = DPAA2_PER_LCORE_PORTAL;
1459 
1460 	while (nb_ops) {
1461 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1462 			dpaa2_eqcr_size : nb_ops;
1463 
1464 		for (loop = 0; loop < frames_to_send; loop++) {
1465 			if ((*ops)->sym->m_src->seqn) {
1466 			 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1467 
1468 			 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1469 			 DPAA2_PER_LCORE_DQRR_SIZE--;
1470 			 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1471 			 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1472 			}
1473 
1474 			/*Clear the unused FD fields before sending*/
1475 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1476 			mb_pool = (*ops)->sym->m_src->pool;
1477 			bpid = mempool_to_bpid(mb_pool);
1478 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1479 			if (ret) {
1480 				DPAA2_SEC_ERR("error: Improper packet contents"
1481 					      " for crypto operation");
1482 				goto skip_tx;
1483 			}
1484 			ops++;
1485 		}
1486 		loop = 0;
1487 		while (loop < frames_to_send) {
1488 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1489 							&fd_arr[loop],
1490 							&flags[loop],
1491 							frames_to_send - loop);
1492 		}
1493 
1494 		num_tx += frames_to_send;
1495 		nb_ops -= frames_to_send;
1496 	}
1497 skip_tx:
1498 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1499 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1500 	return num_tx;
1501 }
1502 
1503 static inline struct rte_crypto_op *
1504 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1505 {
1506 	struct rte_crypto_op *op;
1507 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1508 	uint16_t diff = 0;
1509 	dpaa2_sec_session *sess_priv;
1510 
1511 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1512 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1513 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1514 
1515 	diff = len - mbuf->pkt_len;
1516 	mbuf->pkt_len += diff;
1517 	mbuf->data_len += diff;
1518 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1519 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1520 	op->sym->aead.digest.phys_addr = 0L;
1521 
1522 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1523 				op->sym->sec_session);
1524 	if (sess_priv->dir == DIR_ENC)
1525 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1526 	else
1527 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1528 
1529 	return op;
1530 }
1531 
1532 static inline struct rte_crypto_op *
1533 sec_fd_to_mbuf(const struct qbman_fd *fd)
1534 {
1535 	struct qbman_fle *fle;
1536 	struct rte_crypto_op *op;
1537 	struct ctxt_priv *priv;
1538 	struct rte_mbuf *dst, *src;
1539 
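	/*
	 * Simple (non-compound) FDs are produced only by build_proto_fd()
	 * for protocol offload ops; everything else arrives as a compound
	 * frame list.
	 */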
1540 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1541 		return sec_simple_fd_to_mbuf(fd);
1542 
1543 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1544 
1545 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1546 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1547 
1548 	/* We are using the first FLE entry to store the mbuf.
1549 	 * Currently we do not know which FLE has the mbuf stored, so on
1550 	 * retrieval we go back 1 FLE from the FD ADDR to get the mbuf
1551 	 * address from the previous FLE.
1552 	 * A better approach could use the inline mbuf.
1553 	 */
1554 
1555 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1556 		/* TODO complete it. */
1557 		DPAA2_SEC_ERR("error: non inline buffer");
1558 		return NULL;
1559 	}
1560 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1561 
1562 	/* Prefetch op */
1563 	src = op->sym->m_src;
1564 	rte_prefetch0(src);
1565 
1566 	if (op->sym->m_dst) {
1567 		dst = op->sym->m_dst;
1568 		rte_prefetch0(dst);
1569 	} else
1570 		dst = src;
1571 
1572 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1573 		dpaa2_sec_session *sess = (dpaa2_sec_session *)
1574 			get_sec_session_private_data(op->sym->sec_session);
1575 		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
1576 				sess->ctxt_type == DPAA2_SEC_PDCP) {
1577 			uint16_t len = DPAA2_GET_FD_LEN(fd);
1578 			dst->pkt_len = len;
1579 			while (dst->next != NULL) {
1580 				len -= dst->data_len;
1581 				dst = dst->next;
1582 			}
1583 			dst->data_len = len;
1584 		}
1585 	}
1586 
1587 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1588 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1589 		(void *)dst,
1590 		dst->buf_addr,
1591 		DPAA2_GET_FD_ADDR(fd),
1592 		DPAA2_GET_FD_BPID(fd),
1593 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1594 		DPAA2_GET_FD_OFFSET(fd),
1595 		DPAA2_GET_FD_LEN(fd));
1596 
1597 	/* free the FLE memory (pool for contiguous ops, rte_malloc for SG) */
1598 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1599 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1600 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1601 	} else
1602 		rte_free((void *)(fle-1));
1603 
1604 	return op;
1605 }
1606 
1607 static uint16_t
1608 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1609 			uint16_t nb_ops)
1610 {
1611 	/* Function is responsible to receive frames for a given device and VQ*/
1612 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1613 	struct qbman_result *dq_storage;
1614 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1615 	int ret, num_rx = 0;
1616 	uint8_t is_last = 0, status;
1617 	struct qbman_swp *swp;
1618 	const struct qbman_fd *fd;
1619 	struct qbman_pull_desc pulldesc;
1620 
1621 	if (!DPAA2_PER_LCORE_DPIO) {
1622 		ret = dpaa2_affine_qbman_swp();
1623 		if (ret) {
1624 			DPAA2_SEC_ERR("Failure in affining portal");
1625 			return 0;
1626 		}
1627 	}
1628 	swp = DPAA2_PER_LCORE_PORTAL;
1629 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1630 
1631 	qbman_pull_desc_clear(&pulldesc);
1632 	qbman_pull_desc_set_numframes(&pulldesc,
1633 				      (nb_ops > dpaa2_dqrr_size) ?
1634 				      dpaa2_dqrr_size : nb_ops);
1635 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1636 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1637 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1638 				    1);
1639 
1640 	/*Issue a volatile dequeue command. */
1641 	while (1) {
1642 		if (qbman_swp_pull(swp, &pulldesc)) {
1643 			DPAA2_SEC_WARN(
1644 				"SEC VDQ command is not issued : QBMAN busy");
1645 			/* Portal was busy, try again */
1646 			continue;
1647 		}
1648 		break;
1649 	};
1650 
1651 	/* Receive the packets till Last Dequeue entry is found with
1652 	 * respect to the above issues PULL command.
1653 	 */
1654 	while (!is_last) {
1655 		/* Check if the previous issued command is completed.
1656 		 * Also seems like the SWP is shared between the Ethernet Driver
1657 		 * and the SEC driver.
1658 		 */
1659 		while (!qbman_check_command_complete(dq_storage))
1660 			;
1661 
1662 		/* Loop until the dq_storage is updated with
1663 		 * new token by QBMAN
1664 		 */
1665 		while (!qbman_check_new_result(dq_storage))
1666 			;
1667 		/* Check whether Last Pull command is Expired and
1668 		 * setting Condition for Loop termination
1669 		 */
1670 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1671 			is_last = 1;
1672 			/* Check for valid frame. */
1673 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1674 			if (unlikely(
1675 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1676 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1677 				continue;
1678 			}
1679 		}
1680 
1681 		fd = qbman_result_DQ_fd(dq_storage);
1682 		ops[num_rx] = sec_fd_to_mbuf(fd);
1683 
1684 		if (unlikely(fd->simple.frc)) {
1685 			/* TODO Parse SEC errors */
1686 			DPAA2_SEC_ERR("SEC returned Error - %x",
1687 				      fd->simple.frc);
1688 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1689 		} else {
1690 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1691 		}
1692 
1693 		num_rx++;
1694 		dq_storage++;
1695 	} /* End of Packet Rx loop */
1696 
1697 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1698 
1699 	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1700 	/*Return the total number of packets received to DPAA2 app*/
1701 	return num_rx;
1702 }
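/*
 * Illustrative usage sketch (not part of this driver): applications reach
 * the burst handlers above through the generic cryptodev API, e.g.
 *
 *	uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						     ops, nb_ops);
 *	uint16_t n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						     ops, nb_ops);
 */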
1703 
1704 /** Release queue pair */
1705 static int
1706 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1707 {
1708 	struct dpaa2_sec_qp *qp =
1709 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1710 
1711 	PMD_INIT_FUNC_TRACE();
1712 
1713 	if (qp->rx_vq.q_storage) {
1714 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1715 		rte_free(qp->rx_vq.q_storage);
1716 	}
1717 	rte_free(qp);
1718 
1719 	dev->data->queue_pairs[queue_pair_id] = NULL;
1720 
1721 	return 0;
1722 }
1723 
1724 /** Setup a queue pair */
1725 static int
1726 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1727 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1728 		__rte_unused int socket_id)
1729 {
1730 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1731 	struct dpaa2_sec_qp *qp;
1732 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1733 	struct dpseci_rx_queue_cfg cfg;
1734 	int32_t retcode;
1735 
1736 	PMD_INIT_FUNC_TRACE();
1737 
1738 	/* If qp is already in use free ring memory and qp metadata. */
1739 	if (dev->data->queue_pairs[qp_id] != NULL) {
1740 		DPAA2_SEC_INFO("QP already setup");
1741 		return 0;
1742 	}
1743 
1744 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1745 		    dev, qp_id, qp_conf);
1746 
1747 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1748 
1749 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1750 			RTE_CACHE_LINE_SIZE);
1751 	if (!qp) {
1752 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1753 		return -1;
1754 	}
1755 
1756 	qp->rx_vq.crypto_data = dev->data;
1757 	qp->tx_vq.crypto_data = dev->data;
1758 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1759 		sizeof(struct queue_storage_info_t),
1760 		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
		return -1;
	}
1765 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1766 
	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
		return -1;
	}
1771 
1772 	dev->data->queue_pairs[qp_id] = qp;
1773 
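	/* Register the rx_vq address as the queue's user context so that
	 * hardware dequeue results can be mapped back to this queue pair.
	 */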
1774 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1775 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1776 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1777 				      qp_id, &cfg);
1778 	return retcode;
1779 }
1780 
1781 /** Return the number of allocated queue pairs */
1782 static uint32_t
1783 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1784 {
1785 	PMD_INIT_FUNC_TRACE();
1786 
1787 	return dev->data->nb_queue_pairs;
1788 }
1789 
/** Return the size of the dpaa2 sec session structure */
1791 static unsigned int
1792 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1793 {
1794 	PMD_INIT_FUNC_TRACE();
1795 
1796 	return sizeof(dpaa2_sec_session);
1797 }
1798 
1799 static int
1800 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1801 		      struct rte_crypto_sym_xform *xform,
1802 		      dpaa2_sec_session *session)
1803 {
1804 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1805 	struct alginfo cipherdata;
1806 	int bufsize;
1807 	struct ctxt_priv *priv;
1808 	struct sec_flow_context *flc;
1809 
1810 	PMD_INIT_FUNC_TRACE();
1811 
1812 	/* For SEC CIPHER only one descriptor is required. */
1813 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1814 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1815 			RTE_CACHE_LINE_SIZE);
1816 	if (priv == NULL) {
1817 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1818 		return -1;
1819 	}
1820 
1821 	priv->fle_pool = dev_priv->fle_pool;
1822 
1823 	flc = &priv->flc_desc[0].flc;
1824 
1825 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1826 			RTE_CACHE_LINE_SIZE);
1827 	if (session->cipher_key.data == NULL) {
1828 		DPAA2_SEC_ERR("No Memory for cipher key");
1829 		rte_free(priv);
1830 		return -1;
1831 	}
1832 	session->cipher_key.length = xform->cipher.key.length;
1833 
1834 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1835 	       xform->cipher.key.length);
1836 	cipherdata.key = (size_t)session->cipher_key.data;
1837 	cipherdata.keylen = session->cipher_key.length;
1838 	cipherdata.key_enc_flags = 0;
1839 	cipherdata.key_type = RTA_DATA_IMM;
1840 
1841 	/* Set IV parameters */
1842 	session->iv.offset = xform->cipher.iv.offset;
1843 	session->iv.length = xform->cipher.iv.length;
1844 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1845 				DIR_ENC : DIR_DEC;
1846 
1847 	switch (xform->cipher.algo) {
1848 	case RTE_CRYPTO_CIPHER_AES_CBC:
1849 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1850 		cipherdata.algmode = OP_ALG_AAI_CBC;
1851 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1852 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1853 						SHR_NEVER, &cipherdata, NULL,
1854 						session->iv.length,
1855 						session->dir);
1856 		break;
1857 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1858 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1859 		cipherdata.algmode = OP_ALG_AAI_CBC;
1860 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1861 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1862 						SHR_NEVER, &cipherdata, NULL,
1863 						session->iv.length,
1864 						session->dir);
1865 		break;
1866 	case RTE_CRYPTO_CIPHER_AES_CTR:
1867 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1868 		cipherdata.algmode = OP_ALG_AAI_CTR;
1869 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1870 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1871 						SHR_NEVER, &cipherdata, NULL,
1872 						session->iv.length,
1873 						session->dir);
1874 		break;
1875 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1876 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1877 		cipherdata.algmode = OP_ALG_AAI_CTR;
1878 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
1879 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1880 						SHR_NEVER, &cipherdata, NULL,
1881 						session->iv.length,
1882 						session->dir);
1883 		break;
1884 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1885 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1886 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1887 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1888 					      &cipherdata,
1889 					      session->dir);
1890 		break;
1891 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1892 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1893 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1894 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1895 					      &cipherdata,
1896 					      session->dir);
1897 		break;
1898 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1899 	case RTE_CRYPTO_CIPHER_AES_F8:
1900 	case RTE_CRYPTO_CIPHER_AES_ECB:
1901 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1902 	case RTE_CRYPTO_CIPHER_AES_XTS:
1903 	case RTE_CRYPTO_CIPHER_ARC4:
1904 	case RTE_CRYPTO_CIPHER_NULL:
1905 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1906 			xform->cipher.algo);
1907 		goto error_out;
1908 	default:
1909 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1910 			xform->cipher.algo);
1911 		goto error_out;
1912 	}
1913 
1914 	if (bufsize < 0) {
1915 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1916 		goto error_out;
1917 	}
1918 
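	/* Store the shared descriptor length (in 32-bit words, as returned
	 * by the RTA constructor) in the flow context.
	 */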
1919 	flc->word1_sdl = (uint8_t)bufsize;
1920 	session->ctxt = priv;
1921 
1922 #ifdef CAAM_DESC_DEBUG
1923 	int i;
1924 	for (i = 0; i < bufsize; i++)
1925 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1926 #endif
1927 	return 0;
1928 
1929 error_out:
1930 	rte_free(session->cipher_key.data);
1931 	rte_free(priv);
1932 	return -1;
1933 }
1934 
1935 static int
1936 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1937 		    struct rte_crypto_sym_xform *xform,
1938 		    dpaa2_sec_session *session)
1939 {
1940 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1941 	struct alginfo authdata;
1942 	int bufsize;
1943 	struct ctxt_priv *priv;
1944 	struct sec_flow_context *flc;
1945 
1946 	PMD_INIT_FUNC_TRACE();
1947 
1948 	/* For SEC AUTH three descriptors are required for various stages */
1949 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1950 			sizeof(struct ctxt_priv) + 3 *
1951 			sizeof(struct sec_flc_desc),
1952 			RTE_CACHE_LINE_SIZE);
1953 	if (priv == NULL) {
1954 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1955 		return -1;
1956 	}
1957 
1958 	priv->fle_pool = dev_priv->fle_pool;
1959 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1960 
1961 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1962 			RTE_CACHE_LINE_SIZE);
1963 	if (session->auth_key.data == NULL) {
1964 		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1965 		rte_free(priv);
1966 		return -1;
1967 	}
1968 	session->auth_key.length = xform->auth.key.length;
1969 
1970 	memcpy(session->auth_key.data, xform->auth.key.data,
1971 	       xform->auth.key.length);
1972 	authdata.key = (size_t)session->auth_key.data;
1973 	authdata.keylen = session->auth_key.length;
1974 	authdata.key_enc_flags = 0;
1975 	authdata.key_type = RTA_DATA_IMM;
1976 
1977 	session->digest_length = xform->auth.digest_length;
1978 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1979 				DIR_ENC : DIR_DEC;
1980 
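	/* The RTA auth constructors below take an ICV-check flag, hence the
	 * negated direction: GENERATE (DIR_ENC) builds the digest, while
	 * VERIFY (DIR_DEC) checks it.
	 */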
1981 	switch (xform->auth.algo) {
1982 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1983 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1984 		authdata.algmode = OP_ALG_AAI_HMAC;
1985 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1986 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1987 					   1, 0, SHR_NEVER, &authdata,
1988 					   !session->dir,
1989 					   session->digest_length);
1990 		break;
1991 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1992 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1993 		authdata.algmode = OP_ALG_AAI_HMAC;
1994 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1995 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1996 					   1, 0, SHR_NEVER, &authdata,
1997 					   !session->dir,
1998 					   session->digest_length);
1999 		break;
2000 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2001 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2002 		authdata.algmode = OP_ALG_AAI_HMAC;
2003 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2004 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2005 					   1, 0, SHR_NEVER, &authdata,
2006 					   !session->dir,
2007 					   session->digest_length);
2008 		break;
2009 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2010 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2011 		authdata.algmode = OP_ALG_AAI_HMAC;
2012 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2013 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2014 					   1, 0, SHR_NEVER, &authdata,
2015 					   !session->dir,
2016 					   session->digest_length);
2017 		break;
2018 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2019 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2020 		authdata.algmode = OP_ALG_AAI_HMAC;
2021 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2022 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2023 					   1, 0, SHR_NEVER, &authdata,
2024 					   !session->dir,
2025 					   session->digest_length);
2026 		break;
2027 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2028 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2029 		authdata.algmode = OP_ALG_AAI_HMAC;
2030 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2031 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2032 					   1, 0, SHR_NEVER, &authdata,
2033 					   !session->dir,
2034 					   session->digest_length);
2035 		break;
2036 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2037 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2038 		authdata.algmode = OP_ALG_AAI_F9;
2039 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2040 		session->iv.offset = xform->auth.iv.offset;
2041 		session->iv.length = xform->auth.iv.length;
2042 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2043 					      1, 0, &authdata,
2044 					      !session->dir,
2045 					      session->digest_length);
2046 		break;
2047 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2048 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2049 		authdata.algmode = OP_ALG_AAI_F9;
2050 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2051 		session->iv.offset = xform->auth.iv.offset;
2052 		session->iv.length = xform->auth.iv.length;
2053 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2054 					   1, 0, &authdata,
2055 					   !session->dir,
2056 					   session->digest_length);
2057 		break;
2058 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2059 	case RTE_CRYPTO_AUTH_NULL:
2060 	case RTE_CRYPTO_AUTH_SHA1:
2061 	case RTE_CRYPTO_AUTH_SHA256:
2062 	case RTE_CRYPTO_AUTH_SHA512:
2063 	case RTE_CRYPTO_AUTH_SHA224:
2064 	case RTE_CRYPTO_AUTH_SHA384:
2065 	case RTE_CRYPTO_AUTH_MD5:
2066 	case RTE_CRYPTO_AUTH_AES_GMAC:
2067 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2068 	case RTE_CRYPTO_AUTH_AES_CMAC:
2069 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2071 			      xform->auth.algo);
2072 		goto error_out;
2073 	default:
2074 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2075 			      xform->auth.algo);
2076 		goto error_out;
2077 	}
2078 
2079 	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2081 		goto error_out;
2082 	}
2083 
2084 	flc->word1_sdl = (uint8_t)bufsize;
2085 	session->ctxt = priv;
2086 #ifdef CAAM_DESC_DEBUG
2087 	int i;
2088 	for (i = 0; i < bufsize; i++)
2089 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2090 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2091 #endif
2092 
2093 	return 0;
2094 
2095 error_out:
2096 	rte_free(session->auth_key.data);
2097 	rte_free(priv);
2098 	return -1;
2099 }
2100 
2101 static int
2102 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2103 		    struct rte_crypto_sym_xform *xform,
2104 		    dpaa2_sec_session *session)
2105 {
2106 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2107 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2108 	struct alginfo aeaddata;
2109 	int bufsize;
2110 	struct ctxt_priv *priv;
2111 	struct sec_flow_context *flc;
2112 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2113 	int err;
2114 
2115 	PMD_INIT_FUNC_TRACE();
2116 
2117 	/* Set IV parameters */
2118 	session->iv.offset = aead_xform->iv.offset;
2119 	session->iv.length = aead_xform->iv.length;
2120 	session->ctxt_type = DPAA2_SEC_AEAD;
2121 
2122 	/* For SEC AEAD only one descriptor is required */
2123 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2124 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2125 			RTE_CACHE_LINE_SIZE);
2126 	if (priv == NULL) {
2127 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2128 		return -1;
2129 	}
2130 
2131 	priv->fle_pool = dev_priv->fle_pool;
2132 	flc = &priv->flc_desc[0].flc;
2133 
2134 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2135 					       RTE_CACHE_LINE_SIZE);
2136 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2137 		DPAA2_SEC_ERR("No Memory for aead key");
2138 		rte_free(priv);
2139 		return -1;
2140 	}
2141 	memcpy(session->aead_key.data, aead_xform->key.data,
2142 	       aead_xform->key.length);
2143 
2144 	session->digest_length = aead_xform->digest_length;
2145 	session->aead_key.length = aead_xform->key.length;
2146 	ctxt->auth_only_len = aead_xform->aad_length;
2147 
2148 	aeaddata.key = (size_t)session->aead_key.data;
2149 	aeaddata.keylen = session->aead_key.length;
2150 	aeaddata.key_enc_flags = 0;
2151 	aeaddata.key_type = RTA_DATA_IMM;
2152 
2153 	switch (aead_xform->algo) {
2154 	case RTE_CRYPTO_AEAD_AES_GCM:
2155 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2156 		aeaddata.algmode = OP_ALG_AAI_GCM;
2157 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2158 		break;
2159 	case RTE_CRYPTO_AEAD_AES_CCM:
2160 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2161 			      aead_xform->algo);
2162 		goto error_out;
2163 	default:
2164 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2165 			      aead_xform->algo);
2166 		goto error_out;
2167 	}
2168 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2169 				DIR_ENC : DIR_DEC;
2170 
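	/* Use the first descriptor words as scratch for rta_inline_query(),
	 * which reports whether the key is small enough to be inlined in
	 * the shared descriptor or must be referenced by pointer.
	 */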
2171 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2172 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2173 			       MIN_JOB_DESC_SIZE,
2174 			       (unsigned int *)priv->flc_desc[0].desc,
2175 			       &priv->flc_desc[0].desc[1], 1);
2176 
2177 	if (err < 0) {
2178 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2179 		goto error_out;
2180 	}
2181 	if (priv->flc_desc[0].desc[1] & 1) {
2182 		aeaddata.key_type = RTA_DATA_IMM;
2183 	} else {
2184 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2185 		aeaddata.key_type = RTA_DATA_PTR;
2186 	}
2187 	priv->flc_desc[0].desc[0] = 0;
2188 	priv->flc_desc[0].desc[1] = 0;
2189 
2190 	if (session->dir == DIR_ENC)
2191 		bufsize = cnstr_shdsc_gcm_encap(
2192 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2193 				&aeaddata, session->iv.length,
2194 				session->digest_length);
2195 	else
2196 		bufsize = cnstr_shdsc_gcm_decap(
2197 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2198 				&aeaddata, session->iv.length,
2199 				session->digest_length);
2200 	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2202 		goto error_out;
2203 	}
2204 
2205 	flc->word1_sdl = (uint8_t)bufsize;
2206 	session->ctxt = priv;
2207 #ifdef CAAM_DESC_DEBUG
2208 	int i;
2209 	for (i = 0; i < bufsize; i++)
2210 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2211 			    i, priv->flc_desc[0].desc[i]);
2212 #endif
2213 	return 0;
2214 
2215 error_out:
2216 	rte_free(session->aead_key.data);
2217 	rte_free(priv);
2218 	return -1;
2219 }
2220 
2221 
2222 static int
2223 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2224 		    struct rte_crypto_sym_xform *xform,
2225 		    dpaa2_sec_session *session)
2226 {
2227 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2228 	struct alginfo authdata, cipherdata;
2229 	int bufsize;
2230 	struct ctxt_priv *priv;
2231 	struct sec_flow_context *flc;
2232 	struct rte_crypto_cipher_xform *cipher_xform;
2233 	struct rte_crypto_auth_xform *auth_xform;
2234 	int err;
2235 
2236 	PMD_INIT_FUNC_TRACE();
2237 
2238 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2239 		cipher_xform = &xform->cipher;
2240 		auth_xform = &xform->next->auth;
2241 		session->ctxt_type =
2242 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2243 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2244 	} else {
2245 		cipher_xform = &xform->next->cipher;
2246 		auth_xform = &xform->auth;
2247 		session->ctxt_type =
2248 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2249 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2250 	}
2251 
2252 	/* Set IV parameters */
2253 	session->iv.offset = cipher_xform->iv.offset;
2254 	session->iv.length = cipher_xform->iv.length;
2255 
	/* For SEC cipher-auth chaining only one descriptor is required */
2257 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2258 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2259 			RTE_CACHE_LINE_SIZE);
2260 	if (priv == NULL) {
2261 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2262 		return -1;
2263 	}
2264 
2265 	priv->fle_pool = dev_priv->fle_pool;
2266 	flc = &priv->flc_desc[0].flc;
2267 
2268 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2269 					       RTE_CACHE_LINE_SIZE);
2270 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2271 		DPAA2_SEC_ERR("No Memory for cipher key");
2272 		rte_free(priv);
2273 		return -1;
2274 	}
2275 	session->cipher_key.length = cipher_xform->key.length;
2276 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2277 					     RTE_CACHE_LINE_SIZE);
2278 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2279 		DPAA2_SEC_ERR("No Memory for auth key");
2280 		rte_free(session->cipher_key.data);
2281 		rte_free(priv);
2282 		return -1;
2283 	}
2284 	session->auth_key.length = auth_xform->key.length;
2285 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2286 	       cipher_xform->key.length);
2287 	memcpy(session->auth_key.data, auth_xform->key.data,
2288 	       auth_xform->key.length);
2289 
2290 	authdata.key = (size_t)session->auth_key.data;
2291 	authdata.keylen = session->auth_key.length;
2292 	authdata.key_enc_flags = 0;
2293 	authdata.key_type = RTA_DATA_IMM;
2294 
2295 	session->digest_length = auth_xform->digest_length;
2296 
2297 	switch (auth_xform->algo) {
2298 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2299 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2300 		authdata.algmode = OP_ALG_AAI_HMAC;
2301 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2302 		break;
2303 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2304 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2305 		authdata.algmode = OP_ALG_AAI_HMAC;
2306 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2307 		break;
2308 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2309 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2310 		authdata.algmode = OP_ALG_AAI_HMAC;
2311 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2312 		break;
2313 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2314 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2315 		authdata.algmode = OP_ALG_AAI_HMAC;
2316 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2317 		break;
2318 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2319 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2320 		authdata.algmode = OP_ALG_AAI_HMAC;
2321 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2322 		break;
2323 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2324 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2325 		authdata.algmode = OP_ALG_AAI_HMAC;
2326 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2327 		break;
2328 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2329 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2330 	case RTE_CRYPTO_AUTH_NULL:
2331 	case RTE_CRYPTO_AUTH_SHA1:
2332 	case RTE_CRYPTO_AUTH_SHA256:
2333 	case RTE_CRYPTO_AUTH_SHA512:
2334 	case RTE_CRYPTO_AUTH_SHA224:
2335 	case RTE_CRYPTO_AUTH_SHA384:
2336 	case RTE_CRYPTO_AUTH_MD5:
2337 	case RTE_CRYPTO_AUTH_AES_GMAC:
2338 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2339 	case RTE_CRYPTO_AUTH_AES_CMAC:
2340 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2341 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2342 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2343 			      auth_xform->algo);
2344 		goto error_out;
2345 	default:
2346 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2347 			      auth_xform->algo);
2348 		goto error_out;
2349 	}
2350 	cipherdata.key = (size_t)session->cipher_key.data;
2351 	cipherdata.keylen = session->cipher_key.length;
2352 	cipherdata.key_enc_flags = 0;
2353 	cipherdata.key_type = RTA_DATA_IMM;
2354 
2355 	switch (cipher_xform->algo) {
2356 	case RTE_CRYPTO_CIPHER_AES_CBC:
2357 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2358 		cipherdata.algmode = OP_ALG_AAI_CBC;
2359 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2360 		break;
2361 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2362 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2363 		cipherdata.algmode = OP_ALG_AAI_CBC;
2364 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2365 		break;
2366 	case RTE_CRYPTO_CIPHER_AES_CTR:
2367 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2368 		cipherdata.algmode = OP_ALG_AAI_CTR;
2369 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2370 		break;
2371 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2372 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2373 	case RTE_CRYPTO_CIPHER_NULL:
2374 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2375 	case RTE_CRYPTO_CIPHER_AES_ECB:
2376 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2377 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2378 			      cipher_xform->algo);
2379 		goto error_out;
2380 	default:
2381 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2382 			      cipher_xform->algo);
2383 		goto error_out;
2384 	}
2385 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2386 				DIR_ENC : DIR_DEC;
2387 
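	/* Query both key lengths at once: in the result, bit 0 covers the
	 * cipher key and bit 1 the auth key; a set bit means that key can
	 * be inlined in the shared descriptor.
	 */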
2388 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2389 	priv->flc_desc[0].desc[1] = authdata.keylen;
2390 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2391 			       MIN_JOB_DESC_SIZE,
2392 			       (unsigned int *)priv->flc_desc[0].desc,
2393 			       &priv->flc_desc[0].desc[2], 2);
2394 
2395 	if (err < 0) {
2396 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2397 		goto error_out;
2398 	}
2399 	if (priv->flc_desc[0].desc[2] & 1) {
2400 		cipherdata.key_type = RTA_DATA_IMM;
2401 	} else {
2402 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2403 		cipherdata.key_type = RTA_DATA_PTR;
2404 	}
2405 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2406 		authdata.key_type = RTA_DATA_IMM;
2407 	} else {
2408 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2409 		authdata.key_type = RTA_DATA_PTR;
2410 	}
2411 	priv->flc_desc[0].desc[0] = 0;
2412 	priv->flc_desc[0].desc[1] = 0;
2413 	priv->flc_desc[0].desc[2] = 0;
2414 
2415 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2416 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2417 					      0, SHR_SERIAL,
2418 					      &cipherdata, &authdata,
2419 					      session->iv.length,
2420 					      session->digest_length,
2421 					      session->dir);
2422 		if (bufsize < 0) {
			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2424 			goto error_out;
2425 		}
2426 	} else {
2427 		DPAA2_SEC_ERR("Hash before cipher not supported");
2428 		goto error_out;
2429 	}
2430 
2431 	flc->word1_sdl = (uint8_t)bufsize;
2432 	session->ctxt = priv;
2433 #ifdef CAAM_DESC_DEBUG
2434 	int i;
2435 	for (i = 0; i < bufsize; i++)
2436 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2437 			    i, priv->flc_desc[0].desc[i]);
2438 #endif
2439 
2440 	return 0;
2441 
2442 error_out:
2443 	rte_free(session->cipher_key.data);
2444 	rte_free(session->auth_key.data);
2445 	rte_free(priv);
2446 	return -1;
2447 }
2448 
2449 static int
2450 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2451 			    struct rte_crypto_sym_xform *xform,	void *sess)
2452 {
2453 	dpaa2_sec_session *session = sess;
2454 	int ret;
2455 
2456 	PMD_INIT_FUNC_TRACE();
2457 
2458 	if (unlikely(sess == NULL)) {
2459 		DPAA2_SEC_ERR("Invalid session struct");
2460 		return -1;
2461 	}
2462 
2463 	memset(session, 0, sizeof(dpaa2_sec_session));
2464 	/* Default IV length = 0 */
2465 	session->iv.length = 0;
2466 
2467 	/* Cipher Only */
2468 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2469 		session->ctxt_type = DPAA2_SEC_CIPHER;
2470 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2471 
2472 	/* Authentication Only */
2473 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2474 		   xform->next == NULL) {
2475 		session->ctxt_type = DPAA2_SEC_AUTH;
2476 		ret = dpaa2_sec_auth_init(dev, xform, session);
2477 
2478 	/* Cipher then Authenticate */
2479 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2480 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2481 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2482 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2483 
2484 	/* Authenticate then Cipher */
2485 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2486 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2487 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2488 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2489 
2490 	/* AEAD operation for AES-GCM kind of Algorithms */
2491 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2492 		   xform->next == NULL) {
2493 		ret = dpaa2_sec_aead_init(dev, xform, session);
2494 
2495 	} else {
2496 		DPAA2_SEC_ERR("Invalid crypto type");
2497 		return -EINVAL;
2498 	}
2499 
2500 	return ret;
2501 }
2502 
2503 static int
2504 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2505 			dpaa2_sec_session *session,
2506 			struct alginfo *aeaddata)
2507 {
2508 	PMD_INIT_FUNC_TRACE();
2509 
2510 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2511 					       RTE_CACHE_LINE_SIZE);
2512 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2513 		DPAA2_SEC_ERR("No Memory for aead key");
2514 		return -1;
2515 	}
2516 	memcpy(session->aead_key.data, aead_xform->key.data,
2517 	       aead_xform->key.length);
2518 
2519 	session->digest_length = aead_xform->digest_length;
2520 	session->aead_key.length = aead_xform->key.length;
2521 
2522 	aeaddata->key = (size_t)session->aead_key.data;
2523 	aeaddata->keylen = session->aead_key.length;
2524 	aeaddata->key_enc_flags = 0;
2525 	aeaddata->key_type = RTA_DATA_IMM;
2526 
2527 	switch (aead_xform->algo) {
2528 	case RTE_CRYPTO_AEAD_AES_GCM:
2529 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2530 		aeaddata->algmode = OP_ALG_AAI_GCM;
2531 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2532 		break;
2533 	case RTE_CRYPTO_AEAD_AES_CCM:
2534 		aeaddata->algtype = OP_ALG_ALGSEL_AES;
2535 		aeaddata->algmode = OP_ALG_AAI_CCM;
2536 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2537 		break;
2538 	default:
2539 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2540 			      aead_xform->algo);
2541 		return -1;
2542 	}
2543 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2544 				DIR_ENC : DIR_DEC;
2545 
2546 	return 0;
2547 }
2548 
2549 static int
2550 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2551 	struct rte_crypto_auth_xform *auth_xform,
2552 	dpaa2_sec_session *session,
2553 	struct alginfo *cipherdata,
2554 	struct alginfo *authdata)
2555 {
2556 	if (cipher_xform) {
2557 		session->cipher_key.data = rte_zmalloc(NULL,
2558 						       cipher_xform->key.length,
2559 						       RTE_CACHE_LINE_SIZE);
2560 		if (session->cipher_key.data == NULL &&
2561 				cipher_xform->key.length > 0) {
2562 			DPAA2_SEC_ERR("No Memory for cipher key");
2563 			return -ENOMEM;
2564 		}
2565 
2566 		session->cipher_key.length = cipher_xform->key.length;
2567 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2568 				cipher_xform->key.length);
2569 		session->cipher_alg = cipher_xform->algo;
2570 	} else {
2571 		session->cipher_key.data = NULL;
2572 		session->cipher_key.length = 0;
2573 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2574 	}
2575 
2576 	if (auth_xform) {
2577 		session->auth_key.data = rte_zmalloc(NULL,
2578 						auth_xform->key.length,
2579 						RTE_CACHE_LINE_SIZE);
2580 		if (session->auth_key.data == NULL &&
2581 				auth_xform->key.length > 0) {
2582 			DPAA2_SEC_ERR("No Memory for auth key");
2583 			return -ENOMEM;
2584 		}
2585 		session->auth_key.length = auth_xform->key.length;
2586 		memcpy(session->auth_key.data, auth_xform->key.data,
2587 				auth_xform->key.length);
2588 		session->auth_alg = auth_xform->algo;
2589 	} else {
2590 		session->auth_key.data = NULL;
2591 		session->auth_key.length = 0;
2592 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2593 	}
2594 
2595 	authdata->key = (size_t)session->auth_key.data;
2596 	authdata->keylen = session->auth_key.length;
2597 	authdata->key_enc_flags = 0;
2598 	authdata->key_type = RTA_DATA_IMM;
2599 	switch (session->auth_alg) {
2600 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2601 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2602 		authdata->algmode = OP_ALG_AAI_HMAC;
2603 		break;
2604 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2605 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2606 		authdata->algmode = OP_ALG_AAI_HMAC;
2607 		break;
2608 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2609 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2610 		authdata->algmode = OP_ALG_AAI_HMAC;
2611 		break;
2612 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2613 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2614 		authdata->algmode = OP_ALG_AAI_HMAC;
2615 		break;
2616 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2617 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2618 		authdata->algmode = OP_ALG_AAI_HMAC;
2619 		break;
2620 	case RTE_CRYPTO_AUTH_AES_CMAC:
2621 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2622 		break;
2623 	case RTE_CRYPTO_AUTH_NULL:
2624 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2625 		break;
2626 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2627 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2628 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2629 	case RTE_CRYPTO_AUTH_SHA1:
2630 	case RTE_CRYPTO_AUTH_SHA256:
2631 	case RTE_CRYPTO_AUTH_SHA512:
2632 	case RTE_CRYPTO_AUTH_SHA224:
2633 	case RTE_CRYPTO_AUTH_SHA384:
2634 	case RTE_CRYPTO_AUTH_MD5:
2635 	case RTE_CRYPTO_AUTH_AES_GMAC:
2636 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2637 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2638 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2639 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2640 			      session->auth_alg);
2641 		return -1;
2642 	default:
2643 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2644 			      session->auth_alg);
2645 		return -1;
2646 	}
2647 	cipherdata->key = (size_t)session->cipher_key.data;
2648 	cipherdata->keylen = session->cipher_key.length;
2649 	cipherdata->key_enc_flags = 0;
2650 	cipherdata->key_type = RTA_DATA_IMM;
2651 
2652 	switch (session->cipher_alg) {
2653 	case RTE_CRYPTO_CIPHER_AES_CBC:
2654 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2655 		cipherdata->algmode = OP_ALG_AAI_CBC;
2656 		break;
2657 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2658 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2659 		cipherdata->algmode = OP_ALG_AAI_CBC;
2660 		break;
2661 	case RTE_CRYPTO_CIPHER_AES_CTR:
2662 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2663 		cipherdata->algmode = OP_ALG_AAI_CTR;
2664 		break;
2665 	case RTE_CRYPTO_CIPHER_NULL:
2666 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2667 		break;
2668 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2669 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2670 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2671 	case RTE_CRYPTO_CIPHER_AES_ECB:
2672 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2673 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2674 			      session->cipher_alg);
2675 		return -1;
2676 	default:
2677 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2678 			      session->cipher_alg);
2679 		return -1;
2680 	}
2681 
2682 	return 0;
2683 }
2684 
2685 #ifdef RTE_LIBRTE_SECURITY_TEST
2686 static uint8_t aes_cbc_iv[] = {
2687 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2688 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2689 #endif
2690 
2691 static int
2692 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2693 			    struct rte_security_session_conf *conf,
2694 			    void *sess)
2695 {
2696 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2697 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2698 	struct rte_crypto_auth_xform *auth_xform = NULL;
2699 	struct rte_crypto_aead_xform *aead_xform = NULL;
2700 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2701 	struct ctxt_priv *priv;
2702 	struct ipsec_encap_pdb encap_pdb;
2703 	struct ipsec_decap_pdb decap_pdb;
2704 	struct alginfo authdata, cipherdata;
2705 	int bufsize;
2706 	struct sec_flow_context *flc;
2707 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2708 	int ret = -1;
2709 
2710 	PMD_INIT_FUNC_TRACE();
2711 
2712 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2713 				sizeof(struct ctxt_priv) +
2714 				sizeof(struct sec_flc_desc),
2715 				RTE_CACHE_LINE_SIZE);
2716 
2717 	if (priv == NULL) {
2718 		DPAA2_SEC_ERR("No memory for priv CTXT");
2719 		return -ENOMEM;
2720 	}
2721 
2722 	priv->fle_pool = dev_priv->fle_pool;
2723 	flc = &priv->flc_desc[0].flc;
2724 
2725 	memset(session, 0, sizeof(dpaa2_sec_session));
2726 
2727 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2728 		cipher_xform = &conf->crypto_xform->cipher;
2729 		if (conf->crypto_xform->next)
2730 			auth_xform = &conf->crypto_xform->next->auth;
2731 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2732 					session, &cipherdata, &authdata);
2733 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2734 		auth_xform = &conf->crypto_xform->auth;
2735 		if (conf->crypto_xform->next)
2736 			cipher_xform = &conf->crypto_xform->next->cipher;
2737 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2738 					session, &cipherdata, &authdata);
2739 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2740 		aead_xform = &conf->crypto_xform->aead;
2741 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2742 					session, &cipherdata);
2743 	} else {
2744 		DPAA2_SEC_ERR("XFORM not specified");
2745 		ret = -EINVAL;
2746 		goto out;
2747 	}
2748 	if (ret) {
2749 		DPAA2_SEC_ERR("Failed to process xform");
2750 		goto out;
2751 	}
2752 
2753 	session->ctxt_type = DPAA2_SEC_IPSEC;
2754 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2755 		uint8_t *hdr = NULL;
2756 		struct ip ip4_hdr;
2757 		struct rte_ipv6_hdr ip6_hdr;
2758 
2759 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2760 		/* For Sec Proto only one descriptor is required. */
2761 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
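		/* Keep the outer IP header inline in the PDB, source the IV
		 * from the SEC block and decrement the TTL on encapsulation
		 * (per the CAAM encap PDB option/HMO flags).
		 */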
2762 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2763 			PDBOPTS_ESP_OIHI_PDB_INL |
2764 			PDBOPTS_ESP_IVSRC |
2765 			PDBHMO_ESP_ENCAP_DTTL |
2766 			PDBHMO_ESP_SNR;
2767 		if (ipsec_xform->options.esn)
2768 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2769 		encap_pdb.spi = ipsec_xform->spi;
2770 		session->dir = DIR_ENC;
2771 		if (ipsec_xform->tunnel.type ==
2772 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2773 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2774 			ip4_hdr.ip_v = IPVERSION;
2775 			ip4_hdr.ip_hl = 5;
2776 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2777 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2778 			ip4_hdr.ip_id = 0;
2779 			ip4_hdr.ip_off = 0;
2780 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2781 			ip4_hdr.ip_p = IPPROTO_ESP;
2782 			ip4_hdr.ip_sum = 0;
2783 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2784 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2785 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2786 					&ip4_hdr, sizeof(struct ip));
2787 			hdr = (uint8_t *)&ip4_hdr;
2788 		} else if (ipsec_xform->tunnel.type ==
2789 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2790 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2791 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2792 				((ipsec_xform->tunnel.ipv6.dscp <<
2793 					RTE_IPV6_HDR_TC_SHIFT) &
2794 					RTE_IPV6_HDR_TC_MASK) |
2795 				((ipsec_xform->tunnel.ipv6.flabel <<
2796 					RTE_IPV6_HDR_FL_SHIFT) &
2797 					RTE_IPV6_HDR_FL_MASK));
2798 			/* Payload length will be updated by HW */
2799 			ip6_hdr.payload_len = 0;
2800 			ip6_hdr.hop_limits =
2801 					ipsec_xform->tunnel.ipv6.hlimit;
2802 			ip6_hdr.proto = (ipsec_xform->proto ==
2803 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2804 					IPPROTO_ESP : IPPROTO_AH;
2805 			memcpy(&ip6_hdr.src_addr,
2806 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2807 			memcpy(&ip6_hdr.dst_addr,
2808 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2809 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2810 			hdr = (uint8_t *)&ip6_hdr;
2811 		}
2812 
2813 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2814 				1, 0, SHR_SERIAL, &encap_pdb,
2815 				hdr, &cipherdata, &authdata);
2816 	} else if (ipsec_xform->direction ==
2817 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2818 		flc->dhr = SEC_FLC_DHR_INBOUND;
2819 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
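		/* The upper 16 bits of the decap PDB options word carry the
		 * length of the outer IP header to be stripped.
		 */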
2820 		decap_pdb.options = (ipsec_xform->tunnel.type ==
2821 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2822 				sizeof(struct ip) << 16 :
2823 				sizeof(struct rte_ipv6_hdr) << 16;
2824 		if (ipsec_xform->options.esn)
2825 			decap_pdb.options |= PDBOPTS_ESP_ESN;
2826 		session->dir = DIR_DEC;
2827 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2828 				1, 0, SHR_SERIAL,
2829 				&decap_pdb, &cipherdata, &authdata);
2830 	} else
2831 		goto out;
2832 
2833 	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2835 		goto out;
2836 	}
2837 
2838 	flc->word1_sdl = (uint8_t)bufsize;
2839 
2840 	/* Enable the stashing control bit */
2841 	DPAA2_SET_FLC_RSC(flc);
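	/* The low bits OR-ed into the rx_vq address select the stashing
	 * configuration; 0x14 requests data and annotation stashing, as in
	 * the dpaa2 Ethernet PMD.
	 */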
2842 	flc->word2_rflc_31_0 = lower_32_bits(
2843 			(size_t)&(((struct dpaa2_sec_qp *)
2844 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2845 	flc->word3_rflc_63_32 = upper_32_bits(
2846 			(size_t)&(((struct dpaa2_sec_qp *)
2847 			dev->data->queue_pairs[0])->rx_vq));
2848 
2849 	/* Set EWS bit i.e. enable write-safe */
2850 	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
2852 	DPAA2_SET_FLC_REUSE_BS(flc);
2853 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2854 	DPAA2_SET_FLC_REUSE_FF(flc);
2855 
2856 	session->ctxt = priv;
2857 
2858 	return 0;
2859 out:
2860 	rte_free(session->auth_key.data);
2861 	rte_free(session->cipher_key.data);
2862 	rte_free(priv);
2863 	return ret;
2864 }
2865 
2866 static int
2867 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2868 			   struct rte_security_session_conf *conf,
2869 			   void *sess)
2870 {
2871 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2872 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2873 	struct rte_crypto_auth_xform *auth_xform = NULL;
2874 	struct rte_crypto_cipher_xform *cipher_xform;
2875 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2876 	struct ctxt_priv *priv;
2877 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2878 	struct alginfo authdata, cipherdata;
2879 	struct alginfo *p_authdata = NULL;
2880 	int bufsize = -1;
2881 	struct sec_flow_context *flc;
2882 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2883 	int swap = true;
2884 #else
2885 	int swap = false;
2886 #endif
2887 
2888 	PMD_INIT_FUNC_TRACE();
2889 
2890 	memset(session, 0, sizeof(dpaa2_sec_session));
2891 
2892 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2893 				sizeof(struct ctxt_priv) +
2894 				sizeof(struct sec_flc_desc),
2895 				RTE_CACHE_LINE_SIZE);
2896 
2897 	if (priv == NULL) {
2898 		DPAA2_SEC_ERR("No memory for priv CTXT");
2899 		return -ENOMEM;
2900 	}
2901 
2902 	priv->fle_pool = dev_priv->fle_pool;
2903 	flc = &priv->flc_desc[0].flc;
2904 
	/* Find the xform types */
2906 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2907 		cipher_xform = &xform->cipher;
2908 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2909 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2910 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2911 		cipher_xform = &xform->cipher;
2912 		auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next != NULL &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2915 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2916 		cipher_xform = &xform->next->cipher;
2917 		auth_xform = &xform->auth;
	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		rte_free(priv);
		return -EINVAL;
	}
2922 
2923 	session->ctxt_type = DPAA2_SEC_PDCP;
2924 	if (cipher_xform) {
2925 		session->cipher_key.data = rte_zmalloc(NULL,
2926 					       cipher_xform->key.length,
2927 					       RTE_CACHE_LINE_SIZE);
2928 		if (session->cipher_key.data == NULL &&
2929 				cipher_xform->key.length > 0) {
2930 			DPAA2_SEC_ERR("No Memory for cipher key");
2931 			rte_free(priv);
2932 			return -ENOMEM;
2933 		}
2934 		session->cipher_key.length = cipher_xform->key.length;
2935 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2936 			cipher_xform->key.length);
2937 		session->dir =
2938 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2939 					DIR_ENC : DIR_DEC;
2940 		session->cipher_alg = cipher_xform->algo;
2941 	} else {
2942 		session->cipher_key.data = NULL;
2943 		session->cipher_key.length = 0;
2944 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2945 		session->dir = DIR_ENC;
2946 	}
2947 
2948 	session->pdcp.domain = pdcp_xform->domain;
2949 	session->pdcp.bearer = pdcp_xform->bearer;
2950 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2951 	session->pdcp.sn_size = pdcp_xform->sn_size;
2952 	session->pdcp.hfn = pdcp_xform->hfn;
2953 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2954 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* HFN override offset location is stored in the iv.offset value */
2956 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2957 
2958 	cipherdata.key = (size_t)session->cipher_key.data;
2959 	cipherdata.keylen = session->cipher_key.length;
2960 	cipherdata.key_enc_flags = 0;
2961 	cipherdata.key_type = RTA_DATA_IMM;
2962 
2963 	switch (session->cipher_alg) {
2964 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2965 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2966 		break;
2967 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2968 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2969 		break;
2970 	case RTE_CRYPTO_CIPHER_AES_CTR:
2971 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2972 		break;
2973 	case RTE_CRYPTO_CIPHER_NULL:
2974 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2975 		break;
2976 	default:
2977 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2978 			      session->cipher_alg);
2979 		goto out;
2980 	}
2981 
2982 	if (auth_xform) {
2983 		session->auth_key.data = rte_zmalloc(NULL,
2984 						     auth_xform->key.length,
2985 						     RTE_CACHE_LINE_SIZE);
2986 		if (!session->auth_key.data &&
2987 		    auth_xform->key.length > 0) {
2988 			DPAA2_SEC_ERR("No Memory for auth key");
2989 			rte_free(session->cipher_key.data);
2990 			rte_free(priv);
2991 			return -ENOMEM;
2992 		}
2993 		session->auth_key.length = auth_xform->key.length;
2994 		memcpy(session->auth_key.data, auth_xform->key.data,
2995 		       auth_xform->key.length);
2996 		session->auth_alg = auth_xform->algo;
2997 	} else {
2998 		session->auth_key.data = NULL;
2999 		session->auth_key.length = 0;
3000 		session->auth_alg = 0;
3001 	}
3002 	authdata.key = (size_t)session->auth_key.data;
3003 	authdata.keylen = session->auth_key.length;
3004 	authdata.key_enc_flags = 0;
3005 	authdata.key_type = RTA_DATA_IMM;
3006 
3007 	if (session->auth_alg) {
3008 		switch (session->auth_alg) {
3009 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3010 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3011 			break;
3012 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3013 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3014 			break;
3015 		case RTE_CRYPTO_AUTH_AES_CMAC:
3016 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3017 			break;
3018 		case RTE_CRYPTO_AUTH_NULL:
3019 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3020 			break;
3021 		default:
3022 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3023 				      session->auth_alg);
3024 			goto out;
3025 		}
3026 
3027 		p_authdata = &authdata;
3028 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3030 		goto out;
3031 	}
3032 
3033 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3034 		if (session->dir == DIR_ENC)
3035 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3036 					priv->flc_desc[0].desc, 1, swap,
3037 					pdcp_xform->hfn,
3038 					session->pdcp.sn_size,
3039 					pdcp_xform->bearer,
3040 					pdcp_xform->pkt_dir,
3041 					pdcp_xform->hfn_threshold,
3042 					&cipherdata, &authdata,
3043 					0);
3044 		else if (session->dir == DIR_DEC)
3045 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3046 					priv->flc_desc[0].desc, 1, swap,
3047 					pdcp_xform->hfn,
3048 					session->pdcp.sn_size,
3049 					pdcp_xform->bearer,
3050 					pdcp_xform->pkt_dir,
3051 					pdcp_xform->hfn_threshold,
3052 					&cipherdata, &authdata,
3053 					0);
3054 	} else {
3055 		if (session->dir == DIR_ENC)
3056 			bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3057 					priv->flc_desc[0].desc, 1, swap,
3058 					session->pdcp.sn_size,
3059 					pdcp_xform->hfn,
3060 					pdcp_xform->bearer,
3061 					pdcp_xform->pkt_dir,
3062 					pdcp_xform->hfn_threshold,
3063 					&cipherdata, p_authdata, 0);
3064 		else if (session->dir == DIR_DEC)
3065 			bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3066 					priv->flc_desc[0].desc, 1, swap,
3067 					session->pdcp.sn_size,
3068 					pdcp_xform->hfn,
3069 					pdcp_xform->bearer,
3070 					pdcp_xform->pkt_dir,
3071 					pdcp_xform->hfn_threshold,
3072 					&cipherdata, p_authdata, 0);
3073 	}
3074 
3075 	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
3077 		goto out;
3078 	}
3079 
3080 	/* Enable the stashing control bit */
3081 	DPAA2_SET_FLC_RSC(flc);
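	/* As for IPsec sessions, 0x14 in the low bits of the rx_vq address
	 * requests data and annotation stashing.
	 */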
3082 	flc->word2_rflc_31_0 = lower_32_bits(
3083 			(size_t)&(((struct dpaa2_sec_qp *)
3084 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3085 	flc->word3_rflc_63_32 = upper_32_bits(
3086 			(size_t)&(((struct dpaa2_sec_qp *)
3087 			dev->data->queue_pairs[0])->rx_vq));
3088 
3089 	flc->word1_sdl = (uint8_t)bufsize;
3090 
3091 	/* TODO - check the perf impact or
3092 	 * align as per descriptor type
3093 	 * Set EWS bit i.e. enable write-safe
3094 	 * DPAA2_SET_FLC_EWS(flc);
3095 	 */
3096 
3097 	/* Set BS = 1 i.e reuse input buffers as output buffers */
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3099 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3100 	DPAA2_SET_FLC_REUSE_FF(flc);
3101 
3102 	session->ctxt = priv;
3103 
3104 	return 0;
3105 out:
3106 	rte_free(session->auth_key.data);
3107 	rte_free(session->cipher_key.data);
3108 	rte_free(priv);
3109 	return -1;
3110 }
3111 
3112 static int
3113 dpaa2_sec_security_session_create(void *dev,
3114 				  struct rte_security_session_conf *conf,
3115 				  struct rte_security_session *sess,
3116 				  struct rte_mempool *mempool)
3117 {
3118 	void *sess_private_data;
3119 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3120 	int ret;
3121 
3122 	if (rte_mempool_get(mempool, &sess_private_data)) {
3123 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3124 		return -ENOMEM;
3125 	}
3126 
3127 	switch (conf->protocol) {
3128 	case RTE_SECURITY_PROTOCOL_IPSEC:
3129 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3130 				sess_private_data);
3131 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	default:
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
3141 	if (ret != 0) {
3142 		DPAA2_SEC_ERR("Failed to configure session parameters");
3143 		/* Return session to mempool */
3144 		rte_mempool_put(mempool, sess_private_data);
3145 		return ret;
3146 	}
3147 
3148 	set_sec_session_private_data(sess, sess_private_data);
3149 
3150 	return ret;
3151 }
3152 
3153 /** Clear the memory of session so it doesn't leave key material behind */
3154 static int
3155 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3156 		struct rte_security_session *sess)
3157 {
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
3162 
3163 	if (sess_priv) {
3164 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3165 
3166 		rte_free(s->ctxt);
3167 		rte_free(s->cipher_key.data);
3168 		rte_free(s->auth_key.data);
3169 		memset(s, 0, sizeof(dpaa2_sec_session));
3170 		set_sec_session_private_data(sess, NULL);
3171 		rte_mempool_put(sess_mp, sess_priv);
3172 	}
3173 	return 0;
3174 }
3175 
3176 static int
3177 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3178 		struct rte_crypto_sym_xform *xform,
3179 		struct rte_cryptodev_sym_session *sess,
3180 		struct rte_mempool *mempool)
3181 {
3182 	void *sess_private_data;
3183 	int ret;
3184 
3185 	if (rte_mempool_get(mempool, &sess_private_data)) {
3186 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3187 		return -ENOMEM;
3188 	}
3189 
3190 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3191 	if (ret != 0) {
3192 		DPAA2_SEC_ERR("Failed to configure session parameters");
3193 		/* Return session to mempool */
3194 		rte_mempool_put(mempool, sess_private_data);
3195 		return ret;
3196 	}
3197 
3198 	set_sym_session_private_data(sess, dev->driver_id,
3199 		sess_private_data);
3200 
3201 	return 0;
3202 }
3203 
3204 /** Clear the memory of session so it doesn't leave key material behind */
3205 static void
3206 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3207 		struct rte_cryptodev_sym_session *sess)
3208 {
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
3213 
	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
3222 	}
3223 }
3224 
3225 static int
3226 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3227 			struct rte_cryptodev_config *config __rte_unused)
3228 {
3229 	PMD_INIT_FUNC_TRACE();
3230 
3231 	return 0;
3232 }
3233 
3234 static int
3235 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3236 {
3237 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3238 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3239 	struct dpseci_attr attr;
3240 	struct dpaa2_queue *dpaa2_q;
3241 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3242 					dev->data->queue_pairs;
3243 	struct dpseci_rx_queue_attr rx_attr;
3244 	struct dpseci_tx_queue_attr tx_attr;
3245 	int ret, i;
3246 
3247 	PMD_INIT_FUNC_TRACE();
3248 
3249 	memset(&attr, 0, sizeof(struct dpseci_attr));
3250 
3251 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3252 	if (ret) {
3253 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3254 			      priv->hw_id);
3255 		goto get_attr_failure;
3256 	}
3257 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3258 	if (ret) {
3259 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3260 		goto get_attr_failure;
3261 	}
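	/* Cache the Rx/Tx frame queue IDs of each configured queue pair so
	 * the data path can address the hardware frame queues directly.
	 */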
3262 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3263 		dpaa2_q = &qp[i]->rx_vq;
3264 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3265 				    &rx_attr);
3266 		dpaa2_q->fqid = rx_attr.fqid;
3267 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3268 	}
3269 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3270 		dpaa2_q = &qp[i]->tx_vq;
3271 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3272 				    &tx_attr);
3273 		dpaa2_q->fqid = tx_attr.fqid;
3274 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3275 	}
3276 
3277 	return 0;
3278 get_attr_failure:
3279 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3280 	return -1;
3281 }
3282 
3283 static void
3284 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3285 {
3286 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3287 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3288 	int ret;
3289 
3290 	PMD_INIT_FUNC_TRACE();
3291 
3292 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3293 	if (ret) {
		DPAA2_SEC_ERR("Failure disabling dpseci device %d",
3295 			     priv->hw_id);
3296 		return;
3297 	}
3298 
3299 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3300 	if (ret < 0) {
		DPAA2_SEC_ERR("SEC device cannot be reset: error = %x", ret);
3302 		return;
3303 	}
3304 }
3305 
3306 static int
3307 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3308 {
3309 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3310 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3311 	int ret;
3312 
3313 	PMD_INIT_FUNC_TRACE();
3314 
3315 	/* Function is reverse of dpaa2_sec_dev_init.
3316 	 * It does the following:
3317 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3318 	 * 2. Close the DPSECI device
3319 	 * 3. Free the allocated resources.
3320 	 */
3321 
	/* Close the device at the underlying layer */
3323 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3324 	if (ret) {
3325 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3326 		return -1;
3327 	}
3328 
	/* Release the hw reference and free the allocated dpseci object */
3330 	priv->hw = NULL;
3331 	rte_free(dpseci);
3332 
3333 	return 0;
3334 }
3335 
3336 static void
3337 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3338 			struct rte_cryptodev_info *info)
3339 {
3340 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3341 
3342 	PMD_INIT_FUNC_TRACE();
3343 	if (info != NULL) {
3344 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3345 		info->feature_flags = dev->feature_flags;
3346 		info->capabilities = dpaa2_sec_capabilities;
3347 		/* No limit of number of sessions */
3348 		info->sym.max_nb_sessions = 0;
3349 		info->driver_id = cryptodev_driver_id;
3350 	}
3351 }
3352 
3353 static
3354 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3355 			 struct rte_cryptodev_stats *stats)
3356 {
3357 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3358 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3359 	struct dpseci_sec_counters counters = {0};
3360 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3361 					dev->data->queue_pairs;
3362 	int ret, i;
3363 
3364 	PMD_INIT_FUNC_TRACE();
3365 	if (stats == NULL) {
3366 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3367 		return;
3368 	}
3369 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3370 		if (qp[i] == NULL) {
3371 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3372 			continue;
3373 		}
3374 
3375 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3376 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3377 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3378 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3379 	}
3380 
3381 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3382 				      &counters);
3383 	if (ret) {
		DPAA2_SEC_ERR("Reading SEC hardware counters failed");
3385 	} else {
3386 		DPAA2_SEC_INFO("dpseci hardware stats:"
3387 			    "\n\tNum of Requests Dequeued = %" PRIu64
3388 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3389 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3390 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3391 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3392 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3393 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3394 			    counters.dequeued_requests,
3395 			    counters.ob_enc_requests,
3396 			    counters.ib_dec_requests,
3397 			    counters.ob_enc_bytes,
3398 			    counters.ob_prot_bytes,
3399 			    counters.ib_dec_bytes,
3400 			    counters.ib_valid_bytes);
3401 	}
3402 }
3403 
3404 static
3405 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3406 {
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

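	/* Clear the software counters on every initialised queue pair; each
	 * virtual queue carries the full set of rx/tx/err counters, so all
	 * six fields are reset.
	 */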
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}

static void __attribute__((hot))
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	ev->event_ptr = sec_fd_to_mbuf(fd);

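	/* Parallel events carry no ordering or atomicity guarantee, so the
	 * DQRR entry can be consumed right away.
	 */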
	qbman_swp_dqrr_consume(swp, dq);
}

static void
dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	uint8_t dqrr_index;
	struct rte_crypto_op *crypto_op;

	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->event_ptr = sec_fd_to_mbuf(fd);
	/* Read the completed crypto op back from the event, not from the
	 * stale value that event_ptr held on entry.
	 */
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
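	/* For atomic queues the DQRR entry is held (not consumed here) until
	 * the application releases it. Record the held index in the mbuf,
	 * stored as index + 1 so that 0 can mean "no entry held".
	 */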
	dqrr_index = qbman_get_dqrr_idx(dq);
	crypto_op->sym->m_src->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
}

int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event *event)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct dpseci_rx_queue_cfg cfg;
	uint8_t priority;
	int ret;

	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
	else
		return -EINVAL;

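	/* Map the eventdev priority (0 = highest, 255 = lowest) onto the
	 * number of priority levels supported by this DPCON instance.
	 */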
	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
		   (dpcon->num_priorities - 1);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
	cfg.dest_cfg.priority = priority;

	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(qp);
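	/* Atomic scheduling needs the hardware to preserve the dequeue
	 * order, so enable order preservation on the Rx queue in that case.
	 */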
	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
		cfg.order_preservation_en = 1;
	}
	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret) {
		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));

	return 0;
}

int
dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int ret;

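	/* Detaching re-points the Rx queue destination to "none" so frames
	 * are no longer delivered to the DPCON.
	 */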
	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;

	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret)
		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};

static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

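/* NULL entries below are rte_security operations that this PMD does not
 * implement.
 */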
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_security_ctx *security_instance;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[30];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		DPAA2_SEC_ERR("DPAA2 SEC device not found");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;

	/*
	 * For secondary processes, don't initialise any further as the
	 * primary process has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for the primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/* Open the DPSECI device via MC and save the handle for later use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error allocating memory for the dpseci object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpseci device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			     "Cannot get dpseci device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
			"dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

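	/* Create the per-device pool of FLE buffers used on the data path to
	 * build frame-list entries. The pool name embeds the PID and device
	 * id so that multiple processes and devices do not collide.
	 */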
	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
			getpid(), cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}

static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
			dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

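	/* The LX2160A family carries a newer SEC block, so switch the RTA
	 * descriptor library to SEC ERA 10 on that SoC.
	 */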
	if (dpaa2_svr_family == SVR_LX2160A)
		rta_set_sec_era(RTA_SEC_ERA_10);

	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);

RTE_INIT(dpaa2_sec_init_log)
{
	/* Crypto PMD logs; default level is NOTICE */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}