xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision bbbe38a6d59ccdda25917712701e629d0b10af6f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35 
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
40 #include <desc/algo.h>
41 
42 /* A minimum job descriptor consists of a one-word job descriptor HEADER
43  * and a pointer to the shared descriptor.
44  */
45 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
46 #define FSL_VENDOR_ID           0x1957
47 #define FSL_DEVICE_ID           0x410
48 #define FSL_SUBSYSTEM_SEC       1
49 #define FSL_MC_DPSECI_DEVID     3
50 
51 #define NO_PREFETCH 0
52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
53 #define FLE_POOL_NUM_BUFS	32000
54 #define FLE_POOL_BUF_SIZE	256
55 #define FLE_POOL_CACHE_SIZE	512
56 #define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
57 #define SEC_FLC_DHR_OUTBOUND	-114
58 #define SEC_FLC_DHR_INBOUND	0
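/* A sketch of the layout assumptions behind these sizes: FLE_POOL_BUF_SIZE
 * (256 B) is enough for the scratch FLE, the out/in FLE pair, a few SG
 * entries and a scratch copy of the digest, while FLE_SG_MEM_SIZE() adds one
 * 32 B qbman_fle per extra segment. The SEC_FLC_DHR_* values are the data
 * head-room deltas applied to data_off in sec_simple_fd_to_mbuf(): outbound
 * protocol encap prepends headers, moving the data start back into the
 * head-room, hence the negative delta.
 */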
59 
60 static uint8_t cryptodev_driver_id;
61 
62 #ifdef RTE_LIB_SECURITY
63 static inline int
64 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
65 			   struct rte_crypto_op *op,
66 			   struct qbman_fd *fd, uint16_t bpid)
67 {
68 	struct rte_crypto_sym_op *sym_op = op->sym;
69 	struct ctxt_priv *priv = sess->ctxt;
70 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
71 	struct sec_flow_context *flc;
72 	struct rte_mbuf *mbuf;
73 	uint32_t in_len = 0, out_len = 0;
74 
75 	if (sym_op->m_dst)
76 		mbuf = sym_op->m_dst;
77 	else
78 		mbuf = sym_op->m_src;
79 
80 	/* The first FLE entry is scratch space for the op pointer and session ctxt */
81 	fle = (struct qbman_fle *)rte_malloc(NULL,
82 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
83 			RTE_CACHE_LINE_SIZE);
84 	if (unlikely(!fle)) {
85 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
86 		return -ENOMEM;
87 	}
88 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
89 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
90 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
91 
92 	/* Save the shared descriptor */
93 	flc = &priv->flc_desc[0].flc;
94 
95 	op_fle = fle + 1;
96 	ip_fle = fle + 2;
97 	sge = fle + 3;
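	/* Frame-list layout used throughout this file: fle[0] is scratch
	 * (holds the op pointer and ctxt, read back on dequeue), fle[1] is
	 * the output FLE, fle[2] the input FLE, and the scatter/gather
	 * entries start at fle[3].
	 */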
98 
99 	if (likely(bpid < MAX_BPID)) {
100 		DPAA2_SET_FD_BPID(fd, bpid);
101 		DPAA2_SET_FLE_BPID(op_fle, bpid);
102 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
103 	} else {
104 		DPAA2_SET_FD_IVP(fd);
105 		DPAA2_SET_FLE_IVP(op_fle);
106 		DPAA2_SET_FLE_IVP(ip_fle);
107 	}
108 
109 	/* Configure FD as a FRAME LIST */
110 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
111 	DPAA2_SET_FD_COMPOUND_FMT(fd);
112 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
113 
114 	/* Configure Output FLE with Scatter/Gather Entry */
115 	DPAA2_SET_FLE_SG_EXT(op_fle);
116 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
117 
118 	/* Configure Output SGE for Encap/Decap */
119 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
120 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
121 	/* o/p segs */
122 	while (mbuf->next) {
123 		sge->length = mbuf->data_len;
124 		out_len += sge->length;
125 		sge++;
126 		mbuf = mbuf->next;
127 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
128 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
129 	}
130 	/* Use buf_len for the last buffer so that extra data can be appended */
131 	sge->length = mbuf->buf_len - mbuf->data_off;
132 	out_len += sge->length;
133 
134 	DPAA2_SET_FLE_FIN(sge);
135 	op_fle->length = out_len;
136 
137 	sge++;
138 	mbuf = sym_op->m_src;
139 
140 	/* Configure Input FLE with Scatter/Gather Entry */
141 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
142 	DPAA2_SET_FLE_SG_EXT(ip_fle);
143 	DPAA2_SET_FLE_FIN(ip_fle);
144 
145 	/* Configure input SGE for Encap/Decap */
146 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
147 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
148 	sge->length = mbuf->data_len;
149 	in_len += sge->length;
150 
151 	mbuf = mbuf->next;
152 	/* i/p segs */
153 	while (mbuf) {
154 		sge++;
155 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
156 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
157 		sge->length = mbuf->data_len;
158 		in_len += sge->length;
159 		mbuf = mbuf->next;
160 	}
161 	ip_fle->length = in_len;
162 	DPAA2_SET_FLE_FIN(sge);
163 
164 	/* For PDCP, the per-packet HFN override is stored in the mbuf
165 	 * private area, after sym_op.
166 	 */
167 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
168 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
169 					sess->pdcp.hfn_ovd_offset);
170 		/* Enable HFN override */
171 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
172 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
173 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
174 	}
175 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
176 
177 	return 0;
178 }
179 
180 static inline int
181 build_proto_compound_fd(dpaa2_sec_session *sess,
182 	       struct rte_crypto_op *op,
183 	       struct qbman_fd *fd, uint16_t bpid)
184 {
185 	struct rte_crypto_sym_op *sym_op = op->sym;
186 	struct ctxt_priv *priv = sess->ctxt;
187 	struct qbman_fle *fle, *ip_fle, *op_fle;
188 	struct sec_flow_context *flc;
189 	struct rte_mbuf *src_mbuf = sym_op->m_src;
190 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
191 	int retval;
192 
193 	if (!dst_mbuf)
194 		dst_mbuf = src_mbuf;
195 
196 	/* Save the shared descriptor */
197 	flc = &priv->flc_desc[0].flc;
198 
199 	/* Use the first (scratch) FLE entry to store the op pointer and ctxt */
200 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
201 	if (retval) {
202 		DPAA2_SEC_DP_ERR("Memory alloc failed");
203 		return -ENOMEM;
204 	}
205 	memset(fle, 0, FLE_POOL_BUF_SIZE);
206 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
207 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
208 
209 	op_fle = fle + 1;
210 	ip_fle = fle + 2;
211 
212 	if (likely(bpid < MAX_BPID)) {
213 		DPAA2_SET_FD_BPID(fd, bpid);
214 		DPAA2_SET_FLE_BPID(op_fle, bpid);
215 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
216 	} else {
217 		DPAA2_SET_FD_IVP(fd);
218 		DPAA2_SET_FLE_IVP(op_fle);
219 		DPAA2_SET_FLE_IVP(ip_fle);
220 	}
221 
222 	/* Configure FD as a FRAME LIST */
223 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
224 	DPAA2_SET_FD_COMPOUND_FMT(fd);
225 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
226 
227 	/* Configure Output FLE with dst mbuf data */
228 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
229 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
230 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
231 
232 	/* Configure Input FLE with src mbuf data */
233 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
234 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
235 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
236 
237 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
238 	DPAA2_SET_FLE_FIN(ip_fle);
239 
240 	/* For PDCP, the per-packet HFN override is stored in the mbuf
241 	 * private area, after sym_op.
242 	 */
243 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
244 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
245 					sess->pdcp.hfn_ovd_offset);
246 		/* Enable HFN override */
247 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
248 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
249 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
250 	}
251 
252 	return 0;
253 
254 }
255 
256 static inline int
257 build_proto_fd(dpaa2_sec_session *sess,
258 	       struct rte_crypto_op *op,
259 	       struct qbman_fd *fd, uint16_t bpid)
260 {
261 	struct rte_crypto_sym_op *sym_op = op->sym;
262 	if (sym_op->m_dst)
263 		return build_proto_compound_fd(sess, op, fd, bpid);
264 
265 	struct ctxt_priv *priv = sess->ctxt;
266 	struct sec_flow_context *flc;
267 	struct rte_mbuf *mbuf = sym_op->m_src;
268 
269 	if (likely(bpid < MAX_BPID))
270 		DPAA2_SET_FD_BPID(fd, bpid);
271 	else
272 		DPAA2_SET_FD_IVP(fd);
273 
274 	/* Save the shared descriptor */
275 	flc = &priv->flc_desc[0].flc;
276 
277 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
278 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
279 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
280 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
281 
282 	/* Park the mbuf IOVA in the digest field and stash the op pointer in buf_iova (restored in sec_simple_fd_to_mbuf) */
283 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
284 	mbuf->buf_iova = (size_t)op;
285 
286 	return 0;
287 }
288 #endif
289 
290 static inline int
291 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
292 		 struct rte_crypto_op *op,
293 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
294 {
295 	struct rte_crypto_sym_op *sym_op = op->sym;
296 	struct ctxt_priv *priv = sess->ctxt;
297 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
298 	struct sec_flow_context *flc;
299 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
300 	int icv_len = sess->digest_length;
301 	uint8_t *old_icv;
302 	struct rte_mbuf *mbuf;
303 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
304 			sess->iv.offset);
305 
306 	if (sym_op->m_dst)
307 		mbuf = sym_op->m_dst;
308 	else
309 		mbuf = sym_op->m_src;
310 
311 	/* The first FLE entry is scratch space for the op pointer and session ctxt */
312 	fle = (struct qbman_fle *)rte_malloc(NULL,
313 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
314 			RTE_CACHE_LINE_SIZE);
315 	if (unlikely(!fle)) {
316 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
317 		return -ENOMEM;
318 	}
319 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
320 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
321 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
322 
323 	op_fle = fle + 1;
324 	ip_fle = fle + 2;
325 	sge = fle + 3;
326 
327 	/* Save the shared descriptor */
328 	flc = &priv->flc_desc[0].flc;
329 
330 	/* Configure FD as a FRAME LIST */
331 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
332 	DPAA2_SET_FD_COMPOUND_FMT(fd);
333 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
334 
335 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
336 		   "iv-len=%d data_off: 0x%x\n",
337 		   sym_op->aead.data.offset,
338 		   sym_op->aead.data.length,
339 		   sess->digest_length,
340 		   sess->iv.length,
341 		   sym_op->m_src->data_off);
342 
343 	/* Configure Output FLE with Scatter/Gather Entry */
344 	DPAA2_SET_FLE_SG_EXT(op_fle);
345 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
346 
347 	if (auth_only_len)
348 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
349 
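	/* For encryption the output frame carries the ICV appended to the
	 * ciphertext, so icv_len is included below; for decryption only the
	 * recovered plaintext is written out.
	 */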
350 	op_fle->length = (sess->dir == DIR_ENC) ?
351 			(sym_op->aead.data.length + icv_len) :
352 			sym_op->aead.data.length;
353 
354 	/* Configure Output SGE for Encap/Decap */
355 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
356 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
357 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
358 
359 	mbuf = mbuf->next;
360 	/* o/p segs */
361 	while (mbuf) {
362 		sge++;
363 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
364 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
365 		sge->length = mbuf->data_len;
366 		mbuf = mbuf->next;
367 	}
368 	sge->length -= icv_len;
369 
370 	if (sess->dir == DIR_ENC) {
371 		sge++;
372 		DPAA2_SET_FLE_ADDR(sge,
373 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
374 		sge->length = icv_len;
375 	}
376 	DPAA2_SET_FLE_FIN(sge);
377 
378 	sge++;
379 	mbuf = sym_op->m_src;
380 
381 	/* Configure Input FLE with Scatter/Gather Entry */
382 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
383 	DPAA2_SET_FLE_SG_EXT(ip_fle);
384 	DPAA2_SET_FLE_FIN(ip_fle);
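	/* The input to SEC is IV || AAD || payload, plus the received ICV
	 * when decrypting so it can be verified in-line.
	 */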
385 	ip_fle->length = (sess->dir == DIR_ENC) ?
386 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
387 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
388 		 icv_len);
389 
390 	/* Configure Input SGE for Encap/Decap */
391 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
392 	sge->length = sess->iv.length;
393 
394 	sge++;
395 	if (auth_only_len) {
396 		DPAA2_SET_FLE_ADDR(sge,
397 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
398 		sge->length = auth_only_len;
399 		sge++;
400 	}
401 
402 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
403 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
404 				mbuf->data_off);
405 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
406 
407 	mbuf = mbuf->next;
408 	/* i/p segs */
409 	while (mbuf) {
410 		sge++;
411 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
412 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
413 		sge->length = mbuf->data_len;
414 		mbuf = mbuf->next;
415 	}
416 
417 	if (sess->dir == DIR_DEC) {
418 		sge++;
419 		old_icv = (uint8_t *)(sge + 1);
420 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
421 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
422 		sge->length = icv_len;
423 	}
424 
425 	DPAA2_SET_FLE_FIN(sge);
426 	if (auth_only_len) {
427 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
428 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
429 	}
430 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
431 
432 	return 0;
433 }
434 
435 static inline int
436 build_authenc_gcm_fd(dpaa2_sec_session *sess,
437 		     struct rte_crypto_op *op,
438 		     struct qbman_fd *fd, uint16_t bpid)
439 {
440 	struct rte_crypto_sym_op *sym_op = op->sym;
441 	struct ctxt_priv *priv = sess->ctxt;
442 	struct qbman_fle *fle, *sge;
443 	struct sec_flow_context *flc;
444 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
445 	int icv_len = sess->digest_length, retval;
446 	uint8_t *old_icv;
447 	struct rte_mbuf *dst;
448 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
449 			sess->iv.offset);
450 
451 	if (sym_op->m_dst)
452 		dst = sym_op->m_dst;
453 	else
454 		dst = sym_op->m_src;
455 
456 	/* TODO: we are using the first FLE entry to store the op pointer and
457 	 * session ctxt. We do not know which FLE has the mbuf stored,
458 	 * so while retrieving we go back one FLE from the FD ADDR
459 	 * to get the op (and hence the mbuf) from the previous FLE.
460 	 * A better approach would be to use the inline mbuf.
461 	 */
462 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
463 	if (retval) {
464 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
465 		return -ENOMEM;
466 	}
467 	memset(fle, 0, FLE_POOL_BUF_SIZE);
468 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
469 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
470 	fle = fle + 1;
471 	sge = fle + 2;
472 	if (likely(bpid < MAX_BPID)) {
473 		DPAA2_SET_FD_BPID(fd, bpid);
474 		DPAA2_SET_FLE_BPID(fle, bpid);
475 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
476 		DPAA2_SET_FLE_BPID(sge, bpid);
477 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
478 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
479 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
480 	} else {
481 		DPAA2_SET_FD_IVP(fd);
482 		DPAA2_SET_FLE_IVP(fle);
483 		DPAA2_SET_FLE_IVP((fle + 1));
484 		DPAA2_SET_FLE_IVP(sge);
485 		DPAA2_SET_FLE_IVP((sge + 1));
486 		DPAA2_SET_FLE_IVP((sge + 2));
487 		DPAA2_SET_FLE_IVP((sge + 3));
488 	}
489 
490 	/* Save the shared descriptor */
491 	flc = &priv->flc_desc[0].flc;
492 	/* Configure FD as a FRAME LIST */
493 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
494 	DPAA2_SET_FD_COMPOUND_FMT(fd);
495 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
496 
497 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
498 		   "iv-len=%d data_off: 0x%x\n",
499 		   sym_op->aead.data.offset,
500 		   sym_op->aead.data.length,
501 		   sess->digest_length,
502 		   sess->iv.length,
503 		   sym_op->m_src->data_off);
504 
505 	/* Configure Output FLE with Scatter/Gather Entry */
506 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
507 	if (auth_only_len)
508 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
509 	fle->length = (sess->dir == DIR_ENC) ?
510 			(sym_op->aead.data.length + icv_len) :
511 			sym_op->aead.data.length;
512 
513 	DPAA2_SET_FLE_SG_EXT(fle);
514 
515 	/* Configure Output SGE for Encap/Decap */
516 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
517 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
518 	sge->length = sym_op->aead.data.length;
519 
520 	if (sess->dir == DIR_ENC) {
521 		sge++;
522 		DPAA2_SET_FLE_ADDR(sge,
523 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
524 		sge->length = sess->digest_length;
525 	}
526 	DPAA2_SET_FLE_FIN(sge);
527 
528 	sge++;
529 	fle++;
530 
531 	/* Configure Input FLE with Scatter/Gather Entry */
532 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
533 	DPAA2_SET_FLE_SG_EXT(fle);
534 	DPAA2_SET_FLE_FIN(fle);
535 	fle->length = (sess->dir == DIR_ENC) ?
536 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
537 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
538 		 sess->digest_length);
539 
540 	/* Configure Input SGE for Encap/Decap */
541 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
542 	sge->length = sess->iv.length;
543 	sge++;
544 	if (auth_only_len) {
545 		DPAA2_SET_FLE_ADDR(sge,
546 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
547 		sge->length = auth_only_len;
548 		DPAA2_SET_FLE_BPID(sge, bpid);
549 		sge++;
550 	}
551 
552 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
553 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
554 				sym_op->m_src->data_off);
555 	sge->length = sym_op->aead.data.length;
556 	if (sess->dir == DIR_DEC) {
557 		sge++;
558 		old_icv = (uint8_t *)(sge + 1);
559 		memcpy(old_icv,	sym_op->aead.digest.data,
560 		       sess->digest_length);
561 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
562 		sge->length = sess->digest_length;
563 	}
564 	DPAA2_SET_FLE_FIN(sge);
565 
566 	if (auth_only_len) {
567 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
568 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
569 	}
570 
571 	DPAA2_SET_FD_LEN(fd, fle->length);
572 	return 0;
573 }
574 
575 static inline int
576 build_authenc_sg_fd(dpaa2_sec_session *sess,
577 		 struct rte_crypto_op *op,
578 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
579 {
580 	struct rte_crypto_sym_op *sym_op = op->sym;
581 	struct ctxt_priv *priv = sess->ctxt;
582 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
583 	struct sec_flow_context *flc;
584 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
585 				sym_op->auth.data.offset;
586 	uint16_t auth_tail_len = sym_op->auth.data.length -
587 				sym_op->cipher.data.length - auth_hdr_len;
588 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
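	/* Pack the auth-only regions around the cipher region: the lower
	 * 16 bits carry the auth header length (bytes before the cipher
	 * offset) and the upper 16 bits the auth tail length.
	 */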
589 	int icv_len = sess->digest_length;
590 	uint8_t *old_icv;
591 	struct rte_mbuf *mbuf;
592 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
593 			sess->iv.offset);
594 
595 	if (sym_op->m_dst)
596 		mbuf = sym_op->m_dst;
597 	else
598 		mbuf = sym_op->m_src;
599 
600 	/* The first FLE entry is scratch space for the op pointer and session ctxt */
601 	fle = (struct qbman_fle *)rte_malloc(NULL,
602 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
603 			RTE_CACHE_LINE_SIZE);
604 	if (unlikely(!fle)) {
605 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
606 		return -ENOMEM;
607 	}
608 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
609 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
610 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
611 
612 	op_fle = fle + 1;
613 	ip_fle = fle + 2;
614 	sge = fle + 3;
615 
616 	/* Save the shared descriptor */
617 	flc = &priv->flc_desc[0].flc;
618 
619 	/* Configure FD as a FRAME LIST */
620 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
621 	DPAA2_SET_FD_COMPOUND_FMT(fd);
622 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
623 
624 	DPAA2_SEC_DP_DEBUG(
625 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
626 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
627 		sym_op->auth.data.offset,
628 		sym_op->auth.data.length,
629 		sess->digest_length,
630 		sym_op->cipher.data.offset,
631 		sym_op->cipher.data.length,
632 		sess->iv.length,
633 		sym_op->m_src->data_off);
634 
635 	/* Configure Output FLE with Scatter/Gather Entry */
636 	DPAA2_SET_FLE_SG_EXT(op_fle);
637 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
638 
639 	if (auth_only_len)
640 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
641 
642 	op_fle->length = (sess->dir == DIR_ENC) ?
643 			(sym_op->cipher.data.length + icv_len) :
644 			sym_op->cipher.data.length;
645 
646 	/* Configure Output SGE for Encap/Decap */
647 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
648 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
649 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
650 
651 	mbuf = mbuf->next;
652 	/* o/p segs */
653 	while (mbuf) {
654 		sge++;
655 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
656 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
657 		sge->length = mbuf->data_len;
658 		mbuf = mbuf->next;
659 	}
660 	sge->length -= icv_len;
661 
662 	if (sess->dir == DIR_ENC) {
663 		sge++;
664 		DPAA2_SET_FLE_ADDR(sge,
665 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
666 		sge->length = icv_len;
667 	}
668 	DPAA2_SET_FLE_FIN(sge);
669 
670 	sge++;
671 	mbuf = sym_op->m_src;
672 
673 	/* Configure Input FLE with Scatter/Gather Entry */
674 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
675 	DPAA2_SET_FLE_SG_EXT(ip_fle);
676 	DPAA2_SET_FLE_FIN(ip_fle);
677 	ip_fle->length = (sess->dir == DIR_ENC) ?
678 			(sym_op->auth.data.length + sess->iv.length) :
679 			(sym_op->auth.data.length + sess->iv.length +
680 			 icv_len);
681 
682 	/* Configure Input SGE for Encap/Decap */
683 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
684 	sge->length = sess->iv.length;
685 
686 	sge++;
687 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
688 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
689 				mbuf->data_off);
690 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
691 
692 	mbuf = mbuf->next;
693 	/* i/p segs */
694 	while (mbuf) {
695 		sge++;
696 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
697 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
698 		sge->length = mbuf->data_len;
699 		mbuf = mbuf->next;
700 	}
701 	sge->length -= icv_len;
702 
703 	if (sess->dir == DIR_DEC) {
704 		sge++;
705 		old_icv = (uint8_t *)(sge + 1);
706 		memcpy(old_icv,	sym_op->auth.digest.data,
707 		       icv_len);
708 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
709 		sge->length = icv_len;
710 	}
711 
712 	DPAA2_SET_FLE_FIN(sge);
713 	if (auth_only_len) {
714 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
715 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
716 	}
717 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
718 
719 	return 0;
720 }
721 
722 static inline int
723 build_authenc_fd(dpaa2_sec_session *sess,
724 		 struct rte_crypto_op *op,
725 		 struct qbman_fd *fd, uint16_t bpid)
726 {
727 	struct rte_crypto_sym_op *sym_op = op->sym;
728 	struct ctxt_priv *priv = sess->ctxt;
729 	struct qbman_fle *fle, *sge;
730 	struct sec_flow_context *flc;
731 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
732 				sym_op->auth.data.offset;
733 	uint16_t auth_tail_len = sym_op->auth.data.length -
734 				sym_op->cipher.data.length - auth_hdr_len;
735 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
736 
737 	int icv_len = sess->digest_length, retval;
738 	uint8_t *old_icv;
739 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
740 			sess->iv.offset);
741 	struct rte_mbuf *dst;
742 
743 	if (sym_op->m_dst)
744 		dst = sym_op->m_dst;
745 	else
746 		dst = sym_op->m_src;
747 
748 	/* We use the first FLE entry to store the op pointer and session
749 	 * ctxt. We do not know which FLE has the mbuf stored, so while
750 	 * retrieving we go back one FLE from the FD ADDR to get the op
751 	 * (and hence the mbuf) from the previous FLE.
752 	 * A better approach would be to use the inline mbuf.
753 	 */
754 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
755 	if (retval) {
756 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
757 		return -ENOMEM;
758 	}
759 	memset(fle, 0, FLE_POOL_BUF_SIZE);
760 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
761 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
762 	fle = fle + 1;
763 	sge = fle + 2;
764 	if (likely(bpid < MAX_BPID)) {
765 		DPAA2_SET_FD_BPID(fd, bpid);
766 		DPAA2_SET_FLE_BPID(fle, bpid);
767 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
768 		DPAA2_SET_FLE_BPID(sge, bpid);
769 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
770 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
771 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
772 	} else {
773 		DPAA2_SET_FD_IVP(fd);
774 		DPAA2_SET_FLE_IVP(fle);
775 		DPAA2_SET_FLE_IVP((fle + 1));
776 		DPAA2_SET_FLE_IVP(sge);
777 		DPAA2_SET_FLE_IVP((sge + 1));
778 		DPAA2_SET_FLE_IVP((sge + 2));
779 		DPAA2_SET_FLE_IVP((sge + 3));
780 	}
781 
782 	/* Save the shared descriptor */
783 	flc = &priv->flc_desc[0].flc;
784 	/* Configure FD as a FRAME LIST */
785 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
786 	DPAA2_SET_FD_COMPOUND_FMT(fd);
787 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
788 
789 	DPAA2_SEC_DP_DEBUG(
790 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
791 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
792 		sym_op->auth.data.offset,
793 		sym_op->auth.data.length,
794 		sess->digest_length,
795 		sym_op->cipher.data.offset,
796 		sym_op->cipher.data.length,
797 		sess->iv.length,
798 		sym_op->m_src->data_off);
799 
800 	/* Configure Output FLE with Scatter/Gather Entry */
801 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
802 	if (auth_only_len)
803 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
804 	fle->length = (sess->dir == DIR_ENC) ?
805 			(sym_op->cipher.data.length + icv_len) :
806 			sym_op->cipher.data.length;
807 
808 	DPAA2_SET_FLE_SG_EXT(fle);
809 
810 	/* Configure Output SGE for Encap/Decap */
811 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
812 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
813 				dst->data_off);
814 	sge->length = sym_op->cipher.data.length;
815 
816 	if (sess->dir == DIR_ENC) {
817 		sge++;
818 		DPAA2_SET_FLE_ADDR(sge,
819 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
820 		sge->length = sess->digest_length;
821 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
822 					sess->iv.length));
823 	}
824 	DPAA2_SET_FLE_FIN(sge);
825 
826 	sge++;
827 	fle++;
828 
829 	/* Configure Input FLE with Scatter/Gather Entry */
830 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
831 	DPAA2_SET_FLE_SG_EXT(fle);
832 	DPAA2_SET_FLE_FIN(fle);
833 	fle->length = (sess->dir == DIR_ENC) ?
834 			(sym_op->auth.data.length + sess->iv.length) :
835 			(sym_op->auth.data.length + sess->iv.length +
836 			 sess->digest_length);
837 
838 	/* Configure Input SGE for Encap/Decap */
839 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
840 	sge->length = sess->iv.length;
841 	sge++;
842 
843 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
844 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
845 				sym_op->m_src->data_off);
846 	sge->length = sym_op->auth.data.length;
847 	if (sess->dir == DIR_DEC) {
848 		sge++;
849 		old_icv = (uint8_t *)(sge + 1);
850 		memcpy(old_icv,	sym_op->auth.digest.data,
851 		       sess->digest_length);
852 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
853 		sge->length = sess->digest_length;
854 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
855 				 sess->digest_length +
856 				 sess->iv.length));
857 	}
858 	DPAA2_SET_FLE_FIN(sge);
859 	if (auth_only_len) {
860 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
861 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
862 	}
863 	return 0;
864 }
865 
866 static inline int build_auth_sg_fd(
867 		dpaa2_sec_session *sess,
868 		struct rte_crypto_op *op,
869 		struct qbman_fd *fd,
870 		__rte_unused uint16_t bpid)
871 {
872 	struct rte_crypto_sym_op *sym_op = op->sym;
873 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
874 	struct sec_flow_context *flc;
875 	struct ctxt_priv *priv = sess->ctxt;
876 	int data_len, data_offset;
877 	uint8_t *old_digest;
878 	struct rte_mbuf *mbuf;
879 
880 	data_len = sym_op->auth.data.length;
881 	data_offset = sym_op->auth.data.offset;
882 
883 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
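	/* SNOW 3G UIA2 and ZUC EIA3 express auth offset/length in bits in
	 * the crypto API, while the SEC descriptor works in bytes; reject
	 * non-byte-aligned values and convert bits to bytes.
	 */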
884 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
885 		if ((data_len & 7) || (data_offset & 7)) {
886 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
887 			return -ENOTSUP;
888 		}
889 
890 		data_len = data_len >> 3;
891 		data_offset = data_offset >> 3;
892 	}
893 
894 	mbuf = sym_op->m_src;
895 	fle = (struct qbman_fle *)rte_malloc(NULL,
896 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
897 			RTE_CACHE_LINE_SIZE);
898 	if (unlikely(!fle)) {
899 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
900 		return -ENOMEM;
901 	}
902 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
903 	/* The first (scratch) FLE entry stores the op pointer and session ctxt */
904 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
905 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
906 	op_fle = fle + 1;
907 	ip_fle = fle + 2;
908 	sge = fle + 3;
909 
910 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
911 	/* sg FD */
912 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
913 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
914 	DPAA2_SET_FD_COMPOUND_FMT(fd);
915 
916 	/* o/p fle */
917 	DPAA2_SET_FLE_ADDR(op_fle,
918 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
919 	op_fle->length = sess->digest_length;
920 
921 	/* i/p fle */
922 	DPAA2_SET_FLE_SG_EXT(ip_fle);
923 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
924 	ip_fle->length = data_len;
925 
926 	if (sess->iv.length) {
927 		uint8_t *iv_ptr;
928 
929 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
930 						   sess->iv.offset);
931 
932 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
933 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
934 			sge->length = 12;
935 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
936 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
937 			sge->length = 8;
938 		} else {
939 			sge->length = sess->iv.length;
940 		}
941 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
942 		ip_fle->length += sge->length;
943 		sge++;
944 	}
945 	/* i/p 1st seg */
946 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
947 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
948 
949 	if (data_len <= (mbuf->data_len - data_offset)) {
950 		sge->length = data_len;
951 		data_len = 0;
952 	} else {
953 		sge->length = mbuf->data_len - data_offset;
954 
955 		/* remaining i/p segs */
956 		while ((data_len = data_len - sge->length) &&
957 		       (mbuf = mbuf->next)) {
958 			sge++;
959 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
960 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
961 			if (data_len > mbuf->data_len)
962 				sge->length = mbuf->data_len;
963 			else
964 				sge->length = data_len;
965 		}
966 	}
967 
968 	if (sess->dir == DIR_DEC) {
969 		/* Digest verification case */
970 		sge++;
971 		old_digest = (uint8_t *)(sge + 1);
972 		rte_memcpy(old_digest, sym_op->auth.digest.data,
973 			   sess->digest_length);
974 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
975 		sge->length = sess->digest_length;
976 		ip_fle->length += sess->digest_length;
977 	}
978 	DPAA2_SET_FLE_FIN(sge);
979 	DPAA2_SET_FLE_FIN(ip_fle);
980 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
981 
982 	return 0;
983 }
984 
985 static inline int
986 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
987 	      struct qbman_fd *fd, uint16_t bpid)
988 {
989 	struct rte_crypto_sym_op *sym_op = op->sym;
990 	struct qbman_fle *fle, *sge;
991 	struct sec_flow_context *flc;
992 	struct ctxt_priv *priv = sess->ctxt;
993 	int data_len, data_offset;
994 	uint8_t *old_digest;
995 	int retval;
996 
997 	data_len = sym_op->auth.data.length;
998 	data_offset = sym_op->auth.data.offset;
999 
1000 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1001 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1002 		if ((data_len & 7) || (data_offset & 7)) {
1003 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1004 			return -ENOTSUP;
1005 		}
1006 
1007 		data_len = data_len >> 3;
1008 		data_offset = data_offset >> 3;
1009 	}
1010 
1011 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1012 	if (retval) {
1013 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1014 		return -ENOMEM;
1015 	}
1016 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1017 	/* TODO: the first FLE entry stores the op pointer and session ctxt.
1018 	 * We do not know which FLE has the mbuf stored, so while
1019 	 * retrieving we go back one FLE from the FD ADDR to get the op
1020 	 * (and hence the mbuf) from the previous FLE.
1021 	 * A better approach would be to use the inline mbuf.
1022 	 */
1023 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1024 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1025 	fle = fle + 1;
1026 	sge = fle + 2;
1027 
1028 	if (likely(bpid < MAX_BPID)) {
1029 		DPAA2_SET_FD_BPID(fd, bpid);
1030 		DPAA2_SET_FLE_BPID(fle, bpid);
1031 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1032 		DPAA2_SET_FLE_BPID(sge, bpid);
1033 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1034 	} else {
1035 		DPAA2_SET_FD_IVP(fd);
1036 		DPAA2_SET_FLE_IVP(fle);
1037 		DPAA2_SET_FLE_IVP((fle + 1));
1038 		DPAA2_SET_FLE_IVP(sge);
1039 		DPAA2_SET_FLE_IVP((sge + 1));
1040 	}
1041 
1042 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1043 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1044 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1045 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1046 
1047 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1048 	fle->length = sess->digest_length;
1049 	fle++;
1050 
1051 	/* Setting input FLE */
1052 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1053 	DPAA2_SET_FLE_SG_EXT(fle);
1054 	fle->length = data_len;
1055 
1056 	if (sess->iv.length) {
1057 		uint8_t *iv_ptr;
1058 
1059 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1060 						   sess->iv.offset);
1061 
1062 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1063 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1064 			sge->length = 12;
1065 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1066 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1067 			sge->length = 8;
1068 		} else {
1069 			sge->length = sess->iv.length;
1070 		}
1071 
1072 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1073 		fle->length = fle->length + sge->length;
1074 		sge++;
1075 	}
1076 
1077 	/* Setting data to authenticate */
1078 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1079 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1080 	sge->length = data_len;
1081 
1082 	if (sess->dir == DIR_DEC) {
1083 		sge++;
1084 		old_digest = (uint8_t *)(sge + 1);
1085 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1086 			   sess->digest_length);
1087 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1088 		sge->length = sess->digest_length;
1089 		fle->length = fle->length + sess->digest_length;
1090 	}
1091 
1092 	DPAA2_SET_FLE_FIN(sge);
1093 	DPAA2_SET_FLE_FIN(fle);
1094 	DPAA2_SET_FD_LEN(fd, fle->length);
1095 
1096 	return 0;
1097 }
1098 
1099 static int
1100 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1101 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1102 {
1103 	struct rte_crypto_sym_op *sym_op = op->sym;
1104 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1105 	int data_len, data_offset;
1106 	struct sec_flow_context *flc;
1107 	struct ctxt_priv *priv = sess->ctxt;
1108 	struct rte_mbuf *mbuf;
1109 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1110 			sess->iv.offset);
1111 
1112 	data_len = sym_op->cipher.data.length;
1113 	data_offset = sym_op->cipher.data.offset;
1114 
1115 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1116 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1117 		if ((data_len & 7) || (data_offset & 7)) {
1118 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1119 			return -ENOTSUP;
1120 		}
1121 
1122 		data_len = data_len >> 3;
1123 		data_offset = data_offset >> 3;
1124 	}
1125 
1126 	if (sym_op->m_dst)
1127 		mbuf = sym_op->m_dst;
1128 	else
1129 		mbuf = sym_op->m_src;
1130 
1131 	/* Allocate the FLE list: a scratch FLE plus out/in FLEs and SG entries */
1132 	fle = (struct qbman_fle *)rte_malloc(NULL,
1133 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1134 			RTE_CACHE_LINE_SIZE);
1135 	if (!fle) {
1136 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1137 		return -ENOMEM;
1138 	}
1139 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1140 	/* The first (scratch) FLE entry stores the op pointer and session ctxt */
1141 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1142 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1143 
1144 	op_fle = fle + 1;
1145 	ip_fle = fle + 2;
1146 	sge = fle + 3;
1147 
1148 	flc = &priv->flc_desc[0].flc;
1149 
1150 	DPAA2_SEC_DP_DEBUG(
1151 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1152 		" data_off: 0x%x\n",
1153 		data_offset,
1154 		data_len,
1155 		sess->iv.length,
1156 		sym_op->m_src->data_off);
1157 
1158 	/* o/p fle */
1159 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1160 	op_fle->length = data_len;
1161 	DPAA2_SET_FLE_SG_EXT(op_fle);
1162 
1163 	/* o/p 1st seg */
1164 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1165 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1166 	sge->length = mbuf->data_len - data_offset;
1167 
1168 	mbuf = mbuf->next;
1169 	/* o/p segs */
1170 	while (mbuf) {
1171 		sge++;
1172 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1173 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1174 		sge->length = mbuf->data_len;
1175 		mbuf = mbuf->next;
1176 	}
1177 	DPAA2_SET_FLE_FIN(sge);
1178 
1179 	DPAA2_SEC_DP_DEBUG(
1180 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1181 		flc, fle, fle->addr_hi, fle->addr_lo,
1182 		fle->length);
1183 
1184 	/* i/p fle */
1185 	mbuf = sym_op->m_src;
1186 	sge++;
1187 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1188 	ip_fle->length = sess->iv.length + data_len;
1189 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1190 
1191 	/* i/p IV */
1192 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1193 	DPAA2_SET_FLE_OFFSET(sge, 0);
1194 	sge->length = sess->iv.length;
1195 
1196 	sge++;
1197 
1198 	/* i/p 1st seg */
1199 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1200 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1201 	sge->length = mbuf->data_len - data_offset;
1202 
1203 	mbuf = mbuf->next;
1204 	/* i/p segs */
1205 	while (mbuf) {
1206 		sge++;
1207 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1208 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1209 		sge->length = mbuf->data_len;
1210 		mbuf = mbuf->next;
1211 	}
1212 	DPAA2_SET_FLE_FIN(sge);
1213 	DPAA2_SET_FLE_FIN(ip_fle);
1214 
1215 	/* sg fd */
1216 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1217 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1218 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1219 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1220 
1221 	DPAA2_SEC_DP_DEBUG(
1222 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1223 		" off =%d, len =%d\n",
1224 		DPAA2_GET_FD_ADDR(fd),
1225 		DPAA2_GET_FD_BPID(fd),
1226 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1227 		DPAA2_GET_FD_OFFSET(fd),
1228 		DPAA2_GET_FD_LEN(fd));
1229 	return 0;
1230 }
1231 
1232 static int
1233 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1234 		struct qbman_fd *fd, uint16_t bpid)
1235 {
1236 	struct rte_crypto_sym_op *sym_op = op->sym;
1237 	struct qbman_fle *fle, *sge;
1238 	int retval, data_len, data_offset;
1239 	struct sec_flow_context *flc;
1240 	struct ctxt_priv *priv = sess->ctxt;
1241 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1242 			sess->iv.offset);
1243 	struct rte_mbuf *dst;
1244 
1245 	data_len = sym_op->cipher.data.length;
1246 	data_offset = sym_op->cipher.data.offset;
1247 
1248 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1249 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1250 		if ((data_len & 7) || (data_offset & 7)) {
1251 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1252 			return -ENOTSUP;
1253 		}
1254 
1255 		data_len = data_len >> 3;
1256 		data_offset = data_offset >> 3;
1257 	}
1258 
1259 	if (sym_op->m_dst)
1260 		dst = sym_op->m_dst;
1261 	else
1262 		dst = sym_op->m_src;
1263 
1264 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1265 	if (retval) {
1266 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1267 		return -ENOMEM;
1268 	}
1269 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1270 	/* TODO: the first FLE entry stores the op pointer and session ctxt.
1271 	 * We do not know which FLE has the mbuf stored, so while
1272 	 * retrieving we go back one FLE from the FD ADDR to get the op
1273 	 * (and hence the mbuf) from the previous FLE.
1274 	 * A better approach would be to use the inline mbuf.
1275 	 */
1276 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1277 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1278 	fle = fle + 1;
1279 	sge = fle + 2;
1280 
1281 	if (likely(bpid < MAX_BPID)) {
1282 		DPAA2_SET_FD_BPID(fd, bpid);
1283 		DPAA2_SET_FLE_BPID(fle, bpid);
1284 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1285 		DPAA2_SET_FLE_BPID(sge, bpid);
1286 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1287 	} else {
1288 		DPAA2_SET_FD_IVP(fd);
1289 		DPAA2_SET_FLE_IVP(fle);
1290 		DPAA2_SET_FLE_IVP((fle + 1));
1291 		DPAA2_SET_FLE_IVP(sge);
1292 		DPAA2_SET_FLE_IVP((sge + 1));
1293 	}
1294 
1295 	flc = &priv->flc_desc[0].flc;
1296 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1297 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1298 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1299 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1300 
1301 	DPAA2_SEC_DP_DEBUG(
1302 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1303 		" data_off: 0x%x\n",
1304 		data_offset,
1305 		data_len,
1306 		sess->iv.length,
1307 		sym_op->m_src->data_off);
1308 
1309 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1310 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1311 
1312 	fle->length = data_len + sess->iv.length;
1313 
1314 	DPAA2_SEC_DP_DEBUG(
1315 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1316 		flc, fle, fle->addr_hi, fle->addr_lo,
1317 		fle->length);
1318 
1319 	fle++;
1320 
1321 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1322 	fle->length = data_len + sess->iv.length;
1323 
1324 	DPAA2_SET_FLE_SG_EXT(fle);
1325 
1326 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1327 	sge->length = sess->iv.length;
1328 
1329 	sge++;
1330 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1331 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1332 
1333 	sge->length = data_len;
1334 	DPAA2_SET_FLE_FIN(sge);
1335 	DPAA2_SET_FLE_FIN(fle);
1336 
1337 	DPAA2_SEC_DP_DEBUG(
1338 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1339 		" off =%d, len =%d\n",
1340 		DPAA2_GET_FD_ADDR(fd),
1341 		DPAA2_GET_FD_BPID(fd),
1342 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1343 		DPAA2_GET_FD_OFFSET(fd),
1344 		DPAA2_GET_FD_LEN(fd));
1345 
1346 	return 0;
1347 }
1348 
1349 static inline int
1350 build_sec_fd(struct rte_crypto_op *op,
1351 	     struct qbman_fd *fd, uint16_t bpid)
1352 {
1353 	int ret = -1;
1354 	dpaa2_sec_session *sess;
1355 
1356 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1357 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1358 				op->sym->session, cryptodev_driver_id);
1359 #ifdef RTE_LIB_SECURITY
1360 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1361 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1362 				op->sym->sec_session);
1363 #endif
1364 	else
1365 		return -ENOTSUP;
1366 
1367 	if (!sess)
1368 		return -EINVAL;
1369 
1370 	/* If any of the buffers is segmented, use the SG variants */
1371 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1372 		  ((op->sym->m_dst != NULL) &&
1373 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1374 		switch (sess->ctxt_type) {
1375 		case DPAA2_SEC_CIPHER:
1376 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1377 			break;
1378 		case DPAA2_SEC_AUTH:
1379 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1380 			break;
1381 		case DPAA2_SEC_AEAD:
1382 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1383 			break;
1384 		case DPAA2_SEC_CIPHER_HASH:
1385 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1386 			break;
1387 #ifdef RTE_LIB_SECURITY
1388 		case DPAA2_SEC_IPSEC:
1389 		case DPAA2_SEC_PDCP:
1390 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1391 			break;
1392 #endif
1393 		case DPAA2_SEC_HASH_CIPHER:
1394 		default:
1395 			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
1396 		}
1397 	} else {
1398 		switch (sess->ctxt_type) {
1399 		case DPAA2_SEC_CIPHER:
1400 			ret = build_cipher_fd(sess, op, fd, bpid);
1401 			break;
1402 		case DPAA2_SEC_AUTH:
1403 			ret = build_auth_fd(sess, op, fd, bpid);
1404 			break;
1405 		case DPAA2_SEC_AEAD:
1406 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1407 			break;
1408 		case DPAA2_SEC_CIPHER_HASH:
1409 			ret = build_authenc_fd(sess, op, fd, bpid);
1410 			break;
1411 #ifdef RTE_LIB_SECURITY
1412 		case DPAA2_SEC_IPSEC:
1413 			ret = build_proto_fd(sess, op, fd, bpid);
1414 			break;
1415 		case DPAA2_SEC_PDCP:
1416 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1417 			break;
1418 #endif
1419 		case DPAA2_SEC_HASH_CIPHER:
1420 		default:
1421 			DPAA2_SEC_ERR("error: Unsupported session");
1422 			ret = -ENOTSUP;
1423 		}
1424 	}
1425 	return ret;
1426 }
1427 
1428 static uint16_t
1429 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1430 			uint16_t nb_ops)
1431 {
1432 	/* Transmit the frames to the given device and VQ */
1433 	uint32_t loop;
1434 	int32_t ret;
1435 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1436 	uint32_t frames_to_send, retry_count;
1437 	struct qbman_eq_desc eqdesc;
1438 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1439 	struct qbman_swp *swp;
1440 	uint16_t num_tx = 0;
1441 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1442 	/* TODO: need to support multiple buffer pools */
1443 	uint16_t bpid;
1444 	struct rte_mempool *mb_pool;
1445 
1446 	if (unlikely(nb_ops == 0))
1447 		return 0;
1448 
1449 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1450 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1451 		return 0;
1452 	}
1453 	/* Prepare the enqueue descriptor */
1454 	qbman_eq_desc_clear(&eqdesc);
1455 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1456 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1457 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1458 
1459 	if (!DPAA2_PER_LCORE_DPIO) {
1460 		ret = dpaa2_affine_qbman_swp();
1461 		if (ret) {
1462 			DPAA2_SEC_ERR(
1463 				"Failed to allocate IO portal, tid: %d\n",
1464 				rte_gettid());
1465 			return 0;
1466 		}
1467 	}
1468 	swp = DPAA2_PER_LCORE_PORTAL;
1469 
1470 	while (nb_ops) {
1471 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1472 			dpaa2_eqcr_size : nb_ops;
1473 
1474 		for (loop = 0; loop < frames_to_send; loop++) {
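			/* A stored sequence number means this op arrived via
			 * an event queue with a held DQRR entry; enqueue with
			 * the DCA flag so QBMAN consumes that entry as part
			 * of this enqueue.
			 */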
1475 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1476 				uint8_t dqrr_index =
1477 					*dpaa2_seqn((*ops)->sym->m_src) - 1;
1478 
1479 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1480 				DPAA2_PER_LCORE_DQRR_SIZE--;
1481 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1482 				*dpaa2_seqn((*ops)->sym->m_src) =
1483 					DPAA2_INVALID_MBUF_SEQN;
1484 			}
1485 
1486 			/* Clear the unused FD fields before sending */
1487 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1488 			mb_pool = (*ops)->sym->m_src->pool;
1489 			bpid = mempool_to_bpid(mb_pool);
1490 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1491 			if (ret) {
1492 				DPAA2_SEC_ERR("error: Improper packet contents"
1493 					      " for crypto operation");
1494 				goto skip_tx;
1495 			}
1496 			ops++;
1497 		}
1498 
1499 		loop = 0;
1500 		retry_count = 0;
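		/* QBMAN may accept fewer frames than requested; retry the
		 * remainder, giving up (and accounting the unsent frames)
		 * after DPAA2_MAX_TX_RETRY_COUNT consecutive busy returns.
		 */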
1501 		while (loop < frames_to_send) {
1502 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1503 							 &fd_arr[loop],
1504 							 &flags[loop],
1505 							 frames_to_send - loop);
1506 			if (unlikely(ret < 0)) {
1507 				retry_count++;
1508 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1509 					num_tx += loop;
1510 					nb_ops -= loop;
1511 					goto skip_tx;
1512 				}
1513 			} else {
1514 				loop += ret;
1515 				retry_count = 0;
1516 			}
1517 		}
1518 
1519 		num_tx += loop;
1520 		nb_ops -= loop;
1521 	}
1522 skip_tx:
1523 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1524 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1525 	return num_tx;
1526 }
1527 
1528 #ifdef RTE_LIB_SECURITY
1529 static inline struct rte_crypto_op *
1530 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1531 {
1532 	struct rte_crypto_op *op;
1533 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1534 	int16_t diff = 0;
1535 	dpaa2_sec_session *sess_priv __rte_unused;
1536 
1537 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1538 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1539 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1540 
1541 	diff = len - mbuf->pkt_len;
1542 	mbuf->pkt_len += diff;
1543 	mbuf->data_len += diff;
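	/* Undo the stashing done in build_proto_fd(): buf_iova carried the
	 * op pointer across the SEC round-trip while the real IOVA was
	 * parked in aead.digest.phys_addr.
	 */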
1544 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1545 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1546 	op->sym->aead.digest.phys_addr = 0L;
1547 
1548 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1549 				op->sym->sec_session);
1550 	if (sess_priv->dir == DIR_ENC)
1551 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1552 	else
1553 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1554 
1555 	return op;
1556 }
1557 #endif
1558 
1559 static inline struct rte_crypto_op *
1560 sec_fd_to_mbuf(const struct qbman_fd *fd)
1561 {
1562 	struct qbman_fle *fle;
1563 	struct rte_crypto_op *op;
1564 	struct ctxt_priv *priv;
1565 	struct rte_mbuf *dst, *src;
1566 
1567 #ifdef RTE_LIB_SECURITY
1568 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1569 		return sec_simple_fd_to_mbuf(fd);
1570 #endif
1571 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1572 
1573 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1574 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1575 
1576 	/* The first (scratch) FLE entry stores the op pointer and session
1577 	 * ctxt. Since we do not know which FLE holds the mbuf, we go back
1578 	 * one FLE from the FD ADDR and recover the op (and hence the mbuf)
1579 	 * from the previous FLE.
1580 	 * A better approach would be to use the inline mbuf.
1581 	 */
1582 
1583 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1584 		/* TODO complete it. */
1585 		DPAA2_SEC_ERR("error: non inline buffer");
1586 		return NULL;
1587 	}
1588 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1589 
1590 	/* Prefetch the op */
1591 	src = op->sym->m_src;
1592 	rte_prefetch0(src);
1593 
1594 	if (op->sym->m_dst) {
1595 		dst = op->sym->m_dst;
1596 		rte_prefetch0(dst);
1597 	} else
1598 		dst = src;
1599 
1600 #ifdef RTE_LIB_SECURITY
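	/* Protocol offload can change the frame size (encap adds headers and
	 * trailer), so propagate the FD length to the mbuf chain, charging
	 * any delta to the last segment.
	 */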
1601 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1602 		uint16_t len = DPAA2_GET_FD_LEN(fd);
1603 		dst->pkt_len = len;
1604 		while (dst->next != NULL) {
1605 			len -= dst->data_len;
1606 			dst = dst->next;
1607 		}
1608 		dst->data_len = len;
1609 	}
1610 #endif
1611 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1612 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1613 		(void *)dst,
1614 		dst->buf_addr,
1615 		DPAA2_GET_FD_ADDR(fd),
1616 		DPAA2_GET_FD_BPID(fd),
1617 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1618 		DPAA2_GET_FD_OFFSET(fd),
1619 		DPAA2_GET_FD_LEN(fd));
1620 
1621 	/* free the fle memory */
1622 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1623 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1624 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1625 	} else
1626 		rte_free((void *)(fle-1));
1627 
1628 	return op;
1629 }
1630 
1631 static uint16_t
1632 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1633 			uint16_t nb_ops)
1634 {
1635 	/* Receive frames for a given device and VQ */
1636 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1637 	struct qbman_result *dq_storage;
1638 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1639 	int ret, num_rx = 0;
1640 	uint8_t is_last = 0, status;
1641 	struct qbman_swp *swp;
1642 	const struct qbman_fd *fd;
1643 	struct qbman_pull_desc pulldesc;
1644 
1645 	if (!DPAA2_PER_LCORE_DPIO) {
1646 		ret = dpaa2_affine_qbman_swp();
1647 		if (ret) {
1648 			DPAA2_SEC_ERR(
1649 				"Failed to allocate IO portal, tid: %d\n",
1650 				rte_gettid());
1651 			return 0;
1652 		}
1653 	}
1654 	swp = DPAA2_PER_LCORE_PORTAL;
1655 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1656 
1657 	qbman_pull_desc_clear(&pulldesc);
1658 	qbman_pull_desc_set_numframes(&pulldesc,
1659 				      (nb_ops > dpaa2_dqrr_size) ?
1660 				      dpaa2_dqrr_size : nb_ops);
1661 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1662 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1663 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1664 				    1);
1665 
1666 	/* Issue a volatile dequeue command. */
1667 	while (1) {
1668 		if (qbman_swp_pull(swp, &pulldesc)) {
1669 			DPAA2_SEC_WARN(
1670 				"SEC VDQ command is not issued : QBMAN busy");
1671 			/* Portal was busy, try again */
1672 			continue;
1673 		}
1674 		break;
1675 	}
1676 
1677 	/* Receive packets until the Last Dequeue entry is found with
1678 	 * respect to the PULL command issued above.
1679 	 */
1680 	while (!is_last) {
1681 		/* Check if the previously issued command has completed.
1682 		 * The SWP also appears to be shared between the Ethernet
1683 		 * driver and the SEC driver.
1684 		 */
1685 		while (!qbman_check_command_complete(dq_storage))
1686 			;
1687 
1688 		/* Loop until dq_storage is updated with a
1689 		 * new token by QBMAN
1690 		 */
1691 		while (!qbman_check_new_result(dq_storage))
1692 			;
1693 		/* Check whether the last pull command has expired, and set
1694 		 * the loop-termination condition.
1695 		 */
1696 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1697 			is_last = 1;
1698 			/* Check for valid frame. */
1699 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1700 			if (unlikely(
1701 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1702 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1703 				continue;
1704 			}
1705 		}
1706 
1707 		fd = qbman_result_DQ_fd(dq_storage);
1708 		ops[num_rx] = sec_fd_to_mbuf(fd);
1709 
1710 		if (unlikely(fd->simple.frc)) {
1711 			/* TODO Parse SEC errors */
1712 			DPAA2_SEC_ERR("SEC returned Error - %x",
1713 				      fd->simple.frc);
1714 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1715 		} else {
1716 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1717 		}
1718 
1719 		num_rx++;
1720 		dq_storage++;
1721 	} /* End of Packet Rx loop */
1722 
1723 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1724 
1725 	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1726 	/* Return the total number of packets received to the DPAA2 app */
1727 	return num_rx;
1728 }
1729 
1730 /** Release queue pair */
1731 static int
1732 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1733 {
1734 	struct dpaa2_sec_qp *qp =
1735 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1736 
1737 	PMD_INIT_FUNC_TRACE();
1738 
1739 	if (qp->rx_vq.q_storage) {
1740 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1741 		rte_free(qp->rx_vq.q_storage);
1742 	}
1743 	rte_free(qp);
1744 
1745 	dev->data->queue_pairs[queue_pair_id] = NULL;
1746 
1747 	return 0;
1748 }
1749 
1750 /** Setup a queue pair */
1751 static int
1752 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1753 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1754 		__rte_unused int socket_id)
1755 {
1756 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1757 	struct dpaa2_sec_qp *qp;
1758 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1759 	struct dpseci_rx_queue_cfg cfg;
1760 	int32_t retcode;
1761 
1762 	PMD_INIT_FUNC_TRACE();
1763 
1764 	/* If the qp is already set up, there is nothing to do. */
1765 	if (dev->data->queue_pairs[qp_id] != NULL) {
1766 		DPAA2_SEC_INFO("QP already setup");
1767 		return 0;
1768 	}
1769 
1770 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1771 		    dev, qp_id, qp_conf);
1772 
1773 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1774 
1775 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1776 			RTE_CACHE_LINE_SIZE);
1777 	if (!qp) {
1778 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1779 		return -ENOMEM;
1780 	}
1781 
1782 	qp->rx_vq.crypto_data = dev->data;
1783 	qp->tx_vq.crypto_data = dev->data;
1784 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1785 		sizeof(struct queue_storage_info_t),
1786 		RTE_CACHE_LINE_SIZE);
1787 	if (!qp->rx_vq.q_storage) {
1788 		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
1789 		return -ENOMEM;
1790 	}
1791 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1792 
1793 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1794 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
1795 		return -ENOMEM;
1796 	}
1797 
1798 	dev->data->queue_pairs[qp_id] = qp;
1799 
1800 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
1801 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1802 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1803 				      qp_id, &cfg);
1804 	return retcode;
1805 }
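
/*
 * Example (hypothetical application-side usage; dev_id and the values are
 * illustrative). Note that this PMD ignores qp_conf and socket_id (both
 * are __rte_unused above) and sizes the queue pair itself:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 */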
1806 
1807 /** Returns the size of the dpaa2_sec session structure */
1808 static unsigned int
1809 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1810 {
1811 	PMD_INIT_FUNC_TRACE();
1812 
1813 	return sizeof(dpaa2_sec_session);
1814 }
1815 
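/** Set up a cipher-only session and build its shared descriptor */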
1816 static int
1817 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1818 		      struct rte_crypto_sym_xform *xform,
1819 		      dpaa2_sec_session *session)
1820 {
1821 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1822 	struct alginfo cipherdata;
1823 	int bufsize, ret = 0;
1824 	struct ctxt_priv *priv;
1825 	struct sec_flow_context *flc;
1826 
1827 	PMD_INIT_FUNC_TRACE();
1828 
1829 	/* For SEC CIPHER only one descriptor is required. */
1830 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1831 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1832 			RTE_CACHE_LINE_SIZE);
1833 	if (priv == NULL) {
1834 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1835 		return -ENOMEM;
1836 	}
1837 
1838 	priv->fle_pool = dev_priv->fle_pool;
1839 
1840 	flc = &priv->flc_desc[0].flc;
1841 
1842 	session->ctxt_type = DPAA2_SEC_CIPHER;
1843 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1844 			RTE_CACHE_LINE_SIZE);
1845 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1846 		DPAA2_SEC_ERR("No Memory for cipher key");
1847 		rte_free(priv);
1848 		return -ENOMEM;
1849 	}
1850 	session->cipher_key.length = xform->cipher.key.length;
1851 
1852 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1853 	       xform->cipher.key.length);
1854 	cipherdata.key = (size_t)session->cipher_key.data;
1855 	cipherdata.keylen = session->cipher_key.length;
1856 	cipherdata.key_enc_flags = 0;
1857 	cipherdata.key_type = RTA_DATA_IMM;
1858 
1859 	/* Set IV parameters */
1860 	session->iv.offset = xform->cipher.iv.offset;
1861 	session->iv.length = xform->cipher.iv.length;
1862 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1863 				DIR_ENC : DIR_DEC;
1864 
1865 	switch (xform->cipher.algo) {
1866 	case RTE_CRYPTO_CIPHER_AES_CBC:
1867 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1868 		cipherdata.algmode = OP_ALG_AAI_CBC;
1869 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1870 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1871 						SHR_NEVER, &cipherdata,
1872 						session->iv.length,
1873 						session->dir);
1874 		break;
1875 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1876 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1877 		cipherdata.algmode = OP_ALG_AAI_CBC;
1878 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1879 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1880 						SHR_NEVER, &cipherdata,
1881 						session->iv.length,
1882 						session->dir);
1883 		break;
1884 	case RTE_CRYPTO_CIPHER_DES_CBC:
1885 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
1886 		cipherdata.algmode = OP_ALG_AAI_CBC;
1887 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1888 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1889 						SHR_NEVER, &cipherdata,
1890 						session->iv.length,
1891 						session->dir);
1892 		break;
1893 	case RTE_CRYPTO_CIPHER_AES_CTR:
1894 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1895 		cipherdata.algmode = OP_ALG_AAI_CTR;
1896 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1897 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1898 						SHR_NEVER, &cipherdata,
1899 						session->iv.length,
1900 						session->dir);
1901 		break;
1902 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1903 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1904 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1905 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1906 					      &cipherdata,
1907 					      session->dir);
1908 		break;
1909 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1910 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1911 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1912 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1913 					      &cipherdata,
1914 					      session->dir);
1915 		break;
1916 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1917 	case RTE_CRYPTO_CIPHER_AES_F8:
1918 	case RTE_CRYPTO_CIPHER_AES_ECB:
1919 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1920 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1921 	case RTE_CRYPTO_CIPHER_AES_XTS:
1922 	case RTE_CRYPTO_CIPHER_ARC4:
1923 	case RTE_CRYPTO_CIPHER_NULL:
1924 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1925 			xform->cipher.algo);
1926 		ret = -ENOTSUP;
1927 		goto error_out;
1928 	default:
1929 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1930 			xform->cipher.algo);
1931 		ret = -ENOTSUP;
1932 		goto error_out;
1933 	}
1934 
1935 	if (bufsize < 0) {
1936 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1937 		ret = -EINVAL;
1938 		goto error_out;
1939 	}
1940 
1941 	flc->word1_sdl = (uint8_t)bufsize;
1942 	session->ctxt = priv;
1943 
1944 #ifdef CAAM_DESC_DEBUG
1945 	int i;
1946 	for (i = 0; i < bufsize; i++)
1947 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1948 #endif
1949 	return ret;
1950 
1951 error_out:
1952 	rte_free(session->cipher_key.data);
1953 	rte_free(priv);
1954 	return ret;
1955 }
1956 
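/** Set up an authentication-only session and build its shared descriptor */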
1957 static int
1958 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1959 		    struct rte_crypto_sym_xform *xform,
1960 		    dpaa2_sec_session *session)
1961 {
1962 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1963 	struct alginfo authdata;
1964 	int bufsize, ret = 0;
1965 	struct ctxt_priv *priv;
1966 	struct sec_flow_context *flc;
1967 
1968 	PMD_INIT_FUNC_TRACE();
1969 
1970 	/* For SEC AUTH three descriptors are required for various stages */
1971 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1972 			sizeof(struct ctxt_priv) + 3 *
1973 			sizeof(struct sec_flc_desc),
1974 			RTE_CACHE_LINE_SIZE);
1975 	if (priv == NULL) {
1976 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1977 		return -ENOMEM;
1978 	}
1979 
1980 	priv->fle_pool = dev_priv->fle_pool;
1981 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1982 
1983 	session->ctxt_type = DPAA2_SEC_AUTH;
1984 	session->auth_key.length = xform->auth.key.length;
1985 	if (xform->auth.key.length) {
1986 		session->auth_key.data = rte_zmalloc(NULL,
1987 			xform->auth.key.length,
1988 			RTE_CACHE_LINE_SIZE);
1989 		if (session->auth_key.data == NULL) {
1990 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1991 			rte_free(priv);
1992 			return -ENOMEM;
1993 		}
1994 		memcpy(session->auth_key.data, xform->auth.key.data,
1995 		       xform->auth.key.length);
1996 		authdata.key = (size_t)session->auth_key.data;
1997 		authdata.key_enc_flags = 0;
1998 		authdata.key_type = RTA_DATA_IMM;
1999 	}
2000 	authdata.keylen = session->auth_key.length;
2001 
2002 	session->digest_length = xform->auth.digest_length;
2003 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2004 				DIR_ENC : DIR_DEC;
2005 
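	/*
	 * The constructors below take an ICV flag as !session->dir:
	 * DIR_DEC (verify) enables ICV checking, DIR_ENC (generate)
	 * produces the ICV (this assumes DIR_ENC == 1 and DIR_DEC == 0,
	 * as defined by the RTA headers).
	 */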
2006 	switch (xform->auth.algo) {
2007 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2008 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2009 		authdata.algmode = OP_ALG_AAI_HMAC;
2010 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2011 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2012 					   1, 0, SHR_NEVER, &authdata,
2013 					   !session->dir,
2014 					   session->digest_length);
2015 		break;
2016 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2017 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2018 		authdata.algmode = OP_ALG_AAI_HMAC;
2019 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2020 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2021 					   1, 0, SHR_NEVER, &authdata,
2022 					   !session->dir,
2023 					   session->digest_length);
2024 		break;
2025 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2026 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2027 		authdata.algmode = OP_ALG_AAI_HMAC;
2028 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2029 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2030 					   1, 0, SHR_NEVER, &authdata,
2031 					   !session->dir,
2032 					   session->digest_length);
2033 		break;
2034 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2035 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2036 		authdata.algmode = OP_ALG_AAI_HMAC;
2037 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2038 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2039 					   1, 0, SHR_NEVER, &authdata,
2040 					   !session->dir,
2041 					   session->digest_length);
2042 		break;
2043 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2044 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2045 		authdata.algmode = OP_ALG_AAI_HMAC;
2046 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2047 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2048 					   1, 0, SHR_NEVER, &authdata,
2049 					   !session->dir,
2050 					   session->digest_length);
2051 		break;
2052 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2053 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2054 		authdata.algmode = OP_ALG_AAI_HMAC;
2055 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2056 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2057 					   1, 0, SHR_NEVER, &authdata,
2058 					   !session->dir,
2059 					   session->digest_length);
2060 		break;
2061 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2062 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2063 		authdata.algmode = OP_ALG_AAI_F9;
2064 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2065 		session->iv.offset = xform->auth.iv.offset;
2066 		session->iv.length = xform->auth.iv.length;
2067 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2068 					      1, 0, &authdata,
2069 					      !session->dir,
2070 					      session->digest_length);
2071 		break;
2072 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2073 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2074 		authdata.algmode = OP_ALG_AAI_F9;
2075 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2076 		session->iv.offset = xform->auth.iv.offset;
2077 		session->iv.length = xform->auth.iv.length;
2078 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2079 					   1, 0, &authdata,
2080 					   !session->dir,
2081 					   session->digest_length);
2082 		break;
2083 	case RTE_CRYPTO_AUTH_SHA1:
2084 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2085 		authdata.algmode = OP_ALG_AAI_HASH;
2086 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2087 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2088 					   1, 0, SHR_NEVER, &authdata,
2089 					   !session->dir,
2090 					   session->digest_length);
2091 		break;
2092 	case RTE_CRYPTO_AUTH_MD5:
2093 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2094 		authdata.algmode = OP_ALG_AAI_HASH;
2095 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2096 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2097 					   1, 0, SHR_NEVER, &authdata,
2098 					   !session->dir,
2099 					   session->digest_length);
2100 		break;
2101 	case RTE_CRYPTO_AUTH_SHA256:
2102 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2103 		authdata.algmode = OP_ALG_AAI_HASH;
2104 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2105 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2106 					   1, 0, SHR_NEVER, &authdata,
2107 					   !session->dir,
2108 					   session->digest_length);
2109 		break;
2110 	case RTE_CRYPTO_AUTH_SHA384:
2111 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2112 		authdata.algmode = OP_ALG_AAI_HASH;
2113 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2114 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2115 					   1, 0, SHR_NEVER, &authdata,
2116 					   !session->dir,
2117 					   session->digest_length);
2118 		break;
2119 	case RTE_CRYPTO_AUTH_SHA512:
2120 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2121 		authdata.algmode = OP_ALG_AAI_HASH;
2122 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2123 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2124 					   1, 0, SHR_NEVER, &authdata,
2125 					   !session->dir,
2126 					   session->digest_length);
2127 		break;
2128 	case RTE_CRYPTO_AUTH_SHA224:
2129 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2130 		authdata.algmode = OP_ALG_AAI_HASH;
2131 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2132 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2133 					   1, 0, SHR_NEVER, &authdata,
2134 					   !session->dir,
2135 					   session->digest_length);
2136 		break;
2137 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2138 		authdata.algtype = OP_ALG_ALGSEL_AES;
2139 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2140 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2141 		bufsize = cnstr_shdsc_aes_mac(
2142 					priv->flc_desc[DESC_INITFINAL].desc,
2143 					1, 0, SHR_NEVER, &authdata,
2144 					!session->dir,
2145 					session->digest_length);
2146 		break;
2147 	case RTE_CRYPTO_AUTH_AES_CMAC:
2148 		authdata.algtype = OP_ALG_ALGSEL_AES;
2149 		authdata.algmode = OP_ALG_AAI_CMAC;
2150 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2151 		bufsize = cnstr_shdsc_aes_mac(
2152 					   priv->flc_desc[DESC_INITFINAL].desc,
2153 					   1, 0, SHR_NEVER, &authdata,
2154 					   !session->dir,
2155 					   session->digest_length);
2156 		break;
2157 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2158 	case RTE_CRYPTO_AUTH_AES_GMAC:
2159 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2160 	case RTE_CRYPTO_AUTH_NULL:
2161 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2162 			      xform->auth.algo);
2163 		ret = -ENOTSUP;
2164 		goto error_out;
2165 	default:
2166 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2167 			      xform->auth.algo);
2168 		ret = -ENOTSUP;
2169 		goto error_out;
2170 	}
2171 
2172 	if (bufsize < 0) {
2173 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2174 		ret = -EINVAL;
2175 		goto error_out;
2176 	}
2177 
2178 	flc->word1_sdl = (uint8_t)bufsize;
2179 	session->ctxt = priv;
2180 #ifdef CAAM_DESC_DEBUG
2181 	int i;
2182 	for (i = 0; i < bufsize; i++)
2183 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2184 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2185 #endif
2186 
2187 	return ret;
2188 
2189 error_out:
2190 	rte_free(session->auth_key.data);
2191 	rte_free(priv);
2192 	return ret;
2193 }
2194 
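/** Set up an AEAD (AES-GCM) session and build its shared descriptor */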
2195 static int
2196 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2197 		    struct rte_crypto_sym_xform *xform,
2198 		    dpaa2_sec_session *session)
2199 {
2200 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2201 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2202 	struct alginfo aeaddata;
2203 	int bufsize;
2204 	struct ctxt_priv *priv;
2205 	struct sec_flow_context *flc;
2206 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2207 	int err, ret = 0;
2208 
2209 	PMD_INIT_FUNC_TRACE();
2210 
2211 	/* Set IV parameters */
2212 	session->iv.offset = aead_xform->iv.offset;
2213 	session->iv.length = aead_xform->iv.length;
2214 	session->ctxt_type = DPAA2_SEC_AEAD;
2215 
2216 	/* For SEC AEAD only one descriptor is required */
2217 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2218 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2219 			RTE_CACHE_LINE_SIZE);
2220 	if (priv == NULL) {
2221 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2222 		return -ENOMEM;
2223 	}
2224 
2225 	priv->fle_pool = dev_priv->fle_pool;
2226 	flc = &priv->flc_desc[0].flc;
2227 
2228 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2229 					       RTE_CACHE_LINE_SIZE);
2230 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2231 		DPAA2_SEC_ERR("No Memory for aead key");
2232 		rte_free(priv);
2233 		return -ENOMEM;
2234 	}
2235 	memcpy(session->aead_key.data, aead_xform->key.data,
2236 	       aead_xform->key.length);
2237 
2238 	session->digest_length = aead_xform->digest_length;
2239 	session->aead_key.length = aead_xform->key.length;
2240 	ctxt->auth_only_len = aead_xform->aad_length;
2241 
2242 	aeaddata.key = (size_t)session->aead_key.data;
2243 	aeaddata.keylen = session->aead_key.length;
2244 	aeaddata.key_enc_flags = 0;
2245 	aeaddata.key_type = RTA_DATA_IMM;
2246 
2247 	switch (aead_xform->algo) {
2248 	case RTE_CRYPTO_AEAD_AES_GCM:
2249 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2250 		aeaddata.algmode = OP_ALG_AAI_GCM;
2251 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2252 		break;
2253 	case RTE_CRYPTO_AEAD_AES_CCM:
2254 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2255 			      aead_xform->algo);
2256 		ret = -ENOTSUP;
2257 		goto error_out;
2258 	default:
2259 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2260 			      aead_xform->algo);
2261 		ret = -ENOTSUP;
2262 		goto error_out;
2263 	}
2264 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2265 				DIR_ENC : DIR_DEC;
2266 
2267 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2268 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2269 			       DESC_JOB_IO_LEN,
2270 			       (unsigned int *)priv->flc_desc[0].desc,
2271 			       &priv->flc_desc[0].desc[1], 1);
2272 
2273 	if (err < 0) {
2274 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2275 		ret = -EINVAL;
2276 		goto error_out;
2277 	}
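	/*
	 * rta_inline_query() reports in desc[1] a bitmask of keys that fit
	 * inline in the shared descriptor; bit 0 covers the single AEAD key
	 * here. A key that does not fit is referenced by an IOVA pointer.
	 */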
2278 	if (priv->flc_desc[0].desc[1] & 1) {
2279 		aeaddata.key_type = RTA_DATA_IMM;
2280 	} else {
2281 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2282 		aeaddata.key_type = RTA_DATA_PTR;
2283 	}
2284 	priv->flc_desc[0].desc[0] = 0;
2285 	priv->flc_desc[0].desc[1] = 0;
2286 
2287 	if (session->dir == DIR_ENC)
2288 		bufsize = cnstr_shdsc_gcm_encap(
2289 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2290 				&aeaddata, session->iv.length,
2291 				session->digest_length);
2292 	else
2293 		bufsize = cnstr_shdsc_gcm_decap(
2294 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2295 				&aeaddata, session->iv.length,
2296 				session->digest_length);
2297 	if (bufsize < 0) {
2298 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2299 		ret = -EINVAL;
2300 		goto error_out;
2301 	}
2302 
2303 	flc->word1_sdl = (uint8_t)bufsize;
2304 	session->ctxt = priv;
2305 #ifdef CAAM_DESC_DEBUG
2306 	int i;
2307 	for (i = 0; i < bufsize; i++)
2308 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2309 			    i, priv->flc_desc[0].desc[i]);
2310 #endif
2311 	return ret;
2312 
2313 error_out:
2314 	rte_free(session->aead_key.data);
2315 	rte_free(priv);
2316 	return ret;
2317 }
2318 
2319 
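/** Set up a chained cipher+auth (authenc) session and its descriptor */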
2320 static int
2321 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2322 		    struct rte_crypto_sym_xform *xform,
2323 		    dpaa2_sec_session *session)
2324 {
2325 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2326 	struct alginfo authdata, cipherdata;
2327 	int bufsize;
2328 	struct ctxt_priv *priv;
2329 	struct sec_flow_context *flc;
2330 	struct rte_crypto_cipher_xform *cipher_xform;
2331 	struct rte_crypto_auth_xform *auth_xform;
2332 	int err, ret = 0;
2333 
2334 	PMD_INIT_FUNC_TRACE();
2335 
2336 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2337 		cipher_xform = &xform->cipher;
2338 		auth_xform = &xform->next->auth;
2339 		session->ctxt_type =
2340 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2341 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2342 	} else {
2343 		cipher_xform = &xform->next->cipher;
2344 		auth_xform = &xform->auth;
2345 		session->ctxt_type =
2346 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2347 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2348 	}
2349 
2350 	/* Set IV parameters */
2351 	session->iv.offset = cipher_xform->iv.offset;
2352 	session->iv.length = cipher_xform->iv.length;
2353 
2354 	/* For SEC cipher+auth (chained) only one descriptor is required */
2355 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2356 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2357 			RTE_CACHE_LINE_SIZE);
2358 	if (priv == NULL) {
2359 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2360 		return -ENOMEM;
2361 	}
2362 
2363 	priv->fle_pool = dev_priv->fle_pool;
2364 	flc = &priv->flc_desc[0].flc;
2365 
2366 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2367 					       RTE_CACHE_LINE_SIZE);
2368 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2369 		DPAA2_SEC_ERR("No Memory for cipher key");
2370 		rte_free(priv);
2371 		return -ENOMEM;
2372 	}
2373 	session->cipher_key.length = cipher_xform->key.length;
2374 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2375 					     RTE_CACHE_LINE_SIZE);
2376 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2377 		DPAA2_SEC_ERR("No Memory for auth key");
2378 		rte_free(session->cipher_key.data);
2379 		rte_free(priv);
2380 		return -ENOMEM;
2381 	}
2382 	session->auth_key.length = auth_xform->key.length;
2383 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2384 	       cipher_xform->key.length);
2385 	memcpy(session->auth_key.data, auth_xform->key.data,
2386 	       auth_xform->key.length);
2387 
2388 	authdata.key = (size_t)session->auth_key.data;
2389 	authdata.keylen = session->auth_key.length;
2390 	authdata.key_enc_flags = 0;
2391 	authdata.key_type = RTA_DATA_IMM;
2392 
2393 	session->digest_length = auth_xform->digest_length;
2394 
2395 	switch (auth_xform->algo) {
2396 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2397 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2398 		authdata.algmode = OP_ALG_AAI_HMAC;
2399 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2400 		break;
2401 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2402 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2403 		authdata.algmode = OP_ALG_AAI_HMAC;
2404 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2405 		break;
2406 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2407 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2408 		authdata.algmode = OP_ALG_AAI_HMAC;
2409 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2410 		break;
2411 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2412 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2413 		authdata.algmode = OP_ALG_AAI_HMAC;
2414 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2415 		break;
2416 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2417 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2418 		authdata.algmode = OP_ALG_AAI_HMAC;
2419 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2420 		break;
2421 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2422 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2423 		authdata.algmode = OP_ALG_AAI_HMAC;
2424 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2425 		break;
2426 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2427 		authdata.algtype = OP_ALG_ALGSEL_AES;
2428 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2429 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2430 		break;
2431 	case RTE_CRYPTO_AUTH_AES_CMAC:
2432 		authdata.algtype = OP_ALG_ALGSEL_AES;
2433 		authdata.algmode = OP_ALG_AAI_CMAC;
2434 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2435 		break;
2436 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2437 	case RTE_CRYPTO_AUTH_AES_GMAC:
2438 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2439 	case RTE_CRYPTO_AUTH_NULL:
2440 	case RTE_CRYPTO_AUTH_SHA1:
2441 	case RTE_CRYPTO_AUTH_SHA256:
2442 	case RTE_CRYPTO_AUTH_SHA512:
2443 	case RTE_CRYPTO_AUTH_SHA224:
2444 	case RTE_CRYPTO_AUTH_SHA384:
2445 	case RTE_CRYPTO_AUTH_MD5:
2446 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2447 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2448 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2449 			      auth_xform->algo);
2450 		ret = -ENOTSUP;
2451 		goto error_out;
2452 	default:
2453 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2454 			      auth_xform->algo);
2455 		ret = -ENOTSUP;
2456 		goto error_out;
2457 	}
2458 	cipherdata.key = (size_t)session->cipher_key.data;
2459 	cipherdata.keylen = session->cipher_key.length;
2460 	cipherdata.key_enc_flags = 0;
2461 	cipherdata.key_type = RTA_DATA_IMM;
2462 
2463 	switch (cipher_xform->algo) {
2464 	case RTE_CRYPTO_CIPHER_AES_CBC:
2465 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2466 		cipherdata.algmode = OP_ALG_AAI_CBC;
2467 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2468 		break;
2469 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2470 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2471 		cipherdata.algmode = OP_ALG_AAI_CBC;
2472 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2473 		break;
2474 	case RTE_CRYPTO_CIPHER_DES_CBC:
2475 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2476 		cipherdata.algmode = OP_ALG_AAI_CBC;
2477 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2478 		break;
2479 	case RTE_CRYPTO_CIPHER_AES_CTR:
2480 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2481 		cipherdata.algmode = OP_ALG_AAI_CTR;
2482 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2483 		break;
2484 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2485 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2486 	case RTE_CRYPTO_CIPHER_NULL:
2487 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2488 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2489 	case RTE_CRYPTO_CIPHER_AES_ECB:
2490 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2491 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2492 			      cipher_xform->algo);
2493 		ret = -ENOTSUP;
2494 		goto error_out;
2495 	default:
2496 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2497 			      cipher_xform->algo);
2498 		ret = -ENOTSUP;
2499 		goto error_out;
2500 	}
2501 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2502 				DIR_ENC : DIR_DEC;
2503 
2504 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2505 	priv->flc_desc[0].desc[1] = authdata.keylen;
2506 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2507 			       DESC_JOB_IO_LEN,
2508 			       (unsigned int *)priv->flc_desc[0].desc,
2509 			       &priv->flc_desc[0].desc[2], 2);
2510 
2511 	if (err < 0) {
2512 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2513 		ret = -EINVAL;
2514 		goto error_out;
2515 	}
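	/*
	 * desc[2] holds the rta_inline_query() bitmask: bit 0 tells whether
	 * the cipher key fits inline in the shared descriptor, bit 1 whether
	 * the auth key does; keys that do not fit are passed by IOVA pointer.
	 */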
2516 	if (priv->flc_desc[0].desc[2] & 1) {
2517 		cipherdata.key_type = RTA_DATA_IMM;
2518 	} else {
2519 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2520 		cipherdata.key_type = RTA_DATA_PTR;
2521 	}
2522 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2523 		authdata.key_type = RTA_DATA_IMM;
2524 	} else {
2525 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2526 		authdata.key_type = RTA_DATA_PTR;
2527 	}
2528 	priv->flc_desc[0].desc[0] = 0;
2529 	priv->flc_desc[0].desc[1] = 0;
2530 	priv->flc_desc[0].desc[2] = 0;
2531 
2532 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2533 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2534 					      0, SHR_SERIAL,
2535 					      &cipherdata, &authdata,
2536 					      session->iv.length,
2537 					      session->digest_length,
2538 					      session->dir);
2539 		if (bufsize < 0) {
2540 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2541 			ret = -EINVAL;
2542 			goto error_out;
2543 		}
2544 	} else {
2545 		DPAA2_SEC_ERR("Hash before cipher not supported");
2546 		ret = -ENOTSUP;
2547 		goto error_out;
2548 	}
2549 
2550 	flc->word1_sdl = (uint8_t)bufsize;
2551 	session->ctxt = priv;
2552 #ifdef CAAM_DESC_DEBUG
2553 	int i;
2554 	for (i = 0; i < bufsize; i++)
2555 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2556 			    i, priv->flc_desc[0].desc[i]);
2557 #endif
2558 
2559 	return ret;
2560 
2561 error_out:
2562 	rte_free(session->cipher_key.data);
2563 	rte_free(session->auth_key.data);
2564 	rte_free(priv);
2565 	return ret;
2566 }
2567 
2568 static int
2569 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2570 			    struct rte_crypto_sym_xform *xform,	void *sess)
2571 {
2572 	dpaa2_sec_session *session = sess;
2573 	int ret;
2574 
2575 	PMD_INIT_FUNC_TRACE();
2576 
2577 	if (unlikely(sess == NULL)) {
2578 		DPAA2_SEC_ERR("Invalid session struct");
2579 		return -EINVAL;
2580 	}
2581 
2582 	memset(session, 0, sizeof(dpaa2_sec_session));
2583 	/* Default IV length = 0 */
2584 	session->iv.length = 0;
2585 
2586 	/* Cipher Only */
2587 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2588 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2589 
2590 	/* Authentication Only */
2591 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2592 		   xform->next == NULL) {
2593 		ret = dpaa2_sec_auth_init(dev, xform, session);
2594 
2595 	/* Cipher then Authenticate */
2596 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2597 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2598 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2599 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2600 			ret = dpaa2_sec_auth_init(dev, xform, session);
2601 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2602 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2603 		else
2604 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2605 	/* Authenticate then Cipher */
2606 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2607 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2608 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2609 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2610 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2611 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2612 			ret = dpaa2_sec_auth_init(dev, xform, session);
2613 		else
2614 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2615 	/* AEAD operation for AES-GCM kind of Algorithms */
2616 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2617 		   xform->next == NULL) {
2618 		ret = dpaa2_sec_aead_init(dev, xform, session);
2619 
2620 	} else {
2621 		DPAA2_SEC_ERR("Invalid crypto type");
2622 		return -EINVAL;
2623 	}
2624 
2625 	return ret;
2626 }
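
/*
 * Example (hypothetical application-side flow; names are illustrative):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *
 * The init call reaches dpaa2_sec_sym_session_configure() below, which
 * dispatches here via dpaa2_sec_set_session_parameters().
 */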
2627 
2628 #ifdef RTE_LIB_SECURITY
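/** Map an AEAD xform to IPsec protocol-offload alginfo (GCM/CCM per digest) */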
2629 static int
2630 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2631 			dpaa2_sec_session *session,
2632 			struct alginfo *aeaddata)
2633 {
2634 	PMD_INIT_FUNC_TRACE();
2635 
2636 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2637 					       RTE_CACHE_LINE_SIZE);
2638 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2639 		DPAA2_SEC_ERR("No Memory for aead key");
2640 		return -ENOMEM;
2641 	}
2642 	memcpy(session->aead_key.data, aead_xform->key.data,
2643 	       aead_xform->key.length);
2644 
2645 	session->digest_length = aead_xform->digest_length;
2646 	session->aead_key.length = aead_xform->key.length;
2647 
2648 	aeaddata->key = (size_t)session->aead_key.data;
2649 	aeaddata->keylen = session->aead_key.length;
2650 	aeaddata->key_enc_flags = 0;
2651 	aeaddata->key_type = RTA_DATA_IMM;
2652 
2653 	switch (aead_xform->algo) {
2654 	case RTE_CRYPTO_AEAD_AES_GCM:
2655 		switch (session->digest_length) {
2656 		case 8:
2657 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2658 			break;
2659 		case 12:
2660 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2661 			break;
2662 		case 16:
2663 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2664 			break;
2665 		default:
2666 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2667 				      session->digest_length);
2668 			return -EINVAL;
2669 		}
2670 		aeaddata->algmode = OP_ALG_AAI_GCM;
2671 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2672 		break;
2673 	case RTE_CRYPTO_AEAD_AES_CCM:
2674 		switch (session->digest_length) {
2675 		case 8:
2676 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2677 			break;
2678 		case 12:
2679 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2680 			break;
2681 		case 16:
2682 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2683 			break;
2684 		default:
2685 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2686 				      session->digest_length);
2687 			return -EINVAL;
2688 		}
2689 		aeaddata->algmode = OP_ALG_AAI_CCM;
2690 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2691 		break;
2692 	default:
2693 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2694 			      aead_xform->algo);
2695 		return -ENOTSUP;
2696 	}
2697 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2698 				DIR_ENC : DIR_DEC;
2699 
2700 	return 0;
2701 }
2702 
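/** Translate cipher/auth xforms into IPsec protocol-offload alginfo */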
2703 static int
2704 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2705 	struct rte_crypto_auth_xform *auth_xform,
2706 	dpaa2_sec_session *session,
2707 	struct alginfo *cipherdata,
2708 	struct alginfo *authdata)
2709 {
2710 	if (cipher_xform) {
2711 		session->cipher_key.data = rte_zmalloc(NULL,
2712 						       cipher_xform->key.length,
2713 						       RTE_CACHE_LINE_SIZE);
2714 		if (session->cipher_key.data == NULL &&
2715 				cipher_xform->key.length > 0) {
2716 			DPAA2_SEC_ERR("No Memory for cipher key");
2717 			return -ENOMEM;
2718 		}
2719 
2720 		session->cipher_key.length = cipher_xform->key.length;
2721 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2722 				cipher_xform->key.length);
2723 		session->cipher_alg = cipher_xform->algo;
2724 	} else {
2725 		session->cipher_key.data = NULL;
2726 		session->cipher_key.length = 0;
2727 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2728 	}
2729 
2730 	if (auth_xform) {
2731 		session->auth_key.data = rte_zmalloc(NULL,
2732 						auth_xform->key.length,
2733 						RTE_CACHE_LINE_SIZE);
2734 		if (session->auth_key.data == NULL &&
2735 				auth_xform->key.length > 0) {
2736 			DPAA2_SEC_ERR("No Memory for auth key");
2737 			return -ENOMEM;
2738 		}
2739 		session->auth_key.length = auth_xform->key.length;
2740 		memcpy(session->auth_key.data, auth_xform->key.data,
2741 				auth_xform->key.length);
2742 		session->auth_alg = auth_xform->algo;
2743 		session->digest_length = auth_xform->digest_length;
2744 	} else {
2745 		session->auth_key.data = NULL;
2746 		session->auth_key.length = 0;
2747 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2748 	}
2749 
2750 	authdata->key = (size_t)session->auth_key.data;
2751 	authdata->keylen = session->auth_key.length;
2752 	authdata->key_enc_flags = 0;
2753 	authdata->key_type = RTA_DATA_IMM;
2754 	switch (session->auth_alg) {
2755 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2756 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2757 		authdata->algmode = OP_ALG_AAI_HMAC;
2758 		break;
2759 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2760 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2761 		authdata->algmode = OP_ALG_AAI_HMAC;
2762 		break;
2763 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2764 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2765 		authdata->algmode = OP_ALG_AAI_HMAC;
2766 		if (session->digest_length != 16)
2767 			DPAA2_SEC_WARN(
2768 			"+++Using sha256-hmac truncated len is non-standard,"
2769 			"it will not work with lookaside proto");
2770 		break;
2771 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2772 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2773 		authdata->algmode = OP_ALG_AAI_HMAC;
2774 		break;
2775 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2776 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2777 		authdata->algmode = OP_ALG_AAI_HMAC;
2778 		break;
2779 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2780 		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2781 		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
2782 		break;
2783 	case RTE_CRYPTO_AUTH_AES_CMAC:
2784 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2785 		authdata->algmode = OP_ALG_AAI_CMAC;
2786 		break;
2787 	case RTE_CRYPTO_AUTH_NULL:
2788 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2789 		break;
2790 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2791 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2792 	case RTE_CRYPTO_AUTH_SHA1:
2793 	case RTE_CRYPTO_AUTH_SHA256:
2794 	case RTE_CRYPTO_AUTH_SHA512:
2795 	case RTE_CRYPTO_AUTH_SHA224:
2796 	case RTE_CRYPTO_AUTH_SHA384:
2797 	case RTE_CRYPTO_AUTH_MD5:
2798 	case RTE_CRYPTO_AUTH_AES_GMAC:
2799 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2800 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2801 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2802 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2803 			      session->auth_alg);
2804 		return -ENOTSUP;
2805 	default:
2806 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2807 			      session->auth_alg);
2808 		return -ENOTSUP;
2809 	}
2810 	cipherdata->key = (size_t)session->cipher_key.data;
2811 	cipherdata->keylen = session->cipher_key.length;
2812 	cipherdata->key_enc_flags = 0;
2813 	cipherdata->key_type = RTA_DATA_IMM;
2814 
2815 	switch (session->cipher_alg) {
2816 	case RTE_CRYPTO_CIPHER_AES_CBC:
2817 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2818 		cipherdata->algmode = OP_ALG_AAI_CBC;
2819 		break;
2820 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2821 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2822 		cipherdata->algmode = OP_ALG_AAI_CBC;
2823 		break;
2824 	case RTE_CRYPTO_CIPHER_DES_CBC:
2825 		cipherdata->algtype = OP_PCL_IPSEC_DES;
2826 		cipherdata->algmode = OP_ALG_AAI_CBC;
2827 		break;
2828 	case RTE_CRYPTO_CIPHER_AES_CTR:
2829 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2830 		cipherdata->algmode = OP_ALG_AAI_CTR;
2831 		break;
2832 	case RTE_CRYPTO_CIPHER_NULL:
2833 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2834 		break;
2835 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2836 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2837 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2838 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2839 	case RTE_CRYPTO_CIPHER_AES_ECB:
2840 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2841 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2842 			      session->cipher_alg);
2843 		return -ENOTSUP;
2844 	default:
2845 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2846 			      session->cipher_alg);
2847 		return -ENOTSUP;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
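/** Set up an IPsec protocol-offload session (encap/decap PDB + descriptor) */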
2853 static int
2854 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2855 			    struct rte_security_session_conf *conf,
2856 			    void *sess)
2857 {
2858 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2859 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2860 	struct rte_crypto_auth_xform *auth_xform = NULL;
2861 	struct rte_crypto_aead_xform *aead_xform = NULL;
2862 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2863 	struct ctxt_priv *priv;
2864 	struct alginfo authdata, cipherdata;
2865 	int bufsize;
2866 	struct sec_flow_context *flc;
2867 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2868 	int ret = -1;
2869 
2870 	PMD_INIT_FUNC_TRACE();
2871 
2872 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2873 				sizeof(struct ctxt_priv) +
2874 				sizeof(struct sec_flc_desc),
2875 				RTE_CACHE_LINE_SIZE);
2876 
2877 	if (priv == NULL) {
2878 		DPAA2_SEC_ERR("No memory for priv CTXT");
2879 		return -ENOMEM;
2880 	}
2881 
2882 	priv->fle_pool = dev_priv->fle_pool;
2883 	flc = &priv->flc_desc[0].flc;
2884 
2885 	memset(session, 0, sizeof(dpaa2_sec_session));
2886 
2887 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2888 		cipher_xform = &conf->crypto_xform->cipher;
2889 		if (conf->crypto_xform->next)
2890 			auth_xform = &conf->crypto_xform->next->auth;
2891 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2892 					session, &cipherdata, &authdata);
2893 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2894 		auth_xform = &conf->crypto_xform->auth;
2895 		if (conf->crypto_xform->next)
2896 			cipher_xform = &conf->crypto_xform->next->cipher;
2897 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2898 					session, &cipherdata, &authdata);
2899 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2900 		aead_xform = &conf->crypto_xform->aead;
2901 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2902 					session, &cipherdata);
2903 		authdata.keylen = 0;
2904 		authdata.algtype = 0;
2905 	} else {
2906 		DPAA2_SEC_ERR("XFORM not specified");
2907 		ret = -EINVAL;
2908 		goto out;
2909 	}
2910 	if (ret) {
2911 		DPAA2_SEC_ERR("Failed to process xform");
2912 		goto out;
2913 	}
2914 
2915 	session->ctxt_type = DPAA2_SEC_IPSEC;
2916 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2917 		uint8_t *hdr = NULL;
2918 		struct ip ip4_hdr;
2919 		struct rte_ipv6_hdr ip6_hdr;
2920 		struct ipsec_encap_pdb encap_pdb;
2921 
2922 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2923 		/* For Sec Proto only one descriptor is required. */
2924 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2925 
2926 		/* copy algo specific data to PDB */
2927 		switch (cipherdata.algtype) {
2928 		case OP_PCL_IPSEC_AES_CTR:
2929 			encap_pdb.ctr.ctr_initial = 0x00000001;
2930 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2931 			break;
2932 		case OP_PCL_IPSEC_AES_GCM8:
2933 		case OP_PCL_IPSEC_AES_GCM12:
2934 		case OP_PCL_IPSEC_AES_GCM16:
2935 			memcpy(encap_pdb.gcm.salt,
2936 				(uint8_t *)&(ipsec_xform->salt), 4);
2937 			break;
2938 		}
2939 
2940 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2941 			PDBOPTS_ESP_OIHI_PDB_INL |
2942 			PDBOPTS_ESP_IVSRC |
2943 			PDBHMO_ESP_ENCAP_DTTL |
2944 			PDBHMO_ESP_SNR;
2945 		if (ipsec_xform->options.esn)
2946 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2947 		encap_pdb.spi = ipsec_xform->spi;
2948 		session->dir = DIR_ENC;
2949 		if (ipsec_xform->tunnel.type ==
2950 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2951 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2952 			ip4_hdr.ip_v = IPVERSION;
2953 			ip4_hdr.ip_hl = 5;
2954 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2955 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2956 			ip4_hdr.ip_id = 0;
2957 			ip4_hdr.ip_off = 0;
2958 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2959 			ip4_hdr.ip_p = IPPROTO_ESP;
2960 			ip4_hdr.ip_sum = 0;
2961 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2962 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2963 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2964 					&ip4_hdr, sizeof(struct ip));
2965 			hdr = (uint8_t *)&ip4_hdr;
2966 		} else if (ipsec_xform->tunnel.type ==
2967 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2968 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2969 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2970 				((ipsec_xform->tunnel.ipv6.dscp <<
2971 					RTE_IPV6_HDR_TC_SHIFT) &
2972 					RTE_IPV6_HDR_TC_MASK) |
2973 				((ipsec_xform->tunnel.ipv6.flabel <<
2974 					RTE_IPV6_HDR_FL_SHIFT) &
2975 					RTE_IPV6_HDR_FL_MASK));
2976 			/* Payload length will be updated by HW */
2977 			ip6_hdr.payload_len = 0;
2978 			ip6_hdr.hop_limits =
2979 					ipsec_xform->tunnel.ipv6.hlimit;
2980 			ip6_hdr.proto = (ipsec_xform->proto ==
2981 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2982 					IPPROTO_ESP : IPPROTO_AH;
2983 			memcpy(&ip6_hdr.src_addr,
2984 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2985 			memcpy(&ip6_hdr.dst_addr,
2986 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2987 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2988 			hdr = (uint8_t *)&ip6_hdr;
2989 		}
2990 
2991 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2992 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2993 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
2994 				hdr, &cipherdata, &authdata);
2995 	} else if (ipsec_xform->direction ==
2996 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2997 		struct ipsec_decap_pdb decap_pdb;
2998 
2999 		flc->dhr = SEC_FLC_DHR_INBOUND;
3000 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3001 		/* copy algo specific data to PDB */
3002 		switch (cipherdata.algtype) {
3003 		case OP_PCL_IPSEC_AES_CTR:
3004 			decap_pdb.ctr.ctr_initial = 0x00000001;
3005 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3006 			break;
3007 		case OP_PCL_IPSEC_AES_GCM8:
3008 		case OP_PCL_IPSEC_AES_GCM12:
3009 		case OP_PCL_IPSEC_AES_GCM16:
3010 			memcpy(decap_pdb.gcm.salt,
3011 				(uint8_t *)&(ipsec_xform->salt), 4);
3012 			break;
3013 		}
3014 
3015 		decap_pdb.options = (ipsec_xform->tunnel.type ==
3016 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3017 				sizeof(struct ip) << 16 :
3018 				sizeof(struct rte_ipv6_hdr) << 16;
3019 		if (ipsec_xform->options.esn)
3020 			decap_pdb.options |= PDBOPTS_ESP_ESN;
3021 
3022 		if (ipsec_xform->replay_win_sz) {
3023 			uint32_t win_sz;
3024 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3025 
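			/*
			 * SEC supports a fixed set of anti-replay window
			 * sizes; win_sz was rounded up to a power of two
			 * above and is mapped to the nearest supported PDB
			 * option below (capped at 128 before SEC era 10).
			 */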
3026 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3027 				DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
3028 				win_sz = 128;
3029 			}
3030 			switch (win_sz) {
3031 			case 1:
3032 			case 2:
3033 			case 4:
3034 			case 8:
3035 			case 16:
3036 			case 32:
3037 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3038 				break;
3039 			case 64:
3040 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3041 				break;
3042 			case 256:
3043 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3044 				break;
3045 			case 512:
3046 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3047 				break;
3048 			case 1024:
3049 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3050 				break;
3051 			case 128:
3052 			default:
3053 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3054 			}
3055 		}
3056 		session->dir = DIR_DEC;
3057 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3058 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3059 				SHR_WAIT : SHR_SERIAL,
3060 				&decap_pdb, &cipherdata, &authdata);
3061 	} else {
3062 		ret = -EINVAL;
		goto out;
	}
3063 
3064 	if (bufsize < 0) {
3065 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3066 		goto out;
3067 	}
3068 
3069 	flc->word1_sdl = (uint8_t)bufsize;
3070 
3071 	/* Enable the stashing control bit */
3072 	DPAA2_SET_FLC_RSC(flc);
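	/*
	 * word2/word3 carry the IOVA of this device's Rx queue context so
	 * SEC can return processed frames to it; the OR-ed 0x14 sets the
	 * stashing-related low bits (exact layout per the DPAA2 hw spec).
	 */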
3073 	flc->word2_rflc_31_0 = lower_32_bits(
3074 			(size_t)&(((struct dpaa2_sec_qp *)
3075 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3076 	flc->word3_rflc_63_32 = upper_32_bits(
3077 			(size_t)&(((struct dpaa2_sec_qp *)
3078 			dev->data->queue_pairs[0])->rx_vq));
3079 
3080 	/* Set EWS bit i.e. enable write-safe */
3081 	DPAA2_SET_FLC_EWS(flc);
3082 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3083 	DPAA2_SET_FLC_REUSE_BS(flc);
3084 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3085 	DPAA2_SET_FLC_REUSE_FF(flc);
3086 
3087 	session->ctxt = priv;
3088 
3089 	return 0;
3090 out:
3091 	rte_free(session->auth_key.data);
3092 	rte_free(session->cipher_key.data);
3093 	rte_free(priv);
3094 	return ret;
3095 }
3096 
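/** Set up a PDCP protocol-offload session (c-plane or u-plane descriptor) */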
3097 static int
3098 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3099 			   struct rte_security_session_conf *conf,
3100 			   void *sess)
3101 {
3102 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3103 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3104 	struct rte_crypto_auth_xform *auth_xform = NULL;
3105 	struct rte_crypto_cipher_xform *cipher_xform;
3106 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3107 	struct ctxt_priv *priv;
3108 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3109 	struct alginfo authdata, cipherdata;
3110 	struct alginfo *p_authdata = NULL;
3111 	int bufsize = -1;
3112 	struct sec_flow_context *flc;
3113 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3114 	int swap = true;
3115 #else
3116 	int swap = false;
3117 #endif
3118 
3119 	PMD_INIT_FUNC_TRACE();
3120 
3121 	memset(session, 0, sizeof(dpaa2_sec_session));
3122 
3123 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3124 				sizeof(struct ctxt_priv) +
3125 				sizeof(struct sec_flc_desc),
3126 				RTE_CACHE_LINE_SIZE);
3127 
3128 	if (priv == NULL) {
3129 		DPAA2_SEC_ERR("No memory for priv CTXT");
3130 		return -ENOMEM;
3131 	}
3132 
3133 	priv->fle_pool = dev_priv->fle_pool;
3134 	flc = &priv->flc_desc[0].flc;
3135 
3136 	/* find xfrm types */
3137 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
3138 		cipher_xform = &xform->cipher;
3139 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
3140 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3141 		session->ext_params.aead_ctxt.auth_cipher_text = true;
3142 		cipher_xform = &xform->cipher;
3143 		auth_xform = &xform->next->auth;
3144 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
3145 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3146 		session->ext_params.aead_ctxt.auth_cipher_text = false;
3147 		cipher_xform = &xform->next->cipher;
3148 		auth_xform = &xform->auth;
3149 	} else {
3150 		DPAA2_SEC_ERR("Invalid crypto type");
3151 		return -EINVAL;
3152 	}
3153 
3154 	session->ctxt_type = DPAA2_SEC_PDCP;
3155 	if (cipher_xform) {
3156 		session->cipher_key.data = rte_zmalloc(NULL,
3157 					       cipher_xform->key.length,
3158 					       RTE_CACHE_LINE_SIZE);
3159 		if (session->cipher_key.data == NULL &&
3160 				cipher_xform->key.length > 0) {
3161 			DPAA2_SEC_ERR("No Memory for cipher key");
3162 			rte_free(priv);
3163 			return -ENOMEM;
3164 		}
3165 		session->cipher_key.length = cipher_xform->key.length;
3166 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3167 			cipher_xform->key.length);
3168 		session->dir =
3169 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3170 					DIR_ENC : DIR_DEC;
3171 		session->cipher_alg = cipher_xform->algo;
3172 	} else {
3173 		session->cipher_key.data = NULL;
3174 		session->cipher_key.length = 0;
3175 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3176 		session->dir = DIR_ENC;
3177 	}
3178 
3179 	session->pdcp.domain = pdcp_xform->domain;
3180 	session->pdcp.bearer = pdcp_xform->bearer;
3181 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3182 	session->pdcp.sn_size = pdcp_xform->sn_size;
3183 	session->pdcp.hfn = pdcp_xform->hfn;
3184 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3185 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3186 	/* The HFN override offset location is stored in the iv.offset value */
3187 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3188 
3189 	cipherdata.key = (size_t)session->cipher_key.data;
3190 	cipherdata.keylen = session->cipher_key.length;
3191 	cipherdata.key_enc_flags = 0;
3192 	cipherdata.key_type = RTA_DATA_IMM;
3193 
3194 	switch (session->cipher_alg) {
3195 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3196 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3197 		break;
3198 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3199 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3200 		break;
3201 	case RTE_CRYPTO_CIPHER_AES_CTR:
3202 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3203 		break;
3204 	case RTE_CRYPTO_CIPHER_NULL:
3205 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3206 		break;
3207 	default:
3208 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3209 			      session->cipher_alg);
3210 		goto out;
3211 	}
3212 
3213 	if (auth_xform) {
3214 		session->auth_key.data = rte_zmalloc(NULL,
3215 						     auth_xform->key.length,
3216 						     RTE_CACHE_LINE_SIZE);
3217 		if (!session->auth_key.data &&
3218 		    auth_xform->key.length > 0) {
3219 			DPAA2_SEC_ERR("No Memory for auth key");
3220 			rte_free(session->cipher_key.data);
3221 			rte_free(priv);
3222 			return -ENOMEM;
3223 		}
3224 		session->auth_key.length = auth_xform->key.length;
3225 		memcpy(session->auth_key.data, auth_xform->key.data,
3226 		       auth_xform->key.length);
3227 		session->auth_alg = auth_xform->algo;
3228 	} else {
3229 		session->auth_key.data = NULL;
3230 		session->auth_key.length = 0;
3231 		session->auth_alg = 0;
3232 	}
3233 	authdata.key = (size_t)session->auth_key.data;
3234 	authdata.keylen = session->auth_key.length;
3235 	authdata.key_enc_flags = 0;
3236 	authdata.key_type = RTA_DATA_IMM;
3237 
3238 	if (session->auth_alg) {
3239 		switch (session->auth_alg) {
3240 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3241 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3242 			break;
3243 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3244 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3245 			break;
3246 		case RTE_CRYPTO_AUTH_AES_CMAC:
3247 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3248 			break;
3249 		case RTE_CRYPTO_AUTH_NULL:
3250 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3251 			break;
3252 		default:
3253 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3254 				      session->auth_alg);
3255 			goto out;
3256 		}
3257 
3258 		p_authdata = &authdata;
3259 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3260 		DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3261 		goto out;
3262 	}
3263 
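	/*
	 * If the PDCP descriptor for this algorithm/SN-size combination
	 * cannot carry the cipher key inline, reference it by IOVA pointer
	 * instead.
	 */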
3264 	if (rta_inline_pdcp_query(authdata.algtype,
3265 				cipherdata.algtype,
3266 				session->pdcp.sn_size,
3267 				session->pdcp.hfn_ovd)) {
3268 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3269 		cipherdata.key_type = RTA_DATA_PTR;
3270 	}
3271 
3272 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3273 		if (session->dir == DIR_ENC)
3274 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3275 					priv->flc_desc[0].desc, 1, swap,
3276 					pdcp_xform->hfn,
3277 					session->pdcp.sn_size,
3278 					pdcp_xform->bearer,
3279 					pdcp_xform->pkt_dir,
3280 					pdcp_xform->hfn_threshold,
3281 					&cipherdata, &authdata,
3282 					0);
3283 		else if (session->dir == DIR_DEC)
3284 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3285 					priv->flc_desc[0].desc, 1, swap,
3286 					pdcp_xform->hfn,
3287 					session->pdcp.sn_size,
3288 					pdcp_xform->bearer,
3289 					pdcp_xform->pkt_dir,
3290 					pdcp_xform->hfn_threshold,
3291 					&cipherdata, &authdata,
3292 					0);
3293 	} else {
3294 		if (session->dir == DIR_ENC) {
3295 			if (pdcp_xform->sdap_enabled)
3296 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3297 					priv->flc_desc[0].desc, 1, swap,
3298 					session->pdcp.sn_size,
3299 					pdcp_xform->hfn,
3300 					pdcp_xform->bearer,
3301 					pdcp_xform->pkt_dir,
3302 					pdcp_xform->hfn_threshold,
3303 					&cipherdata, p_authdata, 0);
3304 			else
3305 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3306 					priv->flc_desc[0].desc, 1, swap,
3307 					session->pdcp.sn_size,
3308 					pdcp_xform->hfn,
3309 					pdcp_xform->bearer,
3310 					pdcp_xform->pkt_dir,
3311 					pdcp_xform->hfn_threshold,
3312 					&cipherdata, p_authdata, 0);
3313 		} else if (session->dir == DIR_DEC) {
3314 			if (pdcp_xform->sdap_enabled)
3315 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3316 					priv->flc_desc[0].desc, 1, swap,
3317 					session->pdcp.sn_size,
3318 					pdcp_xform->hfn,
3319 					pdcp_xform->bearer,
3320 					pdcp_xform->pkt_dir,
3321 					pdcp_xform->hfn_threshold,
3322 					&cipherdata, p_authdata, 0);
3323 			else
3324 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3325 					priv->flc_desc[0].desc, 1, swap,
3326 					session->pdcp.sn_size,
3327 					pdcp_xform->hfn,
3328 					pdcp_xform->bearer,
3329 					pdcp_xform->pkt_dir,
3330 					pdcp_xform->hfn_threshold,
3331 					&cipherdata, p_authdata, 0);
3332 		}
3333 	}
3334 
3335 	if (bufsize < 0) {
3336 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3337 		goto out;
3338 	}
3339 
3340 	/* Enable the stashing control bit */
3341 	DPAA2_SET_FLC_RSC(flc);
3342 	flc->word2_rflc_31_0 = lower_32_bits(
3343 			(size_t)&(((struct dpaa2_sec_qp *)
3344 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3345 	flc->word3_rflc_63_32 = upper_32_bits(
3346 			(size_t)&(((struct dpaa2_sec_qp *)
3347 			dev->data->queue_pairs[0])->rx_vq));
3348 
3349 	flc->word1_sdl = (uint8_t)bufsize;
3350 
3351 	/* TODO - check the performance impact, or
3352 	 * align as per the descriptor type.
3353 	 * Set EWS bit i.e. enable write-safe:
3354 	 * DPAA2_SET_FLC_EWS(flc);
3355 	 */
3356 
3357 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3358 	DPAA2_SET_FLC_REUSE_BS(flc);
3359 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3360 	DPAA2_SET_FLC_REUSE_FF(flc);
3361 
3362 	session->ctxt = priv;
3363 
3364 	return 0;
3365 out:
3366 	rte_free(session->auth_key.data);
3367 	rte_free(session->cipher_key.data);
3368 	rte_free(priv);
3369 	return -EINVAL;
3370 }
3371 
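/** Create a security session (IPsec/PDCP) via the rte_security API */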
3372 static int
3373 dpaa2_sec_security_session_create(void *dev,
3374 				  struct rte_security_session_conf *conf,
3375 				  struct rte_security_session *sess,
3376 				  struct rte_mempool *mempool)
3377 {
3378 	void *sess_private_data;
3379 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3380 	int ret;
3381 
3382 	if (rte_mempool_get(mempool, &sess_private_data)) {
3383 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3384 		return -ENOMEM;
3385 	}
3386 
3387 	switch (conf->protocol) {
3388 	case RTE_SECURITY_PROTOCOL_IPSEC:
3389 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3390 				sess_private_data);
3391 		break;
3392 	case RTE_SECURITY_PROTOCOL_MACSEC:
3393 		ret = -ENOTSUP;
3393 		break;
3394 	case RTE_SECURITY_PROTOCOL_PDCP:
3395 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3396 				sess_private_data);
3397 		break;
3398 	default:
3399 		ret = -EINVAL;
3399 		break;
3400 	}
3401 	if (ret != 0) {
3402 		DPAA2_SEC_ERR("Failed to configure session parameters");
3403 		/* Return session to mempool */
3404 		rte_mempool_put(mempool, sess_private_data);
3405 		return ret;
3406 	}
3407 
3408 	set_sec_session_private_data(sess, sess_private_data);
3409 
3410 	return ret;
3411 }
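
/* A minimal application-side sketch (hedged: names per the rte_security API
 * of this DPDK generation; all xform values are illustrative) of how a PDCP
 * session reaches dpaa2_sec_security_session_create():
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.bearer = 0x3,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_xform,	(chained cipher/auth xforms)
 *	};
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
 */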
3412 
3413 /** Clear the memory of the session so it doesn't leave key material behind */
3414 static int
3415 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3416 		struct rte_security_session *sess)
3417 {
3418 	void *sess_priv = get_sec_session_private_data(sess);
3419 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3420 
3421 	PMD_INIT_FUNC_TRACE();
3422 
3423 	if (sess_priv) {
3424 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3425 
3426 		rte_free(s->ctxt);
3427 		rte_free(s->cipher_key.data);
3428 		rte_free(s->auth_key.data);
3429 		memset(s, 0, sizeof(dpaa2_sec_session));
3430 		set_sec_session_private_data(sess, NULL);
3431 		rte_mempool_put(sess_mp, sess_priv);
3432 	}
3433 	return 0;
3434 }
3435 #endif

3436 static int
3437 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3438 		struct rte_crypto_sym_xform *xform,
3439 		struct rte_cryptodev_sym_session *sess,
3440 		struct rte_mempool *mempool)
3441 {
3442 	void *sess_private_data;
3443 	int ret;
3444 
3445 	if (rte_mempool_get(mempool, &sess_private_data)) {
3446 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3447 		return -ENOMEM;
3448 	}
3449 
3450 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3451 	if (ret != 0) {
3452 		DPAA2_SEC_ERR("Failed to configure session parameters");
3453 		/* Return session to mempool */
3454 		rte_mempool_put(mempool, sess_private_data);
3455 		return ret;
3456 	}
3457 
3458 	set_sym_session_private_data(sess, dev->driver_id,
3459 		sess_private_data);
3460 
3461 	return 0;
3462 }
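
/* Hedged sketch of the application flow that lands in this callback (API
 * names as in this DPDK generation):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp))
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *
 * rte_cryptodev_sym_session_init() dispatches here through
 * crypto_ops.sym_session_configure below.
 */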
3463 
3464 /** Clear the memory of the session so it doesn't leave key material behind */
3465 static void
3466 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3467 		struct rte_cryptodev_sym_session *sess)
3468 {
3469 	uint8_t index = dev->driver_id;
3470 	void *sess_priv = get_sym_session_private_data(sess, index);
3471 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3472 	PMD_INIT_FUNC_TRACE();
3473 
3474 	if (sess_priv) {
3475 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3476 		rte_free(s->ctxt);
3477 		rte_free(s->cipher_key.data);
3478 		rte_free(s->auth_key.data);
3479 		memset(s, 0, sizeof(dpaa2_sec_session));
3480 		set_sym_session_private_data(sess, index, NULL);
3481 		rte_mempool_put(sess_mp, sess_priv);
3482 	}
3483 }
3484 
3485 static int
3486 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3487 			struct rte_cryptodev_config *config __rte_unused)
3488 {
3489 	PMD_INIT_FUNC_TRACE();
3490 
3491 	return 0;
3492 }
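
/* dev_configure is a no-op for this PMD; queues are configured per queue
 * pair. An illustrative (not PMD-mandated) application sequence:
 *
 *	struct rte_cryptodev_config cfg = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = priv_mp,
 *	};
 *	rte_cryptodev_configure(dev_id, &cfg);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */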
3493 
3494 static int
3495 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3496 {
3497 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3498 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3499 	struct dpseci_attr attr;
3500 	struct dpaa2_queue *dpaa2_q;
3501 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3502 					dev->data->queue_pairs;
3503 	struct dpseci_rx_queue_attr rx_attr;
3504 	struct dpseci_tx_queue_attr tx_attr;
3505 	int ret, i;
3506 
3507 	PMD_INIT_FUNC_TRACE();
3508 
3509 	memset(&attr, 0, sizeof(struct dpseci_attr));
3510 
3511 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3512 	if (ret) {
3513 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3514 			      priv->hw_id);
3515 		goto get_attr_failure;
3516 	}
3517 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3518 	if (ret) {
3519 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3520 		goto get_attr_failure;
3521 	}
3522 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3523 		dpaa2_q = &qp[i]->rx_vq;
3524 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3525 				    &rx_attr);
3526 		dpaa2_q->fqid = rx_attr.fqid;
3527 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3528 	}
3529 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3530 		dpaa2_q = &qp[i]->tx_vq;
3531 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3532 				    &tx_attr);
3533 		dpaa2_q->fqid = tx_attr.fqid;
3534 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3535 	}
3536 
3537 	return 0;
3538 get_attr_failure:
3539 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3540 	return ret;
3541 }
3542 
3543 static void
3544 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3545 {
3546 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3547 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3548 	int ret;
3549 
3550 	PMD_INIT_FUNC_TRACE();
3551 
3552 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3553 	if (ret) {
3554 		DPAA2_SEC_ERR("Failed to disable dpseci device with HW_ID = %d",
3555 			      priv->hw_id);
3556 		return;
3557 	}
3558 
3559 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3560 	if (ret < 0) {
3561 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = 0x%x", ret);
3562 		return;
3563 	}
3564 }
3565 
3566 static int
3567 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3568 {
3569 	PMD_INIT_FUNC_TRACE();
3570 
3571 	return 0;
3572 }
3573 
3574 static void
3575 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3576 			struct rte_cryptodev_info *info)
3577 {
3578 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3579 
3580 	PMD_INIT_FUNC_TRACE();
3581 	if (info != NULL) {
3582 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3583 		info->feature_flags = dev->feature_flags;
3584 		info->capabilities = dpaa2_sec_capabilities;
3585 		/* No limit of number of sessions */
3586 		info->sym.max_nb_sessions = 0;
3587 		info->driver_id = cryptodev_driver_id;
3588 	}
3589 }
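
/* Applications read these values through the generic wrapper, e.g.:
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	uint16_t nb_qps = info.max_nb_queue_pairs;
 */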
3590 
3591 static void
3592 dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3593 			 struct rte_cryptodev_stats *stats)
3594 {
3595 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3596 	struct fsl_mc_io dpseci;
3597 	struct dpseci_sec_counters counters = {0};
3598 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3599 					dev->data->queue_pairs;
3600 	int ret, i;
3601 
3602 	PMD_INIT_FUNC_TRACE();
3603 	if (stats == NULL) {
3604 		DPAA2_SEC_ERR("Invalid stats pointer (NULL)");
3605 		return;
3606 	}
3607 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3608 		if (qp == NULL || qp[i] == NULL) {
3609 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3610 			continue;
3611 		}
3612 
3613 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3614 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3615 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3616 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3617 	}
3618 
3619 	/* If a secondary process accesses the stats, the MCP portal in
3620 	 * priv->hw may hold the primary process address. Use the secondary
3621 	 * process based MCP portal address for this object instead.
3622 	 */
3623 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3624 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3625 				      &counters);
3626 	if (ret) {
3627 		DPAA2_SEC_ERR("Reading SEC counters failed: Error = %d", ret);
3628 	} else {
3629 		DPAA2_SEC_INFO("dpseci hardware stats:"
3630 			    "\n\tNum of Requests Dequeued = %" PRIu64
3631 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3632 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3633 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3634 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3635 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3636 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3637 			    counters.dequeued_requests,
3638 			    counters.ob_enc_requests,
3639 			    counters.ib_dec_requests,
3640 			    counters.ob_enc_bytes,
3641 			    counters.ob_prot_bytes,
3642 			    counters.ib_dec_bytes,
3643 			    counters.ib_valid_bytes);
3644 	}
3645 }
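
/* Only the software per-queue counters above feed rte_cryptodev_stats_get();
 * the dpseci hardware counters are just logged. Illustrative usage:
 *
 *	struct rte_cryptodev_stats stats;
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count);
 */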
3646 
3647 static void
3648 dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3649 {
3650 	int i;
3651 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3652 				   (dev->data->queue_pairs);
3653 
3654 	PMD_INIT_FUNC_TRACE();
3655 
3656 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3657 		if (qp == NULL || qp[i] == NULL) {
3658 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3659 			continue;
3660 		}
3661 		qp[i]->tx_vq.rx_pkts = 0;
3662 		qp[i]->tx_vq.tx_pkts = 0;
3663 		qp[i]->tx_vq.err_pkts = 0;
3664 		qp[i]->rx_vq.rx_pkts = 0;
3665 		qp[i]->rx_vq.tx_pkts = 0;
3666 		qp[i]->rx_vq.err_pkts = 0;
3667 	}
3668 }
3669 
3670 static void __rte_hot
3671 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3672 				 const struct qbman_fd *fd,
3673 				 const struct qbman_result *dq,
3674 				 struct dpaa2_queue *rxq,
3675 				 struct rte_event *ev)
3676 {
3677 	/* Prefetching mbuf */
3678 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3679 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3680 
3681 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3682 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3683 
3684 	ev->flow_id = rxq->ev.flow_id;
3685 	ev->sub_event_type = rxq->ev.sub_event_type;
3686 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3687 	ev->op = RTE_EVENT_OP_NEW;
3688 	ev->sched_type = rxq->ev.sched_type;
3689 	ev->queue_id = rxq->ev.queue_id;
3690 	ev->priority = rxq->ev.priority;
3691 	ev->event_ptr = sec_fd_to_mbuf(fd);
3692 
3693 	qbman_swp_dqrr_consume(swp, dq);
3694 }

3695 static void __rte_hot
3696 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3697 				 const struct qbman_fd *fd,
3698 				 const struct qbman_result *dq,
3699 				 struct dpaa2_queue *rxq,
3700 				 struct rte_event *ev)
3701 {
3702 	uint8_t dqrr_index;
3703 	struct rte_crypto_op *crypto_op;
3704 	/* Prefetching mbuf */
3705 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3706 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3707 
3708 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3709 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3710 
3711 	ev->flow_id = rxq->ev.flow_id;
3712 	ev->sub_event_type = rxq->ev.sub_event_type;
3713 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3714 	ev->op = RTE_EVENT_OP_NEW;
3715 	ev->sched_type = rxq->ev.sched_type;
3716 	ev->queue_id = rxq->ev.queue_id;
3717 	ev->priority = rxq->ev.priority;
3718 
3719 	ev->event_ptr = sec_fd_to_mbuf(fd);
3719 	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3720 	dqrr_index = qbman_get_dqrr_idx(dq);
3721 	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
3722 	DPAA2_PER_LCORE_DQRR_SIZE++;
3723 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3724 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3725 }
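
/* Note: unlike the parallel handler, the atomic handler does not consume the
 * DQRR entry here. Its index is parked (1-based) in the mbuf sequence number
 * and in the per-lcore DQRR_HELD bitmap, so the hardware keeps the flow
 * context held until the entry is consumed on a later enqueue, preserving
 * atomic ordering per flow.
 */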
3726 
3727 int
3728 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3729 		int qp_id,
3730 		struct dpaa2_dpcon_dev *dpcon,
3731 		const struct rte_event *event)
3732 {
3733 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3734 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3735 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3736 	struct dpseci_rx_queue_cfg cfg;
3737 	uint8_t priority;
3738 	int ret;
3739 
3740 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3741 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3742 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3743 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3744 	else
3745 		return -EINVAL;
3746 
3747 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3748 		   (dpcon->num_priorities - 1);
3749 
3750 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3751 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3752 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3753 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3754 	cfg.dest_cfg.priority = priority;
3755 
3756 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3757 	cfg.user_ctx = (size_t)(qp);
3758 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3759 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3760 		cfg.order_preservation_en = 1;
3761 	}
3762 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3763 				  qp_id, &cfg);
3764 	if (ret) {
3765 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
3766 		return ret;
3767 	}
3768 
3769 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3770 
3771 	return 0;
3772 }
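
/* This attach is normally driven by the event crypto adapter; a hedged
 * sketch with the adapter API of this DPDK generation:
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_qid,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						qp_id, &ev);
 *
 * The priority computation above scales the eventdev priority range onto the
 * DPCON's available priority levels.
 */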
3773 
3774 int
3775 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3776 			int qp_id)
3777 {
3778 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3779 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3780 	struct dpseci_rx_queue_cfg cfg;
3781 	int ret;
3782 
3783 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3784 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3785 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3786 
3787 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3788 				  qp_id, &cfg);
3789 	if (ret)
3790 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
3791 
3792 	return ret;
3793 }
3794 
3795 static struct rte_cryptodev_ops crypto_ops = {
3796 	.dev_configure	      = dpaa2_sec_dev_configure,
3797 	.dev_start	      = dpaa2_sec_dev_start,
3798 	.dev_stop	      = dpaa2_sec_dev_stop,
3799 	.dev_close	      = dpaa2_sec_dev_close,
3800 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3801 	.stats_get	      = dpaa2_sec_stats_get,
3802 	.stats_reset	      = dpaa2_sec_stats_reset,
3803 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3804 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3805 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3806 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3807 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3808 };
3809 
3810 #ifdef RTE_LIB_SECURITY
3811 static const struct rte_security_capability *
3812 dpaa2_sec_capabilities_get(void *device __rte_unused)
3813 {
3814 	return dpaa2_sec_security_cap;
3815 }
3816 
3817 static const struct rte_security_ops dpaa2_sec_security_ops = {
3818 	.session_create = dpaa2_sec_security_session_create,
3819 	.session_update = NULL,
3820 	.session_stats_get = NULL,
3821 	.session_destroy = dpaa2_sec_security_session_destroy,
3822 	.set_pkt_metadata = NULL,
3823 	.capabilities_get = dpaa2_sec_capabilities_get
3824 };
3825 #endif
3826 
3827 static int
3828 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3829 {
3830 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3831 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3832 	int ret;
3833 
3834 	PMD_INIT_FUNC_TRACE();
3835 
3836 	/* This function is the reverse of dpaa2_sec_dev_init.
3837 	 * It does the following:
3838 	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools and dpbp_id
3839 	 * 2. Close the DPSECI device
3840 	 * 3. Free the allocated resources
3841 	 */
3842 
3843 	/* Close the device at the underlying layer */
3844 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3845 	if (ret) {
3846 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3847 		return -1;
3848 	}
3849 
3850 	/* Free the dpseci object, the security context and the FLE mempool */
3851 	priv->hw = NULL;
3852 	rte_free(dpseci);
3853 	rte_free(dev->security_ctx);
3854 	rte_mempool_free(priv->fle_pool);
3855 
3856 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3857 		       dev->data->name, rte_socket_id());
3858 
3859 	return 0;
3860 }
3861 
3862 static int
3863 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3864 {
3865 	struct dpaa2_sec_dev_private *internals;
3866 	struct rte_device *dev = cryptodev->device;
3867 	struct rte_dpaa2_device *dpaa2_dev;
3868 #ifdef RTE_LIB_SECURITY
3869 	struct rte_security_ctx *security_instance;
3870 #endif
3871 	struct fsl_mc_io *dpseci;
3872 	uint16_t token;
3873 	struct dpseci_attr attr;
3874 	int retcode, hw_id;
3875 	char str[30];
3876 
3877 	PMD_INIT_FUNC_TRACE();
3878 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3879 	hw_id = dpaa2_dev->object_id;
3880 
3881 	cryptodev->driver_id = cryptodev_driver_id;
3882 	cryptodev->dev_ops = &crypto_ops;
3883 
3884 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3885 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3886 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3887 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3888 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3889 			RTE_CRYPTODEV_FF_SECURITY |
3890 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3891 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3892 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3893 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3894 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3895 
3896 	internals = cryptodev->data->dev_private;
3897 
3898 	/*
3899 	 * For secondary processes, we don't initialise any further as primary
3900 	 * has already done this work. Only check we don't need a different
3901 	 * RX function
3902 	 */
3903 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3904 		DPAA2_SEC_DEBUG("Device already init by primary process");
3905 		return 0;
3906 	}
3907 #ifdef RTE_LIB_SECURITY
3908 	/* Initialize security_ctx only for the primary process */
3909 	security_instance = rte_malloc("rte_security_instances_ops",
3910 				sizeof(struct rte_security_ctx), 0);
3911 	if (security_instance == NULL)
3912 		return -ENOMEM;
3913 	security_instance->device = (void *)cryptodev;
3914 	security_instance->ops = &dpaa2_sec_security_ops;
3915 	security_instance->sess_cnt = 0;
3916 	cryptodev->security_ctx = security_instance;
3917 #endif
3918 	/* Open the rte device via MC and save the handle for further use */
3919 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3920 				sizeof(struct fsl_mc_io), 0);
3921 	if (!dpseci) {
3922 		DPAA2_SEC_ERR(
3923 			"Error allocating memory for the dpsec object");
3924 		return -ENOMEM;
3925 	}
3926 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3927 
3928 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3929 	if (retcode != 0) {
3930 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3931 			      retcode);
3932 		goto init_error;
3933 	}
3934 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3935 	if (retcode != 0) {
3936 		DPAA2_SEC_ERR(
3937 			     "Cannot get dpsec device attributes: Error = %x",
3938 			     retcode);
3939 		goto init_error;
3940 	}
3941 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3942 			"dpsec-%u", hw_id);
3943 
3944 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3945 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3946 	internals->hw = dpseci;
3947 	internals->token = token;
3948 
3949 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3950 			getpid(), cryptodev->data->dev_id);
3951 	internals->fle_pool = rte_mempool_create((const char *)str,
3952 			FLE_POOL_NUM_BUFS,
3953 			FLE_POOL_BUF_SIZE,
3954 			FLE_POOL_CACHE_SIZE, 0,
3955 			NULL, NULL, NULL, NULL,
3956 			SOCKET_ID_ANY, 0);
3957 	if (!internals->fle_pool) {
3958 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3959 		goto init_error;
3960 	}
3961 
3962 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3963 	return 0;
3964 
3965 init_error:
3966 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3967 
3968 	/* dpaa2_sec_uninit(crypto_dev_name); */
3969 	return -EFAULT;
3970 }
3971 
3972 static int
3973 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3974 			  struct rte_dpaa2_device *dpaa2_dev)
3975 {
3976 	struct rte_cryptodev *cryptodev;
3977 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3978 
3979 	int retval;
3980 
3981 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3982 			dpaa2_dev->object_id);
3983 
3984 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3985 	if (cryptodev == NULL)
3986 		return -ENOMEM;
3987 
3988 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3989 		cryptodev->data->dev_private = rte_zmalloc_socket(
3990 					"cryptodev private structure",
3991 					sizeof(struct dpaa2_sec_dev_private),
3992 					RTE_CACHE_LINE_SIZE,
3993 					rte_socket_id());
3994 
3995 		if (cryptodev->data->dev_private == NULL)
3996 			rte_panic("Cannot allocate memory for private "
3997 				  "device data");
3998 	}
3999 
4000 	dpaa2_dev->cryptodev = cryptodev;
4001 	cryptodev->device = &dpaa2_dev->device;
4002 
4003 	/* init user callbacks */
4004 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
4005 
4006 	if (dpaa2_svr_family == SVR_LX2160A)
4007 		rta_set_sec_era(RTA_SEC_ERA_10);
4008 	else
4009 		rta_set_sec_era(RTA_SEC_ERA_8);
4010 
4011 	DPAA2_SEC_INFO("DPAA2 SEC ERA is %d", rta_get_sec_era());
4012 
4013 	/* Invoke PMD device initialization function */
4014 	retval = dpaa2_sec_dev_init(cryptodev);
4015 	if (retval == 0)
4016 		return 0;
4017 
4018 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4019 		rte_free(cryptodev->data->dev_private);
4020 
4021 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4022 
4023 	return -ENXIO;
4024 }
4025 
4026 static int
4027 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4028 {
4029 	struct rte_cryptodev *cryptodev;
4030 	int ret;
4031 
4032 	cryptodev = dpaa2_dev->cryptodev;
4033 	if (cryptodev == NULL)
4034 		return -ENODEV;
4035 
4036 	ret = dpaa2_sec_uninit(cryptodev);
4037 	if (ret)
4038 		return ret;
4039 
4040 	return rte_cryptodev_pmd_destroy(cryptodev);
4041 }
4042 
4043 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4044 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4045 	.drv_type = DPAA2_CRYPTO,
4046 	.driver = {
4047 		.name = "DPAA2 SEC PMD"
4048 	},
4049 	.probe = cryptodev_dpaa2_sec_probe,
4050 	.remove = cryptodev_dpaa2_sec_remove,
4051 };
4052 
4053 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4054 
4055 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4056 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4057 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4058 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4059