/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* Required types */
typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0
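
/*
 * Layout note (descriptive summary of the build_*_fd helpers below, not an
 * authoritative hardware spec): each compound frame descriptor points to a
 * frame list in which the first FLE is used purely for bookkeeping (it
 * stores the rte_crypto_op pointer and the session context so they can be
 * recovered on dequeue), the second FLE describes the output buffer, the
 * third FLE describes the input buffer, and any scatter/gather entries
 * follow from the fourth entry onwards.
 */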

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
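
/*
 * Illustrative sketch only (assumption: not part of the driver and never
 * called): how the rte_crypto_op stashed in buf_iova by build_proto_fd()
 * can be recovered on the dequeue side. It mirrors what
 * sec_simple_fd_to_mbuf() does later in this file; the helper name is
 * hypothetical.
 */
static inline struct rte_crypto_op * __rte_unused
example_recover_proto_op(struct rte_mbuf *mbuf)
{
	/* buf_iova was overwritten with the rte_crypto_op pointer... */
	struct rte_crypto_op *op =
		(struct rte_crypto_op *)(size_t)mbuf->buf_iova;

	/* ...while the real IOVA was parked in the digest field */
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0;

	return op;
}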

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
								auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							NULL,
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD
	 * address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * The SWP also appears to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}
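
/*
 * Illustrative sketch only (assumption: not part of the driver): how the
 * two burst routines above pair up. Applications reach them indirectly via
 * rte_cryptodev_enqueue_burst()/rte_cryptodev_dequeue_burst(); the direct
 * calls and the busy-poll retry policy here are hypothetical.
 */
static inline void __rte_unused
example_process_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent = dpaa2_sec_enqueue_burst(qp, ops, nb_ops);
	uint16_t recvd = 0;

	/* Busy-poll until every accepted op has been dequeued again */
	while (recvd < sent)
		recvd += dpaa2_sec_dequeue_burst(qp, &ops[recvd],
						 sent - recvd);
}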

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, there is nothing more to do. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
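
/*
 * Illustrative usage sketch only (assumptions: hypothetical helper name,
 * device id and descriptor count; the rte_cryptodev_queue_pair_setup()
 * signature with a session mempool argument is the one from this DPDK era):
 * how an application would typically reach the setup hook above.
 */
static inline int __rte_unused
example_setup_qp0(uint8_t dev_id, struct rte_mempool *sess_pool)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,	/* hypothetical ring depth */
	};

	return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					      SOCKET_ID_ANY, sess_pool);
}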

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
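
/*
 * Illustrative sketch only (assumptions: not part of the driver; the key
 * buffer, lengths and algorithm choice are hypothetical): the subset of
 * the cipher transform that dpaa2_sec_cipher_init() above consumes.
 */
static inline void __rte_unused
example_fill_cipher_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next = NULL;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.key.data = key;
	xform->cipher.key.length = 16;	/* AES-128 */
	/* the per-op IV lives in the op's private area at this offset */
	xform->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				  sizeof(struct rte_crypto_sym_op);
	xform->cipher.iv.length = 16;
}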

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);


	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
1753 
1754 static int
1755 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1756 		    struct rte_crypto_sym_xform *xform,
1757 		    dpaa2_sec_session *session)
1758 {
1759 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1760 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1761 	struct alginfo aeaddata;
1762 	int bufsize, i;
1763 	struct ctxt_priv *priv;
1764 	struct sec_flow_context *flc;
1765 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1766 	int err;
1767 
1768 	PMD_INIT_FUNC_TRACE();
1769 
1770 	/* Set IV parameters */
1771 	session->iv.offset = aead_xform->iv.offset;
1772 	session->iv.length = aead_xform->iv.length;
1773 	session->ctxt_type = DPAA2_SEC_AEAD;
1774 
1775 	/* For SEC AEAD only one descriptor is required */
1776 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1777 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1778 			RTE_CACHE_LINE_SIZE);
1779 	if (priv == NULL) {
1780 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1781 		return -1;
1782 	}
1783 
1784 	priv->fle_pool = dev_priv->fle_pool;
1785 	flc = &priv->flc_desc[0].flc;
1786 
1787 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1788 					       RTE_CACHE_LINE_SIZE);
1789 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1790 		DPAA2_SEC_ERR("No Memory for aead key");
1791 		rte_free(priv);
1792 		return -1;
1793 	}
1794 	memcpy(session->aead_key.data, aead_xform->key.data,
1795 	       aead_xform->key.length);
1796 
1797 	session->digest_length = aead_xform->digest_length;
1798 	session->aead_key.length = aead_xform->key.length;
1799 	ctxt->auth_only_len = aead_xform->aad_length;
1800 
1801 	aeaddata.key = (size_t)session->aead_key.data;
1802 	aeaddata.keylen = session->aead_key.length;
1803 	aeaddata.key_enc_flags = 0;
1804 	aeaddata.key_type = RTA_DATA_IMM;
1805 
1806 	switch (aead_xform->algo) {
1807 	case RTE_CRYPTO_AEAD_AES_GCM:
1808 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
1809 		aeaddata.algmode = OP_ALG_AAI_GCM;
1810 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1811 		break;
1812 	case RTE_CRYPTO_AEAD_AES_CCM:
1813 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1814 			      aead_xform->algo);
1815 		goto error_out;
1816 	default:
1817 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1818 			      aead_xform->algo);
1819 		goto error_out;
1820 	}
1821 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1822 				DIR_ENC : DIR_DEC;
1823 
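	/*
	 * rta_inline_query() reads the key length staged in desc[0] and sets
	 * a bit in desc[1] when the key is small enough to be inlined in the
	 * shared descriptor; both scratch words are cleared again before the
	 * real descriptor is built.
	 */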
1824 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
1825 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1826 			       MIN_JOB_DESC_SIZE,
1827 			       (unsigned int *)priv->flc_desc[0].desc,
1828 			       &priv->flc_desc[0].desc[1], 1);
1829 
1830 	if (err < 0) {
1831 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1832 		goto error_out;
1833 	}
1834 	if (priv->flc_desc[0].desc[1] & 1) {
1835 		aeaddata.key_type = RTA_DATA_IMM;
1836 	} else {
1837 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1838 		aeaddata.key_type = RTA_DATA_PTR;
1839 	}
1840 	priv->flc_desc[0].desc[0] = 0;
1841 	priv->flc_desc[0].desc[1] = 0;
1842 
1843 	if (session->dir == DIR_ENC)
1844 		bufsize = cnstr_shdsc_gcm_encap(
1845 				priv->flc_desc[0].desc, 1, 0,
1846 				&aeaddata, session->iv.length,
1847 				session->digest_length);
1848 	else
1849 		bufsize = cnstr_shdsc_gcm_decap(
1850 				priv->flc_desc[0].desc, 1, 0,
1851 				&aeaddata, session->iv.length,
1852 				session->digest_length);
1853 	if (bufsize < 0) {
1854 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1855 		goto error_out;
1856 	}
1857 
1858 	flc->word1_sdl = (uint8_t)bufsize;
1859 	flc->word2_rflc_31_0 = lower_32_bits(
1860 			(size_t)&(((struct dpaa2_sec_qp *)
1861 			dev->data->queue_pairs[0])->rx_vq));
1862 	flc->word3_rflc_63_32 = upper_32_bits(
1863 			(size_t)&(((struct dpaa2_sec_qp *)
1864 			dev->data->queue_pairs[0])->rx_vq));
1865 	session->ctxt = priv;
1866 	for (i = 0; i < bufsize; i++)
1867 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1868 				i, priv->flc_desc[0].desc[i]);
1869 
1870 	return 0;
1871 
1872 error_out:
1873 	rte_free(session->aead_key.data);
1874 	rte_free(priv);
1875 	return -1;
1876 }
1877 
1878 
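/*
 * Session setup for a chained cipher+auth transform (e.g. AES-CBC with
 * HMAC-SHA1): both keys are checked for descriptor inlining, and only
 * the cipher-then-hash order is supported by cnstr_shdsc_authenc().
 */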
1879 static int
1880 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1881 		    struct rte_crypto_sym_xform *xform,
1882 		    dpaa2_sec_session *session)
1883 {
1884 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1885 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1886 	struct alginfo authdata, cipherdata;
1887 	int bufsize, i;
1888 	struct ctxt_priv *priv;
1889 	struct sec_flow_context *flc;
1890 	struct rte_crypto_cipher_xform *cipher_xform;
1891 	struct rte_crypto_auth_xform *auth_xform;
1892 	int err;
1893 
1894 	PMD_INIT_FUNC_TRACE();
1895 
1896 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
1897 		cipher_xform = &xform->cipher;
1898 		auth_xform = &xform->next->auth;
1899 		session->ctxt_type =
1900 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1901 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1902 	} else {
1903 		cipher_xform = &xform->next->cipher;
1904 		auth_xform = &xform->auth;
1905 		session->ctxt_type =
1906 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1907 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1908 	}
1909 
1910 	/* Set IV parameters */
1911 	session->iv.offset = cipher_xform->iv.offset;
1912 	session->iv.length = cipher_xform->iv.length;
1913 
1914 	/* For SEC AEAD only one descriptor is required */
1915 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1916 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1917 			RTE_CACHE_LINE_SIZE);
1918 	if (priv == NULL) {
1919 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1920 		return -1;
1921 	}
1922 
1923 	priv->fle_pool = dev_priv->fle_pool;
1924 	flc = &priv->flc_desc[0].flc;
1925 
1926 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1927 					       RTE_CACHE_LINE_SIZE);
1928 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1929 		DPAA2_SEC_ERR("No Memory for cipher key");
1930 		rte_free(priv);
1931 		return -1;
1932 	}
1933 	session->cipher_key.length = cipher_xform->key.length;
1934 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1935 					     RTE_CACHE_LINE_SIZE);
1936 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1937 		DPAA2_SEC_ERR("No Memory for auth key");
1938 		rte_free(session->cipher_key.data);
1939 		rte_free(priv);
1940 		return -1;
1941 	}
1942 	session->auth_key.length = auth_xform->key.length;
1943 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1944 	       cipher_xform->key.length);
1945 	memcpy(session->auth_key.data, auth_xform->key.data,
1946 	       auth_xform->key.length);
1947 
1948 	authdata.key = (size_t)session->auth_key.data;
1949 	authdata.keylen = session->auth_key.length;
1950 	authdata.key_enc_flags = 0;
1951 	authdata.key_type = RTA_DATA_IMM;
1952 
1953 	session->digest_length = auth_xform->digest_length;
1954 
1955 	switch (auth_xform->algo) {
1956 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1957 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1958 		authdata.algmode = OP_ALG_AAI_HMAC;
1959 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1960 		break;
1961 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1962 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1963 		authdata.algmode = OP_ALG_AAI_HMAC;
1964 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1965 		break;
1966 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1967 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
1968 		authdata.algmode = OP_ALG_AAI_HMAC;
1969 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1970 		break;
1971 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1972 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
1973 		authdata.algmode = OP_ALG_AAI_HMAC;
1974 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1975 		break;
1976 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1977 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
1978 		authdata.algmode = OP_ALG_AAI_HMAC;
1979 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1980 		break;
1981 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1982 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
1983 		authdata.algmode = OP_ALG_AAI_HMAC;
1984 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1985 		break;
1986 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1987 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1988 	case RTE_CRYPTO_AUTH_NULL:
1989 	case RTE_CRYPTO_AUTH_SHA1:
1990 	case RTE_CRYPTO_AUTH_SHA256:
1991 	case RTE_CRYPTO_AUTH_SHA512:
1992 	case RTE_CRYPTO_AUTH_SHA224:
1993 	case RTE_CRYPTO_AUTH_SHA384:
1994 	case RTE_CRYPTO_AUTH_MD5:
1995 	case RTE_CRYPTO_AUTH_AES_GMAC:
1996 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1997 	case RTE_CRYPTO_AUTH_AES_CMAC:
1998 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1999 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2000 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2001 			      auth_xform->algo);
2002 		goto error_out;
2003 	default:
2004 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2005 			      auth_xform->algo);
2006 		goto error_out;
2007 	}
2008 	cipherdata.key = (size_t)session->cipher_key.data;
2009 	cipherdata.keylen = session->cipher_key.length;
2010 	cipherdata.key_enc_flags = 0;
2011 	cipherdata.key_type = RTA_DATA_IMM;
2012 
2013 	switch (cipher_xform->algo) {
2014 	case RTE_CRYPTO_CIPHER_AES_CBC:
2015 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2016 		cipherdata.algmode = OP_ALG_AAI_CBC;
2017 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2018 		break;
2019 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2020 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2021 		cipherdata.algmode = OP_ALG_AAI_CBC;
2022 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2023 		break;
2024 	case RTE_CRYPTO_CIPHER_AES_CTR:
2025 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2026 		cipherdata.algmode = OP_ALG_AAI_CTR;
2027 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2028 		break;
2029 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2030 	case RTE_CRYPTO_CIPHER_NULL:
2031 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2032 	case RTE_CRYPTO_CIPHER_AES_ECB:
2033 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2034 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2035 			      cipher_xform->algo);
2036 		goto error_out;
2037 	default:
2038 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2039 			      cipher_xform->algo);
2040 		goto error_out;
2041 	}
2042 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2043 				DIR_ENC : DIR_DEC;
2044 
2045 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2046 	priv->flc_desc[0].desc[1] = authdata.keylen;
2047 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2048 			       MIN_JOB_DESC_SIZE,
2049 			       (unsigned int *)priv->flc_desc[0].desc,
2050 			       &priv->flc_desc[0].desc[2], 2);
2051 
2052 	if (err < 0) {
2053 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2054 		goto error_out;
2055 	}
2056 	if (priv->flc_desc[0].desc[2] & 1) {
2057 		cipherdata.key_type = RTA_DATA_IMM;
2058 	} else {
2059 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2060 		cipherdata.key_type = RTA_DATA_PTR;
2061 	}
2062 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2063 		authdata.key_type = RTA_DATA_IMM;
2064 	} else {
2065 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2066 		authdata.key_type = RTA_DATA_PTR;
2067 	}
2068 	priv->flc_desc[0].desc[0] = 0;
2069 	priv->flc_desc[0].desc[1] = 0;
2070 	priv->flc_desc[0].desc[2] = 0;
2071 
2072 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2073 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2074 					      0, &cipherdata, &authdata,
2075 					      session->iv.length,
2076 					      ctxt->auth_only_len,
2077 					      session->digest_length,
2078 					      session->dir);
2079 		if (bufsize < 0) {
2080 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2081 			goto error_out;
2082 		}
2083 	} else {
2084 		DPAA2_SEC_ERR("Hash before cipher not supported");
2085 		goto error_out;
2086 	}
2087 
2088 	flc->word1_sdl = (uint8_t)bufsize;
2089 	flc->word2_rflc_31_0 = lower_32_bits(
2090 			(size_t)&(((struct dpaa2_sec_qp *)
2091 			dev->data->queue_pairs[0])->rx_vq));
2092 	flc->word3_rflc_63_32 = upper_32_bits(
2093 			(size_t)&(((struct dpaa2_sec_qp *)
2094 			dev->data->queue_pairs[0])->rx_vq));
2095 	session->ctxt = priv;
2096 	for (i = 0; i < bufsize; i++)
2097 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2098 			    i, priv->flc_desc[0].desc[i]);
2099 
2100 	return 0;
2101 
2102 error_out:
2103 	rte_free(session->cipher_key.data);
2104 	rte_free(session->auth_key.data);
2105 	rte_free(priv);
2106 	return -1;
2107 }
2108 
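/*
 * Parse an rte_crypto_sym_xform chain and dispatch to the matching
 * session-init helper: cipher only, auth only, cipher/auth chains or
 * single-pass AEAD.
 */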
2109 static int
2110 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2111 			    struct rte_crypto_sym_xform *xform,	void *sess)
2112 {
2113 	dpaa2_sec_session *session = sess;
	int ret;
2114 
2115 	PMD_INIT_FUNC_TRACE();
2116 
2117 	if (unlikely(sess == NULL)) {
2118 		DPAA2_SEC_ERR("Invalid session struct");
2119 		return -1;
2120 	}
2121 
2122 	/* Default IV length = 0 */
2123 	session->iv.length = 0;
2124 
2125 	/* Cipher Only */
2126 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2127 		session->ctxt_type = DPAA2_SEC_CIPHER;
2128 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2129 
2130 	/* Authentication Only */
2131 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2132 		   xform->next == NULL) {
2133 		session->ctxt_type = DPAA2_SEC_AUTH;
2134 		ret = dpaa2_sec_auth_init(dev, xform, session);
2135 
2136 	/* Cipher then Authenticate */
2137 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2138 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2139 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2140 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2141 
2142 	/* Authenticate then Cipher */
2143 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2144 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2145 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2146 		ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2147 
2148 	/* AEAD operation for AES-GCM kind of Algorithms */
2149 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2150 		   xform->next == NULL) {
2151 		ret = dpaa2_sec_aead_init(dev, xform, session);
2152 
2153 	} else {
2154 		DPAA2_SEC_ERR("Invalid crypto type");
2155 		return -EINVAL;
2156 	}
2157 
2158 	return ret;
2159 }
2160 
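/*
 * Build an IPsec protocol-offload (rte_security) session: for egress a
 * prototype outer IPv4 header is embedded in the encapsulation PDB,
 * while ingress only needs the decapsulation PDB.
 */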
2161 static int
2162 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2163 			    struct rte_security_session_conf *conf,
2164 			    void *sess)
2165 {
2166 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2167 	struct rte_crypto_auth_xform *auth_xform;
2168 	struct rte_crypto_cipher_xform *cipher_xform;
2169 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2170 	struct ctxt_priv *priv;
2171 	struct ipsec_encap_pdb encap_pdb;
2172 	struct ipsec_decap_pdb decap_pdb;
2173 	struct alginfo authdata, cipherdata;
2174 	int bufsize;
2175 	struct sec_flow_context *flc;
2176 
2177 	PMD_INIT_FUNC_TRACE();
2178 
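	/*
	 * The xform chain is assumed to be cipher followed by auth for
	 * egress, and auth followed by cipher for ingress;
	 * crypto_xform->next is not validated here.
	 */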
2179 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2180 		cipher_xform = &conf->crypto_xform->cipher;
2181 		auth_xform = &conf->crypto_xform->next->auth;
2182 	} else {
2183 		auth_xform = &conf->crypto_xform->auth;
2184 		cipher_xform = &conf->crypto_xform->next->cipher;
2185 	}
2186 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2187 				sizeof(struct ctxt_priv) +
2188 				sizeof(struct sec_flc_desc),
2189 				RTE_CACHE_LINE_SIZE);
2190 
2191 	if (priv == NULL) {
2192 		DPAA2_SEC_ERR("No memory for priv CTXT");
2193 		return -ENOMEM;
2194 	}
2195 
2196 	flc = &priv->flc_desc[0].flc;
2197 
2198 	session->ctxt_type = DPAA2_SEC_IPSEC;
2199 	session->cipher_key.data = rte_zmalloc(NULL,
2200 					       cipher_xform->key.length,
2201 					       RTE_CACHE_LINE_SIZE);
2202 	if (session->cipher_key.data == NULL &&
2203 			cipher_xform->key.length > 0) {
2204 		DPAA2_SEC_ERR("No Memory for cipher key");
2205 		rte_free(priv);
2206 		return -ENOMEM;
2207 	}
2208 
2209 	session->cipher_key.length = cipher_xform->key.length;
2210 	session->auth_key.data = rte_zmalloc(NULL,
2211 					auth_xform->key.length,
2212 					RTE_CACHE_LINE_SIZE);
2213 	if (session->auth_key.data == NULL &&
2214 			auth_xform->key.length > 0) {
2215 		DPAA2_SEC_ERR("No Memory for auth key");
2216 		rte_free(session->cipher_key.data);
2217 		rte_free(priv);
2218 		return -ENOMEM;
2219 	}
2220 	session->auth_key.length = auth_xform->key.length;
2221 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2222 			cipher_xform->key.length);
2223 	memcpy(session->auth_key.data, auth_xform->key.data,
2224 			auth_xform->key.length);
2225 
2226 	authdata.key = (size_t)session->auth_key.data;
2227 	authdata.keylen = session->auth_key.length;
2228 	authdata.key_enc_flags = 0;
2229 	authdata.key_type = RTA_DATA_IMM;
2230 	switch (auth_xform->algo) {
2231 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2232 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2233 		authdata.algmode = OP_ALG_AAI_HMAC;
2234 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2235 		break;
2236 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2237 		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2238 		authdata.algmode = OP_ALG_AAI_HMAC;
2239 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2240 		break;
2241 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2242 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2243 		authdata.algmode = OP_ALG_AAI_HMAC;
2244 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2245 		break;
2246 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2247 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2248 		authdata.algmode = OP_ALG_AAI_HMAC;
2249 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2250 		break;
2251 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2252 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2253 		authdata.algmode = OP_ALG_AAI_HMAC;
2254 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2255 		break;
2256 	case RTE_CRYPTO_AUTH_AES_CMAC:
2257 		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
2258 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2259 		break;
2260 	case RTE_CRYPTO_AUTH_NULL:
2261 		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
2262 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2263 		break;
2264 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2265 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2266 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2267 	case RTE_CRYPTO_AUTH_SHA1:
2268 	case RTE_CRYPTO_AUTH_SHA256:
2269 	case RTE_CRYPTO_AUTH_SHA512:
2270 	case RTE_CRYPTO_AUTH_SHA224:
2271 	case RTE_CRYPTO_AUTH_SHA384:
2272 	case RTE_CRYPTO_AUTH_MD5:
2273 	case RTE_CRYPTO_AUTH_AES_GMAC:
2274 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2275 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2276 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2277 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2278 			      auth_xform->algo);
2279 		goto out;
2280 	default:
2281 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2282 			      auth_xform->algo);
2283 		goto out;
2284 	}
2285 	cipherdata.key = (size_t)session->cipher_key.data;
2286 	cipherdata.keylen = session->cipher_key.length;
2287 	cipherdata.key_enc_flags = 0;
2288 	cipherdata.key_type = RTA_DATA_IMM;
2289 
2290 	switch (cipher_xform->algo) {
2291 	case RTE_CRYPTO_CIPHER_AES_CBC:
2292 		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
2293 		cipherdata.algmode = OP_ALG_AAI_CBC;
2294 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2295 		break;
2296 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2297 		cipherdata.algtype = OP_PCL_IPSEC_3DES;
2298 		cipherdata.algmode = OP_ALG_AAI_CBC;
2299 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2300 		break;
2301 	case RTE_CRYPTO_CIPHER_AES_CTR:
2302 		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
2303 		cipherdata.algmode = OP_ALG_AAI_CTR;
2304 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2305 		break;
2306 	case RTE_CRYPTO_CIPHER_NULL:
2307 		cipherdata.algtype = OP_PCL_IPSEC_NULL;
2308 		break;
2309 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2310 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2311 	case RTE_CRYPTO_CIPHER_AES_ECB:
2312 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2313 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2314 			      cipher_xform->algo);
2315 		goto out;
2316 	default:
2317 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2318 			      cipher_xform->algo);
2319 		goto out;
2320 	}
2321 
2322 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2323 		struct ip ip4_hdr;
2324 
2325 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2326 		ip4_hdr.ip_v = IPVERSION;
2327 		ip4_hdr.ip_hl = 5;
2328 		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2329 		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2330 		ip4_hdr.ip_id = 0;
2331 		ip4_hdr.ip_off = 0;
2332 		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
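		/* next protocol is ESP (50) */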
2333 		ip4_hdr.ip_p = 0x32;
2334 		ip4_hdr.ip_sum = 0;
2335 		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2336 		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2337 		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2338 			sizeof(struct ip));
2339 
2340 		/* For Sec Proto only one descriptor is required. */
2341 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
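		/*
		 * Outer header comes inlined in the PDB (OIHI_PDB_INL), the
		 * IV is generated by SEC (IVSRC) and TTL decrement is
		 * enabled (DTTL).
		 */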
2342 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2343 			PDBOPTS_ESP_OIHI_PDB_INL |
2344 			PDBOPTS_ESP_IVSRC |
2345 			PDBHMO_ESP_ENCAP_DTTL;
2346 		encap_pdb.spi = ipsec_xform->spi;
2347 		encap_pdb.ip_hdr_len = sizeof(struct ip);
2348 
2349 		session->dir = DIR_ENC;
2350 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2351 				1, 0, &encap_pdb,
2352 				(uint8_t *)&ip4_hdr,
2353 				&cipherdata, &authdata);
2354 	} else if (ipsec_xform->direction ==
2355 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2356 		flc->dhr = SEC_FLC_DHR_INBOUND;
2357 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
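		/* outer IP header length goes in the upper 16 bits */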
2358 		decap_pdb.options = sizeof(struct ip) << 16;
2359 		session->dir = DIR_DEC;
2360 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2361 				1, 0, &decap_pdb, &cipherdata, &authdata);
2362 	} else {
2363 		goto out;
	}
2364 
2365 	if (bufsize < 0) {
2366 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2367 		goto out;
2368 	}
2369 
2370 	flc->word1_sdl = (uint8_t)bufsize;
2371 
2372 	/* Enable the stashing control bit */
2373 	DPAA2_SET_FLC_RSC(flc);
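	/* the 0x14 ORed into the low-order bits encodes the stashing settings */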
2374 	flc->word2_rflc_31_0 = lower_32_bits(
2375 			(size_t)&(((struct dpaa2_sec_qp *)
2376 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2377 	flc->word3_rflc_63_32 = upper_32_bits(
2378 			(size_t)&(((struct dpaa2_sec_qp *)
2379 			dev->data->queue_pairs[0])->rx_vq));
2380 
2381 	/* Set EWS bit i.e. enable write-safe */
2382 	DPAA2_SET_FLC_EWS(flc);
2383 	/* Set BS = 1 i.e reuse input buffers as output buffers */
2384 	DPAA2_SET_FLC_REUSE_BS(flc);
2385 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2386 	DPAA2_SET_FLC_REUSE_FF(flc);
2387 
2388 	session->ctxt = priv;
2389 
2390 	return 0;
2391 out:
2392 	rte_free(session->auth_key.data);
2393 	rte_free(session->cipher_key.data);
2394 	rte_free(priv);
2395 	return -1;
2396 }
2397 
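/*
 * rte_security session_create callback: takes session private data from
 * the given mempool and initialises it for the requested protocol
 * (IPsec is the only one supported).
 */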
2398 static int
2399 dpaa2_sec_security_session_create(void *dev,
2400 				  struct rte_security_session_conf *conf,
2401 				  struct rte_security_session *sess,
2402 				  struct rte_mempool *mempool)
2403 {
2404 	void *sess_private_data;
2405 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2406 	int ret;
2407 
2408 	if (rte_mempool_get(mempool, &sess_private_data)) {
2409 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2410 		return -ENOMEM;
2411 	}
2412 
2413 	switch (conf->protocol) {
2414 	case RTE_SECURITY_PROTOCOL_IPSEC:
2415 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2416 				sess_private_data);
2417 		break;
2418 	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
2419 		return -ENOTSUP;
2420 	default:
		rte_mempool_put(mempool, sess_private_data);
2421 		return -EINVAL;
2422 	}
2423 	if (ret != 0) {
2424 		DPAA2_SEC_ERR("Failed to configure session parameters");
2425 		/* Return session to mempool */
2426 		rte_mempool_put(mempool, sess_private_data);
2427 		return ret;
2428 	}
2429 
2430 	set_sec_session_private_data(sess, sess_private_data);
2431 
2432 	return ret;
2433 }
2434 
2435 /** Clear the memory of session so it doesn't leave key material behind */
2436 static int
2437 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2438 		struct rte_security_session *sess)
2439 {
2440 	void *sess_priv = get_sec_session_private_data(sess);
2441 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2442 
2443 	PMD_INIT_FUNC_TRACE();
2444 
2445 	if (sess_priv) {
2446 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2447 
2448 		rte_free(s->ctxt);
2449 		rte_free(s->cipher_key.data);
2450 		rte_free(s->auth_key.data);
2451 		memset(sess, 0, sizeof(dpaa2_sec_session));
2452 		set_sec_session_private_data(sess, NULL);
2453 		rte_mempool_put(sess_mp, sess_priv);
2454 	}
2455 	return 0;
2456 }
2457 
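/*
 * cryptodev session_configure callback: mirrors the security path above
 * but stores the private data per driver id.
 */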
2458 static int
2459 dpaa2_sec_session_configure(struct rte_cryptodev *dev,
2460 		struct rte_crypto_sym_xform *xform,
2461 		struct rte_cryptodev_sym_session *sess,
2462 		struct rte_mempool *mempool)
2463 {
2464 	void *sess_private_data;
2465 	int ret;
2466 
2467 	if (rte_mempool_get(mempool, &sess_private_data)) {
2468 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2469 		return -ENOMEM;
2470 	}
2471 
2472 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2473 	if (ret != 0) {
2474 		DPAA2_SEC_ERR("Failed to configure session parameters");
2475 		/* Return session to mempool */
2476 		rte_mempool_put(mempool, sess_private_data);
2477 		return ret;
2478 	}
2479 
2480 	set_session_private_data(sess, dev->driver_id,
2481 		sess_private_data);
2482 
2483 	return 0;
2484 }
2485 
2486 /** Clear the memory of session so it doesn't leave key material behind */
2487 static void
2488 dpaa2_sec_session_clear(struct rte_cryptodev *dev,
2489 		struct rte_cryptodev_sym_session *sess)
2490 {
2491 	uint8_t index = dev->driver_id;
2492 	void *sess_priv = get_session_private_data(sess, index);
2493 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
2495 
2496 	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

2497 		rte_free(s->ctxt);
2498 		rte_free(s->cipher_key.data);
2499 		rte_free(s->auth_key.data);
2500 		memset(sess, 0, sizeof(dpaa2_sec_session));
2502 		set_session_private_data(sess, index, NULL);
2503 		rte_mempool_put(sess_mp, sess_priv);
2504 	}
2505 }
2506 
2507 static int
2508 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2509 			struct rte_cryptodev_config *config __rte_unused)
2510 {
2511 	PMD_INIT_FUNC_TRACE();
2512 
2513 	return 0;
2514 }
2515 
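/*
 * Enable the DPSECI object via the MC and cache the hardware frame
 * queue IDs (FQIDs) of every configured queue pair for the datapath.
 */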
2516 static int
2517 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2518 {
2519 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2520 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2521 	struct dpseci_attr attr;
2522 	struct dpaa2_queue *dpaa2_q;
2523 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2524 					dev->data->queue_pairs;
2525 	struct dpseci_rx_queue_attr rx_attr;
2526 	struct dpseci_tx_queue_attr tx_attr;
2527 	int ret, i;
2528 
2529 	PMD_INIT_FUNC_TRACE();
2530 
2531 	memset(&attr, 0, sizeof(struct dpseci_attr));
2532 
2533 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2534 	if (ret) {
2535 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2536 			      priv->hw_id);
2537 		goto get_attr_failure;
2538 	}
2539 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2540 	if (ret) {
2541 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2542 		goto get_attr_failure;
2543 	}
2544 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2545 		dpaa2_q = &qp[i]->rx_vq;
2546 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2547 				    &rx_attr);
2548 		dpaa2_q->fqid = rx_attr.fqid;
2549 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2550 	}
2551 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2552 		dpaa2_q = &qp[i]->tx_vq;
2553 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2554 				    &tx_attr);
2555 		dpaa2_q->fqid = tx_attr.fqid;
2556 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2557 	}
2558 
2559 	return 0;
2560 get_attr_failure:
2561 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2562 	return -1;
2563 }
2564 
2565 static void
2566 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2567 {
2568 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2569 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2570 	int ret;
2571 
2572 	PMD_INIT_FUNC_TRACE();
2573 
2574 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2575 	if (ret) {
2576 		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
2577 			     priv->hw_id);
2578 		return;
2579 	}
2580 
2581 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2582 	if (ret < 0) {
2583 		DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %x", ret);
2584 		return;
2585 	}
2586 }
2587 
2588 static int
2589 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2590 {
2591 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2592 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2593 	int ret;
2594 
2595 	PMD_INIT_FUNC_TRACE();
2596 
2597 	/* Function is reverse of dpaa2_sec_dev_init.
2598 	 * It does the following:
2599 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2600 	 * 2. Close the DPSECI device
2601 	 * 3. Free the allocated resources.
2602 	 */
2603 
2604 	/* Close the device at the underlying layer */
2605 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2606 	if (ret) {
2607 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
2608 		return -1;
2609 	}
2610 
2611 	/* Free the allocated memory for the dpseci object */
2612 	priv->hw = NULL;
2613 	rte_free(dpseci);
2614 
2615 	return 0;
2616 }
2617 
2618 static void
2619 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
2620 			struct rte_cryptodev_info *info)
2621 {
2622 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2623 
2624 	PMD_INIT_FUNC_TRACE();
2625 	if (info != NULL) {
2626 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2627 		info->feature_flags = dev->feature_flags;
2628 		info->capabilities = dpaa2_sec_capabilities;
2629 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2630 		info->driver_id = cryptodev_driver_id;
2631 	}
2632 }
2633 
2634 static
2635 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
2636 			 struct rte_cryptodev_stats *stats)
2637 {
2638 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2639 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2640 	struct dpseci_sec_counters counters = {0};
2641 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2642 					dev->data->queue_pairs;
2643 	int ret, i;
2644 
2645 	PMD_INIT_FUNC_TRACE();
2646 	if (stats == NULL) {
2647 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
2648 		return;
2649 	}
2650 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2651 		if (qp[i] == NULL) {
2652 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2653 			continue;
2654 		}
2655 
2656 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
2657 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
2658 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
2659 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
2660 	}
2661 
2662 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
2663 				      &counters);
2664 	if (ret) {
2665 		DPAA2_SEC_ERR("SEC counters failed");
2666 	} else {
2667 		DPAA2_SEC_INFO("dpseci hardware stats:"
2668 			    "\n\tNum of Requests Dequeued = %" PRIu64
2669 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
2670 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
2671 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
2672 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
2673 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
2674 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
2675 			    counters.dequeued_requests,
2676 			    counters.ob_enc_requests,
2677 			    counters.ib_dec_requests,
2678 			    counters.ob_enc_bytes,
2679 			    counters.ob_prot_bytes,
2680 			    counters.ib_dec_bytes,
2681 			    counters.ib_valid_bytes);
2682 	}
2683 }
2684 
2685 static
2686 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
2687 {
2688 	int i;
2689 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2690 				   (dev->data->queue_pairs);
2691 
2692 	PMD_INIT_FUNC_TRACE();
2693 
2694 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2695 		if (qp[i] == NULL) {
2696 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2697 			continue;
2698 		}
2699 		qp[i]->tx_vq.rx_pkts = 0;
2700 		qp[i]->tx_vq.tx_pkts = 0;
2701 		qp[i]->tx_vq.err_pkts = 0;
2702 		qp[i]->rx_vq.rx_pkts = 0;
2703 		qp[i]->rx_vq.tx_pkts = 0;
2704 		qp[i]->rx_vq.err_pkts = 0;
2705 	}
2706 }
2707 
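/* cryptodev ops exported by this PMD */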
2708 static struct rte_cryptodev_ops crypto_ops = {
2709 	.dev_configure	      = dpaa2_sec_dev_configure,
2710 	.dev_start	      = dpaa2_sec_dev_start,
2711 	.dev_stop	      = dpaa2_sec_dev_stop,
2712 	.dev_close	      = dpaa2_sec_dev_close,
2713 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
2714 	.stats_get	      = dpaa2_sec_stats_get,
2715 	.stats_reset	      = dpaa2_sec_stats_reset,
2716 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
2717 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
2718 	.queue_pair_start     = dpaa2_sec_queue_pair_start,
2719 	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
2720 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
2721 	.session_get_size     = dpaa2_sec_session_get_size,
2722 	.session_configure    = dpaa2_sec_session_configure,
2723 	.session_clear        = dpaa2_sec_session_clear,
2724 };
2725 
2726 static const struct rte_security_capability *
2727 dpaa2_sec_capabilities_get(void *device __rte_unused)
2728 {
2729 	return dpaa2_sec_security_cap;
2730 }
2731 
2732 struct rte_security_ops dpaa2_sec_security_ops = {
2733 	.session_create = dpaa2_sec_security_session_create,
2734 	.session_update = NULL,
2735 	.session_stats_get = NULL,
2736 	.session_destroy = dpaa2_sec_security_session_destroy,
2737 	.set_pkt_metadata = NULL,
2738 	.capabilities_get = dpaa2_sec_capabilities_get
2739 };
2740 
2741 static int
2742 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
2743 {
2744 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2745 
2746 	rte_free(dev->security_ctx);
2747 
2748 	rte_mempool_free(internals->fle_pool);
2749 
2750 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
2751 		       dev->data->name, rte_socket_id());
2752 
2753 	return 0;
2754 }
2755 
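/*
 * Device init: open the DPSECI object through the MC portal, publish
 * the rte_security context and create the FLE pool used to build frame
 * list entries on the datapath; secondary processes skip the hardware
 * setup.
 */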
2756 static int
2757 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
2758 {
2759 	struct dpaa2_sec_dev_private *internals;
2760 	struct rte_device *dev = cryptodev->device;
2761 	struct rte_dpaa2_device *dpaa2_dev;
2762 	struct rte_security_ctx *security_instance;
2763 	struct fsl_mc_io *dpseci;
2764 	uint16_t token;
2765 	struct dpseci_attr attr;
2766 	int retcode, hw_id;
2767 	char str[20];
2768 
2769 	PMD_INIT_FUNC_TRACE();
2770 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2771 	if (dpaa2_dev == NULL) {
2772 		DPAA2_SEC_ERR("DPAA2 SEC device not found");
2773 		return -1;
2774 	}
2775 	hw_id = dpaa2_dev->object_id;
2776 
2777 	cryptodev->driver_id = cryptodev_driver_id;
2778 	cryptodev->dev_ops = &crypto_ops;
2779 
2780 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
2781 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
2782 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2783 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2784 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2785 			RTE_CRYPTODEV_FF_SECURITY |
2786 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2787 
2788 	internals = cryptodev->data->dev_private;
2789 	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
2790 
2791 	/*
2792 	 * For secondary processes, we don't initialise any further as primary
2793 	 * has already done this work. Only check we don't need a different
2794 	 * RX function
2795 	 */
2796 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2797 		DPAA2_SEC_DEBUG("Device already init by primary process");
2798 		return 0;
2799 	}
2800 
2801 	/* Initialize security_ctx only for primary process*/
2802 	security_instance = rte_malloc("rte_security_instances_ops",
2803 				sizeof(struct rte_security_ctx), 0);
2804 	if (security_instance == NULL)
2805 		return -ENOMEM;
2806 	security_instance->device = (void *)cryptodev;
2807 	security_instance->ops = &dpaa2_sec_security_ops;
2808 	security_instance->sess_cnt = 0;
2809 	cryptodev->security_ctx = security_instance;
2810 
2811 	/*Open the rte device via MC and save the handle for further use*/
2812 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
2813 				sizeof(struct fsl_mc_io), 0);
2814 	if (!dpseci) {
2815 		DPAA2_SEC_ERR(
2816 			"Error in allocating the memory for dpsec object");
2817 		return -1;
2818 	}
2819 	dpseci->regs = rte_mcp_ptr_list[0];
2820 
2821 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
2822 	if (retcode != 0) {
2823 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
2824 			      retcode);
2825 		goto init_error;
2826 	}
2827 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
2828 	if (retcode != 0) {
2829 		DPAA2_SEC_ERR(
2830 			     "Cannot get dpsec device attributes: Error = %x",
2831 			     retcode);
2832 		goto init_error;
2833 	}
2834 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%d", hw_id);
2835 
2836 	internals->max_nb_queue_pairs = attr.num_tx_queues;
2837 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
2838 	internals->hw = dpseci;
2839 	internals->token = token;
2840 
2841 	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
2842 	internals->fle_pool = rte_mempool_create((const char *)str,
2843 			FLE_POOL_NUM_BUFS,
2844 			FLE_POOL_BUF_SIZE,
2845 			FLE_POOL_CACHE_SIZE, 0,
2846 			NULL, NULL, NULL, NULL,
2847 			SOCKET_ID_ANY, 0);
2848 	if (!internals->fle_pool) {
2849 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2850 		goto init_error;
2851 	}
2852 
2853 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
2854 	return 0;
2855 
2856 init_error:
2857 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2858 
2859 	/* dpaa2_sec_uninit(crypto_dev_name); */
2860 	return -EFAULT;
2861 }
2862 
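/*
 * Bus probe: allocate the cryptodev shell (plus its private data in the
 * primary process) and hand it over to dpaa2_sec_dev_init().
 */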
2863 static int
2864 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
2865 			  struct rte_dpaa2_device *dpaa2_dev)
2866 {
2867 	struct rte_cryptodev *cryptodev;
2868 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2869 
2870 	int retval;
2871 
2872 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);
2873 
2874 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2875 	if (cryptodev == NULL)
2876 		return -ENOMEM;
2877 
2878 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2879 		cryptodev->data->dev_private = rte_zmalloc_socket(
2880 					"cryptodev private structure",
2881 					sizeof(struct dpaa2_sec_dev_private),
2882 					RTE_CACHE_LINE_SIZE,
2883 					rte_socket_id());
2884 
2885 		if (cryptodev->data->dev_private == NULL)
2886 			rte_panic("Cannot allocate memory for private "
2887 				  "device data");
2888 	}
2889 
2890 	dpaa2_dev->cryptodev = cryptodev;
2891 	cryptodev->device = &dpaa2_dev->device;
2892 	cryptodev->device->driver = &dpaa2_drv->driver;
2893 
2894 	/* init user callbacks */
2895 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2896 
2897 	/* Invoke PMD device initialization function */
2898 	retval = dpaa2_sec_dev_init(cryptodev);
2899 	if (retval == 0)
2900 		return 0;
2901 
2902 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2903 		rte_free(cryptodev->data->dev_private);
2904 
2905 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
2906 
2907 	return -ENXIO;
2908 }
2909 
2910 static int
2911 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
2912 {
2913 	struct rte_cryptodev *cryptodev;
2914 	int ret;
2915 
2916 	cryptodev = dpaa2_dev->cryptodev;
2917 	if (cryptodev == NULL)
2918 		return -ENODEV;
2919 
2920 	ret = dpaa2_sec_uninit(cryptodev);
2921 	if (ret)
2922 		return ret;
2923 
2924 	return rte_cryptodev_pmd_destroy(cryptodev);
2925 }
2926 
2927 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
2928 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
2929 	.drv_type = DPAA2_CRYPTO,
2930 	.driver = {
2931 		.name = "DPAA2 SEC PMD"
2932 	},
2933 	.probe = cryptodev_dpaa2_sec_probe,
2934 	.remove = cryptodev_dpaa2_sec_remove,
2935 };
2936 
2937 static struct cryptodev_driver dpaa2_sec_crypto_drv;
2938 
2939 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
2940 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
2941 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
2942 
2943 RTE_INIT(dpaa2_sec_init_log);
2944 static void
2945 dpaa2_sec_init_log(void)
2946 {
2947 	/* Crypto PMD logs */
2948 	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
2949 	if (dpaa2_logtype_sec >= 0)
2950 		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
2951 }
2952