/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

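/*
 * Note on the FD/FLE layout used by the build_*_fd() helpers below
 * (a reader's summary of what the code does): non-protocol FDs are
 * compound frames pointing at a frame list. The first FLE of each
 * allocation is never handed to hardware; it stashes the rte_crypto_op
 * pointer and the session ctxt so both can be recovered at dequeue time
 * (see sec_fd_to_mbuf()). The next two FLEs are the output and input
 * frame list entries, and any scatter/gather entries follow them.
 */
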
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));

	/* Save the mbuf's physical address and stash the op pointer in its
	 * place, so both can be restored on dequeue.
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (uint64_t)op;

	return 0;
}

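/*
 * build_proto_fd() above is the protocol (IPsec) offload case: the FD is
 * a simple single-buffer frame rather than a compound frame list, SEC
 * transforms the packet in place, and only the op pointer needs to be
 * recoverable at dequeue time (sec_simple_fd_to_mbuf() undoes the stash).
 */
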
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry is used to store the op and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
								auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

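/*
 * Length accounting used by the GCM builders, spelled out:
 *   output FLE length = aead.data.length + auth_only_len
 *                       (+ digest_length when encrypting, since the ICV
 *                       is produced after the ciphertext)
 *   input FLE length  = iv.length + auth_only_len + aead.data.length
 *                       (+ digest_length when decrypting, since the
 *                       received ICV is appended for SEC to verify)
 */
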
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store the op and session
	 * ctxt. Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the op address from the previous FLE.
	 * We can have a better approach to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

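/*
 * On the buffer-pool handling in the non-SG builders: when the mbuf pool
 * maps to a valid hardware pool (bpid < MAX_BPID), the bpid is programmed
 * into the FD and FLEs so hardware can release the buffers; otherwise the
 * IVP bit is set, which (as the macro name suggests) marks the buffer
 * pool id invalid so the buffers stay software managed.
 */
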
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry is used to store the op and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG,
			"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

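/*
 * In the authenc builders, auth_only_len is derived as
 * auth.data.length - cipher.data.length: the authenticated region spans
 * both the to-be-ciphered payload and any authenticate-only prefix, so
 * the difference is the length of data that is authenticated but not
 * encrypted.
 */
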
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the op address from the previous FLE.
	 * We can have a better approach to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry is used to store the op and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

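/*
 * For plain auth, sess->dir selects the operation: DIR_ENC is digest
 * generation (the output FLE points at the digest buffer), DIR_DEC is
 * verification, for which the supplied digest is copied in after the
 * data so SEC can compare it against the computed one.
 */
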
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store the op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the op address from the previous FLE.
	 * We can have a better approach to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry is used to store the op and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	PMD_TX_LOG(DEBUG,
			"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	PMD_TX_LOG(DEBUG,
			"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
			flc, fle, fle->addr_hi, fle->addr_lo,
			fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG,
			"CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));
	return 0;
}

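/*
 * For plain cipher, the input frame is the IV followed by the payload
 * (hence FD length = cipher.data.length + iv.length), and the output
 * frame covers only the ciphered region of the destination mbuf.
 */
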
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store the op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the op address from the previous FLE.
	 * We can have a better approach to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG,
			"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG,
			"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
			flc, fle, fle->addr_hi, fle->addr_lo,
			fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG,
			"CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to a given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							NULL,
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
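
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the burst functions here are reached through the generic cryptodev API,
 * e.g.
 *
 *	nb_tx = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	...
 *	nb_rx = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * with dev_id/qp_id identifying this device and one of its queue pairs.
 */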

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	op = (struct rte_crypto_op *)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD ADDR
	 * to get the op address from the previous FLE.
	 * We can have a better approach to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)dst, dst->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
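
/*
 * The FLE release above mirrors the allocation in the build helpers:
 * contiguous-mbuf FDs take their FLE memory from priv->fle_pool
 * (rte_mempool_get()/rte_mempool_put()), while scatter/gather FDs use
 * rte_malloc()/rte_free().
 */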

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD,
				"SEC VDQ command is not issued : QBMAN busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the PULL command issued above.
	 */
	while (!is_last) {
		/* Check if the previously issued command has completed.
		 * The SWP appears to be shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and set
		 * the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, there is nothing more to do. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
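
/*
 * DPSECI_QUEUE_OPT_USER_CTX above attaches &qp->rx_vq to the hardware Rx
 * queue, so frames dequeued from SEC can be mapped back to their queue
 * pair without an extra lookup.
 */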

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
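
/*
 * Note that word2/word3 of the flow context above (and in the other
 * session-init routines) are programmed with the address of queue pair
 * 0's rx_vq, i.e. SEC responses for this session are steered to the
 * first queue pair's Rx queue.
 */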
1616 
1617 static int
1618 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1619 		    struct rte_crypto_sym_xform *xform,
1620 		    dpaa2_sec_session *session)
1621 {
1622 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1623 	struct alginfo authdata;
1624 	unsigned int bufsize, i;
1625 	struct ctxt_priv *priv;
1626 	struct sec_flow_context *flc;
1627 
1628 	PMD_INIT_FUNC_TRACE();
1629 
1630 	/* For SEC AUTH three descriptors are required for various stages */
1631 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1632 			sizeof(struct ctxt_priv) + 3 *
1633 			sizeof(struct sec_flc_desc),
1634 			RTE_CACHE_LINE_SIZE);
1635 	if (priv == NULL) {
1636 		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1637 		return -1;
1638 	}
1639 
1640 	priv->fle_pool = dev_priv->fle_pool;
1641 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1642 
1643 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1644 			RTE_CACHE_LINE_SIZE);
1645 	if (session->auth_key.data == NULL) {
1646 		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1647 		rte_free(priv);
1648 		return -1;
1649 	}
1650 	session->auth_key.length = xform->auth.key.length;
1651 
1652 	memcpy(session->auth_key.data, xform->auth.key.data,
1653 	       xform->auth.key.length);
1654 	authdata.key = (uint64_t)session->auth_key.data;
1655 	authdata.keylen = session->auth_key.length;
1656 	authdata.key_enc_flags = 0;
1657 	authdata.key_type = RTA_DATA_IMM;
1658 
1659 	session->digest_length = xform->auth.digest_length;
1660 
1661 	switch (xform->auth.algo) {
1662 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1663 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1664 		authdata.algmode = OP_ALG_AAI_HMAC;
1665 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1666 		break;
1667 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1668 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1669 		authdata.algmode = OP_ALG_AAI_HMAC;
1670 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1671 		break;
1672 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1673 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
1674 		authdata.algmode = OP_ALG_AAI_HMAC;
1675 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1676 		break;
1677 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1678 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
1679 		authdata.algmode = OP_ALG_AAI_HMAC;
1680 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1681 		break;
1682 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1683 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
1684 		authdata.algmode = OP_ALG_AAI_HMAC;
1685 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1686 		break;
1687 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1688 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
1689 		authdata.algmode = OP_ALG_AAI_HMAC;
1690 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1691 		break;
1692 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1693 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1694 	case RTE_CRYPTO_AUTH_NULL:
1695 	case RTE_CRYPTO_AUTH_SHA1:
1696 	case RTE_CRYPTO_AUTH_SHA256:
1697 	case RTE_CRYPTO_AUTH_SHA512:
1698 	case RTE_CRYPTO_AUTH_SHA224:
1699 	case RTE_CRYPTO_AUTH_SHA384:
1700 	case RTE_CRYPTO_AUTH_MD5:
1701 	case RTE_CRYPTO_AUTH_AES_GMAC:
1702 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1703 	case RTE_CRYPTO_AUTH_AES_CMAC:
1704 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1705 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1706 		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
1707 			xform->auth.algo);
1708 		goto error_out;
1709 	default:
1710 		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
1711 			xform->auth.algo);
1712 		goto error_out;
1713 	}
1714 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1715 				DIR_ENC : DIR_DEC;
1716 
1717 	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1718 				   1, 0, &authdata, !session->dir,
1719 				   session->digest_length);
1720 
1721 	flc->word1_sdl = (uint8_t)bufsize;
1722 	flc->word2_rflc_31_0 = lower_32_bits(
1723 			(uint64_t)&(((struct dpaa2_sec_qp *)
1724 			dev->data->queue_pairs[0])->rx_vq));
1725 	flc->word3_rflc_63_32 = upper_32_bits(
1726 			(uint64_t)&(((struct dpaa2_sec_qp *)
1727 			dev->data->queue_pairs[0])->rx_vq));
1728 	session->ctxt = priv;
1729 	for (i = 0; i < bufsize; i++)
1730 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1731 			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1732 
1733 
1734 	return 0;
1735 
1736 error_out:
1737 	rte_free(session->auth_key.data);
1738 	rte_free(priv);
1739 	return -1;
1740 }
1741 
1742 static int
1743 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1744 		    struct rte_crypto_sym_xform *xform,
1745 		    dpaa2_sec_session *session)
1746 {
1747 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1748 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1749 	struct alginfo aeaddata;
	int bufsize, i;
1751 	struct ctxt_priv *priv;
1752 	struct sec_flow_context *flc;
1753 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1754 	int err;
1755 
1756 	PMD_INIT_FUNC_TRACE();
1757 
1758 	/* Set IV parameters */
1759 	session->iv.offset = aead_xform->iv.offset;
1760 	session->iv.length = aead_xform->iv.length;
1761 	session->ctxt_type = DPAA2_SEC_AEAD;
1762 
1763 	/* For SEC AEAD only one descriptor is required */
1764 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1765 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1766 			RTE_CACHE_LINE_SIZE);
1767 	if (priv == NULL) {
1768 		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1769 		return -1;
1770 	}
1771 
1772 	priv->fle_pool = dev_priv->fle_pool;
1773 	flc = &priv->flc_desc[0].flc;
1774 
1775 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1776 					       RTE_CACHE_LINE_SIZE);
1777 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1778 		RTE_LOG(ERR, PMD, "No Memory for aead key\n");
1779 		rte_free(priv);
1780 		return -1;
1781 	}
1782 	memcpy(session->aead_key.data, aead_xform->key.data,
1783 	       aead_xform->key.length);
1784 
1785 	session->digest_length = aead_xform->digest_length;
1786 	session->aead_key.length = aead_xform->key.length;
1787 	ctxt->auth_only_len = aead_xform->aad_length;
1788 
1789 	aeaddata.key = (uint64_t)session->aead_key.data;
1790 	aeaddata.keylen = session->aead_key.length;
1791 	aeaddata.key_enc_flags = 0;
1792 	aeaddata.key_type = RTA_DATA_IMM;
1793 
1794 	switch (aead_xform->algo) {
1795 	case RTE_CRYPTO_AEAD_AES_GCM:
1796 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
1797 		aeaddata.algmode = OP_ALG_AAI_GCM;
1798 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1799 		break;
1800 	case RTE_CRYPTO_AEAD_AES_CCM:
1801 		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
1802 			aead_xform->algo);
1803 		goto error_out;
1804 	default:
1805 		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
1806 			aead_xform->algo);
1807 		goto error_out;
1808 	}
1809 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1810 				DIR_ENC : DIR_DEC;
1811 
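	/*
	 * desc[] doubles as scratch for rta_inline_query(): the key length
	 * is staged in desc[0] and the verdict comes back as a bitmask in
	 * desc[1] (bit set: the key fits inline in the shared descriptor,
	 * RTA_DATA_IMM; bit clear: pass it by pointer, RTA_DATA_PTR). Both
	 * scratch words are zeroed before the real descriptor is built.
	 * The chain path below repeats this pattern with two key lengths
	 * and mask bits 0 and 1.
	 */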
1812 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
1813 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1814 			       MIN_JOB_DESC_SIZE,
1815 			       (unsigned int *)priv->flc_desc[0].desc,
1816 			       &priv->flc_desc[0].desc[1], 1);
1817 
1818 	if (err < 0) {
1819 		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
1820 		goto error_out;
1821 	}
1822 	if (priv->flc_desc[0].desc[1] & 1) {
1823 		aeaddata.key_type = RTA_DATA_IMM;
1824 	} else {
1825 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1826 		aeaddata.key_type = RTA_DATA_PTR;
1827 	}
1828 	priv->flc_desc[0].desc[0] = 0;
1829 	priv->flc_desc[0].desc[1] = 0;
1830 
1831 	if (session->dir == DIR_ENC)
1832 		bufsize = cnstr_shdsc_gcm_encap(
1833 				priv->flc_desc[0].desc, 1, 0,
1834 				&aeaddata, session->iv.length,
1835 				session->digest_length);
1836 	else
1837 		bufsize = cnstr_shdsc_gcm_decap(
1838 				priv->flc_desc[0].desc, 1, 0,
1839 				&aeaddata, session->iv.length,
1840 				session->digest_length);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Invalid GCM shared descriptor\n");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
1842 	flc->word2_rflc_31_0 = lower_32_bits(
1843 			(uint64_t)&(((struct dpaa2_sec_qp *)
1844 			dev->data->queue_pairs[0])->rx_vq));
1845 	flc->word3_rflc_63_32 = upper_32_bits(
1846 			(uint64_t)&(((struct dpaa2_sec_qp *)
1847 			dev->data->queue_pairs[0])->rx_vq));
1848 	session->ctxt = priv;
1849 	for (i = 0; i < bufsize; i++)
1850 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1851 			    i, priv->flc_desc[0].desc[i]);
1852 
1853 	return 0;
1854 
1855 error_out:
1856 	rte_free(session->aead_key.data);
1857 	rte_free(priv);
1858 	return -1;
1859 }
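
/*
 * Illustrative only: an application reaches dpaa2_sec_aead_init() with a
 * single AEAD transform along these lines (values are examples, and
 * key_buf/iv_off are placeholders, not driver symbols):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key_buf, .length = 16 },
 *			.iv = { .offset = iv_off, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */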
1860 
1861 
1862 static int
1863 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1864 		    struct rte_crypto_sym_xform *xform,
1865 		    dpaa2_sec_session *session)
1866 {
1867 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1868 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1869 	struct alginfo authdata, cipherdata;
	int bufsize, i;
1871 	struct ctxt_priv *priv;
1872 	struct sec_flow_context *flc;
1873 	struct rte_crypto_cipher_xform *cipher_xform;
1874 	struct rte_crypto_auth_xform *auth_xform;
1875 	int err;
1876 
1877 	PMD_INIT_FUNC_TRACE();
1878 
1879 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
1880 		cipher_xform = &xform->cipher;
1881 		auth_xform = &xform->next->auth;
1882 		session->ctxt_type =
1883 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1884 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1885 	} else {
1886 		cipher_xform = &xform->next->cipher;
1887 		auth_xform = &xform->auth;
1888 		session->ctxt_type =
1889 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1890 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1891 	}
1892 
1893 	/* Set IV parameters */
1894 	session->iv.offset = cipher_xform->iv.offset;
1895 	session->iv.length = cipher_xform->iv.length;
1896 
1897 	/* For SEC AEAD only one descriptor is required */
1898 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1899 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1900 			RTE_CACHE_LINE_SIZE);
1901 	if (priv == NULL) {
1902 		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
1903 		return -1;
1904 	}
1905 
1906 	priv->fle_pool = dev_priv->fle_pool;
1907 	flc = &priv->flc_desc[0].flc;
1908 
1909 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1910 					       RTE_CACHE_LINE_SIZE);
1911 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1912 		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1913 		rte_free(priv);
1914 		return -1;
1915 	}
1916 	session->cipher_key.length = cipher_xform->key.length;
1917 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1918 					     RTE_CACHE_LINE_SIZE);
1919 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1920 		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1921 		rte_free(session->cipher_key.data);
1922 		rte_free(priv);
1923 		return -1;
1924 	}
1925 	session->auth_key.length = auth_xform->key.length;
1926 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1927 	       cipher_xform->key.length);
1928 	memcpy(session->auth_key.data, auth_xform->key.data,
1929 	       auth_xform->key.length);
1930 
1931 	authdata.key = (uint64_t)session->auth_key.data;
1932 	authdata.keylen = session->auth_key.length;
1933 	authdata.key_enc_flags = 0;
1934 	authdata.key_type = RTA_DATA_IMM;
1935 
1936 	session->digest_length = auth_xform->digest_length;
1937 
1938 	switch (auth_xform->algo) {
1939 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1940 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1941 		authdata.algmode = OP_ALG_AAI_HMAC;
1942 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1943 		break;
1944 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1945 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1946 		authdata.algmode = OP_ALG_AAI_HMAC;
1947 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1948 		break;
1949 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1950 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
1951 		authdata.algmode = OP_ALG_AAI_HMAC;
1952 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1953 		break;
1954 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1955 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
1956 		authdata.algmode = OP_ALG_AAI_HMAC;
1957 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1958 		break;
1959 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1960 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
1961 		authdata.algmode = OP_ALG_AAI_HMAC;
1962 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1963 		break;
1964 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1965 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
1966 		authdata.algmode = OP_ALG_AAI_HMAC;
1967 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1968 		break;
1969 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1970 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1971 	case RTE_CRYPTO_AUTH_NULL:
1972 	case RTE_CRYPTO_AUTH_SHA1:
1973 	case RTE_CRYPTO_AUTH_SHA256:
1974 	case RTE_CRYPTO_AUTH_SHA512:
1975 	case RTE_CRYPTO_AUTH_SHA224:
1976 	case RTE_CRYPTO_AUTH_SHA384:
1977 	case RTE_CRYPTO_AUTH_MD5:
1978 	case RTE_CRYPTO_AUTH_AES_GMAC:
1979 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1980 	case RTE_CRYPTO_AUTH_AES_CMAC:
1981 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1982 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1983 		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
1984 			auth_xform->algo);
1985 		goto error_out;
1986 	default:
1987 		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
1988 			auth_xform->algo);
1989 		goto error_out;
1990 	}
1991 	cipherdata.key = (uint64_t)session->cipher_key.data;
1992 	cipherdata.keylen = session->cipher_key.length;
1993 	cipherdata.key_enc_flags = 0;
1994 	cipherdata.key_type = RTA_DATA_IMM;
1995 
1996 	switch (cipher_xform->algo) {
1997 	case RTE_CRYPTO_CIPHER_AES_CBC:
1998 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1999 		cipherdata.algmode = OP_ALG_AAI_CBC;
2000 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2001 		break;
2002 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2003 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2004 		cipherdata.algmode = OP_ALG_AAI_CBC;
2005 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2006 		break;
2007 	case RTE_CRYPTO_CIPHER_AES_CTR:
2008 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2009 		cipherdata.algmode = OP_ALG_AAI_CTR;
2010 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2011 		break;
2012 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2013 	case RTE_CRYPTO_CIPHER_NULL:
2014 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2015 	case RTE_CRYPTO_CIPHER_AES_ECB:
2016 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2017 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2018 			cipher_xform->algo);
2019 		goto error_out;
2020 	default:
2021 		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2022 			cipher_xform->algo);
2023 		goto error_out;
2024 	}
2025 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2026 				DIR_ENC : DIR_DEC;
2027 
2028 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2029 	priv->flc_desc[0].desc[1] = authdata.keylen;
2030 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2031 			       MIN_JOB_DESC_SIZE,
2032 			       (unsigned int *)priv->flc_desc[0].desc,
2033 			       &priv->flc_desc[0].desc[2], 2);
2034 
2035 	if (err < 0) {
2036 		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
2037 		goto error_out;
2038 	}
2039 	if (priv->flc_desc[0].desc[2] & 1) {
2040 		cipherdata.key_type = RTA_DATA_IMM;
2041 	} else {
2042 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2043 		cipherdata.key_type = RTA_DATA_PTR;
2044 	}
2045 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2046 		authdata.key_type = RTA_DATA_IMM;
2047 	} else {
2048 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2049 		authdata.key_type = RTA_DATA_PTR;
2050 	}
2051 	priv->flc_desc[0].desc[0] = 0;
2052 	priv->flc_desc[0].desc[1] = 0;
2053 	priv->flc_desc[0].desc[2] = 0;
2054 
2055 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2056 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2057 					      0, &cipherdata, &authdata,
2058 					      session->iv.length,
2059 					      ctxt->auth_only_len,
2060 					      session->digest_length,
2061 					      session->dir);
2062 	} else {
2063 		RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
2064 		goto error_out;
2065 	}
2066 
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Invalid authenc shared descriptor\n");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
2068 	flc->word2_rflc_31_0 = lower_32_bits(
2069 			(uint64_t)&(((struct dpaa2_sec_qp *)
2070 			dev->data->queue_pairs[0])->rx_vq));
2071 	flc->word3_rflc_63_32 = upper_32_bits(
2072 			(uint64_t)&(((struct dpaa2_sec_qp *)
2073 			dev->data->queue_pairs[0])->rx_vq));
2074 	session->ctxt = priv;
2075 	for (i = 0; i < bufsize; i++)
2076 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
2077 			    i, priv->flc_desc[0].desc[i]);
2078 
2079 	return 0;
2080 
2081 error_out:
2082 	rte_free(session->cipher_key.data);
2083 	rte_free(session->auth_key.data);
2084 	rte_free(priv);
2085 	return -1;
2086 }
2087 
2088 static int
2089 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2090 			    struct rte_crypto_sym_xform *xform,	void *sess)
2091 {
	dpaa2_sec_session *session = sess;
	int ret;
2093 
2094 	PMD_INIT_FUNC_TRACE();
2095 
2096 	if (unlikely(sess == NULL)) {
2097 		RTE_LOG(ERR, PMD, "invalid session struct\n");
2098 		return -1;
2099 	}
2100 
2101 	/* Default IV length = 0 */
2102 	session->iv.length = 0;
2103 
2104 	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		ret = dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		ret = dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		ret = dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		ret = dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type\n");
		return -EINVAL;
	}

	return ret;
2138 }
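
/*
 * Illustrative only: the cipher-then-auth branch above corresponds to a
 * two-element transform chain such as the following (example algorithms;
 * key and IV fields elided):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = { .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			  .algo = RTE_CRYPTO_AUTH_SHA1_HMAC },
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = { .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			    .algo = RTE_CRYPTO_CIPHER_AES_CBC },
 *	};
 *
 * Passing &cipher then selects the DPAA2_SEC_CIPHER_HASH context type.
 */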
2139 
2140 static int
2141 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2142 			    struct rte_security_session_conf *conf,
2143 			    void *sess)
2144 {
2145 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2146 	struct rte_crypto_auth_xform *auth_xform;
2147 	struct rte_crypto_cipher_xform *cipher_xform;
2148 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2149 	struct ctxt_priv *priv;
2150 	struct ipsec_encap_pdb encap_pdb;
2151 	struct ipsec_decap_pdb decap_pdb;
2152 	struct alginfo authdata, cipherdata;
	int bufsize;
2154 	struct sec_flow_context *flc;
2155 
2156 	PMD_INIT_FUNC_TRACE();
2157 
2158 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2159 		cipher_xform = &conf->crypto_xform->cipher;
2160 		auth_xform = &conf->crypto_xform->next->auth;
2161 	} else {
2162 		auth_xform = &conf->crypto_xform->auth;
2163 		cipher_xform = &conf->crypto_xform->next->cipher;
2164 	}
2165 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2166 				sizeof(struct ctxt_priv) +
2167 				sizeof(struct sec_flc_desc),
2168 				RTE_CACHE_LINE_SIZE);
2169 
2170 	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No memory for priv CTXT\n");
2172 		return -ENOMEM;
2173 	}
2174 
2175 	flc = &priv->flc_desc[0].flc;
2176 
2177 	session->ctxt_type = DPAA2_SEC_IPSEC;
2178 	session->cipher_key.data = rte_zmalloc(NULL,
2179 					       cipher_xform->key.length,
2180 					       RTE_CACHE_LINE_SIZE);
2181 	if (session->cipher_key.data == NULL &&
2182 			cipher_xform->key.length > 0) {
2183 		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
2184 		rte_free(priv);
2185 		return -ENOMEM;
2186 	}
2187 
2188 	session->cipher_key.length = cipher_xform->key.length;
2189 	session->auth_key.data = rte_zmalloc(NULL,
2190 					auth_xform->key.length,
2191 					RTE_CACHE_LINE_SIZE);
2192 	if (session->auth_key.data == NULL &&
2193 			auth_xform->key.length > 0) {
2194 		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
2195 		rte_free(session->cipher_key.data);
2196 		rte_free(priv);
2197 		return -ENOMEM;
2198 	}
2199 	session->auth_key.length = auth_xform->key.length;
2200 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2201 			cipher_xform->key.length);
2202 	memcpy(session->auth_key.data, auth_xform->key.data,
2203 			auth_xform->key.length);
2204 
2205 	authdata.key = (uint64_t)session->auth_key.data;
2206 	authdata.keylen = session->auth_key.length;
2207 	authdata.key_enc_flags = 0;
2208 	authdata.key_type = RTA_DATA_IMM;
2209 	switch (auth_xform->algo) {
2210 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2211 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2212 		authdata.algmode = OP_ALG_AAI_HMAC;
2213 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2214 		break;
2215 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2216 		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2217 		authdata.algmode = OP_ALG_AAI_HMAC;
2218 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2219 		break;
2220 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2221 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2222 		authdata.algmode = OP_ALG_AAI_HMAC;
2223 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2224 		break;
2225 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2226 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2227 		authdata.algmode = OP_ALG_AAI_HMAC;
2228 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2229 		break;
2230 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2231 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2232 		authdata.algmode = OP_ALG_AAI_HMAC;
2233 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2234 		break;
2235 	case RTE_CRYPTO_AUTH_AES_CMAC:
2236 		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
2237 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2238 		break;
2239 	case RTE_CRYPTO_AUTH_NULL:
2240 		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
2241 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2242 		break;
2243 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2244 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2245 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2246 	case RTE_CRYPTO_AUTH_SHA1:
2247 	case RTE_CRYPTO_AUTH_SHA256:
2248 	case RTE_CRYPTO_AUTH_SHA512:
2249 	case RTE_CRYPTO_AUTH_SHA224:
2250 	case RTE_CRYPTO_AUTH_SHA384:
2251 	case RTE_CRYPTO_AUTH_MD5:
2252 	case RTE_CRYPTO_AUTH_AES_GMAC:
2253 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2254 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2255 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2256 		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2257 			auth_xform->algo);
2258 		goto out;
2259 	default:
2260 		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2261 			auth_xform->algo);
2262 		goto out;
2263 	}
2264 	cipherdata.key = (uint64_t)session->cipher_key.data;
2265 	cipherdata.keylen = session->cipher_key.length;
2266 	cipherdata.key_enc_flags = 0;
2267 	cipherdata.key_type = RTA_DATA_IMM;
2268 
2269 	switch (cipher_xform->algo) {
2270 	case RTE_CRYPTO_CIPHER_AES_CBC:
2271 		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
2272 		cipherdata.algmode = OP_ALG_AAI_CBC;
2273 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2274 		break;
2275 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2276 		cipherdata.algtype = OP_PCL_IPSEC_3DES;
2277 		cipherdata.algmode = OP_ALG_AAI_CBC;
2278 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2279 		break;
2280 	case RTE_CRYPTO_CIPHER_AES_CTR:
2281 		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
2282 		cipherdata.algmode = OP_ALG_AAI_CTR;
2283 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2284 		break;
2285 	case RTE_CRYPTO_CIPHER_NULL:
2286 		cipherdata.algtype = OP_PCL_IPSEC_NULL;
2287 		break;
2288 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2289 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2290 	case RTE_CRYPTO_CIPHER_AES_ECB:
2291 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2292 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2293 			cipher_xform->algo);
2294 		goto out;
2295 	default:
2296 		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2297 			cipher_xform->algo);
2298 		goto out;
2299 	}
2300 
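	/*
	 * Egress, tunnel mode: a template outer IPv4 header (protocol 0x32,
	 * i.e. ESP) is built and checksummed in software and embedded in
	 * the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL) so the SEC engine can
	 * prepend it to each outbound packet.
	 */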
2301 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2302 		struct ip ip4_hdr;
2303 
2304 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2305 		ip4_hdr.ip_v = IPVERSION;
2306 		ip4_hdr.ip_hl = 5;
2307 		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2308 		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2309 		ip4_hdr.ip_id = 0;
2310 		ip4_hdr.ip_off = 0;
2311 		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2312 		ip4_hdr.ip_p = 0x32;
2313 		ip4_hdr.ip_sum = 0;
2314 		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2315 		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2316 		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2317 			sizeof(struct ip));
2318 
2319 		/* For Sec Proto only one descriptor is required. */
2320 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2321 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2322 			PDBOPTS_ESP_OIHI_PDB_INL |
2323 			PDBOPTS_ESP_IVSRC |
2324 			PDBHMO_ESP_ENCAP_DTTL;
2325 		encap_pdb.spi = ipsec_xform->spi;
2326 		encap_pdb.ip_hdr_len = sizeof(struct ip);
2327 
2328 		session->dir = DIR_ENC;
2329 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2330 				1, 0, &encap_pdb,
2331 				(uint8_t *)&ip4_hdr,
2332 				&cipherdata, &authdata);
2333 	} else if (ipsec_xform->direction ==
2334 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2335 		flc->dhr = SEC_FLC_DHR_INBOUND;
2336 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
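		/*
		 * The upper halfword of the decap PDB options carries the
		 * length of the outer IP header to strip; only a plain
		 * IPv4 header (no options) is handled here.
		 */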
2337 		decap_pdb.options = sizeof(struct ip) << 16;
2338 		session->dir = DIR_DEC;
2339 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2340 				1, 0, &decap_pdb, &cipherdata, &authdata);
	} else {
		goto out;
	}

	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Invalid SEC-IPSEC descriptor\n");
		goto out;
	}
	flc->word1_sdl = (uint8_t)bufsize;
2344 
2345 	/* Enable the stashing control bit */
2346 	DPAA2_SET_FLC_RSC(flc);
2347 	flc->word2_rflc_31_0 = lower_32_bits(
2348 			(uint64_t)&(((struct dpaa2_sec_qp *)
2349 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2350 	flc->word3_rflc_63_32 = upper_32_bits(
2351 			(uint64_t)&(((struct dpaa2_sec_qp *)
2352 			dev->data->queue_pairs[0])->rx_vq));
2353 
2354 	/* Set EWS bit i.e. enable write-safe */
2355 	DPAA2_SET_FLC_EWS(flc);
2356 	/* Set BS = 1 i.e reuse input buffers as output buffers */
2357 	DPAA2_SET_FLC_REUSE_BS(flc);
2358 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2359 	DPAA2_SET_FLC_REUSE_FF(flc);
2360 
2361 	session->ctxt = priv;
2362 
2363 	return 0;
2364 out:
2365 	rte_free(session->auth_key.data);
2366 	rte_free(session->cipher_key.data);
2367 	rte_free(priv);
2368 	return -1;
2369 }
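
/*
 * Illustrative only: the IPsec path above is driven through the
 * rte_security API with a configuration along these lines (example
 * values; cipher_xform here stands for a cipher->auth chain as required
 * for egress above):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */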
2370 
2371 static int
2372 dpaa2_sec_security_session_create(void *dev,
2373 				  struct rte_security_session_conf *conf,
2374 				  struct rte_security_session *sess,
2375 				  struct rte_mempool *mempool)
2376 {
2377 	void *sess_private_data;
2378 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2379 	int ret;
2380 
2381 	if (rte_mempool_get(mempool, &sess_private_data)) {
2382 		CDEV_LOG_ERR(
2383 			"Couldn't get object from session mempool");
2384 		return -ENOMEM;
2385 	}
2386 
2387 	switch (conf->protocol) {
2388 	case RTE_SECURITY_PROTOCOL_IPSEC:
2389 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2390 				sess_private_data);
2391 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Return the object to the mempool before bailing out */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
2396 	}
2397 	if (ret != 0) {
2398 		PMD_DRV_LOG(ERR,
2399 			"DPAA2 PMD: failed to configure session parameters");
2400 
2401 		/* Return session to mempool */
2402 		rte_mempool_put(mempool, sess_private_data);
2403 		return ret;
2404 	}
2405 
2406 	set_sec_session_private_data(sess, sess_private_data);
2407 
2408 	return ret;
2409 }
2410 
2411 /** Clear the memory of session so it doesn't leave key material behind */
2412 static int
2413 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2414 		struct rte_security_session *sess)
2415 {
2416 	PMD_INIT_FUNC_TRACE();
2417 	void *sess_priv = get_sec_session_private_data(sess);
2418 
2419 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2420 
2421 	if (sess_priv) {
2422 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2423 
2424 		rte_free(s->ctxt);
2425 		rte_free(s->cipher_key.data);
2426 		rte_free(s->auth_key.data);
		memset(sess_priv, 0, sizeof(dpaa2_sec_session));
2428 		set_sec_session_private_data(sess, NULL);
2429 		rte_mempool_put(sess_mp, sess_priv);
2430 	}
2431 	return 0;
2432 }
2433 
2434 static int
2435 dpaa2_sec_session_configure(struct rte_cryptodev *dev,
2436 		struct rte_crypto_sym_xform *xform,
2437 		struct rte_cryptodev_sym_session *sess,
2438 		struct rte_mempool *mempool)
2439 {
2440 	void *sess_private_data;
2441 	int ret;
2442 
2443 	if (rte_mempool_get(mempool, &sess_private_data)) {
2444 		CDEV_LOG_ERR(
2445 			"Couldn't get object from session mempool");
2446 		return -ENOMEM;
2447 	}
2448 
2449 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2450 	if (ret != 0) {
2451 		PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
2452 				"session parameters");
2453 
2454 		/* Return session to mempool */
2455 		rte_mempool_put(mempool, sess_private_data);
2456 		return ret;
2457 	}
2458 
2459 	set_session_private_data(sess, dev->driver_id,
2460 		sess_private_data);
2461 
2462 	return 0;
2463 }
2464 
2465 /** Clear the memory of session so it doesn't leave key material behind */
2466 static void
2467 dpaa2_sec_session_clear(struct rte_cryptodev *dev,
2468 		struct rte_cryptodev_sym_session *sess)
2469 {
2470 	PMD_INIT_FUNC_TRACE();
2471 	uint8_t index = dev->driver_id;
2472 	void *sess_priv = get_session_private_data(sess, index);
2473 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2474 
2475 	if (sess_priv) {
2476 		rte_free(s->ctxt);
2477 		rte_free(s->cipher_key.data);
2478 		rte_free(s->auth_key.data);
		memset(sess_priv, 0, sizeof(dpaa2_sec_session));
2480 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2481 		set_session_private_data(sess, index, NULL);
2482 		rte_mempool_put(sess_mp, sess_priv);
2483 	}
2484 }
2485 
2486 static int
2487 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2488 			struct rte_cryptodev_config *config __rte_unused)
2489 {
2490 	PMD_INIT_FUNC_TRACE();
2491 
2492 	return 0;
2493 }
2494 
2495 static int
2496 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2497 {
2498 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2499 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2500 	struct dpseci_attr attr;
2501 	struct dpaa2_queue *dpaa2_q;
2502 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2503 					dev->data->queue_pairs;
2504 	struct dpseci_rx_queue_attr rx_attr;
2505 	struct dpseci_tx_queue_attr tx_attr;
2506 	int ret, i;
2507 
2508 	PMD_INIT_FUNC_TRACE();
2509 
2510 	memset(&attr, 0, sizeof(struct dpseci_attr));
2511 
2512 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2513 	if (ret) {
2514 		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
2515 			     priv->hw_id);
2516 		goto get_attr_failure;
2517 	}
2518 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2519 	if (ret) {
2520 		PMD_INIT_LOG(ERR,
2521 			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
2522 		goto get_attr_failure;
2523 	}
2524 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2525 		dpaa2_q = &qp[i]->rx_vq;
2526 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2527 				    &rx_attr);
2528 		dpaa2_q->fqid = rx_attr.fqid;
2529 		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
2530 	}
2531 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2532 		dpaa2_q = &qp[i]->tx_vq;
2533 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2534 				    &tx_attr);
2535 		dpaa2_q->fqid = tx_attr.fqid;
2536 		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
2537 	}
2538 
2539 	return 0;
2540 get_attr_failure:
2541 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2542 	return -1;
2543 }
2544 
2545 static void
2546 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2547 {
2548 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2549 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2550 	int ret;
2551 
2552 	PMD_INIT_FUNC_TRACE();
2553 
2554 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2555 	if (ret) {
2556 		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
2557 			     priv->hw_id);
2558 		return;
2559 	}
2560 
2561 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2562 	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %x\n",
2564 			     ret);
2565 		return;
2566 	}
2567 }
2568 
2569 static int
2570 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2571 {
2572 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2573 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2574 	int ret;
2575 
2576 	PMD_INIT_FUNC_TRACE();
2577 
2578 	/* Function is reverse of dpaa2_sec_dev_init.
2579 	 * It does the following:
2580 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2581 	 * 2. Close the DPSECI device
2582 	 * 3. Free the allocated resources.
2583 	 */
2584 
2585 	/*Close the device at underlying layer*/
2586 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2587 	if (ret) {
2588 		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
2589 			     " error code %d\n", ret);
2590 		return -1;
2591 	}
2592 
2593 	/*Free the allocated memory for ethernet private data and dpseci*/
2594 	priv->hw = NULL;
2595 	rte_free(dpseci);
2596 
2597 	return 0;
2598 }
2599 
2600 static void
2601 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
2602 			struct rte_cryptodev_info *info)
2603 {
2604 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2605 
2606 	PMD_INIT_FUNC_TRACE();
2607 	if (info != NULL) {
2608 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2609 		info->feature_flags = dev->feature_flags;
2610 		info->capabilities = dpaa2_sec_capabilities;
2611 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2612 		info->driver_id = cryptodev_driver_id;
2613 	}
2614 }
2615 
2616 static
2617 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
2618 			 struct rte_cryptodev_stats *stats)
2619 {
2620 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2621 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2622 	struct dpseci_sec_counters counters = {0};
2623 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2624 					dev->data->queue_pairs;
2625 	int ret, i;
2626 
2627 	PMD_INIT_FUNC_TRACE();
2628 	if (stats == NULL) {
2629 		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
2630 		return;
2631 	}
2632 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2633 		if (qp[i] == NULL) {
2634 			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
2635 			continue;
2636 		}
2637 
2638 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
2639 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
2640 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
2641 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
2642 	}
2643 
2644 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
2645 				      &counters);
2646 	if (ret) {
2647 		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
2648 	} else {
2649 		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %" PRIu64
			    "\n\tNumber of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNumber of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNumber of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNumber of Outbound Bytes Protected = %" PRIu64
			    "\n\tNumber of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNumber of Inbound Bytes Validated = %" PRIu64,
2657 			    counters.dequeued_requests,
2658 			    counters.ob_enc_requests,
2659 			    counters.ib_dec_requests,
2660 			    counters.ob_enc_bytes,
2661 			    counters.ob_prot_bytes,
2662 			    counters.ib_dec_bytes,
2663 			    counters.ib_valid_bytes);
2664 	}
2665 }
2666 
2667 static
2668 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
2669 {
2670 	int i;
2671 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2672 				   (dev->data->queue_pairs);
2673 
2674 	PMD_INIT_FUNC_TRACE();
2675 
2676 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2677 		if (qp[i] == NULL) {
2678 			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
2679 			continue;
2680 		}
2681 		qp[i]->tx_vq.rx_pkts = 0;
2682 		qp[i]->tx_vq.tx_pkts = 0;
2683 		qp[i]->tx_vq.err_pkts = 0;
2684 		qp[i]->rx_vq.rx_pkts = 0;
2685 		qp[i]->rx_vq.tx_pkts = 0;
2686 		qp[i]->rx_vq.err_pkts = 0;
2687 	}
2688 }
2689 
2690 static struct rte_cryptodev_ops crypto_ops = {
2691 	.dev_configure	      = dpaa2_sec_dev_configure,
2692 	.dev_start	      = dpaa2_sec_dev_start,
2693 	.dev_stop	      = dpaa2_sec_dev_stop,
2694 	.dev_close	      = dpaa2_sec_dev_close,
2695 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
2696 	.stats_get	      = dpaa2_sec_stats_get,
2697 	.stats_reset	      = dpaa2_sec_stats_reset,
2698 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
2699 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
2700 	.queue_pair_start     = dpaa2_sec_queue_pair_start,
2701 	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
2702 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
2703 	.session_get_size     = dpaa2_sec_session_get_size,
2704 	.session_configure    = dpaa2_sec_session_configure,
2705 	.session_clear        = dpaa2_sec_session_clear,
2706 };
2707 
2708 static const struct rte_security_capability *
2709 dpaa2_sec_capabilities_get(void *device __rte_unused)
2710 {
2711 	return dpaa2_sec_security_cap;
2712 }
2713 
2714 struct rte_security_ops dpaa2_sec_security_ops = {
2715 	.session_create = dpaa2_sec_security_session_create,
2716 	.session_update = NULL,
2717 	.session_stats_get = NULL,
2718 	.session_destroy = dpaa2_sec_security_session_destroy,
2719 	.set_pkt_metadata = NULL,
2720 	.capabilities_get = dpaa2_sec_capabilities_get
2721 };
2722 
2723 static int
2724 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
2725 {
2726 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2727 
2728 	rte_free(dev->security_ctx);
2729 
2730 	rte_mempool_free(internals->fle_pool);
2731 
2732 	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
2733 		     dev->data->name, rte_socket_id());
2734 
2735 	return 0;
2736 }
2737 
2738 static int
2739 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
2740 {
2741 	struct dpaa2_sec_dev_private *internals;
2742 	struct rte_device *dev = cryptodev->device;
2743 	struct rte_dpaa2_device *dpaa2_dev;
2744 	struct rte_security_ctx *security_instance;
2745 	struct fsl_mc_io *dpseci;
2746 	uint16_t token;
2747 	struct dpseci_attr attr;
2748 	int retcode, hw_id;
2749 	char str[20];
2750 
2751 	PMD_INIT_FUNC_TRACE();
2752 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2753 	if (dpaa2_dev == NULL) {
2754 		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
2755 		return -1;
2756 	}
2757 	hw_id = dpaa2_dev->object_id;
2758 
2759 	cryptodev->driver_id = cryptodev_driver_id;
2760 	cryptodev->dev_ops = &crypto_ops;
2761 
2762 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
2763 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
2764 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2765 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2766 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2767 			RTE_CRYPTODEV_FF_SECURITY |
2768 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2769 
2770 	internals = cryptodev->data->dev_private;
2771 	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
2772 
2773 	/*
2774 	 * For secondary processes, we don't initialise any further as primary
2775 	 * has already done this work. Only check we don't need a different
2776 	 * RX function
2777 	 */
2778 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2779 		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
2780 		return 0;
2781 	}
2782 
2783 	/* Initialize security_ctx only for primary process*/
2784 	security_instance = rte_malloc("rte_security_instances_ops",
2785 				sizeof(struct rte_security_ctx), 0);
2786 	if (security_instance == NULL)
2787 		return -ENOMEM;
2788 	security_instance->device = (void *)cryptodev;
2789 	security_instance->ops = &dpaa2_sec_security_ops;
2790 	security_instance->sess_cnt = 0;
2791 	cryptodev->security_ctx = security_instance;
2792 
2793 	/*Open the rte device via MC and save the handle for further use*/
2794 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
2795 				sizeof(struct fsl_mc_io), 0);
2796 	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error allocating memory for dpseci object");
2799 		return -1;
2800 	}
2801 	dpseci->regs = rte_mcp_ptr_list[0];
2802 
2803 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
2804 	if (retcode != 0) {
2805 		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
2806 			     retcode);
2807 		goto init_error;
2808 	}
2809 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
2810 	if (retcode != 0) {
2811 		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
2813 			     retcode);
2814 		goto init_error;
2815 	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);
2817 
2818 	internals->max_nb_queue_pairs = attr.num_tx_queues;
2819 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
2820 	internals->hw = dpseci;
2821 	internals->token = token;
2822 
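	/* The FLE pool provides the per-operation scratch buffers in which
	 * the build_*_fd() routines assemble frame list entries; sizing
	 * comes from the FLE_POOL_* constants above.
	 */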
	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
2824 	internals->fle_pool = rte_mempool_create((const char *)str,
2825 			FLE_POOL_NUM_BUFS,
2826 			FLE_POOL_BUF_SIZE,
2827 			FLE_POOL_CACHE_SIZE, 0,
2828 			NULL, NULL, NULL, NULL,
2829 			SOCKET_ID_ANY, 0);
2830 	if (!internals->fle_pool) {
2831 		RTE_LOG(ERR, PMD, "%s create failed\n", str);
2832 		goto init_error;
2833 	}
2834 
2835 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
2836 	return 0;
2837 
2838 init_error:
2839 	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
2840 
2841 	/* dpaa2_sec_uninit(crypto_dev_name); */
2842 	return -EFAULT;
2843 }
2844 
2845 static int
2846 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
2847 			  struct rte_dpaa2_device *dpaa2_dev)
2848 {
2849 	struct rte_cryptodev *cryptodev;
2850 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2851 
2852 	int retval;
2853 
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);
2855 
2856 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2857 	if (cryptodev == NULL)
2858 		return -ENOMEM;
2859 
2860 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2861 		cryptodev->data->dev_private = rte_zmalloc_socket(
2862 					"cryptodev private structure",
2863 					sizeof(struct dpaa2_sec_dev_private),
2864 					RTE_CACHE_LINE_SIZE,
2865 					rte_socket_id());
2866 
2867 		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
					"device data");
2870 	}
2871 
2872 	dpaa2_dev->cryptodev = cryptodev;
2873 	cryptodev->device = &dpaa2_dev->device;
2874 	cryptodev->device->driver = &dpaa2_drv->driver;
2875 
2876 	/* init user callbacks */
2877 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2878 
2879 	/* Invoke PMD device initialization function */
2880 	retval = dpaa2_sec_dev_init(cryptodev);
2881 	if (retval == 0)
2882 		return 0;
2883 
2884 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2885 		rte_free(cryptodev->data->dev_private);
2886 
2887 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
2888 
2889 	return -ENXIO;
2890 }
2891 
2892 static int
2893 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
2894 {
2895 	struct rte_cryptodev *cryptodev;
2896 	int ret;
2897 
2898 	cryptodev = dpaa2_dev->cryptodev;
2899 	if (cryptodev == NULL)
2900 		return -ENODEV;
2901 
2902 	ret = dpaa2_sec_uninit(cryptodev);
2903 	if (ret)
2904 		return ret;
2905 
2906 	return rte_cryptodev_pmd_destroy(cryptodev);
2907 }
2908 
2909 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
2910 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
2911 	.drv_type = DPAA2_CRYPTO,
2912 	.driver = {
2913 		.name = "DPAA2 SEC PMD"
2914 	},
2915 	.probe = cryptodev_dpaa2_sec_probe,
2916 	.remove = cryptodev_dpaa2_sec_remove,
2917 };
2918 
2919 static struct cryptodev_driver dpaa2_sec_crypto_drv;
2920 
2921 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
2922 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
2923 		cryptodev_driver_id);
2924