xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 12a652a02b080f26a1e9fd0169a58d6bcbe7b03c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIBRTE_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <rte_dev.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_mbuf.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
29 
30 #include <fsl_usd.h>
31 #include <fsl_qman.h>
32 #include <dpaa_of.h>
33 
34 /* RTA header files */
35 #include <desc/common.h>
36 #include <desc/algo.h>
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 
40 #include <rte_dpaa_bus.h>
41 #include <dpaa_sec.h>
42 #include <dpaa_sec_event.h>
43 #include <dpaa_sec_log.h>
44 #include <dpaax_iova_table.h>
45 
46 enum rta_sec_era rta_sec_era;
47 
48 int dpaa_logtype_sec;
49 
50 static uint8_t cryptodev_driver_id;
51 
52 static __thread struct rte_crypto_op **dpaa_sec_ops;
53 static __thread int dpaa_sec_op_nb;
54 
55 static int
56 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
57 
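/* Set the crypto op status based on the SEC frame descriptor status */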
58 static inline void
59 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
60 {
61 	if (!ctx->fd_status) {
62 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
63 	} else {
64 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
65 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
66 	}
67 }
68 
69 static inline struct dpaa_sec_op_ctx *
70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
71 {
72 	struct dpaa_sec_op_ctx *ctx;
73 	int i, retval;
74 
75 	retval = rte_mempool_get(
76 			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
77 			(void **)(&ctx));
78 	if (!ctx || retval) {
79 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
80 		return NULL;
81 	}
82 	/*
83 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
84 	 * One call to dcbz_64() clears 64 bytes, so it is called once per
85 	 * four SG entries to clear them all. Since dpaa_sec_alloc_ctx() is
86 	 * called for each packet, memset() would be costlier than dcbz_64().
87 	 */
88 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
89 		dcbz_64(&ctx->job.sg[i]);
90 
91 	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
92 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
93 
94 	return ctx;
95 }
96 
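/* Convert a virtual address to an IOVA using its memseg; the PA-VA table
 * is refreshed with the memseg mapping so later lookups stay cheap.
 */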
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
99 {
100 	const struct rte_memseg *ms;
101 
102 	ms = rte_mem_virt2memseg(vaddr, NULL);
103 	if (ms) {
104 		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
105 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
106 	}
107 	return (size_t)NULL;
108 }
109 
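/* Convert an IOVA back to a virtual address, falling back to the EAL
 * lookup when the address is not present in the PA-VA table.
 */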
110 static inline void *
111 dpaa_mem_ptov(rte_iova_t paddr)
112 {
113 	void *va;
114 
115 	va = (void *)dpaax_iova_table_get_va(paddr);
116 	if (likely(va))
117 		return va;
118 
119 	return rte_mem_iova2virt(paddr);
120 }
121 
122 static void
123 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
124 		   struct qman_fq *fq,
125 		   const struct qm_mr_entry *msg)
126 {
127 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
128 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
129 }
130 
131 /* Initialize the queue with the CAAM channel as the destination so that
132  * all packets in this queue can be dispatched to CAAM.
133  */
134 static int
135 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
136 		 uint32_t fqid_out)
137 {
138 	struct qm_mcc_initfq fq_opts;
139 	uint32_t flags;
140 	int ret = -1;
141 
142 	/* Clear FQ options */
143 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
144 
145 	flags = QMAN_INITFQ_FLAG_SCHED;
146 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
147 			  QM_INITFQ_WE_CONTEXTB;
148 
149 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
150 	fq_opts.fqd.context_b = fqid_out;
151 	fq_opts.fqd.dest.channel = qm_channel_caam;
152 	fq_opts.fqd.dest.wq = 0;
153 
154 	fq_in->cb.ern  = ern_sec_fq_handler;
155 
156 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
157 
158 	ret = qman_init_fq(fq_in, flags, &fq_opts);
159 	if (unlikely(ret != 0))
160 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
161 
162 	return ret;
163 }
164 
165 /* Jobs are enqueued on in_fq and CAAM puts the crypto result on out_fq */
166 static enum qman_cb_dqrr_result
167 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
168 		  struct qman_fq *fq __always_unused,
169 		  const struct qm_dqrr_entry *dqrr)
170 {
171 	const struct qm_fd *fd;
172 	struct dpaa_sec_job *job;
173 	struct dpaa_sec_op_ctx *ctx;
174 
175 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
176 		return qman_cb_dqrr_defer;
177 
178 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
179 		return qman_cb_dqrr_consume;
180 
181 	fd = &dqrr->fd;
182 	/* The SG table is embedded in an op ctx:
183 	 * sg[0] is for output,
184 	 * sg[1] is for input.
185 	 */
186 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
187 
188 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
189 	ctx->fd_status = fd->status;
190 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
191 		struct qm_sg_entry *sg_out;
192 		uint32_t len;
193 		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
194 				ctx->op->sym->m_src : ctx->op->sym->m_dst;
195 
196 		sg_out = &job->sg[0];
197 		hw_sg_to_cpu(sg_out);
198 		len = sg_out->length;
199 		mbuf->pkt_len = len;
200 		while (mbuf->next != NULL) {
201 			len -= mbuf->data_len;
202 			mbuf = mbuf->next;
203 		}
204 		mbuf->data_len = len;
205 	}
206 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
207 	dpaa_sec_op_ending(ctx);
208 
209 	return qman_cb_dqrr_consume;
210 }
211 
212 /* caam result is put into this queue */
213 static int
214 dpaa_sec_init_tx(struct qman_fq *fq)
215 {
216 	int ret;
217 	struct qm_mcc_initfq opts;
218 	uint32_t flags;
219 
220 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
221 		QMAN_FQ_FLAG_DYNAMIC_FQID;
222 
223 	ret = qman_create_fq(0, flags, fq);
224 	if (unlikely(ret)) {
225 		DPAA_SEC_ERR("qman_create_fq failed");
226 		return ret;
227 	}
228 
229 	memset(&opts, 0, sizeof(opts));
230 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
231 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
232 
233 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
234 
235 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
236 	fq->cb.ern  = ern_sec_fq_handler;
237 
238 	ret = qman_init_fq(fq, 0, &opts);
239 	if (unlikely(ret)) {
240 		DPAA_SEC_ERR("unable to init caam source fq!");
241 		return ret;
242 	}
243 
244 	return ret;
245 }
246 
247 static inline int is_encode(dpaa_sec_session *ses)
248 {
249 	return ses->dir == DIR_ENC;
250 }
251 
252 static inline int is_decode(dpaa_sec_session *ses)
253 {
254 	return ses->dir == DIR_DEC;
255 }
256 
257 #ifdef RTE_LIBRTE_SECURITY
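/* prepare pdcp proto command block of the session */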
258 static int
259 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
260 {
261 	struct alginfo authdata = {0}, cipherdata = {0};
262 	struct sec_cdb *cdb = &ses->cdb;
263 	struct alginfo *p_authdata = NULL;
264 	int32_t shared_desc_len = 0;
265 	int err;
266 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
267 	int swap = false;
268 #else
269 	int swap = true;
270 #endif
271 
272 	cipherdata.key = (size_t)ses->cipher_key.data;
273 	cipherdata.keylen = ses->cipher_key.length;
274 	cipherdata.key_enc_flags = 0;
275 	cipherdata.key_type = RTA_DATA_IMM;
276 	cipherdata.algtype = ses->cipher_key.alg;
277 	cipherdata.algmode = ses->cipher_key.algmode;
278 
279 	cdb->sh_desc[0] = cipherdata.keylen;
280 	cdb->sh_desc[1] = 0;
281 	cdb->sh_desc[2] = 0;
282 
283 	if (ses->auth_alg) {
284 		authdata.key = (size_t)ses->auth_key.data;
285 		authdata.keylen = ses->auth_key.length;
286 		authdata.key_enc_flags = 0;
287 		authdata.key_type = RTA_DATA_IMM;
288 		authdata.algtype = ses->auth_key.alg;
289 		authdata.algmode = ses->auth_key.algmode;
290 
291 		p_authdata = &authdata;
292 
293 		cdb->sh_desc[1] = authdata.keylen;
294 	}
295 
296 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
297 			       MIN_JOB_DESC_SIZE,
298 			       (unsigned int *)cdb->sh_desc,
299 			       &cdb->sh_desc[2], 2);
300 	if (err < 0) {
301 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
302 		return err;
303 	}
304 
305 	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
306 		cipherdata.key =
307 			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
308 		cipherdata.key_type = RTA_DATA_PTR;
309 	}
310 	if (!(cdb->sh_desc[2] & (1 << 1)) &&  authdata.keylen) {
311 		authdata.key =
312 			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
313 		authdata.key_type = RTA_DATA_PTR;
314 	}
315 
316 	cdb->sh_desc[0] = 0;
317 	cdb->sh_desc[1] = 0;
318 	cdb->sh_desc[2] = 0;
319 
320 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
321 		if (ses->dir == DIR_ENC)
322 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
323 					cdb->sh_desc, 1, swap,
324 					ses->pdcp.hfn,
325 					ses->pdcp.sn_size,
326 					ses->pdcp.bearer,
327 					ses->pdcp.pkt_dir,
328 					ses->pdcp.hfn_threshold,
329 					&cipherdata, &authdata,
330 					0);
331 		else if (ses->dir == DIR_DEC)
332 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
333 					cdb->sh_desc, 1, swap,
334 					ses->pdcp.hfn,
335 					ses->pdcp.sn_size,
336 					ses->pdcp.bearer,
337 					ses->pdcp.pkt_dir,
338 					ses->pdcp.hfn_threshold,
339 					&cipherdata, &authdata,
340 					0);
341 	} else {
342 		if (ses->dir == DIR_ENC)
343 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
344 					cdb->sh_desc, 1, swap,
345 					ses->pdcp.sn_size,
346 					ses->pdcp.hfn,
347 					ses->pdcp.bearer,
348 					ses->pdcp.pkt_dir,
349 					ses->pdcp.hfn_threshold,
350 					&cipherdata, p_authdata, 0);
351 		else if (ses->dir == DIR_DEC)
352 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
353 					cdb->sh_desc, 1, swap,
354 					ses->pdcp.sn_size,
355 					ses->pdcp.hfn,
356 					ses->pdcp.bearer,
357 					ses->pdcp.pkt_dir,
358 					ses->pdcp.hfn_threshold,
359 					&cipherdata, p_authdata, 0);
360 	}
361 	return shared_desc_len;
362 }
363 
364 /* prepare ipsec proto command block of the session */
365 static int
366 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
367 {
368 	struct alginfo cipherdata = {0}, authdata = {0};
369 	struct sec_cdb *cdb = &ses->cdb;
370 	int32_t shared_desc_len = 0;
371 	int err;
372 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
373 	int swap = false;
374 #else
375 	int swap = true;
376 #endif
377 
378 	cipherdata.key = (size_t)ses->cipher_key.data;
379 	cipherdata.keylen = ses->cipher_key.length;
380 	cipherdata.key_enc_flags = 0;
381 	cipherdata.key_type = RTA_DATA_IMM;
382 	cipherdata.algtype = ses->cipher_key.alg;
383 	cipherdata.algmode = ses->cipher_key.algmode;
384 
385 	if (ses->auth_key.length) {
386 		authdata.key = (size_t)ses->auth_key.data;
387 		authdata.keylen = ses->auth_key.length;
388 		authdata.key_enc_flags = 0;
389 		authdata.key_type = RTA_DATA_IMM;
390 		authdata.algtype = ses->auth_key.alg;
391 		authdata.algmode = ses->auth_key.algmode;
392 	}
393 
394 	cdb->sh_desc[0] = cipherdata.keylen;
395 	cdb->sh_desc[1] = authdata.keylen;
396 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
397 			       MIN_JOB_DESC_SIZE,
398 			       (unsigned int *)cdb->sh_desc,
399 			       &cdb->sh_desc[2], 2);
400 
401 	if (err < 0) {
402 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
403 		return err;
404 	}
405 	if (cdb->sh_desc[2] & 1)
406 		cipherdata.key_type = RTA_DATA_IMM;
407 	else {
408 		cipherdata.key = (size_t)dpaa_mem_vtop(
409 					(void *)(size_t)cipherdata.key);
410 		cipherdata.key_type = RTA_DATA_PTR;
411 	}
412 	if (cdb->sh_desc[2] & (1<<1))
413 		authdata.key_type = RTA_DATA_IMM;
414 	else {
415 		authdata.key = (size_t)dpaa_mem_vtop(
416 					(void *)(size_t)authdata.key);
417 		authdata.key_type = RTA_DATA_PTR;
418 	}
419 
420 	cdb->sh_desc[0] = 0;
421 	cdb->sh_desc[1] = 0;
422 	cdb->sh_desc[2] = 0;
423 	if (ses->dir == DIR_ENC) {
424 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
425 				cdb->sh_desc,
426 				true, swap, SHR_SERIAL,
427 				&ses->encap_pdb,
428 				(uint8_t *)&ses->ip4_hdr,
429 				&cipherdata, &authdata);
430 	} else if (ses->dir == DIR_DEC) {
431 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
432 				cdb->sh_desc,
433 				true, swap, SHR_SERIAL,
434 				&ses->decap_pdb,
435 				&cipherdata, &authdata);
436 	}
437 	return shared_desc_len;
438 }
439 #endif
440 /* prepare command block of the session */
441 static int
442 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
443 {
444 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
445 	int32_t shared_desc_len = 0;
446 	struct sec_cdb *cdb = &ses->cdb;
447 	int err;
448 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
449 	int swap = false;
450 #else
451 	int swap = true;
452 #endif
453 
454 	memset(cdb, 0, sizeof(struct sec_cdb));
455 
456 	switch (ses->ctxt) {
457 #ifdef RTE_LIBRTE_SECURITY
458 	case DPAA_SEC_IPSEC:
459 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
460 		break;
461 	case DPAA_SEC_PDCP:
462 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
463 		break;
464 #endif
465 	case DPAA_SEC_CIPHER:
466 		alginfo_c.key = (size_t)ses->cipher_key.data;
467 		alginfo_c.keylen = ses->cipher_key.length;
468 		alginfo_c.key_enc_flags = 0;
469 		alginfo_c.key_type = RTA_DATA_IMM;
470 		alginfo_c.algtype = ses->cipher_key.alg;
471 		alginfo_c.algmode = ses->cipher_key.algmode;
472 
473 		switch (ses->cipher_alg) {
474 		case RTE_CRYPTO_CIPHER_AES_CBC:
475 		case RTE_CRYPTO_CIPHER_3DES_CBC:
476 		case RTE_CRYPTO_CIPHER_AES_CTR:
477 		case RTE_CRYPTO_CIPHER_3DES_CTR:
478 			shared_desc_len = cnstr_shdsc_blkcipher(
479 					cdb->sh_desc, true,
480 					swap, SHR_NEVER, &alginfo_c,
481 					ses->iv.length,
482 					ses->dir);
483 			break;
484 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
485 			shared_desc_len = cnstr_shdsc_snow_f8(
486 					cdb->sh_desc, true, swap,
487 					&alginfo_c,
488 					ses->dir);
489 			break;
490 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
491 			shared_desc_len = cnstr_shdsc_zuce(
492 					cdb->sh_desc, true, swap,
493 					&alginfo_c,
494 					ses->dir);
495 			break;
496 		default:
497 			DPAA_SEC_ERR("unsupported cipher alg %d",
498 				     ses->cipher_alg);
499 			return -ENOTSUP;
500 		}
501 		break;
502 	case DPAA_SEC_AUTH:
503 		alginfo_a.key = (size_t)ses->auth_key.data;
504 		alginfo_a.keylen = ses->auth_key.length;
505 		alginfo_a.key_enc_flags = 0;
506 		alginfo_a.key_type = RTA_DATA_IMM;
507 		alginfo_a.algtype = ses->auth_key.alg;
508 		alginfo_a.algmode = ses->auth_key.algmode;
509 		switch (ses->auth_alg) {
510 		case RTE_CRYPTO_AUTH_MD5_HMAC:
511 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
512 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
513 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
514 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
515 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
516 			shared_desc_len = cnstr_shdsc_hmac(
517 						cdb->sh_desc, true,
518 						swap, SHR_NEVER, &alginfo_a,
519 						!ses->dir,
520 						ses->digest_length);
521 			break;
522 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
523 			shared_desc_len = cnstr_shdsc_snow_f9(
524 						cdb->sh_desc, true, swap,
525 						&alginfo_a,
526 						!ses->dir,
527 						ses->digest_length);
528 			break;
529 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
530 			shared_desc_len = cnstr_shdsc_zuca(
531 						cdb->sh_desc, true, swap,
532 						&alginfo_a,
533 						!ses->dir,
534 						ses->digest_length);
535 			break;
536 		default:
537 			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
538 		}
539 		break;
540 	case DPAA_SEC_AEAD:
541 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
542 			DPAA_SEC_ERR("not supported aead alg");
543 			return -ENOTSUP;
544 		}
545 		alginfo.key = (size_t)ses->aead_key.data;
546 		alginfo.keylen = ses->aead_key.length;
547 		alginfo.key_enc_flags = 0;
548 		alginfo.key_type = RTA_DATA_IMM;
549 		alginfo.algtype = ses->aead_key.alg;
550 		alginfo.algmode = ses->aead_key.algmode;
551 
552 		if (ses->dir == DIR_ENC)
553 			shared_desc_len = cnstr_shdsc_gcm_encap(
554 					cdb->sh_desc, true, swap, SHR_NEVER,
555 					&alginfo,
556 					ses->iv.length,
557 					ses->digest_length);
558 		else
559 			shared_desc_len = cnstr_shdsc_gcm_decap(
560 					cdb->sh_desc, true, swap, SHR_NEVER,
561 					&alginfo,
562 					ses->iv.length,
563 					ses->digest_length);
564 		break;
565 	case DPAA_SEC_CIPHER_HASH:
566 		alginfo_c.key = (size_t)ses->cipher_key.data;
567 		alginfo_c.keylen = ses->cipher_key.length;
568 		alginfo_c.key_enc_flags = 0;
569 		alginfo_c.key_type = RTA_DATA_IMM;
570 		alginfo_c.algtype = ses->cipher_key.alg;
571 		alginfo_c.algmode = ses->cipher_key.algmode;
572 
573 		alginfo_a.key = (size_t)ses->auth_key.data;
574 		alginfo_a.keylen = ses->auth_key.length;
575 		alginfo_a.key_enc_flags = 0;
576 		alginfo_a.key_type = RTA_DATA_IMM;
577 		alginfo_a.algtype = ses->auth_key.alg;
578 		alginfo_a.algmode = ses->auth_key.algmode;
579 
580 		cdb->sh_desc[0] = alginfo_c.keylen;
581 		cdb->sh_desc[1] = alginfo_a.keylen;
582 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
583 				       MIN_JOB_DESC_SIZE,
584 				       (unsigned int *)cdb->sh_desc,
585 				       &cdb->sh_desc[2], 2);
586 
587 		if (err < 0) {
588 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
589 			return err;
590 		}
591 		if (cdb->sh_desc[2] & 1)
592 			alginfo_c.key_type = RTA_DATA_IMM;
593 		else {
594 			alginfo_c.key = (size_t)dpaa_mem_vtop(
595 						(void *)(size_t)alginfo_c.key);
596 			alginfo_c.key_type = RTA_DATA_PTR;
597 		}
598 		if (cdb->sh_desc[2] & (1<<1))
599 			alginfo_a.key_type = RTA_DATA_IMM;
600 		else {
601 			alginfo_a.key = (size_t)dpaa_mem_vtop(
602 						(void *)(size_t)alginfo_a.key);
603 			alginfo_a.key_type = RTA_DATA_PTR;
604 		}
605 		cdb->sh_desc[0] = 0;
606 		cdb->sh_desc[1] = 0;
607 		cdb->sh_desc[2] = 0;
608 		/* auth_only_len is set to 0 here; it is overwritten
609 		 * in the FD for each packet.
610 		 */
611 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
612 				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
613 				ses->iv.length,
614 				ses->digest_length, ses->dir);
615 		break;
616 	case DPAA_SEC_HASH_CIPHER:
617 	default:
618 		DPAA_SEC_ERR("error: Unsupported session");
619 		return -ENOTSUP;
620 	}
621 
622 	if (shared_desc_len < 0) {
623 		DPAA_SEC_ERR("error in preparing command block");
624 		return shared_desc_len;
625 	}
626 
627 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
628 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
629 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
630 
631 	return 0;
632 }
633 
634 /* qp is lockless, should be accessed by only one thread */
635 static int
636 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
637 {
638 	struct qman_fq *fq;
639 	unsigned int pkts = 0;
640 	int num_rx_bufs, ret;
641 	struct qm_dqrr_entry *dq;
642 	uint32_t vdqcr_flags = 0;
643 
644 	fq = &qp->outq;
645 	/*
646 	 * For requests of fewer than four buffers we set the QM_VDQCR_EXACT
647 	 * flag and provide the exact number of buffers. Otherwise the flag is
648 	 * not set; an inexact dequeue can return up to two more buffers than
649 	 * requested, so we request two fewer in that case.
650 	 */
651 	if (nb_ops < 4) {
652 		vdqcr_flags = QM_VDQCR_EXACT;
653 		num_rx_bufs = nb_ops;
654 	} else {
655 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
656 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
657 	}
658 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
659 	if (ret)
660 		return 0;
661 
662 	do {
663 		const struct qm_fd *fd;
664 		struct dpaa_sec_job *job;
665 		struct dpaa_sec_op_ctx *ctx;
666 		struct rte_crypto_op *op;
667 
668 		dq = qman_dequeue(fq);
669 		if (!dq)
670 			continue;
671 
672 		fd = &dq->fd;
673 		/* The SG table is embedded in an op ctx:
674 		 * sg[0] is for output,
675 		 * sg[1] is for input.
676 		 */
677 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
678 
679 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
680 		ctx->fd_status = fd->status;
681 		op = ctx->op;
682 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
683 			struct qm_sg_entry *sg_out;
684 			uint32_t len;
685 			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
686 						op->sym->m_src : op->sym->m_dst;
687 
688 			sg_out = &job->sg[0];
689 			hw_sg_to_cpu(sg_out);
690 			len = sg_out->length;
691 			mbuf->pkt_len = len;
692 			while (mbuf->next != NULL) {
693 				len -= mbuf->data_len;
694 				mbuf = mbuf->next;
695 			}
696 			mbuf->data_len = len;
697 		}
698 		if (!ctx->fd_status) {
699 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
700 		} else {
701 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
702 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
703 		}
704 		ops[pkts++] = op;
705 
706 		/* report op status to sym->op and then free the ctx memory */
707 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
708 
709 		qman_dqrr_consume(fq, dq);
710 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
711 
712 	return pkts;
713 }
714 
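/* Build an auth-only job for scatter-gather mbufs: sg[0] receives the
 * digest, sg[1] points to a chain of IV (if any), data segments and,
 * for verification, the expected digest.
 */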
715 static inline struct dpaa_sec_job *
716 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
717 {
718 	struct rte_crypto_sym_op *sym = op->sym;
719 	struct rte_mbuf *mbuf = sym->m_src;
720 	struct dpaa_sec_job *cf;
721 	struct dpaa_sec_op_ctx *ctx;
722 	struct qm_sg_entry *sg, *out_sg, *in_sg;
723 	phys_addr_t start_addr;
724 	uint8_t *old_digest, extra_segs;
725 	int data_len, data_offset;
726 
727 	data_len = sym->auth.data.length;
728 	data_offset = sym->auth.data.offset;
729 
730 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
731 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
732 		if ((data_len & 7) || (data_offset & 7)) {
733 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
734 			return NULL;
735 		}
736 
737 		data_len = data_len >> 3;
738 		data_offset = data_offset >> 3;
739 	}
740 
741 	if (is_decode(ses))
742 		extra_segs = 3;
743 	else
744 		extra_segs = 2;
745 
746 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
747 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
748 				MAX_SG_ENTRIES);
749 		return NULL;
750 	}
751 	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
752 	if (!ctx)
753 		return NULL;
754 
755 	cf = &ctx->job;
756 	ctx->op = op;
757 	old_digest = ctx->digest;
758 
759 	/* output */
760 	out_sg = &cf->sg[0];
761 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
762 	out_sg->length = ses->digest_length;
763 	cpu_to_hw_sg(out_sg);
764 
765 	/* input */
766 	in_sg = &cf->sg[1];
767 	/* need to extend the input to a compound frame */
768 	in_sg->extension = 1;
769 	in_sg->final = 1;
770 	in_sg->length = data_len;
771 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
772 
773 	/* 1st seg */
774 	sg = in_sg + 1;
775 
776 	if (ses->iv.length) {
777 		uint8_t *iv_ptr;
778 
779 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
780 						   ses->iv.offset);
781 
782 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
783 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
784 			sg->length = 12;
785 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
786 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
787 			sg->length = 8;
788 		} else {
789 			sg->length = ses->iv.length;
790 		}
791 		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
792 		in_sg->length += sg->length;
793 		cpu_to_hw_sg(sg);
794 		sg++;
795 	}
796 
797 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
798 	sg->offset = data_offset;
799 
800 	if (data_len <= (mbuf->data_len - data_offset)) {
801 		sg->length = data_len;
802 	} else {
803 		sg->length = mbuf->data_len - data_offset;
804 
805 		/* remaining i/p segs */
806 		while ((data_len = data_len - sg->length) &&
807 		       (mbuf = mbuf->next)) {
808 			cpu_to_hw_sg(sg);
809 			sg++;
810 			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
811 			if (data_len > mbuf->data_len)
812 				sg->length = mbuf->data_len;
813 			else
814 				sg->length = data_len;
815 		}
816 	}
817 
818 	if (is_decode(ses)) {
819 		/* Digest verification case */
820 		cpu_to_hw_sg(sg);
821 		sg++;
822 		rte_memcpy(old_digest, sym->auth.digest.data,
823 				ses->digest_length);
824 		start_addr = dpaa_mem_vtop(old_digest);
825 		qm_sg_entry_set64(sg, start_addr);
826 		sg->length = ses->digest_length;
827 		in_sg->length += ses->digest_length;
828 	}
829 	sg->final = 1;
830 	cpu_to_hw_sg(sg);
831 	cpu_to_hw_sg(in_sg);
832 
833 	return cf;
834 }
835 
836 /**
837  * packet looks like:
838  *		|<----data_len------->|
839  *    |ip_header|ah_header|icv|payload|
840  *              ^
841  *		|
842  *	   mbuf->pkt.data
843  */
844 static inline struct dpaa_sec_job *
845 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
846 {
847 	struct rte_crypto_sym_op *sym = op->sym;
848 	struct rte_mbuf *mbuf = sym->m_src;
849 	struct dpaa_sec_job *cf;
850 	struct dpaa_sec_op_ctx *ctx;
851 	struct qm_sg_entry *sg, *in_sg;
852 	rte_iova_t start_addr;
853 	uint8_t *old_digest;
854 	int data_len, data_offset;
855 
856 	data_len = sym->auth.data.length;
857 	data_offset = sym->auth.data.offset;
858 
859 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
860 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
861 		if ((data_len & 7) || (data_offset & 7)) {
862 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
863 			return NULL;
864 		}
865 
866 		data_len = data_len >> 3;
867 		data_offset = data_offset >> 3;
868 	}
869 
870 	ctx = dpaa_sec_alloc_ctx(ses, 4);
871 	if (!ctx)
872 		return NULL;
873 
874 	cf = &ctx->job;
875 	ctx->op = op;
876 	old_digest = ctx->digest;
877 
878 	start_addr = rte_pktmbuf_iova(mbuf);
879 	/* output */
880 	sg = &cf->sg[0];
881 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
882 	sg->length = ses->digest_length;
883 	cpu_to_hw_sg(sg);
884 
885 	/* input */
886 	in_sg = &cf->sg[1];
887 	/* need to extend the input to a compound frame */
888 	in_sg->extension = 1;
889 	in_sg->final = 1;
890 	in_sg->length = data_len;
891 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
892 	sg = &cf->sg[2];
893 
894 	if (ses->iv.length) {
895 		uint8_t *iv_ptr;
896 
897 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
898 						   ses->iv.offset);
899 
900 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
901 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
902 			sg->length = 12;
903 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
904 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
905 			sg->length = 8;
906 		} else {
907 			sg->length = ses->iv.length;
908 		}
909 		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
910 		in_sg->length += sg->length;
911 		cpu_to_hw_sg(sg);
912 		sg++;
913 	}
914 
915 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
916 	sg->offset = data_offset;
917 	sg->length = data_len;
918 
919 	if (is_decode(ses)) {
920 		/* Digest verification case */
921 		cpu_to_hw_sg(sg);
922 		/* hash result or digest, save digest first */
923 		rte_memcpy(old_digest, sym->auth.digest.data,
924 				ses->digest_length);
925 		/* let's check digest by hw */
926 		start_addr = dpaa_mem_vtop(old_digest);
927 		sg++;
928 		qm_sg_entry_set64(sg, start_addr);
929 		sg->length = ses->digest_length;
930 		in_sg->length += ses->digest_length;
931 	}
932 	sg->final = 1;
933 	cpu_to_hw_sg(sg);
934 	cpu_to_hw_sg(in_sg);
935 
936 	return cf;
937 }
938 
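/* Build a cipher-only job for scatter-gather mbufs: the output chain
 * covers the destination data segments, the input chain carries the IV
 * followed by the source data segments.
 */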
939 static inline struct dpaa_sec_job *
940 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
941 {
942 	struct rte_crypto_sym_op *sym = op->sym;
943 	struct dpaa_sec_job *cf;
944 	struct dpaa_sec_op_ctx *ctx;
945 	struct qm_sg_entry *sg, *out_sg, *in_sg;
946 	struct rte_mbuf *mbuf;
947 	uint8_t req_segs;
948 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
949 			ses->iv.offset);
950 	int data_len, data_offset;
951 
952 	data_len = sym->cipher.data.length;
953 	data_offset = sym->cipher.data.offset;
954 
955 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
956 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
957 		if ((data_len & 7) || (data_offset & 7)) {
958 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
959 			return NULL;
960 		}
961 
962 		data_len = data_len >> 3;
963 		data_offset = data_offset >> 3;
964 	}
965 
966 	if (sym->m_dst) {
967 		mbuf = sym->m_dst;
968 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
969 	} else {
970 		mbuf = sym->m_src;
971 		req_segs = mbuf->nb_segs * 2 + 3;
972 	}
973 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
974 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
975 				MAX_SG_ENTRIES);
976 		return NULL;
977 	}
978 
979 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
980 	if (!ctx)
981 		return NULL;
982 
983 	cf = &ctx->job;
984 	ctx->op = op;
985 
986 	/* output */
987 	out_sg = &cf->sg[0];
988 	out_sg->extension = 1;
989 	out_sg->length = data_len;
990 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
991 	cpu_to_hw_sg(out_sg);
992 
993 	/* 1st seg */
994 	sg = &cf->sg[2];
995 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
996 	sg->length = mbuf->data_len - data_offset;
997 	sg->offset = data_offset;
998 
999 	/* Successive segs */
1000 	mbuf = mbuf->next;
1001 	while (mbuf) {
1002 		cpu_to_hw_sg(sg);
1003 		sg++;
1004 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1005 		sg->length = mbuf->data_len;
1006 		mbuf = mbuf->next;
1007 	}
1008 	sg->final = 1;
1009 	cpu_to_hw_sg(sg);
1010 
1011 	/* input */
1012 	mbuf = sym->m_src;
1013 	in_sg = &cf->sg[1];
1014 	in_sg->extension = 1;
1015 	in_sg->final = 1;
1016 	in_sg->length = data_len + ses->iv.length;
1017 
1018 	sg++;
1019 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1020 	cpu_to_hw_sg(in_sg);
1021 
1022 	/* IV */
1023 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1024 	sg->length = ses->iv.length;
1025 	cpu_to_hw_sg(sg);
1026 
1027 	/* 1st seg */
1028 	sg++;
1029 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1030 	sg->length = mbuf->data_len - data_offset;
1031 	sg->offset = data_offset;
1032 
1033 	/* Successive segs */
1034 	mbuf = mbuf->next;
1035 	while (mbuf) {
1036 		cpu_to_hw_sg(sg);
1037 		sg++;
1038 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1039 		sg->length = mbuf->data_len;
1040 		mbuf = mbuf->next;
1041 	}
1042 	sg->final = 1;
1043 	cpu_to_hw_sg(sg);
1044 
1045 	return cf;
1046 }
1047 
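/* Build a cipher-only job for contiguous buffers: the input compound
 * frame carries the IV followed by the payload, the output points at the
 * destination (or in-place source) buffer.
 */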
1048 static inline struct dpaa_sec_job *
1049 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1050 {
1051 	struct rte_crypto_sym_op *sym = op->sym;
1052 	struct dpaa_sec_job *cf;
1053 	struct dpaa_sec_op_ctx *ctx;
1054 	struct qm_sg_entry *sg;
1055 	rte_iova_t src_start_addr, dst_start_addr;
1056 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1057 			ses->iv.offset);
1058 	int data_len, data_offset;
1059 
1060 	data_len = sym->cipher.data.length;
1061 	data_offset = sym->cipher.data.offset;
1062 
1063 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1064 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1065 		if ((data_len & 7) || (data_offset & 7)) {
1066 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1067 			return NULL;
1068 		}
1069 
1070 		data_len = data_len >> 3;
1071 		data_offset = data_offset >> 3;
1072 	}
1073 
1074 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1075 	if (!ctx)
1076 		return NULL;
1077 
1078 	cf = &ctx->job;
1079 	ctx->op = op;
1080 
1081 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1082 
1083 	if (sym->m_dst)
1084 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1085 	else
1086 		dst_start_addr = src_start_addr;
1087 
1088 	/* output */
1089 	sg = &cf->sg[0];
1090 	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1091 	sg->length = data_len + ses->iv.length;
1092 	cpu_to_hw_sg(sg);
1093 
1094 	/* input */
1095 	sg = &cf->sg[1];
1096 
1097 	/* need to extend the input to a compound frame */
1098 	sg->extension = 1;
1099 	sg->final = 1;
1100 	sg->length = data_len + ses->iv.length;
1101 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1102 	cpu_to_hw_sg(sg);
1103 
1104 	sg = &cf->sg[2];
1105 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1106 	sg->length = ses->iv.length;
1107 	cpu_to_hw_sg(sg);
1108 
1109 	sg++;
1110 	qm_sg_entry_set64(sg, src_start_addr + data_offset);
1111 	sg->length = data_len;
1112 	sg->final = 1;
1113 	cpu_to_hw_sg(sg);
1114 
1115 	return cf;
1116 }
1117 
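/* Build an AEAD (GCM) job for scatter-gather mbufs: the input chain
 * carries IV, optional AAD and payload (plus the digest for decryption),
 * the output chain carries the payload (plus the digest for encryption).
 */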
1118 static inline struct dpaa_sec_job *
1119 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1120 {
1121 	struct rte_crypto_sym_op *sym = op->sym;
1122 	struct dpaa_sec_job *cf;
1123 	struct dpaa_sec_op_ctx *ctx;
1124 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1125 	struct rte_mbuf *mbuf;
1126 	uint8_t req_segs;
1127 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1128 			ses->iv.offset);
1129 
1130 	if (sym->m_dst) {
1131 		mbuf = sym->m_dst;
1132 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1133 	} else {
1134 		mbuf = sym->m_src;
1135 		req_segs = mbuf->nb_segs * 2 + 4;
1136 	}
1137 
1138 	if (ses->auth_only_len)
1139 		req_segs++;
1140 
1141 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1142 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1143 				MAX_SG_ENTRIES);
1144 		return NULL;
1145 	}
1146 
1147 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1148 	if (!ctx)
1149 		return NULL;
1150 
1151 	cf = &ctx->job;
1152 	ctx->op = op;
1153 
1154 	rte_prefetch0(cf->sg);
1155 
1156 	/* output */
1157 	out_sg = &cf->sg[0];
1158 	out_sg->extension = 1;
1159 	if (is_encode(ses))
1160 		out_sg->length = sym->aead.data.length + ses->digest_length;
1161 	else
1162 		out_sg->length = sym->aead.data.length;
1163 
1164 	/* output sg entries */
1165 	sg = &cf->sg[2];
1166 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1167 	cpu_to_hw_sg(out_sg);
1168 
1169 	/* 1st seg */
1170 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1171 	sg->length = mbuf->data_len - sym->aead.data.offset;
1172 	sg->offset = sym->aead.data.offset;
1173 
1174 	/* Successive segs */
1175 	mbuf = mbuf->next;
1176 	while (mbuf) {
1177 		cpu_to_hw_sg(sg);
1178 		sg++;
1179 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1180 		sg->length = mbuf->data_len;
1181 		mbuf = mbuf->next;
1182 	}
1183 	sg->length -= ses->digest_length;
1184 
1185 	if (is_encode(ses)) {
1186 		cpu_to_hw_sg(sg);
1187 		/* set auth output */
1188 		sg++;
1189 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1190 		sg->length = ses->digest_length;
1191 	}
1192 	sg->final = 1;
1193 	cpu_to_hw_sg(sg);
1194 
1195 	/* input */
1196 	mbuf = sym->m_src;
1197 	in_sg = &cf->sg[1];
1198 	in_sg->extension = 1;
1199 	in_sg->final = 1;
1200 	if (is_encode(ses))
1201 		in_sg->length = ses->iv.length + sym->aead.data.length
1202 							+ ses->auth_only_len;
1203 	else
1204 		in_sg->length = ses->iv.length + sym->aead.data.length
1205 				+ ses->auth_only_len + ses->digest_length;
1206 
1207 	/* input sg entries */
1208 	sg++;
1209 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1210 	cpu_to_hw_sg(in_sg);
1211 
1212 	/* 1st seg IV */
1213 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1214 	sg->length = ses->iv.length;
1215 	cpu_to_hw_sg(sg);
1216 
1217 	/* 2nd seg auth only */
1218 	if (ses->auth_only_len) {
1219 		sg++;
1220 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1221 		sg->length = ses->auth_only_len;
1222 		cpu_to_hw_sg(sg);
1223 	}
1224 
1225 	/* 3rd seg */
1226 	sg++;
1227 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1228 	sg->length = mbuf->data_len - sym->aead.data.offset;
1229 	sg->offset = sym->aead.data.offset;
1230 
1231 	/* Successive segs */
1232 	mbuf = mbuf->next;
1233 	while (mbuf) {
1234 		cpu_to_hw_sg(sg);
1235 		sg++;
1236 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1237 		sg->length = mbuf->data_len;
1238 		mbuf = mbuf->next;
1239 	}
1240 
1241 	if (is_decode(ses)) {
1242 		cpu_to_hw_sg(sg);
1243 		sg++;
1244 		memcpy(ctx->digest, sym->aead.digest.data,
1245 			ses->digest_length);
1246 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1247 		sg->length = ses->digest_length;
1248 	}
1249 	sg->final = 1;
1250 	cpu_to_hw_sg(sg);
1251 
1252 	return cf;
1253 }
1254 
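/* Build an AEAD (GCM) job for contiguous buffers; the compound frame
 * layout matches the scatter-gather variant above.
 */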
1255 static inline struct dpaa_sec_job *
1256 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1257 {
1258 	struct rte_crypto_sym_op *sym = op->sym;
1259 	struct dpaa_sec_job *cf;
1260 	struct dpaa_sec_op_ctx *ctx;
1261 	struct qm_sg_entry *sg;
1262 	uint32_t length = 0;
1263 	rte_iova_t src_start_addr, dst_start_addr;
1264 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1265 			ses->iv.offset);
1266 
1267 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1268 
1269 	if (sym->m_dst)
1270 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1271 	else
1272 		dst_start_addr = src_start_addr;
1273 
1274 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1275 	if (!ctx)
1276 		return NULL;
1277 
1278 	cf = &ctx->job;
1279 	ctx->op = op;
1280 
1281 	/* input */
1282 	rte_prefetch0(cf->sg);
1283 	sg = &cf->sg[2];
1284 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1285 	if (is_encode(ses)) {
1286 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1287 		sg->length = ses->iv.length;
1288 		length += sg->length;
1289 		cpu_to_hw_sg(sg);
1290 
1291 		sg++;
1292 		if (ses->auth_only_len) {
1293 			qm_sg_entry_set64(sg,
1294 					  dpaa_mem_vtop(sym->aead.aad.data));
1295 			sg->length = ses->auth_only_len;
1296 			length += sg->length;
1297 			cpu_to_hw_sg(sg);
1298 			sg++;
1299 		}
1300 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1301 		sg->length = sym->aead.data.length;
1302 		length += sg->length;
1303 		sg->final = 1;
1304 		cpu_to_hw_sg(sg);
1305 	} else {
1306 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1307 		sg->length = ses->iv.length;
1308 		length += sg->length;
1309 		cpu_to_hw_sg(sg);
1310 
1311 		sg++;
1312 		if (ses->auth_only_len) {
1313 			qm_sg_entry_set64(sg,
1314 					  dpaa_mem_vtop(sym->aead.aad.data));
1315 			sg->length = ses->auth_only_len;
1316 			length += sg->length;
1317 			cpu_to_hw_sg(sg);
1318 			sg++;
1319 		}
1320 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1321 		sg->length = sym->aead.data.length;
1322 		length += sg->length;
1323 		cpu_to_hw_sg(sg);
1324 
1325 		memcpy(ctx->digest, sym->aead.digest.data,
1326 		       ses->digest_length);
1327 		sg++;
1328 
1329 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1330 		sg->length = ses->digest_length;
1331 		length += sg->length;
1332 		sg->final = 1;
1333 		cpu_to_hw_sg(sg);
1334 	}
1335 	/* input compound frame */
1336 	cf->sg[1].length = length;
1337 	cf->sg[1].extension = 1;
1338 	cf->sg[1].final = 1;
1339 	cpu_to_hw_sg(&cf->sg[1]);
1340 
1341 	/* output */
1342 	sg++;
1343 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1344 	qm_sg_entry_set64(sg,
1345 		dst_start_addr + sym->aead.data.offset);
1346 	sg->length = sym->aead.data.length;
1347 	length = sg->length;
1348 	if (is_encode(ses)) {
1349 		cpu_to_hw_sg(sg);
1350 		/* set auth output */
1351 		sg++;
1352 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1353 		sg->length = ses->digest_length;
1354 		length += sg->length;
1355 	}
1356 	sg->final = 1;
1357 	cpu_to_hw_sg(sg);
1358 
1359 	/* output compound frame */
1360 	cf->sg[0].length = length;
1361 	cf->sg[0].extension = 1;
1362 	cpu_to_hw_sg(&cf->sg[0]);
1363 
1364 	return cf;
1365 }
1366 
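/* Build a cipher + auth (authenc) job for scatter-gather mbufs: the
 * input chain carries the IV and the auth range (plus the digest for
 * verification), the output chain carries the auth range (plus the
 * digest for generation).
 */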
1367 static inline struct dpaa_sec_job *
1368 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1369 {
1370 	struct rte_crypto_sym_op *sym = op->sym;
1371 	struct dpaa_sec_job *cf;
1372 	struct dpaa_sec_op_ctx *ctx;
1373 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1374 	struct rte_mbuf *mbuf;
1375 	uint8_t req_segs;
1376 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1377 			ses->iv.offset);
1378 
1379 	if (sym->m_dst) {
1380 		mbuf = sym->m_dst;
1381 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1382 	} else {
1383 		mbuf = sym->m_src;
1384 		req_segs = mbuf->nb_segs * 2 + 4;
1385 	}
1386 
1387 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1388 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1389 				MAX_SG_ENTRIES);
1390 		return NULL;
1391 	}
1392 
1393 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1394 	if (!ctx)
1395 		return NULL;
1396 
1397 	cf = &ctx->job;
1398 	ctx->op = op;
1399 
1400 	rte_prefetch0(cf->sg);
1401 
1402 	/* output */
1403 	out_sg = &cf->sg[0];
1404 	out_sg->extension = 1;
1405 	if (is_encode(ses))
1406 		out_sg->length = sym->auth.data.length + ses->digest_length;
1407 	else
1408 		out_sg->length = sym->auth.data.length;
1409 
1410 	/* output sg entries */
1411 	sg = &cf->sg[2];
1412 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1413 	cpu_to_hw_sg(out_sg);
1414 
1415 	/* 1st seg */
1416 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1417 	sg->length = mbuf->data_len - sym->auth.data.offset;
1418 	sg->offset = sym->auth.data.offset;
1419 
1420 	/* Successive segs */
1421 	mbuf = mbuf->next;
1422 	while (mbuf) {
1423 		cpu_to_hw_sg(sg);
1424 		sg++;
1425 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1426 		sg->length = mbuf->data_len;
1427 		mbuf = mbuf->next;
1428 	}
1429 	sg->length -= ses->digest_length;
1430 
1431 	if (is_encode(ses)) {
1432 		cpu_to_hw_sg(sg);
1433 		/* set auth output */
1434 		sg++;
1435 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1436 		sg->length = ses->digest_length;
1437 	}
1438 	sg->final = 1;
1439 	cpu_to_hw_sg(sg);
1440 
1441 	/* input */
1442 	mbuf = sym->m_src;
1443 	in_sg = &cf->sg[1];
1444 	in_sg->extension = 1;
1445 	in_sg->final = 1;
1446 	if (is_encode(ses))
1447 		in_sg->length = ses->iv.length + sym->auth.data.length;
1448 	else
1449 		in_sg->length = ses->iv.length + sym->auth.data.length
1450 						+ ses->digest_length;
1451 
1452 	/* input sg entries */
1453 	sg++;
1454 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1455 	cpu_to_hw_sg(in_sg);
1456 
1457 	/* 1st seg IV */
1458 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1459 	sg->length = ses->iv.length;
1460 	cpu_to_hw_sg(sg);
1461 
1462 	/* 2nd seg */
1463 	sg++;
1464 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1465 	sg->length = mbuf->data_len - sym->auth.data.offset;
1466 	sg->offset = sym->auth.data.offset;
1467 
1468 	/* Successive segs */
1469 	mbuf = mbuf->next;
1470 	while (mbuf) {
1471 		cpu_to_hw_sg(sg);
1472 		sg++;
1473 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1474 		sg->length = mbuf->data_len;
1475 		mbuf = mbuf->next;
1476 	}
1477 
1478 	sg->length -= ses->digest_length;
1479 	if (is_decode(ses)) {
1480 		cpu_to_hw_sg(sg);
1481 		sg++;
1482 		memcpy(ctx->digest, sym->auth.digest.data,
1483 			ses->digest_length);
1484 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1485 		sg->length = ses->digest_length;
1486 	}
1487 	sg->final = 1;
1488 	cpu_to_hw_sg(sg);
1489 
1490 	return cf;
1491 }
1492 
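/* Build a cipher + auth (authenc) job for contiguous buffers: the input
 * carries the IV and the auth range (plus the digest for verification),
 * the output covers the cipher range (plus the digest for generation).
 */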
1493 static inline struct dpaa_sec_job *
1494 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1495 {
1496 	struct rte_crypto_sym_op *sym = op->sym;
1497 	struct dpaa_sec_job *cf;
1498 	struct dpaa_sec_op_ctx *ctx;
1499 	struct qm_sg_entry *sg;
1500 	rte_iova_t src_start_addr, dst_start_addr;
1501 	uint32_t length = 0;
1502 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1503 			ses->iv.offset);
1504 
1505 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1506 	if (sym->m_dst)
1507 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1508 	else
1509 		dst_start_addr = src_start_addr;
1510 
1511 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1512 	if (!ctx)
1513 		return NULL;
1514 
1515 	cf = &ctx->job;
1516 	ctx->op = op;
1517 
1518 	/* input */
1519 	rte_prefetch0(cf->sg);
1520 	sg = &cf->sg[2];
1521 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1522 	if (is_encode(ses)) {
1523 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1524 		sg->length = ses->iv.length;
1525 		length += sg->length;
1526 		cpu_to_hw_sg(sg);
1527 
1528 		sg++;
1529 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1530 		sg->length = sym->auth.data.length;
1531 		length += sg->length;
1532 		sg->final = 1;
1533 		cpu_to_hw_sg(sg);
1534 	} else {
1535 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1536 		sg->length = ses->iv.length;
1537 		length += sg->length;
1538 		cpu_to_hw_sg(sg);
1539 
1540 		sg++;
1541 
1542 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1543 		sg->length = sym->auth.data.length;
1544 		length += sg->length;
1545 		cpu_to_hw_sg(sg);
1546 
1547 		memcpy(ctx->digest, sym->auth.digest.data,
1548 		       ses->digest_length);
1549 		sg++;
1550 
1551 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1552 		sg->length = ses->digest_length;
1553 		length += sg->length;
1554 		sg->final = 1;
1555 		cpu_to_hw_sg(sg);
1556 	}
1557 	/* input compound frame */
1558 	cf->sg[1].length = length;
1559 	cf->sg[1].extension = 1;
1560 	cf->sg[1].final = 1;
1561 	cpu_to_hw_sg(&cf->sg[1]);
1562 
1563 	/* output */
1564 	sg++;
1565 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1566 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1567 	sg->length = sym->cipher.data.length;
1568 	length = sg->length;
1569 	if (is_encode(ses)) {
1570 		cpu_to_hw_sg(sg);
1571 		/* set auth output */
1572 		sg++;
1573 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1574 		sg->length = ses->digest_length;
1575 		length += sg->length;
1576 	}
1577 	sg->final = 1;
1578 	cpu_to_hw_sg(sg);
1579 
1580 	/* output compound frame */
1581 	cf->sg[0].length = length;
1582 	cf->sg[0].extension = 1;
1583 	cpu_to_hw_sg(&cf->sg[0]);
1584 
1585 	return cf;
1586 }
1587 
1588 #ifdef RTE_LIBRTE_SECURITY
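/* Build a protocol offload (IPsec/PDCP) job for contiguous buffers: the
 * whole packet is handed to SEC and the output may use the full buffer.
 */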
1589 static inline struct dpaa_sec_job *
1590 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1591 {
1592 	struct rte_crypto_sym_op *sym = op->sym;
1593 	struct dpaa_sec_job *cf;
1594 	struct dpaa_sec_op_ctx *ctx;
1595 	struct qm_sg_entry *sg;
1596 	phys_addr_t src_start_addr, dst_start_addr;
1597 
1598 	ctx = dpaa_sec_alloc_ctx(ses, 2);
1599 	if (!ctx)
1600 		return NULL;
1601 	cf = &ctx->job;
1602 	ctx->op = op;
1603 
1604 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1605 
1606 	if (sym->m_dst)
1607 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1608 	else
1609 		dst_start_addr = src_start_addr;
1610 
1611 	/* input */
1612 	sg = &cf->sg[1];
1613 	qm_sg_entry_set64(sg, src_start_addr);
1614 	sg->length = sym->m_src->pkt_len;
1615 	sg->final = 1;
1616 	cpu_to_hw_sg(sg);
1617 
1618 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1619 	/* output */
1620 	sg = &cf->sg[0];
1621 	qm_sg_entry_set64(sg, dst_start_addr);
1622 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1623 	cpu_to_hw_sg(sg);
1624 
1625 	return cf;
1626 }
1627 
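/* Build a protocol offload (IPsec/PDCP) job for scatter-gather mbufs */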
1628 static inline struct dpaa_sec_job *
1629 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1630 {
1631 	struct rte_crypto_sym_op *sym = op->sym;
1632 	struct dpaa_sec_job *cf;
1633 	struct dpaa_sec_op_ctx *ctx;
1634 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1635 	struct rte_mbuf *mbuf;
1636 	uint8_t req_segs;
1637 	uint32_t in_len = 0, out_len = 0;
1638 
1639 	if (sym->m_dst)
1640 		mbuf = sym->m_dst;
1641 	else
1642 		mbuf = sym->m_src;
1643 
1644 	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1645 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1646 		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1647 				MAX_SG_ENTRIES);
1648 		return NULL;
1649 	}
1650 
1651 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1652 	if (!ctx)
1653 		return NULL;
1654 	cf = &ctx->job;
1655 	ctx->op = op;
1656 	/* output */
1657 	out_sg = &cf->sg[0];
1658 	out_sg->extension = 1;
1659 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1660 
1661 	/* 1st seg */
1662 	sg = &cf->sg[2];
1663 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1664 	sg->offset = 0;
1665 
1666 	/* Successive segs */
1667 	while (mbuf->next) {
1668 		sg->length = mbuf->data_len;
1669 		out_len += sg->length;
1670 		mbuf = mbuf->next;
1671 		cpu_to_hw_sg(sg);
1672 		sg++;
1673 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1674 		sg->offset = 0;
1675 	}
1676 	sg->length = mbuf->buf_len - mbuf->data_off;
1677 	out_len += sg->length;
1678 	sg->final = 1;
1679 	cpu_to_hw_sg(sg);
1680 
1681 	out_sg->length = out_len;
1682 	cpu_to_hw_sg(out_sg);
1683 
1684 	/* input */
1685 	mbuf = sym->m_src;
1686 	in_sg = &cf->sg[1];
1687 	in_sg->extension = 1;
1688 	in_sg->final = 1;
1689 	in_len = mbuf->data_len;
1690 
1691 	sg++;
1692 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1693 
1694 	/* 1st seg */
1695 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1696 	sg->length = mbuf->data_len;
1697 	sg->offset = 0;
1698 
1699 	/* Successive segs */
1700 	mbuf = mbuf->next;
1701 	while (mbuf) {
1702 		cpu_to_hw_sg(sg);
1703 		sg++;
1704 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1705 		sg->length = mbuf->data_len;
1706 		sg->offset = 0;
1707 		in_len += sg->length;
1708 		mbuf = mbuf->next;
1709 	}
1710 	sg->final = 1;
1711 	cpu_to_hw_sg(sg);
1712 
1713 	in_sg->length = in_len;
1714 	cpu_to_hw_sg(in_sg);
1715 
1716 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1717 
1718 	return cf;
1719 }
1720 #endif
1721 
1722 static uint16_t
1723 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1724 		       uint16_t nb_ops)
1725 {
1726 	/* Function to transmit the frames to the given device and queue pair */
1727 	uint32_t loop;
1728 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1729 	uint16_t num_tx = 0;
1730 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1731 	uint32_t frames_to_send;
1732 	struct rte_crypto_op *op;
1733 	struct dpaa_sec_job *cf;
1734 	dpaa_sec_session *ses;
1735 	uint16_t auth_hdr_len, auth_tail_len;
1736 	uint32_t index, flags[DPAA_SEC_BURST] = {0};
1737 	struct qman_fq *inq[DPAA_SEC_BURST];
1738 
1739 	while (nb_ops) {
1740 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1741 				DPAA_SEC_BURST : nb_ops;
1742 		for (loop = 0; loop < frames_to_send; loop++) {
1743 			op = *(ops++);
1744 			if (op->sym->m_src->seqn != 0) {
1745 				index = op->sym->m_src->seqn - 1;
1746 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1747 					/* QM_EQCR_DCA_IDXMASK = 0x0f */
1748 					flags[loop] = ((index & 0x0f) << 8);
1749 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1750 					DPAA_PER_LCORE_DQRR_SIZE--;
1751 					DPAA_PER_LCORE_DQRR_HELD &=
1752 								~(1 << index);
1753 				}
1754 			}
1755 
1756 			switch (op->sess_type) {
1757 			case RTE_CRYPTO_OP_WITH_SESSION:
1758 				ses = (dpaa_sec_session *)
1759 					get_sym_session_private_data(
1760 							op->sym->session,
1761 							cryptodev_driver_id);
1762 				break;
1763 #ifdef RTE_LIBRTE_SECURITY
1764 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1765 				ses = (dpaa_sec_session *)
1766 					get_sec_session_private_data(
1767 							op->sym->sec_session);
1768 				break;
1769 #endif
1770 			default:
1771 				DPAA_SEC_DP_ERR(
1772 					"sessionless crypto op not supported");
1773 				frames_to_send = loop;
1774 				nb_ops = loop;
1775 				goto send_pkts;
1776 			}
1777 
1778 			if (!ses) {
1779 				DPAA_SEC_DP_ERR("session not available");
1780 				frames_to_send = loop;
1781 				nb_ops = loop;
1782 				goto send_pkts;
1783 			}
1784 
1785 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1786 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1787 					frames_to_send = loop;
1788 					nb_ops = loop;
1789 					goto send_pkts;
1790 				}
1791 			} else if (unlikely(ses->qp[rte_lcore_id() %
1792 						MAX_DPAA_CORES] != qp)) {
1793 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1794 					" New qp = %p\n",
1795 					ses->qp[rte_lcore_id() %
1796 					MAX_DPAA_CORES], qp);
1797 				frames_to_send = loop;
1798 				nb_ops = loop;
1799 				goto send_pkts;
1800 			}
1801 
1802 			auth_hdr_len = op->sym->auth.data.length -
1803 						op->sym->cipher.data.length;
1804 			auth_tail_len = 0;
1805 
1806 			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1807 				  ((op->sym->m_dst == NULL) ||
1808 				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1809 				switch (ses->ctxt) {
1810 #ifdef RTE_LIBRTE_SECURITY
1811 				case DPAA_SEC_PDCP:
1812 				case DPAA_SEC_IPSEC:
1813 					cf = build_proto(op, ses);
1814 					break;
1815 #endif
1816 				case DPAA_SEC_AUTH:
1817 					cf = build_auth_only(op, ses);
1818 					break;
1819 				case DPAA_SEC_CIPHER:
1820 					cf = build_cipher_only(op, ses);
1821 					break;
1822 				case DPAA_SEC_AEAD:
1823 					cf = build_cipher_auth_gcm(op, ses);
1824 					auth_hdr_len = ses->auth_only_len;
1825 					break;
1826 				case DPAA_SEC_CIPHER_HASH:
1827 					auth_hdr_len =
1828 						op->sym->cipher.data.offset
1829 						- op->sym->auth.data.offset;
1830 					auth_tail_len =
1831 						op->sym->auth.data.length
1832 						- op->sym->cipher.data.length
1833 						- auth_hdr_len;
1834 					cf = build_cipher_auth(op, ses);
1835 					break;
1836 				default:
1837 					DPAA_SEC_DP_ERR("not supported ops");
1838 					frames_to_send = loop;
1839 					nb_ops = loop;
1840 					goto send_pkts;
1841 				}
1842 			} else {
1843 				switch (ses->ctxt) {
1844 #ifdef RTE_LIBRTE_SECURITY
1845 				case DPAA_SEC_PDCP:
1846 				case DPAA_SEC_IPSEC:
1847 					cf = build_proto_sg(op, ses);
1848 					break;
1849 #endif
1850 				case DPAA_SEC_AUTH:
1851 					cf = build_auth_only_sg(op, ses);
1852 					break;
1853 				case DPAA_SEC_CIPHER:
1854 					cf = build_cipher_only_sg(op, ses);
1855 					break;
1856 				case DPAA_SEC_AEAD:
1857 					cf = build_cipher_auth_gcm_sg(op, ses);
1858 					auth_hdr_len = ses->auth_only_len;
1859 					break;
1860 				case DPAA_SEC_CIPHER_HASH:
1861 					auth_hdr_len =
1862 						op->sym->cipher.data.offset
1863 						- op->sym->auth.data.offset;
1864 					auth_tail_len =
1865 						op->sym->auth.data.length
1866 						- op->sym->cipher.data.length
1867 						- auth_hdr_len;
1868 					cf = build_cipher_auth_sg(op, ses);
1869 					break;
1870 				default:
1871 					DPAA_SEC_DP_ERR("not supported ops");
1872 					frames_to_send = loop;
1873 					nb_ops = loop;
1874 					goto send_pkts;
1875 				}
1876 			}
1877 			if (unlikely(!cf)) {
1878 				frames_to_send = loop;
1879 				nb_ops = loop;
1880 				goto send_pkts;
1881 			}
1882 
1883 			fd = &fds[loop];
1884 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1885 			fd->opaque_addr = 0;
1886 			fd->cmd = 0;
1887 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1888 			fd->_format1 = qm_fd_compound;
1889 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1890 
1891 			/* auth_only_len is set to 0 in the descriptor; it is
1892 			 * overwritten here in fd.cmd, which updates the
1893 			 * DPOVRD register.
1894 			 */
1895 			if (auth_hdr_len || auth_tail_len) {
1896 				fd->cmd = 0x80000000;
1897 				fd->cmd |=
1898 					((auth_tail_len << 16) | auth_hdr_len);
1899 			}
1900 
1901 #ifdef RTE_LIBRTE_SECURITY
1902 			/* In case of PDCP, the per-packet HFN is stored in
1903 			 * the mbuf private area, after the sym_op.
1904 			 */
1905 			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1906 				fd->cmd = 0x80000000 |
1907 					*((uint32_t *)((uint8_t *)op +
1908 					ses->pdcp.hfn_ovd_offset));
1909 				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1910 					*((uint32_t *)((uint8_t *)op +
1911 					ses->pdcp.hfn_ovd_offset)),
1912 					ses->pdcp.hfn_ovd);
1913 			}
1914 #endif
1915 		}
1916 send_pkts:
1917 		loop = 0;
1918 		while (loop < frames_to_send) {
1919 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1920 					&flags[loop], frames_to_send - loop);
1921 		}
1922 		nb_ops -= frames_to_send;
1923 		num_tx += frames_to_send;
1924 	}
1925 
1926 	dpaa_qp->tx_pkts += num_tx;
1927 	dpaa_qp->tx_errs += nb_ops - num_tx;
1928 
1929 	return num_tx;
1930 }
1931 
1932 static uint16_t
1933 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1934 		       uint16_t nb_ops)
1935 {
1936 	uint16_t num_rx;
1937 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1938 
1939 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1940 
1941 	dpaa_qp->rx_pkts += num_rx;
1942 	dpaa_qp->rx_errs += nb_ops - num_rx;
1943 
1944 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1945 
1946 	return num_rx;
1947 }
1948 
1949 /** Release queue pair */
1950 static int
1951 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1952 			    uint16_t qp_id)
1953 {
1954 	struct dpaa_sec_dev_private *internals;
1955 	struct dpaa_sec_qp *qp = NULL;
1956 
1957 	PMD_INIT_FUNC_TRACE();
1958 
1959 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1960 
1961 	internals = dev->data->dev_private;
1962 	if (qp_id >= internals->max_nb_queue_pairs) {
1963 		DPAA_SEC_ERR("Max supported qpid %d",
1964 			     internals->max_nb_queue_pairs);
1965 		return -EINVAL;
1966 	}
1967 
1968 	qp = &internals->qps[qp_id];
1969 	rte_mempool_free(qp->ctx_pool);
1970 	qp->internals = NULL;
1971 	dev->data->queue_pairs[qp_id] = NULL;
1972 
1973 	return 0;
1974 }
1975 
1976 /** Setup a queue pair */
1977 static int
1978 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1979 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1980 		__rte_unused int socket_id)
1981 {
1982 	struct dpaa_sec_dev_private *internals;
1983 	struct dpaa_sec_qp *qp = NULL;
1984 	char str[32];
1985 
1986 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1987 
1988 	internals = dev->data->dev_private;
1989 	if (qp_id >= internals->max_nb_queue_pairs) {
1990 		DPAA_SEC_ERR("Max supported qpid %d",
1991 			     internals->max_nb_queue_pairs);
1992 		return -EINVAL;
1993 	}
1994 
1995 	qp = &internals->qps[qp_id];
1996 	qp->internals = internals;
1997 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1998 			dev->data->dev_id, qp_id);
1999 	if (!qp->ctx_pool) {
2000 		qp->ctx_pool = rte_mempool_create((const char *)str,
2001 							CTX_POOL_NUM_BUFS,
2002 							CTX_POOL_BUF_SIZE,
2003 							CTX_POOL_CACHE_SIZE, 0,
2004 							NULL, NULL, NULL, NULL,
2005 							SOCKET_ID_ANY, 0);
2006 		if (!qp->ctx_pool) {
2007 			DPAA_SEC_ERR("%s create failed\n", str);
2008 			return -ENOMEM;
2009 		}
2010 	} else {
2011 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2012 				dev->data->dev_id, qp_id);
	}
2013 	dev->data->queue_pairs[qp_id] = qp;
2014 
2015 	return 0;
2016 }
2017 
2018 /** Return the number of allocated queue pairs */
2019 static uint32_t
2020 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
2021 {
2022 	PMD_INIT_FUNC_TRACE();
2023 
2024 	return dev->data->nb_queue_pairs;
2025 }
2026 
2027 /** Returns the size of session structure */
2028 static unsigned int
2029 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2030 {
2031 	PMD_INIT_FUNC_TRACE();
2032 
2033 	return sizeof(dpaa_sec_session);
2034 }
2035 
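/* Fill a cipher-only session: copy the key and map the cipher algorithm
 * onto the SEC descriptor algorithm/mode selectors.
 */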
2036 static int
2037 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2038 		     struct rte_crypto_sym_xform *xform,
2039 		     dpaa_sec_session *session)
2040 {
2041 	session->ctxt = DPAA_SEC_CIPHER;
2042 	session->cipher_alg = xform->cipher.algo;
2043 	session->iv.length = xform->cipher.iv.length;
2044 	session->iv.offset = xform->cipher.iv.offset;
2045 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2046 					       RTE_CACHE_LINE_SIZE);
2047 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2048 		DPAA_SEC_ERR("No Memory for cipher key");
2049 		return -ENOMEM;
2050 	}
2051 	session->cipher_key.length = xform->cipher.key.length;
2052 
2053 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2054 	       xform->cipher.key.length);
2055 	switch (xform->cipher.algo) {
2056 	case RTE_CRYPTO_CIPHER_AES_CBC:
2057 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2058 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2059 		break;
2060 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2061 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2062 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2063 		break;
2064 	case RTE_CRYPTO_CIPHER_AES_CTR:
2065 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2066 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2067 		break;
2068 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2069 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2070 		break;
2071 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2072 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2073 		break;
2074 	default:
2075 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2076 			      xform->cipher.algo);
2077 		rte_free(session->cipher_key.data);
2078 		return -1;
2079 	}
2080 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2081 			DIR_ENC : DIR_DEC;
2082 
2083 	return 0;
2084 }
2085 
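/* Fill an auth-only session: copy the key, record the digest length and
 * map the auth algorithm onto the SEC descriptor algorithm/mode selectors.
 */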
2086 static int
2087 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2088 		   struct rte_crypto_sym_xform *xform,
2089 		   dpaa_sec_session *session)
2090 {
2091 	session->ctxt = DPAA_SEC_AUTH;
2092 	session->auth_alg = xform->auth.algo;
2093 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2094 					     RTE_CACHE_LINE_SIZE);
2095 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2096 		DPAA_SEC_ERR("No Memory for auth key");
2097 		return -ENOMEM;
2098 	}
2099 	session->auth_key.length = xform->auth.key.length;
2100 	session->digest_length = xform->auth.digest_length;
2101 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2102 		session->iv.offset = xform->auth.iv.offset;
2103 		session->iv.length = xform->auth.iv.length;
2104 	}
2105 
2106 	memcpy(session->auth_key.data, xform->auth.key.data,
2107 	       xform->auth.key.length);
2108 
2109 	switch (xform->auth.algo) {
2110 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2111 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2112 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2113 		break;
2114 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2115 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2116 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2117 		break;
2118 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2119 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2120 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2121 		break;
2122 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2123 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2124 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2125 		break;
2126 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2127 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2128 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2129 		break;
2130 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2131 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2132 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2133 		break;
2134 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2135 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2136 		session->auth_key.algmode = OP_ALG_AAI_F9;
2137 		break;
2138 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2139 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2140 		session->auth_key.algmode = OP_ALG_AAI_F9;
2141 		break;
2142 	default:
2143 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2144 			      xform->auth.algo);
2145 		rte_free(session->auth_key.data);
2146 		return -1;
2147 	}
2148 
2149 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2150 			DIR_ENC : DIR_DEC;
2151 
2152 	return 0;
2153 }
2154 
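/* Fill a cipher + auth (chained) session; the xform order is taken from
 * auth_cipher_text, which the caller derives from the xform chain.
 */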
2155 static int
2156 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2157 		   struct rte_crypto_sym_xform *xform,
2158 		   dpaa_sec_session *session)
2159 {
2160 
2161 	struct rte_crypto_cipher_xform *cipher_xform;
2162 	struct rte_crypto_auth_xform *auth_xform;
2163 
2164 	session->ctxt = DPAA_SEC_CIPHER_HASH;
2165 	if (session->auth_cipher_text) {
2166 		cipher_xform = &xform->cipher;
2167 		auth_xform = &xform->next->auth;
2168 	} else {
2169 		cipher_xform = &xform->next->cipher;
2170 		auth_xform = &xform->auth;
2171 	}
2172 
2173 	/* Set IV parameters */
2174 	session->iv.offset = cipher_xform->iv.offset;
2175 	session->iv.length = cipher_xform->iv.length;
2176 
2177 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2178 					       RTE_CACHE_LINE_SIZE);
2179 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2180 		DPAA_SEC_ERR("No Memory for cipher key");
2181 		return -ENOMEM;
2182 	}
2183 	session->cipher_key.length = cipher_xform->key.length;
2184 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2185 					     RTE_CACHE_LINE_SIZE);
2186 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2187 		DPAA_SEC_ERR("No Memory for auth key");
2188 		rte_free(session->cipher_key.data);
2189 		return -ENOMEM;
2190 	}
2191 	session->auth_key.length = auth_xform->key.length;
2192 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2193 	       cipher_xform->key.length);
2194 	memcpy(session->auth_key.data, auth_xform->key.data,
2195 	       auth_xform->key.length);
2196 
2197 	session->digest_length = auth_xform->digest_length;
2198 	session->auth_alg = auth_xform->algo;
2199 
2200 	switch (auth_xform->algo) {
2201 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2202 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2203 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2204 		break;
2205 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2206 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2207 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2208 		break;
2209 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2210 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2211 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2212 		break;
2213 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2214 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2215 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2216 		break;
2217 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2218 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2219 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2220 		break;
2221 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2222 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2223 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2224 		break;
2225 	default:
2226 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2227 			      auth_xform->algo);
2228 		goto error_out;
2229 	}
2230 
2231 	session->cipher_alg = cipher_xform->algo;
2232 
2233 	switch (cipher_xform->algo) {
2234 	case RTE_CRYPTO_CIPHER_AES_CBC:
2235 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2236 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2237 		break;
2238 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2239 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2240 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2241 		break;
2242 	case RTE_CRYPTO_CIPHER_AES_CTR:
2243 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2244 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2245 		break;
2246 	default:
2247 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2248 			      cipher_xform->algo);
2249 		goto error_out;
2250 	}
2251 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2252 				DIR_ENC : DIR_DEC;
2253 	return 0;
2254 
2255 error_out:
2256 	rte_free(session->cipher_key.data);
2257 	rte_free(session->auth_key.data);
2258 	return -1;
2259 }
2260 
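/* Fill an AEAD session: copy the key, AAD and digest lengths, and select
 * the AES-GCM algorithm/mode for the SEC descriptor.
 */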
2261 static int
2262 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2263 		   struct rte_crypto_sym_xform *xform,
2264 		   dpaa_sec_session *session)
2265 {
2266 	session->aead_alg = xform->aead.algo;
2267 	session->ctxt = DPAA_SEC_AEAD;
2268 	session->iv.length = xform->aead.iv.length;
2269 	session->iv.offset = xform->aead.iv.offset;
2270 	session->auth_only_len = xform->aead.aad_length;
2271 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2272 					     RTE_CACHE_LINE_SIZE);
2273 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2274 		DPAA_SEC_ERR("No Memory for aead key\n");
2275 		return -ENOMEM;
2276 	}
2277 	session->aead_key.length = xform->aead.key.length;
2278 	session->digest_length = xform->aead.digest_length;
2279 
2280 	memcpy(session->aead_key.data, xform->aead.key.data,
2281 	       xform->aead.key.length);
2282 
2283 	switch (session->aead_alg) {
2284 	case RTE_CRYPTO_AEAD_AES_GCM:
2285 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2286 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2287 		break;
2288 	default:
2289 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2290 		rte_free(session->aead_key.data);
2291 		return -ENOTSUP;
2292 	}
2293 
2294 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2295 			DIR_ENC : DIR_DEC;
2296 
2297 	return 0;
2298 }
2299 
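/* Reserve a free SEC input queue for a session; returns NULL when all
 * RTE_DPAA_MAX_RX_QUEUE queues are already in use.
 */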
2300 static struct qman_fq *
2301 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2302 {
2303 	unsigned int i;
2304 
2305 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2306 		if (qi->inq_attach[i] == 0) {
2307 			qi->inq_attach[i] = 1;
2308 			return &qi->inq[i];
2309 		}
2310 	}
2311 	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2312 
2313 	return NULL;
2314 }
2315 
2316 static int
2317 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2318 {
2319 	unsigned int i;
2320 
2321 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2322 		if (&qi->inq[i] == fq) {
2323 			if (qman_retire_fq(fq, NULL) != 0)
2324 				DPAA_SEC_WARN("Queue is not retired\n");
2325 			qman_oos_fq(fq);
2326 			qi->inq_attach[i] = 0;
2327 			return 0;
2328 		}
2329 	}
2330 	return -1;
2331 }
2332 
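/* Bind a session to a queue pair for the current lcore: prepare the CDB,
 * affine a portal if needed and point the session input queue at the
 * queue pair's output queue.
 */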
2333 static int
2334 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2335 {
2336 	int ret;
2337 
2338 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2339 	ret = dpaa_sec_prep_cdb(sess);
2340 	if (ret) {
2341 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2342 		return -1;
2343 	}
2344 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2345 		ret = rte_dpaa_portal_init((void *)0);
2346 		if (ret) {
2347 			DPAA_SEC_ERR("Failure in affining portal");
2348 			return ret;
2349 		}
2350 	}
2351 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2352 			       dpaa_mem_vtop(&sess->cdb),
2353 			       qman_fq_fqid(&qp->outq));
2354 	if (ret)
2355 		DPAA_SEC_ERR("Unable to init sec queue");
2356 
2357 	return ret;
2358 }
2359 
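/* Parse the symmetric xform chain (cipher only, auth only, cipher+auth
 * or AEAD), initialize the session accordingly and reserve one SEC input
 * queue per core.
 */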
2360 static int
2361 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2362 			    struct rte_crypto_sym_xform *xform,	void *sess)
2363 {
2364 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2365 	dpaa_sec_session *session = sess;
2366 	uint32_t i;
2367 	int ret;
2368 
2369 	PMD_INIT_FUNC_TRACE();
2370 
2371 	if (unlikely(sess == NULL)) {
2372 		DPAA_SEC_ERR("invalid session struct");
2373 		return -EINVAL;
2374 	}
2375 	memset(session, 0, sizeof(dpaa_sec_session));
2376 
2377 	/* Default IV length = 0 */
2378 	session->iv.length = 0;
2379 
2380 	/* Cipher Only */
2381 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2382 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2383 		ret = dpaa_sec_cipher_init(dev, xform, session);
2384 
2385 	/* Authentication Only */
2386 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2387 		   xform->next == NULL) {
2388 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2389 		session->ctxt = DPAA_SEC_AUTH;
2390 		ret = dpaa_sec_auth_init(dev, xform, session);
2391 
2392 	/* Cipher then Authenticate */
2393 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2394 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2395 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2396 			session->auth_cipher_text = 1;
2397 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2398 				ret = dpaa_sec_auth_init(dev, xform, session);
2399 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2400 				ret = dpaa_sec_cipher_init(dev, xform, session);
2401 			else
2402 				ret = dpaa_sec_chain_init(dev, xform, session);
2403 		} else {
2404 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2405 			return -EINVAL;
2406 		}
2407 	/* Authenticate then Cipher */
2408 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2409 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2410 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2411 			session->auth_cipher_text = 0;
2412 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2413 				ret = dpaa_sec_cipher_init(dev, xform, session);
2414 			else if (xform->next->cipher.algo
2415 					== RTE_CRYPTO_CIPHER_NULL)
2416 				ret = dpaa_sec_auth_init(dev, xform, session);
2417 			else
2418 				ret = dpaa_sec_chain_init(dev, xform, session);
2419 		} else {
2420 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2421 			return -EINVAL;
2422 		}
2423 
2424 	/* AEAD operation for AES-GCM kind of Algorithms */
2425 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2426 		   xform->next == NULL) {
2427 		ret = dpaa_sec_aead_init(dev, xform, session);
2428 
2429 	} else {
2430 		DPAA_SEC_ERR("Invalid crypto type");
2431 		return -EINVAL;
2432 	}
2433 	if (ret) {
2434 		DPAA_SEC_ERR("unable to init session");
2435 		goto err1;
2436 	}
2437 
2438 	rte_spinlock_lock(&internals->lock);
2439 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2440 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2441 		if (session->inq[i] == NULL) {
2442 			DPAA_SEC_ERR("unable to attach sec queue");
2443 			rte_spinlock_unlock(&internals->lock);
2444 			goto err1;
2445 		}
2446 	}
2447 	rte_spinlock_unlock(&internals->lock);
2448 
2449 	return 0;
2450 
2451 err1:
2452 	rte_free(session->cipher_key.data);
2453 	rte_free(session->auth_key.data);
2454 	memset(session, 0, sizeof(dpaa_sec_session));
2455 
2456 	return -EINVAL;
2457 }
2458 
2459 static int
2460 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2461 		struct rte_crypto_sym_xform *xform,
2462 		struct rte_cryptodev_sym_session *sess,
2463 		struct rte_mempool *mempool)
2464 {
2465 	void *sess_private_data;
2466 	int ret;
2467 
2468 	PMD_INIT_FUNC_TRACE();
2469 
2470 	if (rte_mempool_get(mempool, &sess_private_data)) {
2471 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2472 		return -ENOMEM;
2473 	}
2474 
2475 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2476 	if (ret != 0) {
2477 		DPAA_SEC_ERR("failed to configure session parameters");
2478 
2479 		/* Return session to mempool */
2480 		rte_mempool_put(mempool, sess_private_data);
2481 		return ret;
2482 	}
2483 
2484 	set_sym_session_private_data(sess, dev->driver_id,
2485 			sess_private_data);
2486 
2487 
2488 	return 0;
2489 }
2490 
2491 static inline void
2492 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2493 {
2494 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2495 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2496 	uint8_t i;
2497 
2498 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2499 		if (s->inq[i])
2500 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2501 		s->inq[i] = NULL;
2502 		s->qp[i] = NULL;
2503 	}
2504 	rte_free(s->cipher_key.data);
2505 	rte_free(s->auth_key.data);
2506 	memset(s, 0, sizeof(dpaa_sec_session));
2507 	rte_mempool_put(sess_mp, (void *)s);
2508 }
2509 
2510 /** Clear the memory of session so it doesn't leave key material behind */
2511 static void
2512 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2513 		struct rte_cryptodev_sym_session *sess)
2514 {
2515 	PMD_INIT_FUNC_TRACE();
2516 	uint8_t index = dev->driver_id;
2517 	void *sess_priv = get_sym_session_private_data(sess, index);
2518 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2519 
2520 	if (sess_priv) {
2521 		free_session_memory(dev, s);
2522 		set_sym_session_private_data(sess, index, NULL);
2523 	}
2524 }
2525 
2526 #ifdef RTE_LIBRTE_SECURITY
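/* Fill the AEAD (AES-GCM) part of an IPsec session, including the salt
 * carried in the encap/decap PDB.
 */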
2527 static int
2528 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2529 			struct rte_security_ipsec_xform *ipsec_xform,
2530 			dpaa_sec_session *session)
2531 {
2532 	PMD_INIT_FUNC_TRACE();
2533 
2534 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2535 					       RTE_CACHE_LINE_SIZE);
2536 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2537 		DPAA_SEC_ERR("No Memory for aead key");
2538 		return -ENOMEM;
2539 	}
2540 	memcpy(session->aead_key.data, aead_xform->key.data,
2541 	       aead_xform->key.length);
2542 
2543 	session->digest_length = aead_xform->digest_length;
2544 	session->aead_key.length = aead_xform->key.length;
2545 
2546 	switch (aead_xform->algo) {
2547 	case RTE_CRYPTO_AEAD_AES_GCM:
2548 		switch (session->digest_length) {
2549 		case 8:
2550 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2551 			break;
2552 		case 12:
2553 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2554 			break;
2555 		case 16:
2556 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2557 			break;
2558 		default:
2559 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2560 				     session->digest_length);
2561 			return -1;
2562 		}
2563 		if (session->dir == DIR_ENC) {
2564 			memcpy(session->encap_pdb.gcm.salt,
2565 				(uint8_t *)&(ipsec_xform->salt), 4);
2566 		} else {
2567 			memcpy(session->decap_pdb.gcm.salt,
2568 				(uint8_t *)&(ipsec_xform->salt), 4);
2569 		}
2570 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2571 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2572 		break;
2573 	default:
2574 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2575 			      aead_xform->algo);
2576 		return -1;
2577 	}
2578 	return 0;
2579 }
2580 
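/* Copy cipher/auth keys for an IPsec session and translate the algorithms
 * into the IPsec protocol selectors (OP_PCL_IPSEC_*) used by the shared
 * descriptor.
 */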
2581 static int
2582 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2583 	struct rte_crypto_auth_xform *auth_xform,
2584 	struct rte_security_ipsec_xform *ipsec_xform,
2585 	dpaa_sec_session *session)
2586 {
2587 	if (cipher_xform) {
2588 		session->cipher_key.data = rte_zmalloc(NULL,
2589 						       cipher_xform->key.length,
2590 						       RTE_CACHE_LINE_SIZE);
2591 		if (session->cipher_key.data == NULL &&
2592 				cipher_xform->key.length > 0) {
2593 			DPAA_SEC_ERR("No Memory for cipher key");
2594 			return -ENOMEM;
2595 		}
2596 
2597 		session->cipher_key.length = cipher_xform->key.length;
2598 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2599 				cipher_xform->key.length);
2600 		session->cipher_alg = cipher_xform->algo;
2601 	} else {
2602 		session->cipher_key.data = NULL;
2603 		session->cipher_key.length = 0;
2604 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2605 	}
2606 
2607 	if (auth_xform) {
2608 		session->auth_key.data = rte_zmalloc(NULL,
2609 						auth_xform->key.length,
2610 						RTE_CACHE_LINE_SIZE);
2611 		if (session->auth_key.data == NULL &&
2612 				auth_xform->key.length > 0) {
2613 			DPAA_SEC_ERR("No Memory for auth key");
2614 			return -ENOMEM;
2615 		}
2616 		session->auth_key.length = auth_xform->key.length;
2617 		memcpy(session->auth_key.data, auth_xform->key.data,
2618 				auth_xform->key.length);
2619 		session->auth_alg = auth_xform->algo;
2620 		session->digest_length = auth_xform->digest_length;
2621 	} else {
2622 		session->auth_key.data = NULL;
2623 		session->auth_key.length = 0;
2624 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2625 	}
2626 
2627 	switch (session->auth_alg) {
2628 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2629 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2630 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2631 		break;
2632 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2633 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2634 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2635 		break;
2636 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2637 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2638 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2639 		if (session->digest_length != 16)
2640 			DPAA_SEC_WARN(
2641 			"Non-standard truncated digest length for sha256-hmac; "
2642 			"it will not work with lookaside protocol offload");
2643 		break;
2644 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2645 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2646 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2647 		break;
2648 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2649 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2650 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2651 		break;
2652 	case RTE_CRYPTO_AUTH_AES_CMAC:
2653 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2654 		break;
2655 	case RTE_CRYPTO_AUTH_NULL:
2656 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2657 		break;
2658 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2659 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2660 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2661 	case RTE_CRYPTO_AUTH_SHA1:
2662 	case RTE_CRYPTO_AUTH_SHA256:
2663 	case RTE_CRYPTO_AUTH_SHA512:
2664 	case RTE_CRYPTO_AUTH_SHA224:
2665 	case RTE_CRYPTO_AUTH_SHA384:
2666 	case RTE_CRYPTO_AUTH_MD5:
2667 	case RTE_CRYPTO_AUTH_AES_GMAC:
2668 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2669 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2670 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2671 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2672 			      session->auth_alg);
2673 		return -1;
2674 	default:
2675 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2676 			      session->auth_alg);
2677 		return -1;
2678 	}
2679 
2680 	switch (session->cipher_alg) {
2681 	case RTE_CRYPTO_CIPHER_AES_CBC:
2682 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2683 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2684 		break;
2685 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2686 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2687 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2688 		break;
2689 	case RTE_CRYPTO_CIPHER_AES_CTR:
2690 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2691 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2692 		if (session->dir == DIR_ENC) {
2693 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2694 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2695 		} else {
2696 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2697 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2698 		}
2699 		break;
2700 	case RTE_CRYPTO_CIPHER_NULL:
2701 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2702 		break;
2703 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2704 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2705 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2706 	case RTE_CRYPTO_CIPHER_AES_ECB:
2707 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2708 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2709 			      session->cipher_alg);
2710 		return -1;
2711 	default:
2712 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2713 			      session->cipher_alg);
2714 		return -1;
2715 	}
2716 
2717 	return 0;
2718 }
2719 
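/* Build a lookaside IPsec session: process the crypto xforms, construct
 * the tunnel IPv4/IPv6 header and the encap/decap PDB, then attach one
 * SEC input queue per core.
 */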
2720 static int
2721 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2722 			   struct rte_security_session_conf *conf,
2723 			   void *sess)
2724 {
2725 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2726 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2727 	struct rte_crypto_auth_xform *auth_xform = NULL;
2728 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2729 	struct rte_crypto_aead_xform *aead_xform = NULL;
2730 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2731 	uint32_t i;
2732 	int ret;
2733 
2734 	PMD_INIT_FUNC_TRACE();
2735 
2736 	memset(session, 0, sizeof(dpaa_sec_session));
2737 	session->proto_alg = conf->protocol;
2738 	session->ctxt = DPAA_SEC_IPSEC;
2739 
2740 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2741 		session->dir = DIR_ENC;
2742 	else
2743 		session->dir = DIR_DEC;
2744 
2745 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2746 		cipher_xform = &conf->crypto_xform->cipher;
2747 		if (conf->crypto_xform->next)
2748 			auth_xform = &conf->crypto_xform->next->auth;
2749 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2750 					ipsec_xform, session);
2751 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2752 		auth_xform = &conf->crypto_xform->auth;
2753 		if (conf->crypto_xform->next)
2754 			cipher_xform = &conf->crypto_xform->next->cipher;
2755 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2756 					ipsec_xform, session);
2757 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2758 		aead_xform = &conf->crypto_xform->aead;
2759 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2760 					ipsec_xform, session);
2761 	} else {
2762 		DPAA_SEC_ERR("XFORM not specified");
2763 		ret = -EINVAL;
2764 		goto out;
2765 	}
2766 	if (ret) {
2767 		DPAA_SEC_ERR("Failed to process xform");
2768 		goto out;
2769 	}
2770 
2771 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2772 		if (ipsec_xform->tunnel.type ==
2773 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2774 			session->ip4_hdr.ip_v = IPVERSION;
2775 			session->ip4_hdr.ip_hl = 5;
2776 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2777 						sizeof(session->ip4_hdr));
2778 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2779 			session->ip4_hdr.ip_id = 0;
2780 			session->ip4_hdr.ip_off = 0;
2781 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2782 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2783 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2784 					IPPROTO_ESP : IPPROTO_AH;
2785 			session->ip4_hdr.ip_sum = 0;
2786 			session->ip4_hdr.ip_src =
2787 					ipsec_xform->tunnel.ipv4.src_ip;
2788 			session->ip4_hdr.ip_dst =
2789 					ipsec_xform->tunnel.ipv4.dst_ip;
2790 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2791 						(void *)&session->ip4_hdr,
2792 						sizeof(struct ip));
2793 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2794 		} else if (ipsec_xform->tunnel.type ==
2795 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2796 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2797 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2798 				((ipsec_xform->tunnel.ipv6.dscp <<
2799 					RTE_IPV6_HDR_TC_SHIFT) &
2800 					RTE_IPV6_HDR_TC_MASK) |
2801 				((ipsec_xform->tunnel.ipv6.flabel <<
2802 					RTE_IPV6_HDR_FL_SHIFT) &
2803 					RTE_IPV6_HDR_FL_MASK));
2804 			/* Payload length will be updated by HW */
2805 			session->ip6_hdr.payload_len = 0;
2806 			session->ip6_hdr.hop_limits =
2807 					ipsec_xform->tunnel.ipv6.hlimit;
2808 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2809 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2810 					IPPROTO_ESP : IPPROTO_AH;
2811 			memcpy(&session->ip6_hdr.src_addr,
2812 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2813 			memcpy(&session->ip6_hdr.dst_addr,
2814 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2815 			session->encap_pdb.ip_hdr_len =
2816 						sizeof(struct rte_ipv6_hdr);
2817 		}
2818 		session->encap_pdb.options =
2819 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2820 			PDBOPTS_ESP_OIHI_PDB_INL |
2821 			PDBOPTS_ESP_IVSRC |
2822 			PDBHMO_ESP_ENCAP_DTTL |
2823 			PDBHMO_ESP_SNR;
2824 		if (ipsec_xform->options.esn)
2825 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2826 		session->encap_pdb.spi = ipsec_xform->spi;
2827 
2828 	} else if (ipsec_xform->direction ==
2829 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2830 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2831 			session->decap_pdb.options = sizeof(struct ip) << 16;
2832 		else
2833 			session->decap_pdb.options =
2834 					sizeof(struct rte_ipv6_hdr) << 16;
2835 		if (ipsec_xform->options.esn)
2836 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2837 		if (ipsec_xform->replay_win_sz) {
2838 			uint32_t win_sz;
2839 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2840 
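			/* SEC supports anti-replay windows of 32, 64 or 128
			 * entries; map the requested size onto the nearest
			 * supported option.
			 */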
2841 			switch (win_sz) {
2842 			case 1:
2843 			case 2:
2844 			case 4:
2845 			case 8:
2846 			case 16:
2847 			case 32:
2848 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2849 				break;
2850 			case 64:
2851 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2852 				break;
2853 			default:
2854 				session->decap_pdb.options |=
2855 							PDBOPTS_ESP_ARS128;
2856 			}
2857 		}
2858 	} else
2859 		goto out;
2860 	rte_spinlock_lock(&internals->lock);
2861 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2862 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2863 		if (session->inq[i] == NULL) {
2864 			DPAA_SEC_ERR("unable to attach sec queue");
2865 			rte_spinlock_unlock(&internals->lock);
2866 			goto out;
2867 		}
2868 	}
2869 	rte_spinlock_unlock(&internals->lock);
2870 
2871 	return 0;
2872 out:
2873 	rte_free(session->auth_key.data);
2874 	rte_free(session->cipher_key.data);
2875 	memset(session, 0, sizeof(dpaa_sec_session));
2876 	return -1;
2877 }
2878 
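/* Build a PDCP session: map cipher/auth algorithms onto the PDCP
 * selectors, copy the keys and record the PDCP parameters (domain,
 * bearer, direction, SN size, HFN) used to build the shared descriptor.
 */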
2879 static int
2880 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2881 			  struct rte_security_session_conf *conf,
2882 			  void *sess)
2883 {
2884 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2885 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2886 	struct rte_crypto_auth_xform *auth_xform = NULL;
2887 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2888 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2889 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2890 	uint32_t i;
2891 
2892 	PMD_INIT_FUNC_TRACE();
2893 
2894 	memset(session, 0, sizeof(dpaa_sec_session));
2895 
2896 	/* find xfrm types */
2897 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2898 		cipher_xform = &xform->cipher;
2899 		if (xform->next != NULL)
2900 			auth_xform = &xform->next->auth;
2901 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2902 		auth_xform = &xform->auth;
2903 		if (xform->next != NULL)
2904 			cipher_xform = &xform->next->cipher;
2905 	} else {
2906 		DPAA_SEC_ERR("Invalid crypto type");
2907 		return -EINVAL;
2908 	}
2909 
2910 	session->proto_alg = conf->protocol;
2911 	session->ctxt = DPAA_SEC_PDCP;
2912 
2913 	if (cipher_xform) {
2914 		switch (cipher_xform->algo) {
2915 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2916 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2917 			break;
2918 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2919 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2920 			break;
2921 		case RTE_CRYPTO_CIPHER_AES_CTR:
2922 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2923 			break;
2924 		case RTE_CRYPTO_CIPHER_NULL:
2925 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2926 			break;
2927 		default:
2928 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2929 				      cipher_xform->algo);
2930 			return -1;
2931 		}
2932 
2933 		session->cipher_key.data = rte_zmalloc(NULL,
2934 					       cipher_xform->key.length,
2935 					       RTE_CACHE_LINE_SIZE);
2936 		if (session->cipher_key.data == NULL &&
2937 				cipher_xform->key.length > 0) {
2938 			DPAA_SEC_ERR("No Memory for cipher key");
2939 			return -ENOMEM;
2940 		}
2941 		session->cipher_key.length = cipher_xform->key.length;
2942 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2943 			cipher_xform->key.length);
2944 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2945 					DIR_ENC : DIR_DEC;
2946 		session->cipher_alg = cipher_xform->algo;
2947 	} else {
2948 		session->cipher_key.data = NULL;
2949 		session->cipher_key.length = 0;
2950 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2951 		session->dir = DIR_ENC;
2952 	}
2953 
2954 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2955 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2956 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2957 			DPAA_SEC_ERR(
2958 				"PDCP Seq Num size should be 5/12 bits for cmode");
2959 			goto out;
2960 		}
2961 	}
2962 
2963 	if (auth_xform) {
2964 		switch (auth_xform->algo) {
2965 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2966 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2967 			break;
2968 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
2969 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2970 			break;
2971 		case RTE_CRYPTO_AUTH_AES_CMAC:
2972 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2973 			break;
2974 		case RTE_CRYPTO_AUTH_NULL:
2975 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2976 			break;
2977 		default:
2978 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2979 				      auth_xform->algo);
2980 			rte_free(session->cipher_key.data);
2981 			return -1;
2982 		}
2983 		session->auth_key.data = rte_zmalloc(NULL,
2984 						     auth_xform->key.length,
2985 						     RTE_CACHE_LINE_SIZE);
2986 		if (!session->auth_key.data &&
2987 		    auth_xform->key.length > 0) {
2988 			DPAA_SEC_ERR("No Memory for auth key");
2989 			rte_free(session->cipher_key.data);
2990 			return -ENOMEM;
2991 		}
2992 		session->auth_key.length = auth_xform->key.length;
2993 		memcpy(session->auth_key.data, auth_xform->key.data,
2994 		       auth_xform->key.length);
2995 		session->auth_alg = auth_xform->algo;
2996 	} else {
2997 		session->auth_key.data = NULL;
2998 		session->auth_key.length = 0;
2999 		session->auth_alg = 0;
3000 	}
3001 	session->pdcp.domain = pdcp_xform->domain;
3002 	session->pdcp.bearer = pdcp_xform->bearer;
3003 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3004 	session->pdcp.sn_size = pdcp_xform->sn_size;
3005 	session->pdcp.hfn = pdcp_xform->hfn;
3006 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3007 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3008 	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3009 
3010 	rte_spinlock_lock(&dev_priv->lock);
3011 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3012 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3013 		if (session->inq[i] == NULL) {
3014 			DPAA_SEC_ERR("unable to attach sec queue");
3015 			rte_spinlock_unlock(&dev_priv->lock);
3016 			goto out;
3017 		}
3018 	}
3019 	rte_spinlock_unlock(&dev_priv->lock);
3020 	return 0;
3021 out:
3022 	rte_free(session->auth_key.data);
3023 	rte_free(session->cipher_key.data);
3024 	memset(session, 0, sizeof(dpaa_sec_session));
3025 	return -1;
3026 }
3027 
3028 static int
3029 dpaa_sec_security_session_create(void *dev,
3030 				 struct rte_security_session_conf *conf,
3031 				 struct rte_security_session *sess,
3032 				 struct rte_mempool *mempool)
3033 {
3034 	void *sess_private_data;
3035 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3036 	int ret;
3037 
3038 	if (rte_mempool_get(mempool, &sess_private_data)) {
3039 		DPAA_SEC_ERR("Couldn't get object from session mempool");
3040 		return -ENOMEM;
3041 	}
3042 
3043 	switch (conf->protocol) {
3044 	case RTE_SECURITY_PROTOCOL_IPSEC:
3045 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
3046 				sess_private_data);
3047 		break;
3048 	case RTE_SECURITY_PROTOCOL_PDCP:
3049 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
3050 				sess_private_data);
3051 		break;
3052 	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
3053 		return -ENOTSUP;
3054 	default:
		rte_mempool_put(mempool, sess_private_data);
3055 		return -EINVAL;
3056 	}
3057 	if (ret != 0) {
3058 		DPAA_SEC_ERR("failed to configure session parameters");
3059 		/* Return session to mempool */
3060 		rte_mempool_put(mempool, sess_private_data);
3061 		return ret;
3062 	}
3063 
3064 	set_sec_session_private_data(sess, sess_private_data);
3065 
3066 	return ret;
3067 }
3068 
3069 /** Clear the memory of session so it doesn't leave key material behind */
3070 static int
3071 dpaa_sec_security_session_destroy(void *dev,
3072 		struct rte_security_session *sess)
3073 {
3074 	PMD_INIT_FUNC_TRACE();
3075 	void *sess_priv = get_sec_session_private_data(sess);
3076 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3077 
3078 	if (sess_priv) {
3079 		free_session_memory((struct rte_cryptodev *)dev, s);
3080 		set_sec_session_private_data(sess, NULL);
3081 	}
3082 	return 0;
3083 }
3084 #endif

3085 static int
3086 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3087 		       struct rte_cryptodev_config *config __rte_unused)
3088 {
3089 	PMD_INIT_FUNC_TRACE();
3090 
3091 	return 0;
3092 }
3093 
3094 static int
3095 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3096 {
3097 	PMD_INIT_FUNC_TRACE();
3098 	return 0;
3099 }
3100 
3101 static void
3102 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3103 {
3104 	PMD_INIT_FUNC_TRACE();
3105 }
3106 
3107 static int
3108 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3109 {
3110 	PMD_INIT_FUNC_TRACE();
3111 
3112 	if (dev == NULL)
3113 		return -ENODEV;
3114 
3115 	return 0;
3116 }
3117 
3118 static void
3119 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3120 		       struct rte_cryptodev_info *info)
3121 {
3122 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3123 
3124 	PMD_INIT_FUNC_TRACE();
3125 	if (info != NULL) {
3126 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3127 		info->feature_flags = dev->feature_flags;
3128 		info->capabilities = dpaa_sec_capabilities;
3129 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3130 		info->driver_id = cryptodev_driver_id;
3131 	}
3132 }
3133 
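/* DQRR callback for eventdev parallel mode: translate a SEC output frame
 * back into its crypto op and fill the rte_event for the application.
 */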
3134 static enum qman_cb_dqrr_result
3135 dpaa_sec_process_parallel_event(void *event,
3136 			struct qman_portal *qm __always_unused,
3137 			struct qman_fq *outq,
3138 			const struct qm_dqrr_entry *dqrr,
3139 			void **bufs)
3140 {
3141 	const struct qm_fd *fd;
3142 	struct dpaa_sec_job *job;
3143 	struct dpaa_sec_op_ctx *ctx;
3144 	struct rte_event *ev = (struct rte_event *)event;
3145 
3146 	fd = &dqrr->fd;
3147 
3148 	/* sg is embedded in an op ctx,
3149 	 * sg[0] is for output
3150 	 * sg[1] for input
3151 	 */
3152 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3153 
3154 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3155 	ctx->fd_status = fd->status;
3156 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3157 		struct qm_sg_entry *sg_out;
3158 		uint32_t len;
3159 
3160 		sg_out = &job->sg[0];
3161 		hw_sg_to_cpu(sg_out);
3162 		len = sg_out->length;
3163 		ctx->op->sym->m_src->pkt_len = len;
3164 		ctx->op->sym->m_src->data_len = len;
3165 	}
3166 	if (!ctx->fd_status) {
3167 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3168 	} else {
3169 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3170 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3171 	}
3172 	ev->event_ptr = (void *)ctx->op;
3173 
3174 	ev->flow_id = outq->ev.flow_id;
3175 	ev->sub_event_type = outq->ev.sub_event_type;
3176 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3177 	ev->op = RTE_EVENT_OP_NEW;
3178 	ev->sched_type = outq->ev.sched_type;
3179 	ev->queue_id = outq->ev.queue_id;
3180 	ev->priority = outq->ev.priority;
3181 	*bufs = (void *)ctx->op;
3182 
3183 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3184 
3185 	return qman_cb_dqrr_consume;
3186 }
3187 
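/* DQRR callback for eventdev atomic mode: same handling as the parallel
 * callback, but the DQRR entry is held (deferred) until the application
 * releases the event.
 */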
3188 static enum qman_cb_dqrr_result
3189 dpaa_sec_process_atomic_event(void *event,
3190 			struct qman_portal *qm __rte_unused,
3191 			struct qman_fq *outq,
3192 			const struct qm_dqrr_entry *dqrr,
3193 			void **bufs)
3194 {
3195 	u8 index;
3196 	const struct qm_fd *fd;
3197 	struct dpaa_sec_job *job;
3198 	struct dpaa_sec_op_ctx *ctx;
3199 	struct rte_event *ev = (struct rte_event *)event;
3200 
3201 	fd = &dqrr->fd;
3202 
3203 	/* sg is embedded in an op ctx,
3204 	 * sg[0] is for output
3205 	 * sg[1] for input
3206 	 */
3207 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3208 
3209 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3210 	ctx->fd_status = fd->status;
3211 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3212 		struct qm_sg_entry *sg_out;
3213 		uint32_t len;
3214 
3215 		sg_out = &job->sg[0];
3216 		hw_sg_to_cpu(sg_out);
3217 		len = sg_out->length;
3218 		ctx->op->sym->m_src->pkt_len = len;
3219 		ctx->op->sym->m_src->data_len = len;
3220 	}
3221 	if (!ctx->fd_status) {
3222 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3223 	} else {
3224 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3225 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3226 	}
3227 	ev->event_ptr = (void *)ctx->op;
3228 	ev->flow_id = outq->ev.flow_id;
3229 	ev->sub_event_type = outq->ev.sub_event_type;
3230 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3231 	ev->op = RTE_EVENT_OP_NEW;
3232 	ev->sched_type = outq->ev.sched_type;
3233 	ev->queue_id = outq->ev.queue_id;
3234 	ev->priority = outq->ev.priority;
3235 
3236 	/* Save active dqrr entries */
3237 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3238 	DPAA_PER_LCORE_DQRR_SIZE++;
3239 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3240 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3241 	ev->impl_opaque = index + 1;
3242 	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3243 	*bufs = (void *)ctx->op;
3244 
3245 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3246 
3247 	return qman_cb_dqrr_defer;
3248 }
3249 
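/* Attach a queue pair's SEC output queue to an event device channel and
 * pick the atomic or parallel dequeue callback from the schedule type.
 */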
3250 int
3251 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3252 		int qp_id,
3253 		uint16_t ch_id,
3254 		const struct rte_event *event)
3255 {
3256 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3257 	struct qm_mcc_initfq opts = {0};
3258 
3259 	int ret;
3260 
3261 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3262 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3263 	opts.fqd.dest.channel = ch_id;
3264 
3265 	switch (event->sched_type) {
3266 	case RTE_SCHED_TYPE_ATOMIC:
3267 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3268 		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3269 		 * configuration with HOLD_ACTIVE setting
3270 		 */
3271 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3272 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3273 		break;
3274 	case RTE_SCHED_TYPE_ORDERED:
3275 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3276 		return -1;
3277 	default:
3278 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3279 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3280 		break;
3281 	}
3282 
3283 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3284 	if (unlikely(ret)) {
3285 		DPAA_SEC_ERR("unable to init caam source fq!");
3286 		return ret;
3287 	}
3288 
3289 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3290 
3291 	return 0;
3292 }
3293 
3294 int
3295 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3296 			int qp_id)
3297 {
3298 	struct qm_mcc_initfq opts = {0};
3299 	int ret;
3300 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3301 
3302 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3303 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3304 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3305 	qp->outq.cb.ern  = ern_sec_fq_handler;
3306 	qman_retire_fq(&qp->outq, NULL);
3307 	qman_oos_fq(&qp->outq);
3308 	ret = qman_init_fq(&qp->outq, 0, &opts);
3309 	if (ret)
3310 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3311 	qp->outq.cb.dqrr = NULL;
3312 
3313 	return ret;
3314 }
3315 
3316 static struct rte_cryptodev_ops crypto_ops = {
3317 	.dev_configure	      = dpaa_sec_dev_configure,
3318 	.dev_start	      = dpaa_sec_dev_start,
3319 	.dev_stop	      = dpaa_sec_dev_stop,
3320 	.dev_close	      = dpaa_sec_dev_close,
3321 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3322 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3323 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3324 	.queue_pair_count     = dpaa_sec_queue_pair_count,
3325 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3326 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3327 	.sym_session_clear        = dpaa_sec_sym_session_clear
3328 };
3329 
3330 #ifdef RTE_LIBRTE_SECURITY
3331 static const struct rte_security_capability *
3332 dpaa_sec_capabilities_get(void *device __rte_unused)
3333 {
3334 	return dpaa_sec_security_cap;
3335 }
3336 
3337 static const struct rte_security_ops dpaa_sec_security_ops = {
3338 	.session_create = dpaa_sec_security_session_create,
3339 	.session_update = NULL,
3340 	.session_stats_get = NULL,
3341 	.session_destroy = dpaa_sec_security_session_destroy,
3342 	.set_pkt_metadata = NULL,
3343 	.capabilities_get = dpaa_sec_capabilities_get
3344 };
3345 #endif

3346 static int
3347 dpaa_sec_uninit(struct rte_cryptodev *dev)
3348 {
3349 	struct dpaa_sec_dev_private *internals;
3350 
3351 	if (dev == NULL)
3352 		return -ENODEV;
3353 
3354 	internals = dev->data->dev_private;
3355 	rte_free(dev->security_ctx);
3356 
3357 	rte_free(internals);
3358 
3359 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3360 		      dev->data->name, rte_socket_id());
3361 
3362 	return 0;
3363 }
3364 
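/* Per-device initialization: register ops and feature flags, create the
 * output queue of each queue pair and the shared pool of SEC input queues.
 */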
3365 static int
3366 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3367 {
3368 	struct dpaa_sec_dev_private *internals;
3369 #ifdef RTE_LIBRTE_SECURITY
3370 	struct rte_security_ctx *security_instance;
3371 #endif
3372 	struct dpaa_sec_qp *qp;
3373 	uint32_t i, flags;
3374 	int ret;
3375 
3376 	PMD_INIT_FUNC_TRACE();
3377 
3378 	cryptodev->driver_id = cryptodev_driver_id;
3379 	cryptodev->dev_ops = &crypto_ops;
3380 
3381 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3382 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3383 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3384 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3385 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3386 			RTE_CRYPTODEV_FF_SECURITY |
3387 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3388 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3389 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3390 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3391 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3392 
3393 	internals = cryptodev->data->dev_private;
3394 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3395 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3396 
3397 	/*
3398 	 * For secondary processes, we don't initialise any further as primary
3399 	 * has already done this work. Only check we don't need a different
3400 	 * RX function
3401 	 */
3402 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3403 		DPAA_SEC_WARN("Device already initialized by primary process");
3404 		return 0;
3405 	}
3406 #ifdef RTE_LIBRTE_SECURITY
3407 	/* Initialize security_ctx only for primary process*/
3408 	security_instance = rte_malloc("rte_security_instances_ops",
3409 				sizeof(struct rte_security_ctx), 0);
3410 	if (security_instance == NULL)
3411 		return -ENOMEM;
3412 	security_instance->device = (void *)cryptodev;
3413 	security_instance->ops = &dpaa_sec_security_ops;
3414 	security_instance->sess_cnt = 0;
3415 	cryptodev->security_ctx = security_instance;
3416 #endif
3417 	rte_spinlock_init(&internals->lock);
3418 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3419 		/* init qman fq for queue pair */
3420 		qp = &internals->qps[i];
3421 		ret = dpaa_sec_init_tx(&qp->outq);
3422 		if (ret) {
3423 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3424 			goto init_error;
3425 		}
3426 	}
3427 
3428 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3429 		QMAN_FQ_FLAG_TO_DCPORTAL;
3430 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3431 		/* create rx qman fq for sessions*/
3432 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3433 		if (unlikely(ret != 0)) {
3434 			DPAA_SEC_ERR("sec qman_create_fq failed");
3435 			goto init_error;
3436 		}
3437 	}
3438 
3439 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3440 	return 0;
3441 
3442 init_error:
3443 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3444 
3445 	dpaa_sec_uninit(cryptodev);
3446 	return -EFAULT;
3447 }
3448 
3449 static int
3450 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3451 				struct rte_dpaa_device *dpaa_dev)
3452 {
3453 	struct rte_cryptodev *cryptodev;
3454 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3455 
3456 	int retval;
3457 
3458 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3459 
3460 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3461 	if (cryptodev == NULL)
3462 		return -ENOMEM;
3463 
3464 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3465 		cryptodev->data->dev_private = rte_zmalloc_socket(
3466 					"cryptodev private structure",
3467 					sizeof(struct dpaa_sec_dev_private),
3468 					RTE_CACHE_LINE_SIZE,
3469 					rte_socket_id());
3470 
3471 		if (cryptodev->data->dev_private == NULL)
3472 			rte_panic("Cannot allocate memzone for private "
3473 					"device data");
3474 	}
3475 
3476 	dpaa_dev->crypto_dev = cryptodev;
3477 	cryptodev->device = &dpaa_dev->device;
3478 
3479 	/* init user callbacks */
3480 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3481 
3482 	/* if sec device version is not configured */
3483 	if (!rta_get_sec_era()) {
3484 		const struct device_node *caam_node;
3485 
3486 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3487 			const uint32_t *prop = of_get_property(caam_node,
3488 					"fsl,sec-era",
3489 					NULL);
3490 			if (prop) {
3491 				rta_set_sec_era(
3492 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3493 				break;
3494 			}
3495 		}
3496 	}
3497 
3498 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
3499 		retval = rte_dpaa_portal_init((void *)1);
3500 		if (retval) {
3501 			DPAA_SEC_ERR("Unable to initialize portal");
3502 			return retval;
3503 		}
3504 	}
3505 
3506 	/* Invoke PMD device initialization function */
3507 	retval = dpaa_sec_dev_init(cryptodev);
3508 	if (retval == 0)
3509 		return 0;
3510 
3511 	/* In case of error, cleanup is done */
3512 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3513 		rte_free(cryptodev->data->dev_private);
3514 
3515 	rte_cryptodev_pmd_release_device(cryptodev);
3516 
3517 	return -ENXIO;
3518 }
3519 
3520 static int
3521 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3522 {
3523 	struct rte_cryptodev *cryptodev;
3524 	int ret;
3525 
3526 	cryptodev = dpaa_dev->crypto_dev;
3527 	if (cryptodev == NULL)
3528 		return -ENODEV;
3529 
3530 	ret = dpaa_sec_uninit(cryptodev);
3531 	if (ret)
3532 		return ret;
3533 
3534 	return rte_cryptodev_pmd_destroy(cryptodev);
3535 }
3536 
3537 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3538 	.drv_type = FSL_DPAA_CRYPTO,
3539 	.driver = {
3540 		.name = "DPAA SEC PMD"
3541 	},
3542 	.probe = cryptodev_dpaa_sec_probe,
3543 	.remove = cryptodev_dpaa_sec_remove,
3544 };
3545 
3546 static struct cryptodev_driver dpaa_sec_crypto_drv;
3547 
3548 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3549 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3550 		cryptodev_driver_id);
3551 
3552 RTE_INIT(dpaa_sec_init_log)
3553 {
3554 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
3555 	if (dpaa_logtype_sec >= 0)
3556 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
3557 }
3558