xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 2ed12d9b63c6fef17c779426b4231fc4ed72105c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <rte_dev.h>
23 #include <rte_ip.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
26 #include <rte_mbuf.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
30 
31 #include <fsl_usd.h>
32 #include <fsl_qman.h>
33 #include <dpaa_of.h>
34 
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41 
42 #include <rte_dpaa_bus.h>
43 #include <dpaa_sec.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
47 
48 static uint8_t cryptodev_driver_id;
49 
50 static int
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
52 
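/* Set the crypto op status from the frame descriptor status returned by SEC:
 * a zero fd_status means success, anything else is reported as an error.
 */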
53 static inline void
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
55 {
56 	if (!ctx->fd_status) {
57 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58 	} else {
59 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
61 	}
62 }
63 
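/* Allocate a per-operation context from the ctx_pool of the queue pair
 * attached to the current lcore and clear the first sg_count SG entries.
 * Returns NULL if the pool is exhausted.
 */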
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
66 {
67 	struct dpaa_sec_op_ctx *ctx;
68 	int i, retval;
69 
70 	retval = rte_mempool_get(
71 			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
72 			(void **)(&ctx));
73 	if (!ctx || retval) {
74 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75 		return NULL;
76 	}
77 	/*
78 	 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
79 	 * one call to dcbz_64() clears 64 bytes, so it is invoked once per
80 	 * four entries to clear them all. dpaa_sec_alloc_ctx() is called for
81 	 * each packet, and memset() is costlier than dcbz_64().
82 	 */
83 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84 		dcbz_64(&ctx->job.sg[i]);
85 
86 	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
88 
89 	return ctx;
90 }
91 
92 static void
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
94 		   struct qman_fq *fq,
95 		   const struct qm_mr_entry *msg)
96 {
97 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
99 }
100 
101 /* Initialize the queue with the CAAM channel as destination channel so
102  * that all the packets in this queue are dispatched to CAAM.
103  */
104 static int
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
106 		 uint32_t fqid_out)
107 {
108 	struct qm_mcc_initfq fq_opts;
109 	uint32_t flags;
110 	int ret = -1;
111 
112 	/* Clear FQ options */
113 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
114 
115 	flags = QMAN_INITFQ_FLAG_SCHED;
116 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117 			  QM_INITFQ_WE_CONTEXTB;
118 
119 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120 	fq_opts.fqd.context_b = fqid_out;
121 	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122 	fq_opts.fqd.dest.wq = 0;
123 
124 	fq_in->cb.ern  = ern_sec_fq_handler;
125 
126 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
127 
128 	ret = qman_init_fq(fq_in, flags, &fq_opts);
129 	if (unlikely(ret != 0))
130 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
131 
132 	return ret;
133 }
134 
135 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138 		  struct qman_fq *fq __always_unused,
139 		  const struct qm_dqrr_entry *dqrr)
140 {
141 	const struct qm_fd *fd;
142 	struct dpaa_sec_job *job;
143 	struct dpaa_sec_op_ctx *ctx;
144 
145 	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146 		return qman_cb_dqrr_defer;
147 
148 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149 		return qman_cb_dqrr_consume;
150 
151 	fd = &dqrr->fd;
152 	/* sg is embedded in an op ctx,
153 	 * sg[0] is for output
154 	 * sg[1] for input
155 	 */
156 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
157 
158 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159 	ctx->fd_status = fd->status;
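	/* For protocol-offload (security) sessions the output length can differ
	 * from the input, so fix up pkt_len and the last segment's data_len
	 * from the output SG entry length reported by SEC.
	 */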
160 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161 		struct qm_sg_entry *sg_out;
162 		uint32_t len;
163 		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164 				ctx->op->sym->m_src : ctx->op->sym->m_dst;
165 
166 		sg_out = &job->sg[0];
167 		hw_sg_to_cpu(sg_out);
168 		len = sg_out->length;
169 		mbuf->pkt_len = len;
170 		while (mbuf->next != NULL) {
171 			len -= mbuf->data_len;
172 			mbuf = mbuf->next;
173 		}
174 		mbuf->data_len = len;
175 	}
176 	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177 	dpaa_sec_op_ending(ctx);
178 
179 	return qman_cb_dqrr_consume;
180 }
181 
182 /* caam result is put into this queue */
183 static int
184 dpaa_sec_init_tx(struct qman_fq *fq)
185 {
186 	int ret;
187 	struct qm_mcc_initfq opts;
188 	uint32_t flags;
189 
190 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191 		QMAN_FQ_FLAG_DYNAMIC_FQID;
192 
193 	ret = qman_create_fq(0, flags, fq);
194 	if (unlikely(ret)) {
195 		DPAA_SEC_ERR("qman_create_fq failed");
196 		return ret;
197 	}
198 
199 	memset(&opts, 0, sizeof(opts));
200 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
202 
203 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
204 
205 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
206 	fq->cb.ern  = ern_sec_fq_handler;
207 
208 	ret = qman_init_fq(fq, 0, &opts);
209 	if (unlikely(ret)) {
210 		DPAA_SEC_ERR("unable to init caam source fq!");
211 		return ret;
212 	}
213 
214 	return ret;
215 }
216 
217 static inline int is_aead(dpaa_sec_session *ses)
218 {
219 	return ((ses->cipher_alg == 0) &&
220 		(ses->auth_alg == 0) &&
221 		(ses->aead_alg != 0));
222 }
223 
224 static inline int is_encode(dpaa_sec_session *ses)
225 {
226 	return ses->dir == DIR_ENC;
227 }
228 
229 static inline int is_decode(dpaa_sec_session *ses)
230 {
231 	return ses->dir == DIR_DEC;
232 }
233 
234 #ifdef RTE_LIB_SECURITY
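/* Prepare the PDCP (control or user plane) shared descriptor for the session */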
235 static int
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
237 {
238 	struct alginfo authdata = {0}, cipherdata = {0};
239 	struct sec_cdb *cdb = &ses->cdb;
240 	struct alginfo *p_authdata = NULL;
241 	int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
243 	int swap = false;
244 #else
245 	int swap = true;
246 #endif
247 
248 	cipherdata.key = (size_t)ses->cipher_key.data;
249 	cipherdata.keylen = ses->cipher_key.length;
250 	cipherdata.key_enc_flags = 0;
251 	cipherdata.key_type = RTA_DATA_IMM;
252 	cipherdata.algtype = ses->cipher_key.alg;
253 	cipherdata.algmode = ses->cipher_key.algmode;
254 
255 	if (ses->auth_alg) {
256 		authdata.key = (size_t)ses->auth_key.data;
257 		authdata.keylen = ses->auth_key.length;
258 		authdata.key_enc_flags = 0;
259 		authdata.key_type = RTA_DATA_IMM;
260 		authdata.algtype = ses->auth_key.alg;
261 		authdata.algmode = ses->auth_key.algmode;
262 
263 		p_authdata = &authdata;
264 	}
265 
266 	if (rta_inline_pdcp_query(authdata.algtype,
267 				cipherdata.algtype,
268 				ses->pdcp.sn_size,
269 				ses->pdcp.hfn_ovd)) {
270 		cipherdata.key =
271 			(size_t)rte_dpaa_mem_vtop((void *)
272 					(size_t)cipherdata.key);
273 		cipherdata.key_type = RTA_DATA_PTR;
274 	}
275 
276 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
277 		if (ses->dir == DIR_ENC)
278 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
279 					cdb->sh_desc, 1, swap,
280 					ses->pdcp.hfn,
281 					ses->pdcp.sn_size,
282 					ses->pdcp.bearer,
283 					ses->pdcp.pkt_dir,
284 					ses->pdcp.hfn_threshold,
285 					&cipherdata, &authdata,
286 					0);
287 		else if (ses->dir == DIR_DEC)
288 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
289 					cdb->sh_desc, 1, swap,
290 					ses->pdcp.hfn,
291 					ses->pdcp.sn_size,
292 					ses->pdcp.bearer,
293 					ses->pdcp.pkt_dir,
294 					ses->pdcp.hfn_threshold,
295 					&cipherdata, &authdata,
296 					0);
297 	} else {
298 		if (ses->dir == DIR_ENC) {
299 			if (ses->pdcp.sdap_enabled)
300 				shared_desc_len =
301 					cnstr_shdsc_pdcp_sdap_u_plane_encap(
302 						cdb->sh_desc, 1, swap,
303 						ses->pdcp.sn_size,
304 						ses->pdcp.hfn,
305 						ses->pdcp.bearer,
306 						ses->pdcp.pkt_dir,
307 						ses->pdcp.hfn_threshold,
308 						&cipherdata, p_authdata, 0);
309 			else
310 				shared_desc_len =
311 					cnstr_shdsc_pdcp_u_plane_encap(
312 						cdb->sh_desc, 1, swap,
313 						ses->pdcp.sn_size,
314 						ses->pdcp.hfn,
315 						ses->pdcp.bearer,
316 						ses->pdcp.pkt_dir,
317 						ses->pdcp.hfn_threshold,
318 						&cipherdata, p_authdata, 0);
319 		} else if (ses->dir == DIR_DEC) {
320 			if (ses->pdcp.sdap_enabled)
321 				shared_desc_len =
322 					cnstr_shdsc_pdcp_sdap_u_plane_decap(
323 						cdb->sh_desc, 1, swap,
324 						ses->pdcp.sn_size,
325 						ses->pdcp.hfn,
326 						ses->pdcp.bearer,
327 						ses->pdcp.pkt_dir,
328 						ses->pdcp.hfn_threshold,
329 						&cipherdata, p_authdata, 0);
330 			else
331 				shared_desc_len =
332 					cnstr_shdsc_pdcp_u_plane_decap(
333 						cdb->sh_desc, 1, swap,
334 						ses->pdcp.sn_size,
335 						ses->pdcp.hfn,
336 						ses->pdcp.bearer,
337 						ses->pdcp.pkt_dir,
338 						ses->pdcp.hfn_threshold,
339 						&cipherdata, p_authdata, 0);
340 		}
341 	}
342 	return shared_desc_len;
343 }
344 
345 /* prepare ipsec proto command block of the session */
346 static int
347 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
348 {
349 	struct alginfo cipherdata = {0}, authdata = {0};
350 	struct sec_cdb *cdb = &ses->cdb;
351 	int32_t shared_desc_len = 0;
352 	int err;
353 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
354 	int swap = false;
355 #else
356 	int swap = true;
357 #endif
358 
359 	cipherdata.key = (size_t)ses->cipher_key.data;
360 	cipherdata.keylen = ses->cipher_key.length;
361 	cipherdata.key_enc_flags = 0;
362 	cipherdata.key_type = RTA_DATA_IMM;
363 	cipherdata.algtype = ses->cipher_key.alg;
364 	cipherdata.algmode = ses->cipher_key.algmode;
365 
366 	if (ses->auth_key.length) {
367 		authdata.key = (size_t)ses->auth_key.data;
368 		authdata.keylen = ses->auth_key.length;
369 		authdata.key_enc_flags = 0;
370 		authdata.key_type = RTA_DATA_IMM;
371 		authdata.algtype = ses->auth_key.alg;
372 		authdata.algmode = ses->auth_key.algmode;
373 	}
374 
375 	cdb->sh_desc[0] = cipherdata.keylen;
376 	cdb->sh_desc[1] = authdata.keylen;
377 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
378 			       DESC_JOB_IO_LEN,
379 			       (unsigned int *)cdb->sh_desc,
380 			       &cdb->sh_desc[2], 2);
381 
382 	if (err < 0) {
383 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
384 		return err;
385 	}
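	/* rta_inline_query() reports in sh_desc[2] which keys can be placed
	 * immediate (inline) in the descriptor: bit 0 for the cipher key,
	 * bit 1 for the auth key. Keys that cannot be inlined are referenced
	 * by their physical address instead.
	 */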
386 	if (cdb->sh_desc[2] & 1)
387 		cipherdata.key_type = RTA_DATA_IMM;
388 	else {
389 		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
390 					(void *)(size_t)cipherdata.key);
391 		cipherdata.key_type = RTA_DATA_PTR;
392 	}
393 	if (cdb->sh_desc[2] & (1<<1))
394 		authdata.key_type = RTA_DATA_IMM;
395 	else {
396 		authdata.key = (size_t)rte_dpaa_mem_vtop(
397 					(void *)(size_t)authdata.key);
398 		authdata.key_type = RTA_DATA_PTR;
399 	}
400 
401 	cdb->sh_desc[0] = 0;
402 	cdb->sh_desc[1] = 0;
403 	cdb->sh_desc[2] = 0;
404 	if (ses->dir == DIR_ENC) {
405 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
406 				cdb->sh_desc,
407 				true, swap, SHR_SERIAL,
408 				&ses->encap_pdb,
409 				(uint8_t *)&ses->ip4_hdr,
410 				&cipherdata, &authdata);
411 	} else if (ses->dir == DIR_DEC) {
412 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
413 				cdb->sh_desc,
414 				true, swap, SHR_SERIAL,
415 				&ses->decap_pdb,
416 				&cipherdata, &authdata);
417 	}
418 	return shared_desc_len;
419 }
420 #endif
421 /* prepare command block of the session */
422 static int
423 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
424 {
425 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
426 	int32_t shared_desc_len = 0;
427 	struct sec_cdb *cdb = &ses->cdb;
428 	int err;
429 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
430 	int swap = false;
431 #else
432 	int swap = true;
433 #endif
434 
435 	memset(cdb, 0, sizeof(struct sec_cdb));
436 
437 	switch (ses->ctxt) {
438 #ifdef RTE_LIB_SECURITY
439 	case DPAA_SEC_IPSEC:
440 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
441 		break;
442 	case DPAA_SEC_PDCP:
443 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
444 		break;
445 #endif
446 	case DPAA_SEC_CIPHER:
447 		alginfo_c.key = (size_t)ses->cipher_key.data;
448 		alginfo_c.keylen = ses->cipher_key.length;
449 		alginfo_c.key_enc_flags = 0;
450 		alginfo_c.key_type = RTA_DATA_IMM;
451 		alginfo_c.algtype = ses->cipher_key.alg;
452 		alginfo_c.algmode = ses->cipher_key.algmode;
453 
454 		switch (ses->cipher_alg) {
455 		case RTE_CRYPTO_CIPHER_AES_CBC:
456 		case RTE_CRYPTO_CIPHER_3DES_CBC:
457 		case RTE_CRYPTO_CIPHER_DES_CBC:
458 		case RTE_CRYPTO_CIPHER_AES_CTR:
459 		case RTE_CRYPTO_CIPHER_3DES_CTR:
460 			shared_desc_len = cnstr_shdsc_blkcipher(
461 					cdb->sh_desc, true,
462 					swap, SHR_NEVER, &alginfo_c,
463 					ses->iv.length,
464 					ses->dir);
465 			break;
466 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
467 			shared_desc_len = cnstr_shdsc_snow_f8(
468 					cdb->sh_desc, true, swap,
469 					&alginfo_c,
470 					ses->dir);
471 			break;
472 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
473 			shared_desc_len = cnstr_shdsc_zuce(
474 					cdb->sh_desc, true, swap,
475 					&alginfo_c,
476 					ses->dir);
477 			break;
478 		default:
479 			DPAA_SEC_ERR("unsupported cipher alg %d",
480 				     ses->cipher_alg);
481 			return -ENOTSUP;
482 		}
483 		break;
484 	case DPAA_SEC_AUTH:
485 		alginfo_a.key = (size_t)ses->auth_key.data;
486 		alginfo_a.keylen = ses->auth_key.length;
487 		alginfo_a.key_enc_flags = 0;
488 		alginfo_a.key_type = RTA_DATA_IMM;
489 		alginfo_a.algtype = ses->auth_key.alg;
490 		alginfo_a.algmode = ses->auth_key.algmode;
491 		switch (ses->auth_alg) {
492 		case RTE_CRYPTO_AUTH_MD5:
493 		case RTE_CRYPTO_AUTH_SHA1:
494 		case RTE_CRYPTO_AUTH_SHA224:
495 		case RTE_CRYPTO_AUTH_SHA256:
496 		case RTE_CRYPTO_AUTH_SHA384:
497 		case RTE_CRYPTO_AUTH_SHA512:
498 			shared_desc_len = cnstr_shdsc_hash(
499 						cdb->sh_desc, true,
500 						swap, SHR_NEVER, &alginfo_a,
501 						!ses->dir,
502 						ses->digest_length);
503 			break;
504 		case RTE_CRYPTO_AUTH_MD5_HMAC:
505 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
506 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
507 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
508 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
509 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
510 			shared_desc_len = cnstr_shdsc_hmac(
511 						cdb->sh_desc, true,
512 						swap, SHR_NEVER, &alginfo_a,
513 						!ses->dir,
514 						ses->digest_length);
515 			break;
516 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
517 			shared_desc_len = cnstr_shdsc_snow_f9(
518 						cdb->sh_desc, true, swap,
519 						&alginfo_a,
520 						!ses->dir,
521 						ses->digest_length);
522 			break;
523 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
524 			shared_desc_len = cnstr_shdsc_zuca(
525 						cdb->sh_desc, true, swap,
526 						&alginfo_a,
527 						!ses->dir,
528 						ses->digest_length);
529 			break;
530 		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
531 		case RTE_CRYPTO_AUTH_AES_CMAC:
532 			shared_desc_len = cnstr_shdsc_aes_mac(
533 						cdb->sh_desc,
534 						true, swap, SHR_NEVER,
535 						&alginfo_a,
536 						!ses->dir,
537 						ses->digest_length);
538 			break;
539 		default:
540 			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
541 		}
542 		break;
543 	case DPAA_SEC_AEAD:
544 		alginfo.key = (size_t)ses->aead_key.data;
545 		alginfo.keylen = ses->aead_key.length;
546 		alginfo.key_enc_flags = 0;
547 		alginfo.key_type = RTA_DATA_IMM;
548 		alginfo.algtype = ses->aead_key.alg;
549 		alginfo.algmode = ses->aead_key.algmode;
550 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
551 			DPAA_SEC_ERR("not supported aead alg");
552 			return -ENOTSUP;
553 		}
554 
555 		if (ses->dir == DIR_ENC)
556 			shared_desc_len = cnstr_shdsc_gcm_encap(
557 					cdb->sh_desc, true, swap, SHR_NEVER,
558 					&alginfo,
559 					ses->iv.length,
560 					ses->digest_length);
561 		else
562 			shared_desc_len = cnstr_shdsc_gcm_decap(
563 					cdb->sh_desc, true, swap, SHR_NEVER,
564 					&alginfo,
565 					ses->iv.length,
566 					ses->digest_length);
567 		break;
568 	case DPAA_SEC_CIPHER_HASH:
569 		alginfo_c.key = (size_t)ses->cipher_key.data;
570 		alginfo_c.keylen = ses->cipher_key.length;
571 		alginfo_c.key_enc_flags = 0;
572 		alginfo_c.key_type = RTA_DATA_IMM;
573 		alginfo_c.algtype = ses->cipher_key.alg;
574 		alginfo_c.algmode = ses->cipher_key.algmode;
575 
576 		alginfo_a.key = (size_t)ses->auth_key.data;
577 		alginfo_a.keylen = ses->auth_key.length;
578 		alginfo_a.key_enc_flags = 0;
579 		alginfo_a.key_type = RTA_DATA_IMM;
580 		alginfo_a.algtype = ses->auth_key.alg;
581 		alginfo_a.algmode = ses->auth_key.algmode;
582 
583 		cdb->sh_desc[0] = alginfo_c.keylen;
584 		cdb->sh_desc[1] = alginfo_a.keylen;
585 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
586 				       DESC_JOB_IO_LEN,
587 				       (unsigned int *)cdb->sh_desc,
588 				       &cdb->sh_desc[2], 2);
589 
590 		if (err < 0) {
591 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
592 			return err;
593 		}
594 		if (cdb->sh_desc[2] & 1)
595 			alginfo_c.key_type = RTA_DATA_IMM;
596 		else {
597 			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
598 						(void *)(size_t)alginfo_c.key);
599 			alginfo_c.key_type = RTA_DATA_PTR;
600 		}
601 		if (cdb->sh_desc[2] & (1<<1))
602 			alginfo_a.key_type = RTA_DATA_IMM;
603 		else {
604 			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
605 						(void *)(size_t)alginfo_a.key);
606 			alginfo_a.key_type = RTA_DATA_PTR;
607 		}
608 		cdb->sh_desc[0] = 0;
609 		cdb->sh_desc[1] = 0;
610 		cdb->sh_desc[2] = 0;
611 		/* Auth_only_len is set to 0 here; it is overwritten in the FD
612 		 * for each packet.
613 		 */
614 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
615 				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
616 				ses->iv.length,
617 				ses->digest_length, ses->dir);
618 		break;
619 	case DPAA_SEC_HASH_CIPHER:
620 	default:
621 		DPAA_SEC_ERR("error: Unsupported session");
622 		return -ENOTSUP;
623 	}
624 
625 	if (shared_desc_len < 0) {
626 		DPAA_SEC_ERR("error in preparing command block");
627 		return shared_desc_len;
628 	}
629 
630 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
631 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
632 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
633 
634 	return 0;
635 }
636 
637 /* qp is lockless, should be accessed by only one thread */
638 static int
639 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
640 {
641 	struct qman_fq *fq;
642 	unsigned int pkts = 0;
643 	int num_rx_bufs, ret;
644 	struct qm_dqrr_entry *dq;
645 	uint32_t vdqcr_flags = 0;
646 
647 	fq = &qp->outq;
648 	/*
649 	 * For requests of fewer than four buffers we set the QM_VDQCR_EXACT
650 	 * flag and ask for the exact number of buffers. Otherwise we leave the
651 	 * flag clear; in that case up to two more buffers than requested can be
652 	 * returned, so we request two fewer.
653 	 */
654 	if (nb_ops < 4) {
655 		vdqcr_flags = QM_VDQCR_EXACT;
656 		num_rx_bufs = nb_ops;
657 	} else {
658 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
659 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
660 	}
661 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
662 	if (ret)
663 		return 0;
664 
665 	do {
666 		const struct qm_fd *fd;
667 		struct dpaa_sec_job *job;
668 		struct dpaa_sec_op_ctx *ctx;
669 		struct rte_crypto_op *op;
670 
671 		dq = qman_dequeue(fq);
672 		if (!dq)
673 			continue;
674 
675 		fd = &dq->fd;
676 		/* sg is embedded in an op ctx,
677 		 * sg[0] is for output
678 		 * sg[1] for input
679 		 */
680 		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
681 
682 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
683 		ctx->fd_status = fd->status;
684 		op = ctx->op;
685 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
686 			struct qm_sg_entry *sg_out;
687 			uint32_t len;
688 			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
689 						op->sym->m_src : op->sym->m_dst;
690 
691 			sg_out = &job->sg[0];
692 			hw_sg_to_cpu(sg_out);
693 			len = sg_out->length;
694 			mbuf->pkt_len = len;
695 			while (mbuf->next != NULL) {
696 				len -= mbuf->data_len;
697 				mbuf = mbuf->next;
698 			}
699 			mbuf->data_len = len;
700 		}
701 		if (!ctx->fd_status) {
702 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
703 		} else {
704 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
705 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
706 		}
707 		ops[pkts++] = op;
708 
709 		/* report op status to sym->op and then free the ctx memory */
710 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
711 
712 		qman_dqrr_consume(fq, dq);
713 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
714 
715 	return pkts;
716 }
717 
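/* Build a compound frame for an auth-only op on a scattered (multi-segment)
 * mbuf: sg[0] points to the digest output, sg[1] is an extension entry
 * chaining the optional IV, the data segments and, for verification, a copy
 * of the expected digest.
 */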
718 static inline struct dpaa_sec_job *
719 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
720 {
721 	struct rte_crypto_sym_op *sym = op->sym;
722 	struct rte_mbuf *mbuf = sym->m_src;
723 	struct dpaa_sec_job *cf;
724 	struct dpaa_sec_op_ctx *ctx;
725 	struct qm_sg_entry *sg, *out_sg, *in_sg;
726 	phys_addr_t start_addr;
727 	uint8_t *old_digest, extra_segs;
728 	int data_len, data_offset;
729 
730 	data_len = sym->auth.data.length;
731 	data_offset = sym->auth.data.offset;
732 
733 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
734 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
735 		if ((data_len & 7) || (data_offset & 7)) {
736 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
737 			return NULL;
738 		}
739 
740 		data_len = data_len >> 3;
741 		data_offset = data_offset >> 3;
742 	}
743 
744 	if (is_decode(ses))
745 		extra_segs = 3;
746 	else
747 		extra_segs = 2;
748 
749 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
750 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
751 				MAX_SG_ENTRIES);
752 		return NULL;
753 	}
754 	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
755 	if (!ctx)
756 		return NULL;
757 
758 	cf = &ctx->job;
759 	ctx->op = op;
760 	old_digest = ctx->digest;
761 
762 	/* output */
763 	out_sg = &cf->sg[0];
764 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
765 	out_sg->length = ses->digest_length;
766 	cpu_to_hw_sg(out_sg);
767 
768 	/* input */
769 	in_sg = &cf->sg[1];
770 	/* need to extend the input to a compound frame */
771 	in_sg->extension = 1;
772 	in_sg->final = 1;
773 	in_sg->length = data_len;
774 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
775 
776 	/* 1st seg */
777 	sg = in_sg + 1;
778 
779 	if (ses->iv.length) {
780 		uint8_t *iv_ptr;
781 
782 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
783 						   ses->iv.offset);
784 
785 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
786 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
787 			sg->length = 12;
788 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
789 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
790 			sg->length = 8;
791 		} else {
792 			sg->length = ses->iv.length;
793 		}
794 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
795 		in_sg->length += sg->length;
796 		cpu_to_hw_sg(sg);
797 		sg++;
798 	}
799 
800 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
801 	sg->offset = data_offset;
802 
803 	if (data_len <= (mbuf->data_len - data_offset)) {
804 		sg->length = data_len;
805 	} else {
806 		sg->length = mbuf->data_len - data_offset;
807 
808 		/* remaining i/p segs */
809 		while ((data_len = data_len - sg->length) &&
810 		       (mbuf = mbuf->next)) {
811 			cpu_to_hw_sg(sg);
812 			sg++;
813 			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
814 			if (data_len > mbuf->data_len)
815 				sg->length = mbuf->data_len;
816 			else
817 				sg->length = data_len;
818 		}
819 	}
820 
821 	if (is_decode(ses)) {
822 		/* Digest verification case */
823 		cpu_to_hw_sg(sg);
824 		sg++;
825 		rte_memcpy(old_digest, sym->auth.digest.data,
826 				ses->digest_length);
827 		start_addr = rte_dpaa_mem_vtop(old_digest);
828 		qm_sg_entry_set64(sg, start_addr);
829 		sg->length = ses->digest_length;
830 		in_sg->length += ses->digest_length;
831 	}
832 	sg->final = 1;
833 	cpu_to_hw_sg(sg);
834 	cpu_to_hw_sg(in_sg);
835 
836 	return cf;
837 }
838 
839 /**
840  * packet looks like:
841  *		|<----data_len------->|
842  *    |ip_header|ah_header|icv|payload|
843  *              ^
844  *		|
845  *	   mbuf->pkt.data
846  */
847 static inline struct dpaa_sec_job *
848 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
849 {
850 	struct rte_crypto_sym_op *sym = op->sym;
851 	struct rte_mbuf *mbuf = sym->m_src;
852 	struct dpaa_sec_job *cf;
853 	struct dpaa_sec_op_ctx *ctx;
854 	struct qm_sg_entry *sg, *in_sg;
855 	rte_iova_t start_addr;
856 	uint8_t *old_digest;
857 	int data_len, data_offset;
858 
859 	data_len = sym->auth.data.length;
860 	data_offset = sym->auth.data.offset;
861 
862 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
863 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
864 		if ((data_len & 7) || (data_offset & 7)) {
865 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
866 			return NULL;
867 		}
868 
869 		data_len = data_len >> 3;
870 		data_offset = data_offset >> 3;
871 	}
872 
873 	ctx = dpaa_sec_alloc_ctx(ses, 4);
874 	if (!ctx)
875 		return NULL;
876 
877 	cf = &ctx->job;
878 	ctx->op = op;
879 	old_digest = ctx->digest;
880 
881 	start_addr = rte_pktmbuf_iova(mbuf);
882 	/* output */
883 	sg = &cf->sg[0];
884 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
885 	sg->length = ses->digest_length;
886 	cpu_to_hw_sg(sg);
887 
888 	/* input */
889 	in_sg = &cf->sg[1];
890 	/* need to extend the input to a compound frame */
891 	in_sg->extension = 1;
892 	in_sg->final = 1;
893 	in_sg->length = data_len;
894 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
895 	sg = &cf->sg[2];
896 
897 	if (ses->iv.length) {
898 		uint8_t *iv_ptr;
899 
900 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
901 						   ses->iv.offset);
902 
903 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
904 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
905 			sg->length = 12;
906 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
907 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
908 			sg->length = 8;
909 		} else {
910 			sg->length = ses->iv.length;
911 		}
912 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
913 		in_sg->length += sg->length;
914 		cpu_to_hw_sg(sg);
915 		sg++;
916 	}
917 
918 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
919 	sg->offset = data_offset;
920 	sg->length = data_len;
921 
922 	if (is_decode(ses)) {
923 		/* Digest verification case */
924 		cpu_to_hw_sg(sg);
925 		/* hash result or digest, save digest first */
926 		rte_memcpy(old_digest, sym->auth.digest.data,
927 				ses->digest_length);
928 		/* let's check digest by hw */
929 		start_addr = rte_dpaa_mem_vtop(old_digest);
930 		sg++;
931 		qm_sg_entry_set64(sg, start_addr);
932 		sg->length = ses->digest_length;
933 		in_sg->length += ses->digest_length;
934 	}
935 	sg->final = 1;
936 	cpu_to_hw_sg(sg);
937 	cpu_to_hw_sg(in_sg);
938 
939 	return cf;
940 }
941 
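/* Build a compound frame for a cipher-only op on scattered mbufs: sg[0]
 * chains the output segments, sg[1] chains the IV followed by the input
 * segments.
 */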
942 static inline struct dpaa_sec_job *
943 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
944 {
945 	struct rte_crypto_sym_op *sym = op->sym;
946 	struct dpaa_sec_job *cf;
947 	struct dpaa_sec_op_ctx *ctx;
948 	struct qm_sg_entry *sg, *out_sg, *in_sg;
949 	struct rte_mbuf *mbuf;
950 	uint8_t req_segs;
951 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
952 			ses->iv.offset);
953 	int data_len, data_offset;
954 
955 	data_len = sym->cipher.data.length;
956 	data_offset = sym->cipher.data.offset;
957 
958 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
959 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
960 		if ((data_len & 7) || (data_offset & 7)) {
961 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
962 			return NULL;
963 		}
964 
965 		data_len = data_len >> 3;
966 		data_offset = data_offset >> 3;
967 	}
968 
969 	if (sym->m_dst) {
970 		mbuf = sym->m_dst;
971 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
972 	} else {
973 		mbuf = sym->m_src;
974 		req_segs = mbuf->nb_segs * 2 + 3;
975 	}
976 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
977 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
978 				MAX_SG_ENTRIES);
979 		return NULL;
980 	}
981 
982 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
983 	if (!ctx)
984 		return NULL;
985 
986 	cf = &ctx->job;
987 	ctx->op = op;
988 
989 	/* output */
990 	out_sg = &cf->sg[0];
991 	out_sg->extension = 1;
992 	out_sg->length = data_len;
993 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
994 	cpu_to_hw_sg(out_sg);
995 
996 	/* 1st seg */
997 	sg = &cf->sg[2];
998 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
999 	sg->length = mbuf->data_len - data_offset;
1000 	sg->offset = data_offset;
1001 
1002 	/* Successive segs */
1003 	mbuf = mbuf->next;
1004 	while (mbuf) {
1005 		cpu_to_hw_sg(sg);
1006 		sg++;
1007 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1008 		sg->length = mbuf->data_len;
1009 		mbuf = mbuf->next;
1010 	}
1011 	sg->final = 1;
1012 	cpu_to_hw_sg(sg);
1013 
1014 	/* input */
1015 	mbuf = sym->m_src;
1016 	in_sg = &cf->sg[1];
1017 	in_sg->extension = 1;
1018 	in_sg->final = 1;
1019 	in_sg->length = data_len + ses->iv.length;
1020 
1021 	sg++;
1022 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1023 	cpu_to_hw_sg(in_sg);
1024 
1025 	/* IV */
1026 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1027 	sg->length = ses->iv.length;
1028 	cpu_to_hw_sg(sg);
1029 
1030 	/* 1st seg */
1031 	sg++;
1032 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1033 	sg->length = mbuf->data_len - data_offset;
1034 	sg->offset = data_offset;
1035 
1036 	/* Successive segs */
1037 	mbuf = mbuf->next;
1038 	while (mbuf) {
1039 		cpu_to_hw_sg(sg);
1040 		sg++;
1041 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1042 		sg->length = mbuf->data_len;
1043 		mbuf = mbuf->next;
1044 	}
1045 	sg->final = 1;
1046 	cpu_to_hw_sg(sg);
1047 
1048 	return cf;
1049 }
1050 
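/* Build a compound frame for a cipher-only op on contiguous mbufs */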
1051 static inline struct dpaa_sec_job *
1052 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1053 {
1054 	struct rte_crypto_sym_op *sym = op->sym;
1055 	struct dpaa_sec_job *cf;
1056 	struct dpaa_sec_op_ctx *ctx;
1057 	struct qm_sg_entry *sg;
1058 	rte_iova_t src_start_addr, dst_start_addr;
1059 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1060 			ses->iv.offset);
1061 	int data_len, data_offset;
1062 
1063 	data_len = sym->cipher.data.length;
1064 	data_offset = sym->cipher.data.offset;
1065 
1066 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1067 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1068 		if ((data_len & 7) || (data_offset & 7)) {
1069 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1070 			return NULL;
1071 		}
1072 
1073 		data_len = data_len >> 3;
1074 		data_offset = data_offset >> 3;
1075 	}
1076 
1077 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1078 	if (!ctx)
1079 		return NULL;
1080 
1081 	cf = &ctx->job;
1082 	ctx->op = op;
1083 
1084 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1085 
1086 	if (sym->m_dst)
1087 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1088 	else
1089 		dst_start_addr = src_start_addr;
1090 
1091 	/* output */
1092 	sg = &cf->sg[0];
1093 	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1094 	sg->length = data_len + ses->iv.length;
1095 	cpu_to_hw_sg(sg);
1096 
1097 	/* input */
1098 	sg = &cf->sg[1];
1099 
1100 	/* need to extend the input to a compound frame */
1101 	sg->extension = 1;
1102 	sg->final = 1;
1103 	sg->length = data_len + ses->iv.length;
1104 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1105 	cpu_to_hw_sg(sg);
1106 
1107 	sg = &cf->sg[2];
1108 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1109 	sg->length = ses->iv.length;
1110 	cpu_to_hw_sg(sg);
1111 
1112 	sg++;
1113 	qm_sg_entry_set64(sg, src_start_addr + data_offset);
1114 	sg->length = data_len;
1115 	sg->final = 1;
1116 	cpu_to_hw_sg(sg);
1117 
1118 	return cf;
1119 }
1120 
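/* Build a compound frame for an AEAD (GCM) op on scattered mbufs: the input
 * chain carries the IV, the optional AAD and the data (plus the received
 * digest for decryption); the output chain carries the data and, for
 * encryption, the generated digest.
 */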
1121 static inline struct dpaa_sec_job *
1122 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1123 {
1124 	struct rte_crypto_sym_op *sym = op->sym;
1125 	struct dpaa_sec_job *cf;
1126 	struct dpaa_sec_op_ctx *ctx;
1127 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1128 	struct rte_mbuf *mbuf;
1129 	uint8_t req_segs;
1130 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1131 			ses->iv.offset);
1132 
1133 	if (sym->m_dst) {
1134 		mbuf = sym->m_dst;
1135 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1136 	} else {
1137 		mbuf = sym->m_src;
1138 		req_segs = mbuf->nb_segs * 2 + 4;
1139 	}
1140 
1141 	if (ses->auth_only_len)
1142 		req_segs++;
1143 
1144 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1145 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1146 				MAX_SG_ENTRIES);
1147 		return NULL;
1148 	}
1149 
1150 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1151 	if (!ctx)
1152 		return NULL;
1153 
1154 	cf = &ctx->job;
1155 	ctx->op = op;
1156 
1157 	rte_prefetch0(cf->sg);
1158 
1159 	/* output */
1160 	out_sg = &cf->sg[0];
1161 	out_sg->extension = 1;
1162 	if (is_encode(ses))
1163 		out_sg->length = sym->aead.data.length + ses->digest_length;
1164 	else
1165 		out_sg->length = sym->aead.data.length;
1166 
1167 	/* output sg entries */
1168 	sg = &cf->sg[2];
1169 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1170 	cpu_to_hw_sg(out_sg);
1171 
1172 	/* 1st seg */
1173 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1174 	sg->length = mbuf->data_len - sym->aead.data.offset;
1175 	sg->offset = sym->aead.data.offset;
1176 
1177 	/* Successive segs */
1178 	mbuf = mbuf->next;
1179 	while (mbuf) {
1180 		cpu_to_hw_sg(sg);
1181 		sg++;
1182 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1183 		sg->length = mbuf->data_len;
1184 		mbuf = mbuf->next;
1185 	}
1186 	sg->length -= ses->digest_length;
1187 
1188 	if (is_encode(ses)) {
1189 		cpu_to_hw_sg(sg);
1190 		/* set auth output */
1191 		sg++;
1192 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1193 		sg->length = ses->digest_length;
1194 	}
1195 	sg->final = 1;
1196 	cpu_to_hw_sg(sg);
1197 
1198 	/* input */
1199 	mbuf = sym->m_src;
1200 	in_sg = &cf->sg[1];
1201 	in_sg->extension = 1;
1202 	in_sg->final = 1;
1203 	if (is_encode(ses))
1204 		in_sg->length = ses->iv.length + sym->aead.data.length
1205 							+ ses->auth_only_len;
1206 	else
1207 		in_sg->length = ses->iv.length + sym->aead.data.length
1208 				+ ses->auth_only_len + ses->digest_length;
1209 
1210 	/* input sg entries */
1211 	sg++;
1212 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1213 	cpu_to_hw_sg(in_sg);
1214 
1215 	/* 1st seg IV */
1216 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1217 	sg->length = ses->iv.length;
1218 	cpu_to_hw_sg(sg);
1219 
1220 	/* 2nd seg auth only */
1221 	if (ses->auth_only_len) {
1222 		sg++;
1223 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1224 		sg->length = ses->auth_only_len;
1225 		cpu_to_hw_sg(sg);
1226 	}
1227 
1228 	/* 3rd seg */
1229 	sg++;
1230 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1231 	sg->length = mbuf->data_len - sym->aead.data.offset;
1232 	sg->offset = sym->aead.data.offset;
1233 
1234 	/* Successive segs */
1235 	mbuf = mbuf->next;
1236 	while (mbuf) {
1237 		cpu_to_hw_sg(sg);
1238 		sg++;
1239 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1240 		sg->length = mbuf->data_len;
1241 		mbuf = mbuf->next;
1242 	}
1243 
1244 	if (is_decode(ses)) {
1245 		cpu_to_hw_sg(sg);
1246 		sg++;
1247 		memcpy(ctx->digest, sym->aead.digest.data,
1248 			ses->digest_length);
1249 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1250 		sg->length = ses->digest_length;
1251 	}
1252 	sg->final = 1;
1253 	cpu_to_hw_sg(sg);
1254 
1255 	return cf;
1256 }
1257 
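/* Build a compound frame for an AEAD (GCM) op on contiguous mbufs */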
1258 static inline struct dpaa_sec_job *
1259 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1260 {
1261 	struct rte_crypto_sym_op *sym = op->sym;
1262 	struct dpaa_sec_job *cf;
1263 	struct dpaa_sec_op_ctx *ctx;
1264 	struct qm_sg_entry *sg;
1265 	uint32_t length = 0;
1266 	rte_iova_t src_start_addr, dst_start_addr;
1267 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1268 			ses->iv.offset);
1269 
1270 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1271 
1272 	if (sym->m_dst)
1273 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1274 	else
1275 		dst_start_addr = src_start_addr;
1276 
1277 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1278 	if (!ctx)
1279 		return NULL;
1280 
1281 	cf = &ctx->job;
1282 	ctx->op = op;
1283 
1284 	/* input */
1285 	rte_prefetch0(cf->sg);
1286 	sg = &cf->sg[2];
1287 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1288 	if (is_encode(ses)) {
1289 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1290 		sg->length = ses->iv.length;
1291 		length += sg->length;
1292 		cpu_to_hw_sg(sg);
1293 
1294 		sg++;
1295 		if (ses->auth_only_len) {
1296 			qm_sg_entry_set64(sg,
1297 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1298 			sg->length = ses->auth_only_len;
1299 			length += sg->length;
1300 			cpu_to_hw_sg(sg);
1301 			sg++;
1302 		}
1303 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1304 		sg->length = sym->aead.data.length;
1305 		length += sg->length;
1306 		sg->final = 1;
1307 		cpu_to_hw_sg(sg);
1308 	} else {
1309 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1310 		sg->length = ses->iv.length;
1311 		length += sg->length;
1312 		cpu_to_hw_sg(sg);
1313 
1314 		sg++;
1315 		if (ses->auth_only_len) {
1316 			qm_sg_entry_set64(sg,
1317 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1318 			sg->length = ses->auth_only_len;
1319 			length += sg->length;
1320 			cpu_to_hw_sg(sg);
1321 			sg++;
1322 		}
1323 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1324 		sg->length = sym->aead.data.length;
1325 		length += sg->length;
1326 		cpu_to_hw_sg(sg);
1327 
1328 		memcpy(ctx->digest, sym->aead.digest.data,
1329 		       ses->digest_length);
1330 		sg++;
1331 
1332 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1333 		sg->length = ses->digest_length;
1334 		length += sg->length;
1335 		sg->final = 1;
1336 		cpu_to_hw_sg(sg);
1337 	}
1338 	/* input compound frame */
1339 	cf->sg[1].length = length;
1340 	cf->sg[1].extension = 1;
1341 	cf->sg[1].final = 1;
1342 	cpu_to_hw_sg(&cf->sg[1]);
1343 
1344 	/* output */
1345 	sg++;
1346 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1347 	qm_sg_entry_set64(sg,
1348 		dst_start_addr + sym->aead.data.offset);
1349 	sg->length = sym->aead.data.length;
1350 	length = sg->length;
1351 	if (is_encode(ses)) {
1352 		cpu_to_hw_sg(sg);
1353 		/* set auth output */
1354 		sg++;
1355 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1356 		sg->length = ses->digest_length;
1357 		length += sg->length;
1358 	}
1359 	sg->final = 1;
1360 	cpu_to_hw_sg(sg);
1361 
1362 	/* output compound frame */
1363 	cf->sg[0].length = length;
1364 	cf->sg[0].extension = 1;
1365 	cpu_to_hw_sg(&cf->sg[0]);
1366 
1367 	return cf;
1368 }
1369 
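/* Build a compound frame for a chained cipher+auth op on scattered mbufs */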
1370 static inline struct dpaa_sec_job *
1371 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1372 {
1373 	struct rte_crypto_sym_op *sym = op->sym;
1374 	struct dpaa_sec_job *cf;
1375 	struct dpaa_sec_op_ctx *ctx;
1376 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1377 	struct rte_mbuf *mbuf;
1378 	uint8_t req_segs;
1379 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1380 			ses->iv.offset);
1381 
1382 	if (sym->m_dst) {
1383 		mbuf = sym->m_dst;
1384 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1385 	} else {
1386 		mbuf = sym->m_src;
1387 		req_segs = mbuf->nb_segs * 2 + 4;
1388 	}
1389 
1390 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1391 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1392 				MAX_SG_ENTRIES);
1393 		return NULL;
1394 	}
1395 
1396 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1397 	if (!ctx)
1398 		return NULL;
1399 
1400 	cf = &ctx->job;
1401 	ctx->op = op;
1402 
1403 	rte_prefetch0(cf->sg);
1404 
1405 	/* output */
1406 	out_sg = &cf->sg[0];
1407 	out_sg->extension = 1;
1408 	if (is_encode(ses))
1409 		out_sg->length = sym->auth.data.length + ses->digest_length;
1410 	else
1411 		out_sg->length = sym->auth.data.length;
1412 
1413 	/* output sg entries */
1414 	sg = &cf->sg[2];
1415 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1416 	cpu_to_hw_sg(out_sg);
1417 
1418 	/* 1st seg */
1419 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1420 	sg->length = mbuf->data_len - sym->auth.data.offset;
1421 	sg->offset = sym->auth.data.offset;
1422 
1423 	/* Successive segs */
1424 	mbuf = mbuf->next;
1425 	while (mbuf) {
1426 		cpu_to_hw_sg(sg);
1427 		sg++;
1428 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1429 		sg->length = mbuf->data_len;
1430 		mbuf = mbuf->next;
1431 	}
1432 	sg->length -= ses->digest_length;
1433 
1434 	if (is_encode(ses)) {
1435 		cpu_to_hw_sg(sg);
1436 		/* set auth output */
1437 		sg++;
1438 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1439 		sg->length = ses->digest_length;
1440 	}
1441 	sg->final = 1;
1442 	cpu_to_hw_sg(sg);
1443 
1444 	/* input */
1445 	mbuf = sym->m_src;
1446 	in_sg = &cf->sg[1];
1447 	in_sg->extension = 1;
1448 	in_sg->final = 1;
1449 	if (is_encode(ses))
1450 		in_sg->length = ses->iv.length + sym->auth.data.length;
1451 	else
1452 		in_sg->length = ses->iv.length + sym->auth.data.length
1453 						+ ses->digest_length;
1454 
1455 	/* input sg entries */
1456 	sg++;
1457 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1458 	cpu_to_hw_sg(in_sg);
1459 
1460 	/* 1st seg IV */
1461 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1462 	sg->length = ses->iv.length;
1463 	cpu_to_hw_sg(sg);
1464 
1465 	/* 2nd seg */
1466 	sg++;
1467 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1468 	sg->length = mbuf->data_len - sym->auth.data.offset;
1469 	sg->offset = sym->auth.data.offset;
1470 
1471 	/* Successive segs */
1472 	mbuf = mbuf->next;
1473 	while (mbuf) {
1474 		cpu_to_hw_sg(sg);
1475 		sg++;
1476 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1477 		sg->length = mbuf->data_len;
1478 		mbuf = mbuf->next;
1479 	}
1480 
1481 	sg->length -= ses->digest_length;
1482 	if (is_decode(ses)) {
1483 		cpu_to_hw_sg(sg);
1484 		sg++;
1485 		memcpy(ctx->digest, sym->auth.digest.data,
1486 			ses->digest_length);
1487 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1488 		sg->length = ses->digest_length;
1489 	}
1490 	sg->final = 1;
1491 	cpu_to_hw_sg(sg);
1492 
1493 	return cf;
1494 }
1495 
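/* Build a compound frame for a chained cipher+auth op on contiguous mbufs */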
1496 static inline struct dpaa_sec_job *
1497 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1498 {
1499 	struct rte_crypto_sym_op *sym = op->sym;
1500 	struct dpaa_sec_job *cf;
1501 	struct dpaa_sec_op_ctx *ctx;
1502 	struct qm_sg_entry *sg;
1503 	rte_iova_t src_start_addr, dst_start_addr;
1504 	uint32_t length = 0;
1505 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1506 			ses->iv.offset);
1507 
1508 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1509 	if (sym->m_dst)
1510 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1511 	else
1512 		dst_start_addr = src_start_addr;
1513 
1514 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1515 	if (!ctx)
1516 		return NULL;
1517 
1518 	cf = &ctx->job;
1519 	ctx->op = op;
1520 
1521 	/* input */
1522 	rte_prefetch0(cf->sg);
1523 	sg = &cf->sg[2];
1524 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1525 	if (is_encode(ses)) {
1526 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1527 		sg->length = ses->iv.length;
1528 		length += sg->length;
1529 		cpu_to_hw_sg(sg);
1530 
1531 		sg++;
1532 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1533 		sg->length = sym->auth.data.length;
1534 		length += sg->length;
1535 		sg->final = 1;
1536 		cpu_to_hw_sg(sg);
1537 	} else {
1538 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1539 		sg->length = ses->iv.length;
1540 		length += sg->length;
1541 		cpu_to_hw_sg(sg);
1542 
1543 		sg++;
1544 
1545 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1546 		sg->length = sym->auth.data.length;
1547 		length += sg->length;
1548 		cpu_to_hw_sg(sg);
1549 
1550 		memcpy(ctx->digest, sym->auth.digest.data,
1551 		       ses->digest_length);
1552 		sg++;
1553 
1554 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1555 		sg->length = ses->digest_length;
1556 		length += sg->length;
1557 		sg->final = 1;
1558 		cpu_to_hw_sg(sg);
1559 	}
1560 	/* input compound frame */
1561 	cf->sg[1].length = length;
1562 	cf->sg[1].extension = 1;
1563 	cf->sg[1].final = 1;
1564 	cpu_to_hw_sg(&cf->sg[1]);
1565 
1566 	/* output */
1567 	sg++;
1568 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1569 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1570 	sg->length = sym->cipher.data.length;
1571 	length = sg->length;
1572 	if (is_encode(ses)) {
1573 		cpu_to_hw_sg(sg);
1574 		/* set auth output */
1575 		sg++;
1576 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1577 		sg->length = ses->digest_length;
1578 		length += sg->length;
1579 	}
1580 	sg->final = 1;
1581 	cpu_to_hw_sg(sg);
1582 
1583 	/* output compound frame */
1584 	cf->sg[0].length = length;
1585 	cf->sg[0].extension = 1;
1586 	cpu_to_hw_sg(&cf->sg[0]);
1587 
1588 	return cf;
1589 }
1590 
1591 #ifdef RTE_LIB_SECURITY
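/* Build a compound frame for a protocol-offload (IPsec/PDCP) op on a
 * contiguous mbuf: the whole packet is the input and the full destination
 * buffer is given as output for the SEC-built packet.
 */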
1592 static inline struct dpaa_sec_job *
1593 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1594 {
1595 	struct rte_crypto_sym_op *sym = op->sym;
1596 	struct dpaa_sec_job *cf;
1597 	struct dpaa_sec_op_ctx *ctx;
1598 	struct qm_sg_entry *sg;
1599 	phys_addr_t src_start_addr, dst_start_addr;
1600 
1601 	ctx = dpaa_sec_alloc_ctx(ses, 2);
1602 	if (!ctx)
1603 		return NULL;
1604 	cf = &ctx->job;
1605 	ctx->op = op;
1606 
1607 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1608 
1609 	if (sym->m_dst)
1610 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1611 	else
1612 		dst_start_addr = src_start_addr;
1613 
1614 	/* input */
1615 	sg = &cf->sg[1];
1616 	qm_sg_entry_set64(sg, src_start_addr);
1617 	sg->length = sym->m_src->pkt_len;
1618 	sg->final = 1;
1619 	cpu_to_hw_sg(sg);
1620 
1621 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1622 	/* output */
1623 	sg = &cf->sg[0];
1624 	qm_sg_entry_set64(sg, dst_start_addr);
1625 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1626 	cpu_to_hw_sg(sg);
1627 
1628 	return cf;
1629 }
1630 
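/* Build a compound frame for a protocol-offload (IPsec/PDCP) op on scattered
 * mbufs.
 */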
1631 static inline struct dpaa_sec_job *
1632 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1633 {
1634 	struct rte_crypto_sym_op *sym = op->sym;
1635 	struct dpaa_sec_job *cf;
1636 	struct dpaa_sec_op_ctx *ctx;
1637 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1638 	struct rte_mbuf *mbuf;
1639 	uint8_t req_segs;
1640 	uint32_t in_len = 0, out_len = 0;
1641 
1642 	if (sym->m_dst)
1643 		mbuf = sym->m_dst;
1644 	else
1645 		mbuf = sym->m_src;
1646 
1647 	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1648 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1649 		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1650 				MAX_SG_ENTRIES);
1651 		return NULL;
1652 	}
1653 
1654 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1655 	if (!ctx)
1656 		return NULL;
1657 	cf = &ctx->job;
1658 	ctx->op = op;
1659 	/* output */
1660 	out_sg = &cf->sg[0];
1661 	out_sg->extension = 1;
1662 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1663 
1664 	/* 1st seg */
1665 	sg = &cf->sg[2];
1666 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1667 	sg->offset = 0;
1668 
1669 	/* Successive segs */
1670 	while (mbuf->next) {
1671 		sg->length = mbuf->data_len;
1672 		out_len += sg->length;
1673 		mbuf = mbuf->next;
1674 		cpu_to_hw_sg(sg);
1675 		sg++;
1676 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1677 		sg->offset = 0;
1678 	}
1679 	sg->length = mbuf->buf_len - mbuf->data_off;
1680 	out_len += sg->length;
1681 	sg->final = 1;
1682 	cpu_to_hw_sg(sg);
1683 
1684 	out_sg->length = out_len;
1685 	cpu_to_hw_sg(out_sg);
1686 
1687 	/* input */
1688 	mbuf = sym->m_src;
1689 	in_sg = &cf->sg[1];
1690 	in_sg->extension = 1;
1691 	in_sg->final = 1;
1692 	in_len = mbuf->data_len;
1693 
1694 	sg++;
1695 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1696 
1697 	/* 1st seg */
1698 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1699 	sg->length = mbuf->data_len;
1700 	sg->offset = 0;
1701 
1702 	/* Successive segs */
1703 	mbuf = mbuf->next;
1704 	while (mbuf) {
1705 		cpu_to_hw_sg(sg);
1706 		sg++;
1707 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1708 		sg->length = mbuf->data_len;
1709 		sg->offset = 0;
1710 		in_len += sg->length;
1711 		mbuf = mbuf->next;
1712 	}
1713 	sg->final = 1;
1714 	cpu_to_hw_sg(sg);
1715 
1716 	in_sg->length = in_len;
1717 	cpu_to_hw_sg(in_sg);
1718 
1719 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1720 
1721 	return cf;
1722 }
1723 #endif
1724 
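/* Enqueue up to nb_ops crypto ops to SEC: a compound frame descriptor is
 * built per op (contiguous or scatter-gather variant depending on the mbufs)
 * and enqueued on the session's input frame queue for the current lcore.
 * Returns the number of ops actually enqueued.
 */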
1725 static uint16_t
1726 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1727 		       uint16_t nb_ops)
1728 {
1729 	/* Transmit the frames to the given device and queue pair */
1730 	uint32_t loop;
1731 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1732 	uint16_t num_tx = 0;
1733 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1734 	uint32_t frames_to_send;
1735 	struct rte_crypto_op *op;
1736 	struct dpaa_sec_job *cf;
1737 	dpaa_sec_session *ses;
1738 	uint16_t auth_hdr_len, auth_tail_len;
1739 	uint32_t index, flags[DPAA_SEC_BURST] = {0};
1740 	struct qman_fq *inq[DPAA_SEC_BURST];
1741 
1742 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1743 		if (rte_dpaa_portal_init((void *)0)) {
1744 			DPAA_SEC_ERR("Failure in affining portal");
1745 			return 0;
1746 		}
1747 	}
1748 
1749 	while (nb_ops) {
1750 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1751 				DPAA_SEC_BURST : nb_ops;
1752 		for (loop = 0; loop < frames_to_send; loop++) {
1753 			op = *(ops++);
1754 			if (*dpaa_seqn(op->sym->m_src) != 0) {
1755 				index = *dpaa_seqn(op->sym->m_src) - 1;
1756 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1757 					/* QM_EQCR_DCA_IDXMASK = 0x0f */
1758 					flags[loop] = ((index & 0x0f) << 8);
1759 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1760 					DPAA_PER_LCORE_DQRR_SIZE--;
1761 					DPAA_PER_LCORE_DQRR_HELD &=
1762 								~(1 << index);
1763 				}
1764 			}
1765 
1766 			switch (op->sess_type) {
1767 			case RTE_CRYPTO_OP_WITH_SESSION:
1768 				ses = (dpaa_sec_session *)
1769 					get_sym_session_private_data(
1770 							op->sym->session,
1771 							cryptodev_driver_id);
1772 				break;
1773 #ifdef RTE_LIB_SECURITY
1774 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1775 				ses = (dpaa_sec_session *)
1776 					get_sec_session_private_data(
1777 							op->sym->sec_session);
1778 				break;
1779 #endif
1780 			default:
1781 				DPAA_SEC_DP_ERR(
1782 					"sessionless crypto op not supported");
1783 				frames_to_send = loop;
1784 				nb_ops = loop;
1785 				goto send_pkts;
1786 			}
1787 
1788 			if (!ses) {
1789 				DPAA_SEC_DP_ERR("session not available");
1790 				frames_to_send = loop;
1791 				nb_ops = loop;
1792 				goto send_pkts;
1793 			}
1794 
1795 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1796 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1797 					frames_to_send = loop;
1798 					nb_ops = loop;
1799 					goto send_pkts;
1800 				}
1801 			} else if (unlikely(ses->qp[rte_lcore_id() %
1802 						MAX_DPAA_CORES] != qp)) {
1803 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1804 					" New qp = %p\n",
1805 					ses->qp[rte_lcore_id() %
1806 					MAX_DPAA_CORES], qp);
1807 				frames_to_send = loop;
1808 				nb_ops = loop;
1809 				goto send_pkts;
1810 			}
1811 
1812 			auth_hdr_len = op->sym->auth.data.length -
1813 						op->sym->cipher.data.length;
1814 			auth_tail_len = 0;
1815 
1816 			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1817 				  ((op->sym->m_dst == NULL) ||
1818 				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1819 				switch (ses->ctxt) {
1820 #ifdef RTE_LIB_SECURITY
1821 				case DPAA_SEC_PDCP:
1822 				case DPAA_SEC_IPSEC:
1823 					cf = build_proto(op, ses);
1824 					break;
1825 #endif
1826 				case DPAA_SEC_AUTH:
1827 					cf = build_auth_only(op, ses);
1828 					break;
1829 				case DPAA_SEC_CIPHER:
1830 					cf = build_cipher_only(op, ses);
1831 					break;
1832 				case DPAA_SEC_AEAD:
1833 					cf = build_cipher_auth_gcm(op, ses);
1834 					auth_hdr_len = ses->auth_only_len;
1835 					break;
1836 				case DPAA_SEC_CIPHER_HASH:
1837 					auth_hdr_len =
1838 						op->sym->cipher.data.offset
1839 						- op->sym->auth.data.offset;
1840 					auth_tail_len =
1841 						op->sym->auth.data.length
1842 						- op->sym->cipher.data.length
1843 						- auth_hdr_len;
1844 					cf = build_cipher_auth(op, ses);
1845 					break;
1846 				default:
1847 					DPAA_SEC_DP_ERR("not supported ops");
1848 					frames_to_send = loop;
1849 					nb_ops = loop;
1850 					goto send_pkts;
1851 				}
1852 			} else {
1853 				switch (ses->ctxt) {
1854 #ifdef RTE_LIB_SECURITY
1855 				case DPAA_SEC_PDCP:
1856 				case DPAA_SEC_IPSEC:
1857 					cf = build_proto_sg(op, ses);
1858 					break;
1859 #endif
1860 				case DPAA_SEC_AUTH:
1861 					cf = build_auth_only_sg(op, ses);
1862 					break;
1863 				case DPAA_SEC_CIPHER:
1864 					cf = build_cipher_only_sg(op, ses);
1865 					break;
1866 				case DPAA_SEC_AEAD:
1867 					cf = build_cipher_auth_gcm_sg(op, ses);
1868 					auth_hdr_len = ses->auth_only_len;
1869 					break;
1870 				case DPAA_SEC_CIPHER_HASH:
1871 					auth_hdr_len =
1872 						op->sym->cipher.data.offset
1873 						- op->sym->auth.data.offset;
1874 					auth_tail_len =
1875 						op->sym->auth.data.length
1876 						- op->sym->cipher.data.length
1877 						- auth_hdr_len;
1878 					cf = build_cipher_auth_sg(op, ses);
1879 					break;
1880 				default:
1881 					DPAA_SEC_DP_ERR("not supported ops");
1882 					frames_to_send = loop;
1883 					nb_ops = loop;
1884 					goto send_pkts;
1885 				}
1886 			}
1887 			if (unlikely(!cf)) {
1888 				frames_to_send = loop;
1889 				nb_ops = loop;
1890 				goto send_pkts;
1891 			}
1892 
1893 			fd = &fds[loop];
1894 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1895 			fd->opaque_addr = 0;
1896 			fd->cmd = 0;
1897 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1898 			fd->_format1 = qm_fd_compound;
1899 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1900 
1901 			/* Auth_only_len is set to 0 in the descriptor and is
1902 			 * overwritten here in fd.cmd, which updates the
1903 			 * DPOVRD register.
1904 			 */
1905 			if (auth_hdr_len || auth_tail_len) {
1906 				fd->cmd = 0x80000000;
1907 				fd->cmd |=
1908 					((auth_tail_len << 16) | auth_hdr_len);
1909 			}
1910 
1911 #ifdef RTE_LIB_SECURITY
1912 			/* In case of PDCP, the per-packet HFN is stored in the
1913 			 * mbuf private area, after the sym_op.
1914 			 */
1915 			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1916 				fd->cmd = 0x80000000 |
1917 					*((uint32_t *)((uint8_t *)op +
1918 					ses->pdcp.hfn_ovd_offset));
1919 				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1920 					*((uint32_t *)((uint8_t *)op +
1921 					ses->pdcp.hfn_ovd_offset)),
1922 					ses->pdcp.hfn_ovd);
1923 			}
1924 #endif
1925 		}
1926 send_pkts:
1927 		loop = 0;
1928 		while (loop < frames_to_send) {
1929 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1930 					&flags[loop], frames_to_send - loop);
1931 		}
1932 		nb_ops -= frames_to_send;
1933 		num_tx += frames_to_send;
1934 	}
1935 
1936 	dpaa_qp->tx_pkts += num_tx;
1937 	dpaa_qp->tx_errs += nb_ops - num_tx;
1938 
1939 	return num_tx;
1940 }
1941 
1942 static uint16_t
1943 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1944 		       uint16_t nb_ops)
1945 {
1946 	uint16_t num_rx;
1947 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1948 
1949 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1950 		if (rte_dpaa_portal_init((void *)0)) {
1951 			DPAA_SEC_ERR("Failure in affining portal");
1952 			return 0;
1953 		}
1954 	}
1955 
1956 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1957 
1958 	dpaa_qp->rx_pkts += num_rx;
1959 	dpaa_qp->rx_errs += nb_ops - num_rx;
1960 
1961 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1962 
1963 	return num_rx;
1964 }
1965 
1966 /** Release queue pair */
1967 static int
1968 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1969 			    uint16_t qp_id)
1970 {
1971 	struct dpaa_sec_dev_private *internals;
1972 	struct dpaa_sec_qp *qp = NULL;
1973 
1974 	PMD_INIT_FUNC_TRACE();
1975 
1976 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1977 
1978 	internals = dev->data->dev_private;
1979 	if (qp_id >= internals->max_nb_queue_pairs) {
1980 		DPAA_SEC_ERR("Max supported qpid %d",
1981 			     internals->max_nb_queue_pairs);
1982 		return -EINVAL;
1983 	}
1984 
1985 	qp = &internals->qps[qp_id];
1986 	rte_mempool_free(qp->ctx_pool);
1987 	qp->internals = NULL;
1988 	dev->data->queue_pairs[qp_id] = NULL;
1989 
1990 	return 0;
1991 }
1992 
1993 /** Setup a queue pair */
1994 static int
1995 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1996 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1997 		__rte_unused int socket_id)
1998 {
1999 	struct dpaa_sec_dev_private *internals;
2000 	struct dpaa_sec_qp *qp = NULL;
2001 	char str[20];
2002 
2003 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2004 
2005 	internals = dev->data->dev_private;
2006 	if (qp_id >= internals->max_nb_queue_pairs) {
2007 		DPAA_SEC_ERR("Max supported qpid %d",
2008 			     internals->max_nb_queue_pairs);
2009 		return -EINVAL;
2010 	}
2011 
2012 	qp = &internals->qps[qp_id];
2013 	qp->internals = internals;
2014 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2015 			dev->data->dev_id, qp_id);
2016 	if (!qp->ctx_pool) {
2017 		qp->ctx_pool = rte_mempool_create((const char *)str,
2018 							CTX_POOL_NUM_BUFS,
2019 							CTX_POOL_BUF_SIZE,
2020 							CTX_POOL_CACHE_SIZE, 0,
2021 							NULL, NULL, NULL, NULL,
2022 							SOCKET_ID_ANY, 0);
2023 		if (!qp->ctx_pool) {
2024 			DPAA_SEC_ERR("%s create failed\n", str);
2025 			return -ENOMEM;
2026 		}
2027 	} else
2028 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2029 				dev->data->dev_id, qp_id);
2030 	dev->data->queue_pairs[qp_id] = qp;
2031 
2032 	return 0;
2033 }
2034 
2035 /** Returns the size of session structure */
2036 static unsigned int
2037 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2038 {
2039 	PMD_INIT_FUNC_TRACE();
2040 
2041 	return sizeof(dpaa_sec_session);
2042 }
2043 
2044 static int
2045 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2046 		     struct rte_crypto_sym_xform *xform,
2047 		     dpaa_sec_session *session)
2048 {
2049 	session->ctxt = DPAA_SEC_CIPHER;
2050 	session->cipher_alg = xform->cipher.algo;
2051 	session->iv.length = xform->cipher.iv.length;
2052 	session->iv.offset = xform->cipher.iv.offset;
2053 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2054 					       RTE_CACHE_LINE_SIZE);
2055 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2056 		DPAA_SEC_ERR("No Memory for cipher key");
2057 		return -ENOMEM;
2058 	}
2059 	session->cipher_key.length = xform->cipher.key.length;
2060 
2061 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2062 	       xform->cipher.key.length);
2063 	switch (xform->cipher.algo) {
2064 	case RTE_CRYPTO_CIPHER_AES_CBC:
2065 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2066 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2067 		break;
2068 	case RTE_CRYPTO_CIPHER_DES_CBC:
2069 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2070 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2071 		break;
2072 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2073 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2074 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2075 		break;
2076 	case RTE_CRYPTO_CIPHER_AES_CTR:
2077 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2078 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2079 		break;
2080 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2081 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2082 		break;
2083 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2084 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2085 		break;
2086 	default:
2087 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2088 			      xform->cipher.algo);
2089 		return -ENOTSUP;
2090 	}
2091 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2092 			DIR_ENC : DIR_DEC;
2093 
2094 	return 0;
2095 }
2096 
2097 static int
2098 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2099 		   struct rte_crypto_sym_xform *xform,
2100 		   dpaa_sec_session *session)
2101 {
2102 	session->ctxt = DPAA_SEC_AUTH;
2103 	session->auth_alg = xform->auth.algo;
2104 	session->auth_key.length = xform->auth.key.length;
2105 	if (xform->auth.key.length) {
2106 		session->auth_key.data =
2107 				rte_zmalloc(NULL, xform->auth.key.length,
2108 					     RTE_CACHE_LINE_SIZE);
2109 		if (session->auth_key.data == NULL) {
2110 			DPAA_SEC_ERR("No Memory for auth key");
2111 			return -ENOMEM;
2112 		}
2113 		memcpy(session->auth_key.data, xform->auth.key.data,
2114 				xform->auth.key.length);
2115 
2116 	}
2117 	session->digest_length = xform->auth.digest_length;
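	/* For authentication-only sessions (no cipher), algorithms such as
	 * SNOW 3G UIA2 and ZUC EIA3 carry their IV in the auth xform.
	 */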
2118 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2119 		session->iv.offset = xform->auth.iv.offset;
2120 		session->iv.length = xform->auth.iv.length;
2121 	}
2122 
2123 	switch (xform->auth.algo) {
2124 	case RTE_CRYPTO_AUTH_SHA1:
2125 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2126 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2127 		break;
2128 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2129 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2130 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2131 		break;
2132 	case RTE_CRYPTO_AUTH_MD5:
2133 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2134 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2135 		break;
2136 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2137 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2138 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2139 		break;
2140 	case RTE_CRYPTO_AUTH_SHA224:
2141 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2142 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2143 		break;
2144 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2145 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2146 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2147 		break;
2148 	case RTE_CRYPTO_AUTH_SHA256:
2149 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2150 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2151 		break;
2152 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2153 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2154 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2155 		break;
2156 	case RTE_CRYPTO_AUTH_SHA384:
2157 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2158 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2159 		break;
2160 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2161 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2162 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2163 		break;
2164 	case RTE_CRYPTO_AUTH_SHA512:
2165 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2166 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2167 		break;
2168 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2169 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2170 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2171 		break;
2172 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2173 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2174 		session->auth_key.algmode = OP_ALG_AAI_F9;
2175 		break;
2176 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2177 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2178 		session->auth_key.algmode = OP_ALG_AAI_F9;
2179 		break;
2180 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2181 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2182 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2183 		break;
2184 	case RTE_CRYPTO_AUTH_AES_CMAC:
2185 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2186 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2187 		break;
2188 	default:
2189 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2190 			      xform->auth.algo);
2191 		return -ENOTSUP;
2192 	}
2193 
2194 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2195 			DIR_ENC : DIR_DEC;
2196 
2197 	return 0;
2198 }
2199 
2200 static int
2201 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2202 		   struct rte_crypto_sym_xform *xform,
2203 		   dpaa_sec_session *session)
2204 {
2205 
2206 	struct rte_crypto_cipher_xform *cipher_xform;
2207 	struct rte_crypto_auth_xform *auth_xform;
2208 
2209 	session->ctxt = DPAA_SEC_CIPHER_HASH;
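	/* auth_cipher_text is non-zero when the cipher xform precedes the
	 * auth xform in the chain, so pick the cipher and auth parameters
	 * from the matching positions.
	 */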
2210 	if (session->auth_cipher_text) {
2211 		cipher_xform = &xform->cipher;
2212 		auth_xform = &xform->next->auth;
2213 	} else {
2214 		cipher_xform = &xform->next->cipher;
2215 		auth_xform = &xform->auth;
2216 	}
2217 
2218 	/* Set IV parameters */
2219 	session->iv.offset = cipher_xform->iv.offset;
2220 	session->iv.length = cipher_xform->iv.length;
2221 
2222 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2223 					       RTE_CACHE_LINE_SIZE);
2224 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2225 		DPAA_SEC_ERR("No Memory for cipher key");
2226 		return -ENOMEM;
2227 	}
2228 	session->cipher_key.length = cipher_xform->key.length;
2229 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2230 					     RTE_CACHE_LINE_SIZE);
2231 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2232 		DPAA_SEC_ERR("No Memory for auth key");
2233 		return -ENOMEM;
2234 	}
2235 	session->auth_key.length = auth_xform->key.length;
2236 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2237 	       cipher_xform->key.length);
2238 	memcpy(session->auth_key.data, auth_xform->key.data,
2239 	       auth_xform->key.length);
2240 
2241 	session->digest_length = auth_xform->digest_length;
2242 	session->auth_alg = auth_xform->algo;
2243 
2244 	switch (auth_xform->algo) {
2245 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2246 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2247 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2248 		break;
2249 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2250 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2251 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2252 		break;
2253 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2254 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2255 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2256 		break;
2257 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2258 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2259 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2260 		break;
2261 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2262 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2263 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2264 		break;
2265 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2266 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2267 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2268 		break;
2269 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2270 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2271 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2272 		break;
2273 	case RTE_CRYPTO_AUTH_AES_CMAC:
2274 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2275 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2276 		break;
2277 	default:
2278 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2279 			      auth_xform->algo);
2280 		return -ENOTSUP;
2281 	}
2282 
2283 	session->cipher_alg = cipher_xform->algo;
2284 
2285 	switch (cipher_xform->algo) {
2286 	case RTE_CRYPTO_CIPHER_AES_CBC:
2287 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2288 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2289 		break;
2290 	case RTE_CRYPTO_CIPHER_DES_CBC:
2291 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2292 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2293 		break;
2294 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2295 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2296 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2297 		break;
2298 	case RTE_CRYPTO_CIPHER_AES_CTR:
2299 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2300 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2301 		break;
2302 	default:
2303 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2304 			      cipher_xform->algo);
2305 		return -ENOTSUP;
2306 	}
2307 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2308 				DIR_ENC : DIR_DEC;
2309 	return 0;
2310 }
2311 
2312 static int
2313 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2314 		   struct rte_crypto_sym_xform *xform,
2315 		   dpaa_sec_session *session)
2316 {
2317 	session->aead_alg = xform->aead.algo;
2318 	session->ctxt = DPAA_SEC_AEAD;
2319 	session->iv.length = xform->aead.iv.length;
2320 	session->iv.offset = xform->aead.iv.offset;
2321 	session->auth_only_len = xform->aead.aad_length;
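	/* The AAD length is recorded as auth_only_len and passed per packet
	 * through DPOVRD on the enqueue path.
	 */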
2322 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2323 					     RTE_CACHE_LINE_SIZE);
2324 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2325 		DPAA_SEC_ERR("No Memory for aead key\n");
2326 		return -ENOMEM;
2327 	}
2328 	session->aead_key.length = xform->aead.key.length;
2329 	session->digest_length = xform->aead.digest_length;
2330 
2331 	memcpy(session->aead_key.data, xform->aead.key.data,
2332 	       xform->aead.key.length);
2333 
2334 	switch (session->aead_alg) {
2335 	case RTE_CRYPTO_AEAD_AES_GCM:
2336 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2337 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2338 		break;
2339 	default:
2340 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2341 		return -ENOTSUP;
2342 	}
2343 
2344 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2345 			DIR_ENC : DIR_DEC;
2346 
2347 	return 0;
2348 }
2349 
2350 static struct qman_fq *
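/* Reserve a free Rx FQ from the device-wide pool; one FQ is attached per
 * session per core, so the pool size bounds the number of active sessions.
 */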
2351 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2352 {
2353 	unsigned int i;
2354 
2355 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2356 		if (qi->inq_attach[i] == 0) {
2357 			qi->inq_attach[i] = 1;
2358 			return &qi->inq[i];
2359 		}
2360 	}
2361 	DPAA_SEC_WARN("All sessions in use (max %u)", qi->max_nb_sessions);
2362 
2363 	return NULL;
2364 }
2365 
2366 static int
2367 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2368 {
2369 	unsigned int i;
2370 
2371 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2372 		if (&qi->inq[i] == fq) {
2373 			if (qman_retire_fq(fq, NULL) != 0)
2374 				DPAA_SEC_DEBUG("Queue is not retired\n");
2375 			qman_oos_fq(fq);
2376 			qi->inq_attach[i] = 0;
2377 			return 0;
2378 		}
2379 	}
2380 	return -1;
2381 }
2382 
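/* Bind a session to a queue pair on the calling lcore: build the shared
 * descriptor (CDB) and schedule the session Rx FQ towards SEC, with the
 * queue pair's outq as the destination for processed frames.
 */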
2383 static int
2384 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2385 {
2386 	int ret;
2387 
2388 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2389 	ret = dpaa_sec_prep_cdb(sess);
2390 	if (ret) {
2391 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2392 		return ret;
2393 	}
2394 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2395 		ret = rte_dpaa_portal_init((void *)0);
2396 		if (ret) {
2397 			DPAA_SEC_ERR("Failure in affining portal");
2398 			return ret;
2399 		}
2400 	}
2401 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2402 			       rte_dpaa_mem_vtop(&sess->cdb),
2403 			       qman_fq_fqid(&qp->outq));
2404 	if (ret)
2405 		DPAA_SEC_ERR("Unable to init sec queue");
2406 
2407 	return ret;
2408 }
2409 
2410 static inline void
2411 free_session_data(dpaa_sec_session *s)
2412 {
2413 	if (is_aead(s))
2414 		rte_free(s->aead_key.data);
2415 	else {
2416 		rte_free(s->auth_key.data);
2417 		rte_free(s->cipher_key.data);
2418 	}
2419 	memset(s, 0, sizeof(dpaa_sec_session));
2420 }
2421 
2422 static int
2423 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2424 			    struct rte_crypto_sym_xform *xform,	void *sess)
2425 {
2426 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2427 	dpaa_sec_session *session = sess;
2428 	uint32_t i;
2429 	int ret;
2430 
2431 	PMD_INIT_FUNC_TRACE();
2432 
2433 	if (unlikely(sess == NULL)) {
2434 		DPAA_SEC_ERR("invalid session struct");
2435 		return -EINVAL;
2436 	}
2437 	memset(session, 0, sizeof(dpaa_sec_session));
2438 
2439 	/* Default IV length = 0 */
2440 	session->iv.length = 0;
2441 
2442 	/* Cipher Only */
2443 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2444 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2445 		ret = dpaa_sec_cipher_init(dev, xform, session);
2446 
2447 	/* Authentication Only */
2448 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2449 		   xform->next == NULL) {
2450 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2451 		session->ctxt = DPAA_SEC_AUTH;
2452 		ret = dpaa_sec_auth_init(dev, xform, session);
2453 
2454 	/* Cipher then Authenticate */
2455 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2456 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2457 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2458 			session->auth_cipher_text = 1;
2459 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2460 				ret = dpaa_sec_auth_init(dev, xform, session);
2461 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2462 				ret = dpaa_sec_cipher_init(dev, xform, session);
2463 			else
2464 				ret = dpaa_sec_chain_init(dev, xform, session);
2465 		} else {
2466 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2467 			return -ENOTSUP;
2468 		}
2469 	/* Authenticate then Cipher */
2470 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2471 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2472 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2473 			session->auth_cipher_text = 0;
2474 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2475 				ret = dpaa_sec_cipher_init(dev, xform, session);
2476 			else if (xform->next->cipher.algo
2477 					== RTE_CRYPTO_CIPHER_NULL)
2478 				ret = dpaa_sec_auth_init(dev, xform, session);
2479 			else
2480 				ret = dpaa_sec_chain_init(dev, xform, session);
2481 		} else {
2482 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2483 			return -ENOTSUP;
2484 		}
2485 
2486 	/* AEAD operation for AES-GCM kind of Algorithms */
2487 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2488 		   xform->next == NULL) {
2489 		ret = dpaa_sec_aead_init(dev, xform, session);
2490 
2491 	} else {
2492 		DPAA_SEC_ERR("Invalid crypto type");
2493 		return -EINVAL;
2494 	}
2495 	if (ret) {
2496 		DPAA_SEC_ERR("unable to init session");
2497 		goto err1;
2498 	}
2499 
2500 	rte_spinlock_lock(&internals->lock);
2501 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2502 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2503 		if (session->inq[i] == NULL) {
2504 			DPAA_SEC_ERR("unable to attach sec queue");
2505 			rte_spinlock_unlock(&internals->lock);
2506 			ret = -EBUSY;
2507 			goto err1;
2508 		}
2509 	}
2510 	rte_spinlock_unlock(&internals->lock);
2511 
2512 	return 0;
2513 
2514 err1:
2515 	free_session_data(session);
2516 	return ret;
2517 }
2518 
2519 static int
2520 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2521 		struct rte_crypto_sym_xform *xform,
2522 		struct rte_cryptodev_sym_session *sess,
2523 		struct rte_mempool *mempool)
2524 {
2525 	void *sess_private_data;
2526 	int ret;
2527 
2528 	PMD_INIT_FUNC_TRACE();
2529 
2530 	if (rte_mempool_get(mempool, &sess_private_data)) {
2531 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2532 		return -ENOMEM;
2533 	}
2534 
2535 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2536 	if (ret != 0) {
2537 		DPAA_SEC_ERR("failed to configure session parameters");
2538 
2539 		/* Return session to mempool */
2540 		rte_mempool_put(mempool, sess_private_data);
2541 		return ret;
2542 	}
2543 
2544 	set_sym_session_private_data(sess, dev->driver_id,
2545 			sess_private_data);
2546 
2547 
2548 	return 0;
2549 }
2550 
2551 static inline void
2552 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2553 {
2554 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2555 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2556 	uint8_t i;
2557 
2558 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2559 		if (s->inq[i])
2560 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2561 		s->inq[i] = NULL;
2562 		s->qp[i] = NULL;
2563 	}
2564 	free_session_data(s);
2565 	rte_mempool_put(sess_mp, (void *)s);
2566 }
2567 
2568 /** Clear the memory of session so it doesn't leave key material behind */
2569 static void
2570 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2571 		struct rte_cryptodev_sym_session *sess)
2572 {
2573 	PMD_INIT_FUNC_TRACE();
2574 	uint8_t index = dev->driver_id;
2575 	void *sess_priv = get_sym_session_private_data(sess, index);
2576 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2577 
2578 	if (sess_priv) {
2579 		free_session_memory(dev, s);
2580 		set_sym_session_private_data(sess, index, NULL);
2581 	}
2582 }
2583 
2584 #ifdef RTE_LIB_SECURITY
2585 static int
2586 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2587 			struct rte_security_ipsec_xform *ipsec_xform,
2588 			dpaa_sec_session *session)
2589 {
2590 	PMD_INIT_FUNC_TRACE();
2591 
2592 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2593 					       RTE_CACHE_LINE_SIZE);
2594 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2595 		DPAA_SEC_ERR("No Memory for aead key");
2596 		return -ENOMEM;
2597 	}
2598 	memcpy(session->aead_key.data, aead_xform->key.data,
2599 	       aead_xform->key.length);
2600 
2601 	session->digest_length = aead_xform->digest_length;
2602 	session->aead_key.length = aead_xform->key.length;
2603 
2604 	switch (aead_xform->algo) {
2605 	case RTE_CRYPTO_AEAD_AES_GCM:
2606 		switch (session->digest_length) {
2607 		case 8:
2608 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2609 			break;
2610 		case 12:
2611 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2612 			break;
2613 		case 16:
2614 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2615 			break;
2616 		default:
2617 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2618 				     session->digest_length);
2619 			return -EINVAL;
2620 		}
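		/* For IPsec ESP with AES-GCM, the 4-byte salt from the xform
		 * is the implicit part of the nonce and is kept in the PDB.
		 */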
2621 		if (session->dir == DIR_ENC) {
2622 			memcpy(session->encap_pdb.gcm.salt,
2623 				(uint8_t *)&(ipsec_xform->salt), 4);
2624 		} else {
2625 			memcpy(session->decap_pdb.gcm.salt,
2626 				(uint8_t *)&(ipsec_xform->salt), 4);
2627 		}
2628 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2629 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2630 		break;
2631 	default:
2632 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2633 			      aead_xform->algo);
2634 		return -ENOTSUP;
2635 	}
2636 	return 0;
2637 }
2638 
2639 static int
2640 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2641 	struct rte_crypto_auth_xform *auth_xform,
2642 	struct rte_security_ipsec_xform *ipsec_xform,
2643 	dpaa_sec_session *session)
2644 {
2645 	if (cipher_xform) {
2646 		session->cipher_key.data = rte_zmalloc(NULL,
2647 						       cipher_xform->key.length,
2648 						       RTE_CACHE_LINE_SIZE);
2649 		if (session->cipher_key.data == NULL &&
2650 				cipher_xform->key.length > 0) {
2651 			DPAA_SEC_ERR("No Memory for cipher key");
2652 			return -ENOMEM;
2653 		}
2654 
2655 		session->cipher_key.length = cipher_xform->key.length;
2656 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2657 				cipher_xform->key.length);
2658 		session->cipher_alg = cipher_xform->algo;
2659 	} else {
2660 		session->cipher_key.data = NULL;
2661 		session->cipher_key.length = 0;
2662 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2663 	}
2664 
2665 	if (auth_xform) {
2666 		session->auth_key.data = rte_zmalloc(NULL,
2667 						auth_xform->key.length,
2668 						RTE_CACHE_LINE_SIZE);
2669 		if (session->auth_key.data == NULL &&
2670 				auth_xform->key.length > 0) {
2671 			DPAA_SEC_ERR("No Memory for auth key");
2672 			return -ENOMEM;
2673 		}
2674 		session->auth_key.length = auth_xform->key.length;
2675 		memcpy(session->auth_key.data, auth_xform->key.data,
2676 				auth_xform->key.length);
2677 		session->auth_alg = auth_xform->algo;
2678 		session->digest_length = auth_xform->digest_length;
2679 	} else {
2680 		session->auth_key.data = NULL;
2681 		session->auth_key.length = 0;
2682 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2683 	}
2684 
2685 	switch (session->auth_alg) {
2686 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2687 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2688 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2689 		break;
2690 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2691 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2692 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2693 		break;
2694 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2695 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2696 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2697 		if (session->digest_length != 16)
2698 			DPAA_SEC_WARN(
2699 			"Using a non-standard truncated digest length with "
2700 			"sha256-hmac; it will not work with lookaside protocol");
2701 		break;
2702 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2703 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2704 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2705 		break;
2706 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2707 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2708 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2709 		break;
2710 	case RTE_CRYPTO_AUTH_AES_CMAC:
2711 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2712 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2713 		break;
2714 	case RTE_CRYPTO_AUTH_NULL:
2715 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2716 		break;
2717 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2718 		session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2719 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2720 		break;
2721 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2722 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2723 	case RTE_CRYPTO_AUTH_SHA1:
2724 	case RTE_CRYPTO_AUTH_SHA256:
2725 	case RTE_CRYPTO_AUTH_SHA512:
2726 	case RTE_CRYPTO_AUTH_SHA224:
2727 	case RTE_CRYPTO_AUTH_SHA384:
2728 	case RTE_CRYPTO_AUTH_MD5:
2729 	case RTE_CRYPTO_AUTH_AES_GMAC:
2730 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2731 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2732 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2733 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2734 			      session->auth_alg);
2735 		return -ENOTSUP;
2736 	default:
2737 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2738 			      session->auth_alg);
2739 		return -ENOTSUP;
2740 	}
2741 
2742 	switch (session->cipher_alg) {
2743 	case RTE_CRYPTO_CIPHER_AES_CBC:
2744 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2745 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2746 		break;
2747 	case RTE_CRYPTO_CIPHER_DES_CBC:
2748 		session->cipher_key.alg = OP_PCL_IPSEC_DES;
2749 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2750 		break;
2751 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2752 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2753 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2754 		break;
2755 	case RTE_CRYPTO_CIPHER_AES_CTR:
2756 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2757 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2758 		if (session->dir == DIR_ENC) {
2759 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2760 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2761 		} else {
2762 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2763 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2764 		}
2765 		break;
2766 	case RTE_CRYPTO_CIPHER_NULL:
2767 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2768 		break;
2769 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2770 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2771 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2772 	case RTE_CRYPTO_CIPHER_AES_ECB:
2773 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2774 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2775 			      session->cipher_alg);
2776 		return -ENOTSUP;
2777 	default:
2778 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2779 			      session->cipher_alg);
2780 		return -ENOTSUP;
2781 	}
2782 
2783 	return 0;
2784 }
2785 
2786 static int
2787 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2788 			   struct rte_security_session_conf *conf,
2789 			   void *sess)
2790 {
2791 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2792 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2793 	struct rte_crypto_auth_xform *auth_xform = NULL;
2794 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2795 	struct rte_crypto_aead_xform *aead_xform = NULL;
2796 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2797 	uint32_t i;
2798 	int ret;
2799 
2800 	PMD_INIT_FUNC_TRACE();
2801 
2802 	memset(session, 0, sizeof(dpaa_sec_session));
2803 	session->proto_alg = conf->protocol;
2804 	session->ctxt = DPAA_SEC_IPSEC;
2805 
2806 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2807 		session->dir = DIR_ENC;
2808 	else
2809 		session->dir = DIR_DEC;
2810 
2811 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2812 		cipher_xform = &conf->crypto_xform->cipher;
2813 		if (conf->crypto_xform->next)
2814 			auth_xform = &conf->crypto_xform->next->auth;
2815 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2816 					ipsec_xform, session);
2817 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2818 		auth_xform = &conf->crypto_xform->auth;
2819 		if (conf->crypto_xform->next)
2820 			cipher_xform = &conf->crypto_xform->next->cipher;
2821 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2822 					ipsec_xform, session);
2823 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2824 		aead_xform = &conf->crypto_xform->aead;
2825 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2826 					ipsec_xform, session);
2827 	} else {
2828 		DPAA_SEC_ERR("XFORM not specified");
2829 		ret = -EINVAL;
2830 		goto out;
2831 	}
2832 	if (ret) {
2833 		DPAA_SEC_ERR("Failed to process xform");
2834 		goto out;
2835 	}
2836 
2837 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2838 		if (ipsec_xform->tunnel.type ==
2839 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2840 			session->ip4_hdr.ip_v = IPVERSION;
2841 			session->ip4_hdr.ip_hl = 5;
2842 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2843 						sizeof(session->ip4_hdr));
2844 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2845 			session->ip4_hdr.ip_id = 0;
2846 			session->ip4_hdr.ip_off = 0;
2847 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2848 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2849 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2850 					IPPROTO_ESP : IPPROTO_AH;
2851 			session->ip4_hdr.ip_sum = 0;
2852 			session->ip4_hdr.ip_src =
2853 					ipsec_xform->tunnel.ipv4.src_ip;
2854 			session->ip4_hdr.ip_dst =
2855 					ipsec_xform->tunnel.ipv4.dst_ip;
2856 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2857 						(void *)&session->ip4_hdr,
2858 						sizeof(struct ip));
2859 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2860 		} else if (ipsec_xform->tunnel.type ==
2861 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2862 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2863 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2864 				((ipsec_xform->tunnel.ipv6.dscp <<
2865 					RTE_IPV6_HDR_TC_SHIFT) &
2866 					RTE_IPV6_HDR_TC_MASK) |
2867 				((ipsec_xform->tunnel.ipv6.flabel <<
2868 					RTE_IPV6_HDR_FL_SHIFT) &
2869 					RTE_IPV6_HDR_FL_MASK));
2870 			/* Payload length will be updated by HW */
2871 			session->ip6_hdr.payload_len = 0;
2872 			session->ip6_hdr.hop_limits =
2873 					ipsec_xform->tunnel.ipv6.hlimit;
2874 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2875 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2876 					IPPROTO_ESP : IPPROTO_AH;
2877 			memcpy(&session->ip6_hdr.src_addr,
2878 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2879 			memcpy(&session->ip6_hdr.dst_addr,
2880 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2881 			session->encap_pdb.ip_hdr_len =
2882 						sizeof(struct rte_ipv6_hdr);
2883 		}
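		/* Encap PDB options: outer IP header supplied inline from the
		 * PDB (OIHI_PDB_INL), IV generated internally by SEC (IVSRC)
		 * and inner TTL/hop limit decremented on encap (DTTL); SNR is
		 * assumed to control sequence-number rollover handling.
		 */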
2884 		session->encap_pdb.options =
2885 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2886 			PDBOPTS_ESP_OIHI_PDB_INL |
2887 			PDBOPTS_ESP_IVSRC |
2888 			PDBHMO_ESP_ENCAP_DTTL |
2889 			PDBHMO_ESP_SNR;
2890 		if (ipsec_xform->options.esn)
2891 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2892 		session->encap_pdb.spi = ipsec_xform->spi;
2893 
2894 	} else if (ipsec_xform->direction ==
2895 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2896 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2897 			session->decap_pdb.options = sizeof(struct ip) << 16;
2898 		else
2899 			session->decap_pdb.options =
2900 					sizeof(struct rte_ipv6_hdr) << 16;
2901 		if (ipsec_xform->options.esn)
2902 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2903 		if (ipsec_xform->replay_win_sz) {
2904 			uint32_t win_sz;
2905 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2906 
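			/* SEC anti-replay scorecards support 32, 64 and 128
			 * entry windows; round the requested window up to the
			 * nearest supported size.
			 */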
2907 			switch (win_sz) {
2908 			case 1:
2909 			case 2:
2910 			case 4:
2911 			case 8:
2912 			case 16:
2913 			case 32:
2914 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2915 				break;
2916 			case 64:
2917 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2918 				break;
2919 			default:
2920 				session->decap_pdb.options |=
2921 							PDBOPTS_ESP_ARS128;
2922 			}
2923 		}
2924 	} else
2925 		goto out;
2926 	rte_spinlock_lock(&internals->lock);
2927 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2928 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2929 		if (session->inq[i] == NULL) {
2930 			DPAA_SEC_ERR("unable to attach sec queue");
2931 			rte_spinlock_unlock(&internals->lock);
2932 			goto out;
2933 		}
2934 	}
2935 	rte_spinlock_unlock(&internals->lock);
2936 
2937 	return 0;
2938 out:
2939 	free_session_data(session);
2940 	return -1;
2941 }
2942 
2943 static int
2944 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2945 			  struct rte_security_session_conf *conf,
2946 			  void *sess)
2947 {
2948 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2949 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2950 	struct rte_crypto_auth_xform *auth_xform = NULL;
2951 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2952 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2953 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2954 	uint32_t i;
2955 	int ret;
2956 
2957 	PMD_INIT_FUNC_TRACE();
2958 
2959 	memset(session, 0, sizeof(dpaa_sec_session));
2960 
2961 	/* find xfrm types */
2962 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2963 		cipher_xform = &xform->cipher;
2964 		if (xform->next != NULL)
2965 			auth_xform = &xform->next->auth;
2966 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2967 		auth_xform = &xform->auth;
2968 		if (xform->next != NULL)
2969 			cipher_xform = &xform->next->cipher;
2970 	} else {
2971 		DPAA_SEC_ERR("Invalid crypto type");
2972 		return -EINVAL;
2973 	}
2974 
2975 	session->proto_alg = conf->protocol;
2976 	session->ctxt = DPAA_SEC_PDCP;
2977 
2978 	if (cipher_xform) {
2979 		switch (cipher_xform->algo) {
2980 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2981 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2982 			break;
2983 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2984 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2985 			break;
2986 		case RTE_CRYPTO_CIPHER_AES_CTR:
2987 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2988 			break;
2989 		case RTE_CRYPTO_CIPHER_NULL:
2990 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2991 			break;
2992 		default:
2993 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2994 				      cipher_xform->algo);
2995 			return -EINVAL;
2996 		}
2997 
2998 		session->cipher_key.data = rte_zmalloc(NULL,
2999 					       cipher_xform->key.length,
3000 					       RTE_CACHE_LINE_SIZE);
3001 		if (session->cipher_key.data == NULL &&
3002 				cipher_xform->key.length > 0) {
3003 			DPAA_SEC_ERR("No Memory for cipher key");
3004 			return -ENOMEM;
3005 		}
3006 		session->cipher_key.length = cipher_xform->key.length;
3007 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3008 			cipher_xform->key.length);
3009 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3010 					DIR_ENC : DIR_DEC;
3011 		session->cipher_alg = cipher_xform->algo;
3012 	} else {
3013 		session->cipher_key.data = NULL;
3014 		session->cipher_key.length = 0;
3015 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3016 		session->dir = DIR_ENC;
3017 	}
3018 
3019 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3020 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3021 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3022 			DPAA_SEC_ERR(
3023 				"PDCP Seq Num size should be 5/12 bits for cmode");
3024 			ret = -EINVAL;
3025 			goto out;
3026 		}
3027 	}
3028 
3029 	if (auth_xform) {
3030 		switch (auth_xform->algo) {
3031 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3032 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3033 			break;
3034 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3035 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3036 			break;
3037 		case RTE_CRYPTO_AUTH_AES_CMAC:
3038 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3039 			break;
3040 		case RTE_CRYPTO_AUTH_NULL:
3041 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3042 			break;
3043 		default:
3044 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3045 				      auth_xform->algo);
3046 			rte_free(session->cipher_key.data);
3047 			return -EINVAL;
3048 		}
3049 		session->auth_key.data = rte_zmalloc(NULL,
3050 						     auth_xform->key.length,
3051 						     RTE_CACHE_LINE_SIZE);
3052 		if (!session->auth_key.data &&
3053 		    auth_xform->key.length > 0) {
3054 			DPAA_SEC_ERR("No Memory for auth key");
3055 			rte_free(session->cipher_key.data);
3056 			return -ENOMEM;
3057 		}
3058 		session->auth_key.length = auth_xform->key.length;
3059 		memcpy(session->auth_key.data, auth_xform->key.data,
3060 		       auth_xform->key.length);
3061 		session->auth_alg = auth_xform->algo;
3062 	} else {
3063 		session->auth_key.data = NULL;
3064 		session->auth_key.length = 0;
3065 		session->auth_alg = 0;
3066 	}
3067 	session->pdcp.domain = pdcp_xform->domain;
3068 	session->pdcp.bearer = pdcp_xform->bearer;
3069 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3070 	session->pdcp.sn_size = pdcp_xform->sn_size;
3071 	session->pdcp.hfn = pdcp_xform->hfn;
3072 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3073 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3074 	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3075 	if (cipher_xform)
3076 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
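	/* With per-packet HFN override enabled, the HFN word is read from the
	 * crypto op at the cipher IV offset on the enqueue path.
	 */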
3077 
3078 	rte_spinlock_lock(&dev_priv->lock);
3079 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3080 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3081 		if (session->inq[i] == NULL) {
3082 			DPAA_SEC_ERR("unable to attach sec queue");
3083 			rte_spinlock_unlock(&dev_priv->lock);
3084 			ret = -EBUSY;
3085 			goto out;
3086 		}
3087 	}
3088 	rte_spinlock_unlock(&dev_priv->lock);
3089 	return 0;
3090 out:
3091 	rte_free(session->auth_key.data);
3092 	rte_free(session->cipher_key.data);
3093 	memset(session, 0, sizeof(dpaa_sec_session));
3094 	return ret;
3095 }
3096 
3097 static int
3098 dpaa_sec_security_session_create(void *dev,
3099 				 struct rte_security_session_conf *conf,
3100 				 struct rte_security_session *sess,
3101 				 struct rte_mempool *mempool)
3102 {
3103 	void *sess_private_data;
3104 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3105 	int ret;
3106 
3107 	if (rte_mempool_get(mempool, &sess_private_data)) {
3108 		DPAA_SEC_ERR("Couldn't get object from session mempool");
3109 		return -ENOMEM;
3110 	}
3111 
3112 	switch (conf->protocol) {
3113 	case RTE_SECURITY_PROTOCOL_IPSEC:
3114 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
3115 				sess_private_data);
3116 		break;
3117 	case RTE_SECURITY_PROTOCOL_PDCP:
3118 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
3119 				sess_private_data);
3120 		break;
3121 	case RTE_SECURITY_PROTOCOL_MACSEC:
3122 		ret = -ENOTSUP;
		break;
3123 	default:
3124 		ret = -EINVAL;
3125 	}
3126 	if (ret != 0) {
3127 		DPAA_SEC_ERR("failed to configure session parameters");
3128 		/* Return session to mempool */
3129 		rte_mempool_put(mempool, sess_private_data);
3130 		return ret;
3131 	}
3132 
3133 	set_sec_session_private_data(sess, sess_private_data);
3134 
3135 	return ret;
3136 }
3137 
3138 /** Clear the memory of session so it doesn't leave key material behind */
3139 static int
3140 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3141 		struct rte_security_session *sess)
3142 {
3143 	PMD_INIT_FUNC_TRACE();
3144 	void *sess_priv = get_sec_session_private_data(sess);
3145 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3146 
3147 	if (sess_priv) {
3148 		free_session_memory((struct rte_cryptodev *)dev, s);
3149 		set_sec_session_private_data(sess, NULL);
3150 	}
3151 	return 0;
3152 }
3153 #endif
3154 static int
3155 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3156 		       struct rte_cryptodev_config *config __rte_unused)
3157 {
3158 	PMD_INIT_FUNC_TRACE();
3159 
3160 	return 0;
3161 }
3162 
3163 static int
3164 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3165 {
3166 	PMD_INIT_FUNC_TRACE();
3167 	return 0;
3168 }
3169 
3170 static void
3171 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3172 {
3173 	PMD_INIT_FUNC_TRACE();
3174 }
3175 
3176 static int
3177 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3178 {
3179 	PMD_INIT_FUNC_TRACE();
3180 
3181 	if (dev == NULL)
3182 		return -ENODEV;
3183 
3184 	return 0;
3185 }
3186 
3187 static void
3188 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3189 		       struct rte_cryptodev_info *info)
3190 {
3191 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3192 
3193 	PMD_INIT_FUNC_TRACE();
3194 	if (info != NULL) {
3195 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3196 		info->feature_flags = dev->feature_flags;
3197 		info->capabilities = dpaa_sec_capabilities;
3198 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3199 		info->driver_id = cryptodev_driver_id;
3200 	}
3201 }
3202 
3203 static enum qman_cb_dqrr_result
3204 dpaa_sec_process_parallel_event(void *event,
3205 			struct qman_portal *qm __always_unused,
3206 			struct qman_fq *outq,
3207 			const struct qm_dqrr_entry *dqrr,
3208 			void **bufs)
3209 {
3210 	const struct qm_fd *fd;
3211 	struct dpaa_sec_job *job;
3212 	struct dpaa_sec_op_ctx *ctx;
3213 	struct rte_event *ev = (struct rte_event *)event;
3214 
3215 	fd = &dqrr->fd;
3216 
3217 	/* The SG table is embedded in an op ctx:
3218 	 * sg[0] is the output entry,
3219 	 * sg[1] is the input entry.
3220 	 */
3221 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3222 
3223 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3224 	ctx->fd_status = fd->status;
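	/* Protocol (IPsec/PDCP) offload can change the packet length, so
	 * refresh the mbuf lengths from the output SG entry.
	 */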
3225 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3226 		struct qm_sg_entry *sg_out;
3227 		uint32_t len;
3228 
3229 		sg_out = &job->sg[0];
3230 		hw_sg_to_cpu(sg_out);
3231 		len = sg_out->length;
3232 		ctx->op->sym->m_src->pkt_len = len;
3233 		ctx->op->sym->m_src->data_len = len;
3234 	}
3235 	if (!ctx->fd_status) {
3236 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3237 	} else {
3238 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3239 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3240 	}
3241 	ev->event_ptr = (void *)ctx->op;
3242 
3243 	ev->flow_id = outq->ev.flow_id;
3244 	ev->sub_event_type = outq->ev.sub_event_type;
3245 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3246 	ev->op = RTE_EVENT_OP_NEW;
3247 	ev->sched_type = outq->ev.sched_type;
3248 	ev->queue_id = outq->ev.queue_id;
3249 	ev->priority = outq->ev.priority;
3250 	*bufs = (void *)ctx->op;
3251 
3252 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3253 
3254 	return qman_cb_dqrr_consume;
3255 }
3256 
3257 static enum qman_cb_dqrr_result
3258 dpaa_sec_process_atomic_event(void *event,
3259 			struct qman_portal *qm __rte_unused,
3260 			struct qman_fq *outq,
3261 			const struct qm_dqrr_entry *dqrr,
3262 			void **bufs)
3263 {
3264 	u8 index;
3265 	const struct qm_fd *fd;
3266 	struct dpaa_sec_job *job;
3267 	struct dpaa_sec_op_ctx *ctx;
3268 	struct rte_event *ev = (struct rte_event *)event;
3269 
3270 	fd = &dqrr->fd;
3271 
3272 	/* The SG table is embedded in an op ctx:
3273 	 * sg[0] is the output entry,
3274 	 * sg[1] is the input entry.
3275 	 */
3276 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3277 
3278 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3279 	ctx->fd_status = fd->status;
3280 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3281 		struct qm_sg_entry *sg_out;
3282 		uint32_t len;
3283 
3284 		sg_out = &job->sg[0];
3285 		hw_sg_to_cpu(sg_out);
3286 		len = sg_out->length;
3287 		ctx->op->sym->m_src->pkt_len = len;
3288 		ctx->op->sym->m_src->data_len = len;
3289 	}
3290 	if (!ctx->fd_status) {
3291 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3292 	} else {
3293 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3294 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3295 	}
3296 	ev->event_ptr = (void *)ctx->op;
3297 	ev->flow_id = outq->ev.flow_id;
3298 	ev->sub_event_type = outq->ev.sub_event_type;
3299 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3300 	ev->op = RTE_EVENT_OP_NEW;
3301 	ev->sched_type = outq->ev.sched_type;
3302 	ev->queue_id = outq->ev.queue_id;
3303 	ev->priority = outq->ev.priority;
3304 
3305 	/* Save active dqrr entries */
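	/* DQRR entries are 64 bytes, so the ring index is derived from the
	 * entry address; the entry stays held (qman_cb_dqrr_defer) until the
	 * application completes the atomic event.
	 */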
3306 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3307 	DPAA_PER_LCORE_DQRR_SIZE++;
3308 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3309 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3310 	ev->impl_opaque = index + 1;
3311 	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3312 	*bufs = (void *)ctx->op;
3313 
3314 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3315 
3316 	return qman_cb_dqrr_defer;
3317 }
3318 
3319 int
3320 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3321 		int qp_id,
3322 		uint16_t ch_id,
3323 		const struct rte_event *event)
3324 {
3325 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3326 	struct qm_mcc_initfq opts = {0};
3327 
3328 	int ret;
3329 
3330 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3331 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3332 	opts.fqd.dest.channel = ch_id;
3333 
3334 	switch (event->sched_type) {
3335 	case RTE_SCHED_TYPE_ATOMIC:
3336 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3337 		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3338 		 * configuration with HOLD_ACTIVE setting
3339 		 */
3340 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3341 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3342 		break;
3343 	case RTE_SCHED_TYPE_ORDERED:
3344 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3345 		return -ENOTSUP;
3346 	default:
3347 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3348 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3349 		break;
3350 	}
3351 
3352 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3353 	if (unlikely(ret)) {
3354 		DPAA_SEC_ERR("unable to init caam source fq!");
3355 		return ret;
3356 	}
3357 
3358 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3359 
3360 	return 0;
3361 }
3362 
3363 int
3364 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3365 			int qp_id)
3366 {
3367 	struct qm_mcc_initfq opts = {0};
3368 	int ret;
3369 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3370 
3371 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3372 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3373 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3374 	qp->outq.cb.ern  = ern_sec_fq_handler;
3375 	qman_retire_fq(&qp->outq, NULL);
3376 	qman_oos_fq(&qp->outq);
3377 	ret = qman_init_fq(&qp->outq, 0, &opts);
3378 	if (ret)
3379 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3380 	qp->outq.cb.dqrr = NULL;
3381 
3382 	return ret;
3383 }
3384 
3385 static struct rte_cryptodev_ops crypto_ops = {
3386 	.dev_configure	      = dpaa_sec_dev_configure,
3387 	.dev_start	      = dpaa_sec_dev_start,
3388 	.dev_stop	      = dpaa_sec_dev_stop,
3389 	.dev_close	      = dpaa_sec_dev_close,
3390 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3391 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3392 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3393 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3394 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3395 	.sym_session_clear        = dpaa_sec_sym_session_clear
3396 };
3397 
3398 #ifdef RTE_LIB_SECURITY
3399 static const struct rte_security_capability *
3400 dpaa_sec_capabilities_get(void *device __rte_unused)
3401 {
3402 	return dpaa_sec_security_cap;
3403 }
3404 
3405 static const struct rte_security_ops dpaa_sec_security_ops = {
3406 	.session_create = dpaa_sec_security_session_create,
3407 	.session_update = NULL,
3408 	.session_stats_get = NULL,
3409 	.session_destroy = dpaa_sec_security_session_destroy,
3410 	.set_pkt_metadata = NULL,
3411 	.capabilities_get = dpaa_sec_capabilities_get
3412 };
3413 #endif
3414 static int
3415 dpaa_sec_uninit(struct rte_cryptodev *dev)
3416 {
3417 	struct dpaa_sec_dev_private *internals;
3418 
3419 	if (dev == NULL)
3420 		return -ENODEV;
3421 
3422 	internals = dev->data->dev_private;
3423 	rte_free(dev->security_ctx);
3424 
3425 	rte_free(internals);
3426 
3427 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3428 		      dev->data->name, rte_socket_id());
3429 
3430 	return 0;
3431 }
3432 
3433 static int
3434 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3435 {
3436 	struct dpaa_sec_dev_private *internals;
3437 #ifdef RTE_LIB_SECURITY
3438 	struct rte_security_ctx *security_instance;
3439 #endif
3440 	struct dpaa_sec_qp *qp;
3441 	uint32_t i, flags;
3442 	int ret;
3443 
3444 	PMD_INIT_FUNC_TRACE();
3445 
3446 	cryptodev->driver_id = cryptodev_driver_id;
3447 	cryptodev->dev_ops = &crypto_ops;
3448 
3449 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3450 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3451 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3452 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3453 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3454 			RTE_CRYPTODEV_FF_SECURITY |
3455 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3456 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3457 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3458 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3459 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3460 
3461 	internals = cryptodev->data->dev_private;
3462 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3463 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3464 
3465 	/*
3466 	 * For secondary processes, we don't initialise any further as the
3467 	 * primary has already done this work; only the fast-path function
3468 	 * pointers set above are needed.
3469 	 */
3470 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3471 		DPAA_SEC_WARN("Device already initialized by primary process");
3472 		return 0;
3473 	}
3474 #ifdef RTE_LIB_SECURITY
3475 	/* Initialize security_ctx only for primary process*/
3476 	security_instance = rte_malloc("rte_security_instances_ops",
3477 				sizeof(struct rte_security_ctx), 0);
3478 	if (security_instance == NULL)
3479 		return -ENOMEM;
3480 	security_instance->device = (void *)cryptodev;
3481 	security_instance->ops = &dpaa_sec_security_ops;
3482 	security_instance->sess_cnt = 0;
3483 	cryptodev->security_ctx = security_instance;
3484 #endif
3485 	rte_spinlock_init(&internals->lock);
3486 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3487 		/* init qman fq for queue pair */
3488 		qp = &internals->qps[i];
3489 		ret = dpaa_sec_init_tx(&qp->outq);
3490 		if (ret) {
3491 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3492 			goto init_error;
3493 		}
3494 	}
3495 
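	/* Rx FQs are created towards the SEC DCP portal with dynamically
	 * allocated FQIDs; they stay unscheduled here and are attached to a
	 * session later via dpaa_sec_attach_sess_q().
	 */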
3496 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3497 		QMAN_FQ_FLAG_TO_DCPORTAL;
3498 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3499 		/* create rx qman fq for sessions*/
3500 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3501 		if (unlikely(ret != 0)) {
3502 			DPAA_SEC_ERR("sec qman_create_fq failed");
3503 			goto init_error;
3504 		}
3505 	}
3506 
3507 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3508 	return 0;
3509 
3510 init_error:
3511 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3512 
3513 	rte_free(cryptodev->security_ctx);
3514 	return -EFAULT;
3515 }
3516 
3517 static int
3518 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3519 				struct rte_dpaa_device *dpaa_dev)
3520 {
3521 	struct rte_cryptodev *cryptodev;
3522 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3523 
3524 	int retval;
3525 
3526 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3527 
3528 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3529 	if (cryptodev == NULL)
3530 		return -ENOMEM;
3531 
3532 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3533 		cryptodev->data->dev_private = rte_zmalloc_socket(
3534 					"cryptodev private structure",
3535 					sizeof(struct dpaa_sec_dev_private),
3536 					RTE_CACHE_LINE_SIZE,
3537 					rte_socket_id());
3538 
3539 		if (cryptodev->data->dev_private == NULL)
3540 			rte_panic("Cannot allocate memzone for private "
3541 					"device data");
3542 	}
3543 
3544 	dpaa_dev->crypto_dev = cryptodev;
3545 	cryptodev->device = &dpaa_dev->device;
3546 
3547 	/* init user callbacks */
3548 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3549 
3550 	/* if sec device version is not configured */
3551 	if (!rta_get_sec_era()) {
3552 		const struct device_node *caam_node;
3553 
3554 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3555 			const uint32_t *prop = of_get_property(caam_node,
3556 					"fsl,sec-era",
3557 					NULL);
3558 			if (prop) {
3559 				rta_set_sec_era(
3560 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3561 				break;
3562 			}
3563 		}
3564 	}
3565 
3566 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3567 		retval = rte_dpaa_portal_init((void *)1);
3568 		if (retval) {
3569 			DPAA_SEC_ERR("Unable to initialize portal");
3570 			goto out;
3571 		}
3572 	}
3573 
3574 	/* Invoke PMD device initialization function */
3575 	retval = dpaa_sec_dev_init(cryptodev);
3576 	if (retval == 0)
3577 		return 0;
3578 
3579 	retval = -ENXIO;
3580 out:
3581 	/* In case of error, cleanup is done */
3582 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3583 		rte_free(cryptodev->data->dev_private);
3584 
3585 	rte_cryptodev_pmd_release_device(cryptodev);
3586 
3587 	return retval;
3588 }
3589 
3590 static int
3591 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3592 {
3593 	struct rte_cryptodev *cryptodev;
3594 	int ret;
3595 
3596 	cryptodev = dpaa_dev->crypto_dev;
3597 	if (cryptodev == NULL)
3598 		return -ENODEV;
3599 
3600 	ret = dpaa_sec_uninit(cryptodev);
3601 	if (ret)
3602 		return ret;
3603 
3604 	return rte_cryptodev_pmd_destroy(cryptodev);
3605 }
3606 
3607 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3608 	.drv_type = FSL_DPAA_CRYPTO,
3609 	.driver = {
3610 		.name = "DPAA SEC PMD"
3611 	},
3612 	.probe = cryptodev_dpaa_sec_probe,
3613 	.remove = cryptodev_dpaa_sec_remove,
3614 };
3615 
3616 static struct cryptodev_driver dpaa_sec_crypto_drv;
3617 
3618 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3619 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3620 		cryptodev_driver_id);
3621 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3622