/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (retval || !ctx) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
	 * one call to dcbz_64() clears 64 bytes, so four calls clear all
	 * the SG entries. dpaa_sec_alloc_ctx() is called for each packet,
	 * and dcbz_64() is cheaper than memset() here.
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
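
/*
 * Illustrative sketch (not part of the driver; dpaa_sec_ctx_vtop() is a
 * hypothetical helper): vtop_offset caches the virtual-to-IOVA delta of
 * the ctx allocation, so any address inside the ctx can be translated
 * without another mempool lookup:
 *
 *	static inline rte_iova_t
 *	dpaa_sec_ctx_vtop(struct dpaa_sec_op_ctx *ctx, void *vaddr)
 *	{
 *		return (rte_iova_t)((size_t)vaddr - ctx->vtop_offset);
 *	}
 */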

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
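
/*
 * Usage sketch (hedged; the real call site is in the session attach path,
 * outside this excerpt): the in-FQ carries the shared descriptor address
 * in context_a and the output FQID in context_b, so CAAM knows where to
 * enqueue results:
 *
 *	ret = dpaa_sec_init_rx(sess_inq, rte_dpaa_mem_vtop(&ses->cdb),
 *			       qp->outq.fqid);
 */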

/* Frames are enqueued to in_fq and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (rta_inline_pdcp_query(authdata.algtype,
				cipherdata.algtype,
				ses->pdcp.sn_size,
				ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
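
/*
 * How the rta_inline_query() result above is decoded (worked example,
 * derived from the bit tests in this file): sh_desc[2] reports which of
 * the two queried keys still fit immediately in the shared descriptor.
 * Bit 0 covers the cipher key, bit 1 the auth key:
 *
 *	sh_desc[2] == 0x3  ->  both keys inline (RTA_DATA_IMM)
 *	sh_desc[2] == 0x1  ->  cipher key inline, auth key by pointer
 *	sh_desc[2] == 0x0  ->  both keys referenced through
 *			       rte_dpaa_mem_vtop() (RTA_DATA_PTR)
 */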
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		/* The check must follow the assignments above; alginfo is
		 * zero-initialized, so testing algtype before it is set
		 * could never catch an unsupported algorithm.
		 */
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers we provide the exact
	 * number of buffers and set the QM_VDQCR_EXACT flag.
	 * Without QM_VDQCR_EXACT the hardware can return up to two more
	 * buffers than requested, so in that case we request two fewer.
	 */
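	/*
	 * Worked example (illustrative only): nb_ops = 3 requests exactly
	 * 3 frames with QM_VDQCR_EXACT; nb_ops = 32 requests 30 frames and
	 * may receive up to 32; anything above the portal limit requests
	 * DPAA_MAX_DEQUEUE_NUM_FRAMES - 2.
	 */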
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
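
/*
 * Compound-frame recap for the auth-only jobs above (a summary of the
 * code, not new behaviour): sg[0] is always the output side and sg[1]
 * the input side; sg[1] is an extension entry pointing at sg[2..n]:
 *
 *	cf->sg[0] -> digest (output)
 *	cf->sg[1] -> extension -> [ IV (opt) | data | old digest (decode) ]
 */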

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd->cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}
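			/*
			 * Worked example: auth_hdr_len = 8 and
			 * auth_tail_len = 4 give
			 * fd->cmd = 0x80000000 | (4 << 16) | 8 = 0x80040008.
			 */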

#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
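
/*
 * Application-side sketch (generic cryptodev API, not part of this PMD):
 * the burst handlers above and below are reached through the standard
 * wrappers:
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 */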

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
1943 
1944 /** Release queue pair */
1945 static int
1946 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1947 			    uint16_t qp_id)
1948 {
1949 	struct dpaa_sec_dev_private *internals;
1950 	struct dpaa_sec_qp *qp = NULL;
1951 
1952 	PMD_INIT_FUNC_TRACE();
1953 
1954 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1955 
1956 	internals = dev->data->dev_private;
1957 	if (qp_id >= internals->max_nb_queue_pairs) {
1958 		DPAA_SEC_ERR("Max supported qpid %d",
1959 			     internals->max_nb_queue_pairs);
1960 		return -EINVAL;
1961 	}
1962 
1963 	qp = &internals->qps[qp_id];
1964 	rte_mempool_free(qp->ctx_pool);
1965 	qp->internals = NULL;
1966 	dev->data->queue_pairs[qp_id] = NULL;
1967 
1968 	return 0;
1969 }
1970 
1971 /** Setup a queue pair */
1972 static int
1973 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1974 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1975 		__rte_unused int socket_id)
1976 {
1977 	struct dpaa_sec_dev_private *internals;
1978 	struct dpaa_sec_qp *qp = NULL;
1979 	char str[20];
1980 
1981 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1982 
1983 	internals = dev->data->dev_private;
1984 	if (qp_id >= internals->max_nb_queue_pairs) {
1985 		DPAA_SEC_ERR("Invalid qp_id %u, max supported qpid %d",
1986 			     qp_id, internals->max_nb_queue_pairs);
1987 		return -EINVAL;
1988 	}
1989 
1990 	qp = &internals->qps[qp_id];
1991 	qp->internals = internals;
1992 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1993 			dev->data->dev_id, qp_id);
1994 	if (!qp->ctx_pool) {
1995 		qp->ctx_pool = rte_mempool_create((const char *)str,
1996 							CTX_POOL_NUM_BUFS,
1997 							CTX_POOL_BUF_SIZE,
1998 							CTX_POOL_CACHE_SIZE, 0,
1999 							NULL, NULL, NULL, NULL,
2000 							SOCKET_ID_ANY, 0);
2001 		if (!qp->ctx_pool) {
2002 			DPAA_SEC_ERR("%s create failed\n", str);
2003 			return -ENOMEM;
2004 		}
2005 	} else
2006 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2007 				dev->data->dev_id, qp_id);
2008 	dev->data->queue_pairs[qp_id] = qp;
2009 
2010 	return 0;
2011 }
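
/*
 * Illustrative only: queue pairs are configured through the standard
 * cryptodev API; this PMD ignores qp_conf and socket_id, but typical
 * (hypothetical) application values are shown.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, SOCKET_ID_ANY);
 */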
2012 
2013 /** Return the size of the session structure */
2014 static unsigned int
2015 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2016 {
2017 	PMD_INIT_FUNC_TRACE();
2018 
2019 	return sizeof(dpaa_sec_session);
2020 }
2021 
2022 static int
2023 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2024 		     struct rte_crypto_sym_xform *xform,
2025 		     dpaa_sec_session *session)
2026 {
2027 	session->ctxt = DPAA_SEC_CIPHER;
2028 	session->cipher_alg = xform->cipher.algo;
2029 	session->iv.length = xform->cipher.iv.length;
2030 	session->iv.offset = xform->cipher.iv.offset;
2031 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2032 					       RTE_CACHE_LINE_SIZE);
2033 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2034 		DPAA_SEC_ERR("No Memory for cipher key");
2035 		return -ENOMEM;
2036 	}
2037 	session->cipher_key.length = xform->cipher.key.length;
2038 
2039 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2040 	       xform->cipher.key.length);
2041 	switch (xform->cipher.algo) {
2042 	case RTE_CRYPTO_CIPHER_AES_CBC:
2043 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2044 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2045 		break;
2046 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2047 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2048 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2049 		break;
2050 	case RTE_CRYPTO_CIPHER_AES_CTR:
2051 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2052 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2053 		break;
2054 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2055 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2056 		break;
2057 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2058 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2059 		break;
2060 	default:
2061 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2062 			      xform->cipher.algo);
2063 		return -ENOTSUP;
2064 	}
2065 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2066 			DIR_ENC : DIR_DEC;
2067 
2068 	return 0;
2069 }
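
/*
 * Illustrative only: the cipher-only xform shape consumed by
 * dpaa_sec_cipher_init() above (key, iv_offset and their lengths are
 * application-provided and hypothetical).
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 */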
2070 
2071 static int
2072 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2073 		   struct rte_crypto_sym_xform *xform,
2074 		   dpaa_sec_session *session)
2075 {
2076 	session->ctxt = DPAA_SEC_AUTH;
2077 	session->auth_alg = xform->auth.algo;
2078 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2079 					     RTE_CACHE_LINE_SIZE);
2080 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2081 		DPAA_SEC_ERR("No Memory for auth key");
2082 		return -ENOMEM;
2083 	}
2084 	session->auth_key.length = xform->auth.key.length;
2085 	session->digest_length = xform->auth.digest_length;
2086 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2087 		session->iv.offset = xform->auth.iv.offset;
2088 		session->iv.length = xform->auth.iv.length;
2089 	}
2090 
2091 	memcpy(session->auth_key.data, xform->auth.key.data,
2092 	       xform->auth.key.length);
2093 
2094 	switch (xform->auth.algo) {
2095 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2096 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2097 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2098 		break;
2099 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2100 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2101 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2102 		break;
2103 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2104 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2105 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2106 		break;
2107 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2108 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2109 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2110 		break;
2111 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2112 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2113 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2114 		break;
2115 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2116 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2117 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2118 		break;
2119 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2120 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2121 		session->auth_key.algmode = OP_ALG_AAI_F9;
2122 		break;
2123 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2124 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2125 		session->auth_key.algmode = OP_ALG_AAI_F9;
2126 		break;
2127 	default:
2128 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2129 			      xform->auth.algo);
2130 		return -ENOTSUP;
2131 	}
2132 
2133 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2134 			DIR_ENC : DIR_DEC;
2135 
2136 	return 0;
2137 }
2138 
2139 static int
2140 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2141 		   struct rte_crypto_sym_xform *xform,
2142 		   dpaa_sec_session *session)
2143 {
2145 	struct rte_crypto_cipher_xform *cipher_xform;
2146 	struct rte_crypto_auth_xform *auth_xform;
2147 
2148 	session->ctxt = DPAA_SEC_CIPHER_HASH;
2149 	if (session->auth_cipher_text) {
2150 		cipher_xform = &xform->cipher;
2151 		auth_xform = &xform->next->auth;
2152 	} else {
2153 		cipher_xform = &xform->next->cipher;
2154 		auth_xform = &xform->auth;
2155 	}
2156 
2157 	/* Set IV parameters */
2158 	session->iv.offset = cipher_xform->iv.offset;
2159 	session->iv.length = cipher_xform->iv.length;
2160 
2161 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2162 					       RTE_CACHE_LINE_SIZE);
2163 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2164 		DPAA_SEC_ERR("No Memory for cipher key");
2165 		return -ENOMEM;
2166 	}
2167 	session->cipher_key.length = cipher_xform->key.length;
2168 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2169 					     RTE_CACHE_LINE_SIZE);
2170 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2171 		DPAA_SEC_ERR("No Memory for auth key");
2172 		return -ENOMEM;
2173 	}
2174 	session->auth_key.length = auth_xform->key.length;
2175 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2176 	       cipher_xform->key.length);
2177 	memcpy(session->auth_key.data, auth_xform->key.data,
2178 	       auth_xform->key.length);
2179 
2180 	session->digest_length = auth_xform->digest_length;
2181 	session->auth_alg = auth_xform->algo;
2182 
2183 	switch (auth_xform->algo) {
2184 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2185 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2186 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2187 		break;
2188 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2189 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2190 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2191 		break;
2192 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2193 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2194 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2195 		break;
2196 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2197 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2198 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2199 		break;
2200 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2201 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2202 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2203 		break;
2204 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2205 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2206 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2207 		break;
2208 	default:
2209 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2210 			      auth_xform->algo);
2211 		return -ENOTSUP;
2212 	}
2213 
2214 	session->cipher_alg = cipher_xform->algo;
2215 
2216 	switch (cipher_xform->algo) {
2217 	case RTE_CRYPTO_CIPHER_AES_CBC:
2218 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2219 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2220 		break;
2221 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2222 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2223 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2224 		break;
2225 	case RTE_CRYPTO_CIPHER_AES_CTR:
2226 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2227 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2228 		break;
2229 	default:
2230 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2231 			      cipher_xform->algo);
2232 		return -ENOTSUP;
2233 	}
2234 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2235 				DIR_ENC : DIR_DEC;
2236 	return 0;
2237 }
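
/*
 * Illustrative only: a chained cipher+auth session is built from two
 * linked xforms; for encryption the cipher xform comes first and
 * auth_cipher_text is set by the caller (all values hypothetical).
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = auth_key, .length = 20 },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 */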
2238 
2239 static int
2240 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2241 		   struct rte_crypto_sym_xform *xform,
2242 		   dpaa_sec_session *session)
2243 {
2244 	session->aead_alg = xform->aead.algo;
2245 	session->ctxt = DPAA_SEC_AEAD;
2246 	session->iv.length = xform->aead.iv.length;
2247 	session->iv.offset = xform->aead.iv.offset;
2248 	session->auth_only_len = xform->aead.aad_length;
2249 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2250 					     RTE_CACHE_LINE_SIZE);
2251 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2252 		DPAA_SEC_ERR("No Memory for aead key");
2253 		return -ENOMEM;
2254 	}
2255 	session->aead_key.length = xform->aead.key.length;
2256 	session->digest_length = xform->aead.digest_length;
2257 
2258 	memcpy(session->aead_key.data, xform->aead.key.data,
2259 	       xform->aead.key.length);
2260 
2261 	switch (session->aead_alg) {
2262 	case RTE_CRYPTO_AEAD_AES_GCM:
2263 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2264 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2265 		break;
2266 	default:
2267 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2268 		return -ENOTSUP;
2269 	}
2270 
2271 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2272 			DIR_ENC : DIR_DEC;
2273 
2274 	return 0;
2275 }
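
/*
 * Illustrative only: the AEAD xform shape consumed by
 * dpaa_sec_aead_init() above; AAD is carried per-op while its length is
 * fixed here at session time (values hypothetical).
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.next = NULL,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 12 },
 *			.aad_length = 16,
 *			.digest_length = 16,
 *		},
 *	};
 */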
2276 
2277 static struct qman_fq *
2278 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2279 {
2280 	unsigned int i;
2281 
2282 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2283 		if (qi->inq_attach[i] == 0) {
2284 			qi->inq_attach[i] = 1;
2285 			return &qi->inq[i];
2286 		}
2287 	}
2288 	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2289 
2290 	return NULL;
2291 }
2292 
2293 static int
2294 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2295 {
2296 	unsigned int i;
2297 
2298 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2299 		if (&qi->inq[i] == fq) {
2300 			if (qman_retire_fq(fq, NULL) != 0)
2301 				DPAA_SEC_DEBUG("Queue is not retired\n");
2302 			qman_oos_fq(fq);
2303 			qi->inq_attach[i] = 0;
2304 			return 0;
2305 		}
2306 	}
2307 	return -1;
2308 }
2309 
2310 static int
2311 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2312 {
2313 	int ret;
2314 
2315 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2316 	ret = dpaa_sec_prep_cdb(sess);
2317 	if (ret) {
2318 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2319 		return ret;
2320 	}
2321 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2322 		ret = rte_dpaa_portal_init((void *)0);
2323 		if (ret) {
2324 			DPAA_SEC_ERR("Failure in affining portal");
2325 			return ret;
2326 		}
2327 	}
2328 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2329 			       rte_dpaa_mem_vtop(&sess->cdb),
2330 			       qman_fq_fqid(&qp->outq));
2331 	if (ret)
2332 		DPAA_SEC_ERR("Unable to init sec queue");
2333 
2334 	return ret;
2335 }
2336 
2337 static inline void
2338 free_session_data(dpaa_sec_session *s)
2339 {
2340 	if (is_aead(s))
2341 		rte_free(s->aead_key.data);
2342 	else {
2343 		rte_free(s->auth_key.data);
2344 		rte_free(s->cipher_key.data);
2345 	}
2346 	memset(s, 0, sizeof(dpaa_sec_session));
2347 }
2348 
2349 static int
2350 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2351 			    struct rte_crypto_sym_xform *xform,	void *sess)
2352 {
2353 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2354 	dpaa_sec_session *session = sess;
2355 	uint32_t i;
2356 	int ret;
2357 
2358 	PMD_INIT_FUNC_TRACE();
2359 
2360 	if (unlikely(sess == NULL)) {
2361 		DPAA_SEC_ERR("invalid session struct");
2362 		return -EINVAL;
2363 	}
2364 	memset(session, 0, sizeof(dpaa_sec_session));
2365 
2366 	/* Default IV length = 0 */
2367 	session->iv.length = 0;
2368 
2369 	/* Cipher Only */
2370 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2371 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2372 		ret = dpaa_sec_cipher_init(dev, xform, session);
2373 
2374 	/* Authentication Only */
2375 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2376 		   xform->next == NULL) {
2377 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2378 		session->ctxt = DPAA_SEC_AUTH;
2379 		ret = dpaa_sec_auth_init(dev, xform, session);
2380 
2381 	/* Cipher then Authenticate */
2382 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2383 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2384 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2385 			session->auth_cipher_text = 1;
2386 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2387 				ret = dpaa_sec_auth_init(dev, xform, session);
2388 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2389 				ret = dpaa_sec_cipher_init(dev, xform, session);
2390 			else
2391 				ret = dpaa_sec_chain_init(dev, xform, session);
2392 		} else {
2393 			DPAA_SEC_ERR("Not supported: Cipher decrypt then Auth");
2394 			return -ENOTSUP;
2395 		}
2396 	/* Authenticate then Cipher */
2397 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2398 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2399 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2400 			session->auth_cipher_text = 0;
2401 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2402 				ret = dpaa_sec_cipher_init(dev, xform, session);
2403 			else if (xform->next->cipher.algo
2404 					== RTE_CRYPTO_CIPHER_NULL)
2405 				ret = dpaa_sec_auth_init(dev, xform, session);
2406 			else
2407 				ret = dpaa_sec_chain_init(dev, xform, session);
2408 		} else {
2409 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2410 			return -ENOTSUP;
2411 		}
2412 
2413 	/* AEAD operation for AES-GCM kind of Algorithms */
2414 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2415 		   xform->next == NULL) {
2416 		ret = dpaa_sec_aead_init(dev, xform, session);
2417 
2418 	} else {
2419 		DPAA_SEC_ERR("Invalid crypto type");
2420 		return -EINVAL;
2421 	}
2422 	if (ret) {
2423 		DPAA_SEC_ERR("unable to init session");
2424 		goto err1;
2425 	}
2426 
2427 	rte_spinlock_lock(&internals->lock);
2428 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2429 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2430 		if (session->inq[i] == NULL) {
2431 			DPAA_SEC_ERR("unable to attach sec queue");
2432 			rte_spinlock_unlock(&internals->lock);
2433 			ret = -EBUSY;
2434 			goto err1;
2435 		}
2436 	}
2437 	rte_spinlock_unlock(&internals->lock);
2438 
2439 	return 0;
2440 
2441 err1:
2442 	free_session_data(session);
2443 	return ret;
2444 }
2445 
2446 static int
2447 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2448 		struct rte_crypto_sym_xform *xform,
2449 		struct rte_cryptodev_sym_session *sess,
2450 		struct rte_mempool *mempool)
2451 {
2452 	void *sess_private_data;
2453 	int ret;
2454 
2455 	PMD_INIT_FUNC_TRACE();
2456 
2457 	if (rte_mempool_get(mempool, &sess_private_data)) {
2458 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2459 		return -ENOMEM;
2460 	}
2461 
2462 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2463 	if (ret != 0) {
2464 		DPAA_SEC_ERR("failed to configure session parameters");
2465 
2466 		/* Return session to mempool */
2467 		rte_mempool_put(mempool, sess_private_data);
2468 		return ret;
2469 	}
2470 
2471 	set_sym_session_private_data(sess, dev->driver_id,
2472 			sess_private_data);
2473 
2475 	return 0;
2476 }
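
/*
 * Illustrative only: the two-step application flow that reaches the
 * configure hook above; mempool and xform names are hypothetical.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess != NULL)
 *		rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					       sess_priv_mp);
 */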
2477 
2478 static inline void
2479 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2480 {
2481 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2482 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2483 	uint8_t i;
2484 
2485 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2486 		if (s->inq[i])
2487 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2488 		s->inq[i] = NULL;
2489 		s->qp[i] = NULL;
2490 	}
2491 	free_session_data(s);
2492 	rte_mempool_put(sess_mp, (void *)s);
2493 }
2494 
2495 /** Clear the session memory so it doesn't leave key material behind */
2496 static void
2497 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2498 		struct rte_cryptodev_sym_session *sess)
2499 {
2500 	PMD_INIT_FUNC_TRACE();
2501 	uint8_t index = dev->driver_id;
2502 	void *sess_priv = get_sym_session_private_data(sess, index);
2503 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2504 
2505 	if (sess_priv) {
2506 		free_session_memory(dev, s);
2507 		set_sym_session_private_data(sess, index, NULL);
2508 	}
2509 }
2510 
2511 #ifdef RTE_LIB_SECURITY
2512 static int
2513 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2514 			struct rte_security_ipsec_xform *ipsec_xform,
2515 			dpaa_sec_session *session)
2516 {
2517 	PMD_INIT_FUNC_TRACE();
2518 
2519 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2520 					       RTE_CACHE_LINE_SIZE);
2521 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2522 		DPAA_SEC_ERR("No Memory for aead key");
2523 		return -ENOMEM;
2524 	}
2525 	memcpy(session->aead_key.data, aead_xform->key.data,
2526 	       aead_xform->key.length);
2527 
2528 	session->digest_length = aead_xform->digest_length;
2529 	session->aead_key.length = aead_xform->key.length;
2530 
2531 	switch (aead_xform->algo) {
2532 	case RTE_CRYPTO_AEAD_AES_GCM:
2533 		switch (session->digest_length) {
2534 		case 8:
2535 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2536 			break;
2537 		case 12:
2538 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2539 			break;
2540 		case 16:
2541 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2542 			break;
2543 		default:
2544 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2545 				     session->digest_length);
2546 			return -EINVAL;
2547 		}
2548 		if (session->dir == DIR_ENC) {
2549 			memcpy(session->encap_pdb.gcm.salt,
2550 				(uint8_t *)&(ipsec_xform->salt), 4);
2551 		} else {
2552 			memcpy(session->decap_pdb.gcm.salt,
2553 				(uint8_t *)&(ipsec_xform->salt), 4);
2554 		}
2555 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2556 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2557 		break;
2558 	default:
2559 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2560 			      aead_xform->algo);
2561 		return -ENOTSUP;
2562 	}
2563 	return 0;
2564 }
2565 
2566 static int
2567 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2568 	struct rte_crypto_auth_xform *auth_xform,
2569 	struct rte_security_ipsec_xform *ipsec_xform,
2570 	dpaa_sec_session *session)
2571 {
2572 	if (cipher_xform) {
2573 		session->cipher_key.data = rte_zmalloc(NULL,
2574 						       cipher_xform->key.length,
2575 						       RTE_CACHE_LINE_SIZE);
2576 		if (session->cipher_key.data == NULL &&
2577 				cipher_xform->key.length > 0) {
2578 			DPAA_SEC_ERR("No Memory for cipher key");
2579 			return -ENOMEM;
2580 		}
2581 
2582 		session->cipher_key.length = cipher_xform->key.length;
2583 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2584 				cipher_xform->key.length);
2585 		session->cipher_alg = cipher_xform->algo;
2586 	} else {
2587 		session->cipher_key.data = NULL;
2588 		session->cipher_key.length = 0;
2589 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2590 	}
2591 
2592 	if (auth_xform) {
2593 		session->auth_key.data = rte_zmalloc(NULL,
2594 						auth_xform->key.length,
2595 						RTE_CACHE_LINE_SIZE);
2596 		if (session->auth_key.data == NULL &&
2597 				auth_xform->key.length > 0) {
2598 			DPAA_SEC_ERR("No Memory for auth key");
2599 			return -ENOMEM;
2600 		}
2601 		session->auth_key.length = auth_xform->key.length;
2602 		memcpy(session->auth_key.data, auth_xform->key.data,
2603 				auth_xform->key.length);
2604 		session->auth_alg = auth_xform->algo;
2605 		session->digest_length = auth_xform->digest_length;
2606 	} else {
2607 		session->auth_key.data = NULL;
2608 		session->auth_key.length = 0;
2609 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2610 	}
2611 
2612 	switch (session->auth_alg) {
2613 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2614 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2615 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2616 		break;
2617 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2618 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2619 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2620 		break;
2621 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2622 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2623 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2624 		if (session->digest_length != 16)
2625 			DPAA_SEC_WARN(
2626 			"Using sha256-hmac with a non-standard truncated length; "
2627 			"it will not work with lookaside proto");
2628 		break;
2629 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2630 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2631 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2632 		break;
2633 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2634 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2635 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2636 		break;
2637 	case RTE_CRYPTO_AUTH_AES_CMAC:
2638 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2639 		break;
2640 	case RTE_CRYPTO_AUTH_NULL:
2641 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2642 		break;
2643 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2644 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2645 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2646 	case RTE_CRYPTO_AUTH_SHA1:
2647 	case RTE_CRYPTO_AUTH_SHA256:
2648 	case RTE_CRYPTO_AUTH_SHA512:
2649 	case RTE_CRYPTO_AUTH_SHA224:
2650 	case RTE_CRYPTO_AUTH_SHA384:
2651 	case RTE_CRYPTO_AUTH_MD5:
2652 	case RTE_CRYPTO_AUTH_AES_GMAC:
2653 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2654 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2655 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2656 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2657 			      session->auth_alg);
2658 		return -ENOTSUP;
2659 	default:
2660 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2661 			      session->auth_alg);
2662 		return -ENOTSUP;
2663 	}
2664 
2665 	switch (session->cipher_alg) {
2666 	case RTE_CRYPTO_CIPHER_AES_CBC:
2667 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2668 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2669 		break;
2670 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2671 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2672 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2673 		break;
2674 	case RTE_CRYPTO_CIPHER_AES_CTR:
2675 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2676 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2677 		if (session->dir == DIR_ENC) {
2678 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2679 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2680 		} else {
2681 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2682 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2683 		}
2684 		break;
2685 	case RTE_CRYPTO_CIPHER_NULL:
2686 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2687 		break;
2688 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2689 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2690 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2691 	case RTE_CRYPTO_CIPHER_AES_ECB:
2692 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2693 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2694 			      session->cipher_alg);
2695 		return -ENOTSUP;
2696 	default:
2697 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2698 			      session->cipher_alg);
2699 		return -ENOTSUP;
2700 	}
2701 
2702 	return 0;
2703 }
2704 
2705 static int
2706 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2707 			   struct rte_security_session_conf *conf,
2708 			   void *sess)
2709 {
2710 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2711 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2712 	struct rte_crypto_auth_xform *auth_xform = NULL;
2713 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2714 	struct rte_crypto_aead_xform *aead_xform = NULL;
2715 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2716 	uint32_t i;
2717 	int ret;
2718 
2719 	PMD_INIT_FUNC_TRACE();
2720 
2721 	memset(session, 0, sizeof(dpaa_sec_session));
2722 	session->proto_alg = conf->protocol;
2723 	session->ctxt = DPAA_SEC_IPSEC;
2724 
2725 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2726 		session->dir = DIR_ENC;
2727 	else
2728 		session->dir = DIR_DEC;
2729 
2730 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2731 		cipher_xform = &conf->crypto_xform->cipher;
2732 		if (conf->crypto_xform->next)
2733 			auth_xform = &conf->crypto_xform->next->auth;
2734 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2735 					ipsec_xform, session);
2736 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2737 		auth_xform = &conf->crypto_xform->auth;
2738 		if (conf->crypto_xform->next)
2739 			cipher_xform = &conf->crypto_xform->next->cipher;
2740 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2741 					ipsec_xform, session);
2742 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2743 		aead_xform = &conf->crypto_xform->aead;
2744 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2745 					ipsec_xform, session);
2746 	} else {
2747 		DPAA_SEC_ERR("XFORM not specified");
2748 		ret = -EINVAL;
2749 		goto out;
2750 	}
2751 	if (ret) {
2752 		DPAA_SEC_ERR("Failed to process xform");
2753 		goto out;
2754 	}
2755 
2756 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2757 		if (ipsec_xform->tunnel.type ==
2758 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2759 			session->ip4_hdr.ip_v = IPVERSION;
2760 			session->ip4_hdr.ip_hl = 5;
2761 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2762 						sizeof(session->ip4_hdr));
2763 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2764 			session->ip4_hdr.ip_id = 0;
2765 			session->ip4_hdr.ip_off = 0;
2766 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2767 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2768 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2769 					IPPROTO_ESP : IPPROTO_AH;
2770 			session->ip4_hdr.ip_sum = 0;
2771 			session->ip4_hdr.ip_src =
2772 					ipsec_xform->tunnel.ipv4.src_ip;
2773 			session->ip4_hdr.ip_dst =
2774 					ipsec_xform->tunnel.ipv4.dst_ip;
2775 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2776 						(void *)&session->ip4_hdr,
2777 						sizeof(struct ip));
2778 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2779 		} else if (ipsec_xform->tunnel.type ==
2780 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2781 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2782 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2783 				((ipsec_xform->tunnel.ipv6.dscp <<
2784 					RTE_IPV6_HDR_TC_SHIFT) &
2785 					RTE_IPV6_HDR_TC_MASK) |
2786 				((ipsec_xform->tunnel.ipv6.flabel <<
2787 					RTE_IPV6_HDR_FL_SHIFT) &
2788 					RTE_IPV6_HDR_FL_MASK));
2789 			/* Payload length will be updated by HW */
2790 			session->ip6_hdr.payload_len = 0;
2791 			session->ip6_hdr.hop_limits =
2792 					ipsec_xform->tunnel.ipv6.hlimit;
2793 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2794 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2795 					IPPROTO_ESP : IPPROTO_AH;
2796 			memcpy(&session->ip6_hdr.src_addr,
2797 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2798 			memcpy(&session->ip6_hdr.dst_addr,
2799 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2800 			session->encap_pdb.ip_hdr_len =
2801 						sizeof(struct rte_ipv6_hdr);
2802 		}
2803 		session->encap_pdb.options =
2804 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2805 			PDBOPTS_ESP_OIHI_PDB_INL |
2806 			PDBOPTS_ESP_IVSRC |
2807 			PDBHMO_ESP_ENCAP_DTTL |
2808 			PDBHMO_ESP_SNR;
2809 		if (ipsec_xform->options.esn)
2810 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2811 		session->encap_pdb.spi = ipsec_xform->spi;
2812 
2813 	} else if (ipsec_xform->direction ==
2814 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2815 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2816 			session->decap_pdb.options = sizeof(struct ip) << 16;
2817 		else
2818 			session->decap_pdb.options =
2819 					sizeof(struct rte_ipv6_hdr) << 16;
2820 		if (ipsec_xform->options.esn)
2821 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2822 		if (ipsec_xform->replay_win_sz) {
2823 			uint32_t win_sz;
2824 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2825 
2826 			switch (win_sz) {
2827 			case 1:
2828 			case 2:
2829 			case 4:
2830 			case 8:
2831 			case 16:
2832 			case 32:
2833 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2834 				break;
2835 			case 64:
2836 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2837 				break;
2838 			default:
2839 				session->decap_pdb.options |=
2840 							PDBOPTS_ESP_ARS128;
2841 			}
2842 		}
2843 	} else {
		ret = -EINVAL;
2844 		goto out;
	}
2845 	rte_spinlock_lock(&internals->lock);
2846 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2847 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2848 		if (session->inq[i] == NULL) {
2849 			DPAA_SEC_ERR("unable to attach sec queue");
2850 			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
2851 			goto out;
2852 		}
2853 	}
2854 	rte_spinlock_unlock(&internals->lock);
2855 
2856 	return 0;
2857 out:
2858 	free_session_data(session);
2859 	return ret;
2860 }
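
/*
 * Illustrative only: an egress ESP tunnel session conf of the shape
 * consumed by dpaa_sec_set_ipsec_session() above (values hypothetical).
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &cipher_xf,
 *	};
 */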
2861 
2862 static int
2863 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2864 			  struct rte_security_session_conf *conf,
2865 			  void *sess)
2866 {
2867 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2868 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2869 	struct rte_crypto_auth_xform *auth_xform = NULL;
2870 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2871 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2872 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2873 	uint32_t i;
2874 	int ret;
2875 
2876 	PMD_INIT_FUNC_TRACE();
2877 
2878 	memset(session, 0, sizeof(dpaa_sec_session));
2879 
2880 	/* find xfrm types */
2881 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2882 		cipher_xform = &xform->cipher;
2883 		if (xform->next != NULL)
2884 			auth_xform = &xform->next->auth;
2885 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2886 		auth_xform = &xform->auth;
2887 		if (xform->next != NULL)
2888 			cipher_xform = &xform->next->cipher;
2889 	} else {
2890 		DPAA_SEC_ERR("Invalid crypto type");
2891 		return -EINVAL;
2892 	}
2893 
2894 	session->proto_alg = conf->protocol;
2895 	session->ctxt = DPAA_SEC_PDCP;
2896 
2897 	if (cipher_xform) {
2898 		switch (cipher_xform->algo) {
2899 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2900 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2901 			break;
2902 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2903 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2904 			break;
2905 		case RTE_CRYPTO_CIPHER_AES_CTR:
2906 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2907 			break;
2908 		case RTE_CRYPTO_CIPHER_NULL:
2909 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2910 			break;
2911 		default:
2912 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2913 				      cipher_xform->algo);
2914 			return -EINVAL;
2915 		}
2916 
2917 		session->cipher_key.data = rte_zmalloc(NULL,
2918 					       cipher_xform->key.length,
2919 					       RTE_CACHE_LINE_SIZE);
2920 		if (session->cipher_key.data == NULL &&
2921 				cipher_xform->key.length > 0) {
2922 			DPAA_SEC_ERR("No Memory for cipher key");
2923 			return -ENOMEM;
2924 		}
2925 		session->cipher_key.length = cipher_xform->key.length;
2926 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2927 			cipher_xform->key.length);
2928 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2929 					DIR_ENC : DIR_DEC;
2930 		session->cipher_alg = cipher_xform->algo;
2931 	} else {
2932 		session->cipher_key.data = NULL;
2933 		session->cipher_key.length = 0;
2934 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2935 		session->dir = DIR_ENC;
2936 	}
2937 
2938 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2939 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2940 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2941 			DPAA_SEC_ERR(
2942 				"PDCP Seq Num size should be 5/12 bits for cmode");
2943 			ret = -EINVAL;
2944 			goto out;
2945 		}
2946 	}
2947 
2948 	if (auth_xform) {
2949 		switch (auth_xform->algo) {
2950 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2951 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2952 			break;
2953 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
2954 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2955 			break;
2956 		case RTE_CRYPTO_AUTH_AES_CMAC:
2957 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2958 			break;
2959 		case RTE_CRYPTO_AUTH_NULL:
2960 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2961 			break;
2962 		default:
2963 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2964 				      auth_xform->algo);
2965 			rte_free(session->cipher_key.data);
2966 			return -EINVAL;
2967 		}
2968 		session->auth_key.data = rte_zmalloc(NULL,
2969 						     auth_xform->key.length,
2970 						     RTE_CACHE_LINE_SIZE);
2971 		if (!session->auth_key.data &&
2972 		    auth_xform->key.length > 0) {
2973 			DPAA_SEC_ERR("No Memory for auth key");
2974 			rte_free(session->cipher_key.data);
2975 			return -ENOMEM;
2976 		}
2977 		session->auth_key.length = auth_xform->key.length;
2978 		memcpy(session->auth_key.data, auth_xform->key.data,
2979 		       auth_xform->key.length);
2980 		session->auth_alg = auth_xform->algo;
2981 	} else {
2982 		session->auth_key.data = NULL;
2983 		session->auth_key.length = 0;
2984 		session->auth_alg = 0;
2985 	}
2986 	session->pdcp.domain = pdcp_xform->domain;
2987 	session->pdcp.bearer = pdcp_xform->bearer;
2988 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2989 	session->pdcp.sn_size = pdcp_xform->sn_size;
2990 	session->pdcp.hfn = pdcp_xform->hfn;
2991 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2992 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2993 	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
2994 	if (cipher_xform)
2995 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2996 
2997 	rte_spinlock_lock(&dev_priv->lock);
2998 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2999 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3000 		if (session->inq[i] == NULL) {
3001 			DPAA_SEC_ERR("unable to attach sec queue");
3002 			rte_spinlock_unlock(&dev_priv->lock);
3003 			ret = -EBUSY;
3004 			goto out;
3005 		}
3006 	}
3007 	rte_spinlock_unlock(&dev_priv->lock);
3008 	return 0;
3009 out:
3010 	rte_free(session->auth_key.data);
3011 	rte_free(session->cipher_key.data);
3012 	memset(session, 0, sizeof(dpaa_sec_session));
3013 	return ret;
3014 }
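
/*
 * Illustrative only: a PDCP control-plane session conf of the shape
 * consumed by dpaa_sec_set_pdcp_session() above (values hypothetical).
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_xf,
 *	};
 */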
3015 
3016 static int
3017 dpaa_sec_security_session_create(void *dev,
3018 				 struct rte_security_session_conf *conf,
3019 				 struct rte_security_session *sess,
3020 				 struct rte_mempool *mempool)
3021 {
3022 	void *sess_private_data;
3023 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3024 	int ret;
3025 
3026 	if (rte_mempool_get(mempool, &sess_private_data)) {
3027 		DPAA_SEC_ERR("Couldn't get object from session mempool");
3028 		return -ENOMEM;
3029 	}
3030 
3031 	switch (conf->protocol) {
3032 	case RTE_SECURITY_PROTOCOL_IPSEC:
3033 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
3034 				sess_private_data);
3035 		break;
3036 	case RTE_SECURITY_PROTOCOL_PDCP:
3037 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
3038 				sess_private_data);
3039 		break;
3040 	case RTE_SECURITY_PROTOCOL_MACSEC:
3041 		ret = -ENOTSUP;
		break;
3042 	default:
3043 		ret = -EINVAL;
		break;
3044 	}
3045 	if (ret != 0) {
3046 		DPAA_SEC_ERR("failed to configure session parameters");
3047 		/* Return session to mempool */
3048 		rte_mempool_put(mempool, sess_private_data);
3049 		return ret;
3050 	}
3051 
3052 	set_sec_session_private_data(sess, sess_private_data);
3053 
3054 	return ret;
3055 }
3056 
3057 /** Clear the session memory so it doesn't leave key material behind */
3058 static int
3059 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3060 		struct rte_security_session *sess)
3061 {
3062 	PMD_INIT_FUNC_TRACE();
3063 	void *sess_priv = get_sec_session_private_data(sess);
3064 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3065 
3066 	if (sess_priv) {
3067 		free_session_memory((struct rte_cryptodev *)dev, s);
3068 		set_sec_session_private_data(sess, NULL);
3069 	}
3070 	return 0;
3071 }
3072 #endif
3073 static int
3074 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3075 		       struct rte_cryptodev_config *config __rte_unused)
3076 {
3077 	PMD_INIT_FUNC_TRACE();
3078 
3079 	return 0;
3080 }
3081 
3082 static int
3083 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3084 {
3085 	PMD_INIT_FUNC_TRACE();
3086 	return 0;
3087 }
3088 
3089 static void
3090 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3091 {
3092 	PMD_INIT_FUNC_TRACE();
3093 }
3094 
3095 static int
3096 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3097 {
3098 	PMD_INIT_FUNC_TRACE();
3099 
3100 	if (dev == NULL)
3101 		return -ENODEV;
3102 
3103 	return 0;
3104 }
3105 
3106 static void
3107 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3108 		       struct rte_cryptodev_info *info)
3109 {
3110 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3111 
3112 	PMD_INIT_FUNC_TRACE();
3113 	if (info != NULL) {
3114 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3115 		info->feature_flags = dev->feature_flags;
3116 		info->capabilities = dpaa_sec_capabilities;
3117 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3118 		info->driver_id = cryptodev_driver_id;
3119 	}
3120 }
3121 
3122 static enum qman_cb_dqrr_result
3123 dpaa_sec_process_parallel_event(void *event,
3124 			struct qman_portal *qm __always_unused,
3125 			struct qman_fq *outq,
3126 			const struct qm_dqrr_entry *dqrr,
3127 			void **bufs)
3128 {
3129 	const struct qm_fd *fd;
3130 	struct dpaa_sec_job *job;
3131 	struct dpaa_sec_op_ctx *ctx;
3132 	struct rte_event *ev = (struct rte_event *)event;
3133 
3134 	fd = &dqrr->fd;
3135 
3136 	/* The SG table is embedded in the op context:
3137 	 * sg[0] is the output entry,
3138 	 * sg[1] is the input entry.
3139 	 */
3140 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3141 
3142 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3143 	ctx->fd_status = fd->status;
3144 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3145 		struct qm_sg_entry *sg_out;
3146 		uint32_t len;
3147 
3148 		sg_out = &job->sg[0];
3149 		hw_sg_to_cpu(sg_out);
3150 		len = sg_out->length;
3151 		ctx->op->sym->m_src->pkt_len = len;
3152 		ctx->op->sym->m_src->data_len = len;
3153 	}
3154 	if (!ctx->fd_status) {
3155 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3156 	} else {
3157 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3158 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3159 	}
3160 	ev->event_ptr = (void *)ctx->op;
3161 
3162 	ev->flow_id = outq->ev.flow_id;
3163 	ev->sub_event_type = outq->ev.sub_event_type;
3164 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3165 	ev->op = RTE_EVENT_OP_NEW;
3166 	ev->sched_type = outq->ev.sched_type;
3167 	ev->queue_id = outq->ev.queue_id;
3168 	ev->priority = outq->ev.priority;
3169 	*bufs = (void *)ctx->op;
3170 
3171 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3172 
3173 	return qman_cb_dqrr_consume;
3174 }
3175 
3176 static enum qman_cb_dqrr_result
3177 dpaa_sec_process_atomic_event(void *event,
3178 			struct qman_portal *qm __rte_unused,
3179 			struct qman_fq *outq,
3180 			const struct qm_dqrr_entry *dqrr,
3181 			void **bufs)
3182 {
3183 	u8 index;
3184 	const struct qm_fd *fd;
3185 	struct dpaa_sec_job *job;
3186 	struct dpaa_sec_op_ctx *ctx;
3187 	struct rte_event *ev = (struct rte_event *)event;
3188 
3189 	fd = &dqrr->fd;
3190 
3191 	/* The SG table is embedded in the op context:
3192 	 * sg[0] is the output entry,
3193 	 * sg[1] is the input entry.
3194 	 */
3195 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3196 
3197 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3198 	ctx->fd_status = fd->status;
3199 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3200 		struct qm_sg_entry *sg_out;
3201 		uint32_t len;
3202 
3203 		sg_out = &job->sg[0];
3204 		hw_sg_to_cpu(sg_out);
3205 		len = sg_out->length;
3206 		ctx->op->sym->m_src->pkt_len = len;
3207 		ctx->op->sym->m_src->data_len = len;
3208 	}
3209 	if (!ctx->fd_status) {
3210 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3211 	} else {
3212 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3213 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3214 	}
3215 	ev->event_ptr = (void *)ctx->op;
3216 	ev->flow_id = outq->ev.flow_id;
3217 	ev->sub_event_type = outq->ev.sub_event_type;
3218 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3219 	ev->op = RTE_EVENT_OP_NEW;
3220 	ev->sched_type = outq->ev.sched_type;
3221 	ev->queue_id = outq->ev.queue_id;
3222 	ev->priority = outq->ev.priority;
3223 
3224 	/* Save active dqrr entries */
3225 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3226 	DPAA_PER_LCORE_DQRR_SIZE++;
3227 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3228 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3229 	ev->impl_opaque = index + 1;
3230 	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3231 	*bufs = (void *)ctx->op;
3232 
3233 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3234 
3235 	return qman_cb_dqrr_defer;
3236 }
3237 
3238 int
3239 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3240 		int qp_id,
3241 		uint16_t ch_id,
3242 		const struct rte_event *event)
3243 {
3244 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3245 	struct qm_mcc_initfq opts = {0};
3246 
3247 	int ret;
3248 
3249 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3250 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3251 	opts.fqd.dest.channel = ch_id;
3252 
3253 	switch (event->sched_type) {
3254 	case RTE_SCHED_TYPE_ATOMIC:
3255 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3256 		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3257 		 * configuration when HOLD_ACTIVE is set.
3258 		 */
3259 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3260 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3261 		break;
3262 	case RTE_SCHED_TYPE_ORDERED:
3263 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3264 		return -ENOTSUP;
3265 	default:
3266 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3267 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3268 		break;
3269 	}
3270 
3271 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3272 	if (unlikely(ret)) {
3273 		DPAA_SEC_ERR("unable to init caam source fq!");
3274 		return ret;
3275 	}
3276 
3277 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3278 
3279 	return 0;
3280 }
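
/*
 * Note: the attach/detach hooks above are exported for the DPAA eventdev
 * crypto adapter; once a queue pair's output FQ is bound to an eventdev
 * queue, completed crypto ops are delivered as RTE_EVENT_TYPE_CRYPTODEV
 * events through the parallel or atomic DQRR callbacks above.
 */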
3281 
3282 int
3283 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3284 			int qp_id)
3285 {
3286 	struct qm_mcc_initfq opts = {0};
3287 	int ret;
3288 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3289 
3290 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3291 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3292 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3293 	qp->outq.cb.ern  = ern_sec_fq_handler;
3294 	qman_retire_fq(&qp->outq, NULL);
3295 	qman_oos_fq(&qp->outq);
3296 	ret = qman_init_fq(&qp->outq, 0, &opts);
3297 	if (ret)
3298 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3299 	qp->outq.cb.dqrr = NULL;
3300 
3301 	return ret;
3302 }
3303 
3304 static struct rte_cryptodev_ops crypto_ops = {
3305 	.dev_configure	      = dpaa_sec_dev_configure,
3306 	.dev_start	      = dpaa_sec_dev_start,
3307 	.dev_stop	      = dpaa_sec_dev_stop,
3308 	.dev_close	      = dpaa_sec_dev_close,
3309 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3310 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3311 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3312 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3313 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3314 	.sym_session_clear        = dpaa_sec_sym_session_clear
3315 };
3316 
3317 #ifdef RTE_LIB_SECURITY
3318 static const struct rte_security_capability *
3319 dpaa_sec_capabilities_get(void *device __rte_unused)
3320 {
3321 	return dpaa_sec_security_cap;
3322 }
3323 
3324 static const struct rte_security_ops dpaa_sec_security_ops = {
3325 	.session_create = dpaa_sec_security_session_create,
3326 	.session_update = NULL,
3327 	.session_stats_get = NULL,
3328 	.session_destroy = dpaa_sec_security_session_destroy,
3329 	.set_pkt_metadata = NULL,
3330 	.capabilities_get = dpaa_sec_capabilities_get
3331 };
3332 #endif
3333 static int
3334 dpaa_sec_uninit(struct rte_cryptodev *dev)
3335 {
3336 	struct dpaa_sec_dev_private *internals;
3337 
3338 	if (dev == NULL)
3339 		return -ENODEV;
3340 
3341 	internals = dev->data->dev_private;
3342 	rte_free(dev->security_ctx);
3343 
3344 	rte_free(internals);
3345 
3346 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3347 		      dev->data->name, rte_socket_id());
3348 
3349 	return 0;
3350 }
3351 
3352 static int
3353 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3354 {
3355 	struct dpaa_sec_dev_private *internals;
3356 #ifdef RTE_LIB_SECURITY
3357 	struct rte_security_ctx *security_instance;
3358 #endif
3359 	struct dpaa_sec_qp *qp;
3360 	uint32_t i, flags;
3361 	int ret;
3362 
3363 	PMD_INIT_FUNC_TRACE();
3364 
3365 	cryptodev->driver_id = cryptodev_driver_id;
3366 	cryptodev->dev_ops = &crypto_ops;
3367 
3368 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3369 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3370 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3371 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3372 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3373 			RTE_CRYPTODEV_FF_SECURITY |
3374 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3375 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3376 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3377 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3378 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3379 
3380 	internals = cryptodev->data->dev_private;
3381 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3382 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3383 
3384 	/*
3385 	 * For secondary processes, we don't initialise any further, as the
3386 	 * primary has already done this work.
3387 	 */
3389 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3390 		DPAA_SEC_WARN("Device already initialized by primary process");
3391 		return 0;
3392 	}
3393 #ifdef RTE_LIB_SECURITY
3394 	/* Initialize security_ctx only for primary process*/
3395 	security_instance = rte_malloc("rte_security_instances_ops",
3396 				sizeof(struct rte_security_ctx), 0);
3397 	if (security_instance == NULL)
3398 		return -ENOMEM;
3399 	security_instance->device = (void *)cryptodev;
3400 	security_instance->ops = &dpaa_sec_security_ops;
3401 	security_instance->sess_cnt = 0;
3402 	cryptodev->security_ctx = security_instance;
3403 #endif
3404 	rte_spinlock_init(&internals->lock);
3405 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3406 		/* init qman fq for queue pair */
3407 		qp = &internals->qps[i];
3408 		ret = dpaa_sec_init_tx(&qp->outq);
3409 		if (ret) {
3410 			DPAA_SEC_ERR("Failed to config tx of queue pair %d", i);
3411 			goto init_error;
3412 		}
3413 	}
3414 
3415 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3416 		QMAN_FQ_FLAG_TO_DCPORTAL;
3417 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3418 		/* create rx qman fq for sessions*/
3419 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3420 		if (unlikely(ret != 0)) {
3421 			DPAA_SEC_ERR("sec qman_create_fq failed");
3422 			goto init_error;
3423 		}
3424 	}
3425 
3426 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3427 	return 0;
3428 
3429 init_error:
3430 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3431 
3432 	rte_free(cryptodev->security_ctx);
3433 	return -EFAULT;
3434 }
3435 
3436 static int
3437 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3438 				struct rte_dpaa_device *dpaa_dev)
3439 {
3440 	struct rte_cryptodev *cryptodev;
3441 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3442 
3443 	int retval;
3444 
3445 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3446 
3447 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3448 	if (cryptodev == NULL)
3449 		return -ENOMEM;
3450 
3451 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3452 		cryptodev->data->dev_private = rte_zmalloc_socket(
3453 					"cryptodev private structure",
3454 					sizeof(struct dpaa_sec_dev_private),
3455 					RTE_CACHE_LINE_SIZE,
3456 					rte_socket_id());
3457 
3458 		if (cryptodev->data->dev_private == NULL)
3459 			rte_panic("Cannot allocate memory for private "
3460 					"device data");
3461 	}
3462 
3463 	dpaa_dev->crypto_dev = cryptodev;
3464 	cryptodev->device = &dpaa_dev->device;
3465 
3466 	/* init user callbacks */
3467 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3468 
3469 	/* If the SEC era is not already configured, get it from device tree */
3470 	if (!rta_get_sec_era()) {
3471 		const struct device_node *caam_node;
3472 
3473 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3474 			const uint32_t *prop = of_get_property(caam_node,
3475 					"fsl,sec-era",
3476 					NULL);
3477 			if (prop) {
3478 				rta_set_sec_era(
3479 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3480 				break;
3481 			}
3482 		}
3483 	}
3484 
3485 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3486 		retval = rte_dpaa_portal_init((void *)1);
3487 		if (retval) {
3488 			DPAA_SEC_ERR("Unable to initialize portal");
3489 			goto out;
3490 		}
3491 	}
3492 
3493 	/* Invoke PMD device initialization function */
3494 	retval = dpaa_sec_dev_init(cryptodev);
3495 	if (retval == 0)
3496 		return 0;
3497 
3498 	retval = -ENXIO;
3499 out:
3500 	/* In case of error, cleanup is done */
3501 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3502 		rte_free(cryptodev->data->dev_private);
3503 
3504 	rte_cryptodev_pmd_release_device(cryptodev);
3505 
3506 	return retval;
3507 }
3508 
3509 static int
3510 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3511 {
3512 	struct rte_cryptodev *cryptodev;
3513 	int ret;
3514 
3515 	cryptodev = dpaa_dev->crypto_dev;
3516 	if (cryptodev == NULL)
3517 		return -ENODEV;
3518 
3519 	ret = dpaa_sec_uninit(cryptodev);
3520 	if (ret)
3521 		return ret;
3522 
3523 	return rte_cryptodev_pmd_destroy(cryptodev);
3524 }
3525 
3526 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3527 	.drv_type = FSL_DPAA_CRYPTO,
3528 	.driver = {
3529 		.name = "DPAA SEC PMD"
3530 	},
3531 	.probe = cryptodev_dpaa_sec_probe,
3532 	.remove = cryptodev_dpaa_sec_remove,
3533 };
3534 
3535 static struct cryptodev_driver dpaa_sec_crypto_drv;
3536 
3537 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3538 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3539 		cryptodev_driver_id);
3540 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3541