/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
	 * one call to dcbz_64() clears 64 bytes, so four calls clear all
	 * the SG entries. Since dpaa_sec_alloc_ctx() is called for every
	 * packet, memset() would be costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
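	/* Cache the virtual-to-IOVA offset of this ctx so that addresses
	 * within it can later be translated by a simple subtraction.
	 */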
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets enqueued to this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued to in_fq and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* The SG table is embedded in an op ctx:
	 * sg[0] is the output,
	 * sg[1] the input.
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
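	/* For protocol (lookaside) offload the hardware may change the
	 * packet length, so propagate the output SG length back into the
	 * mbuf chain.
	 */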
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* CAAM results are delivered to this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

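	/* If rta_inline_pdcp_query() says the key cannot be inlined in the
	 * shared descriptor, reference it by physical address instead.
	 */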
	if (rta_inline_pdcp_query(authdata.algtype,
				cipherdata.algtype,
				ses->pdcp.sn_size,
				ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

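	/* Stash the key lengths in sh_desc[0..1] and let rta_inline_query()
	 * decide which keys fit inline in the descriptor; the result is a
	 * bitmask in sh_desc[2] (bit 0: cipher key, bit 1: auth key).
	 */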
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

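		/* As in dpaa_sec_prep_ipsec_cdb(), query which of the two
		 * keys can be inlined in the shared descriptor.
		 */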
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set to 0 here; it is overwritten in
		 * the fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

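	/* Store the shared-descriptor length in the header and convert the
	 * header words to the big-endian layout that SEC expects.
	 */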
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless; it should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, ask for the exact
	 * number (QM_VDQCR_EXACT). Without the QM_VDQCR_EXACT flag the
	 * hardware can return up to two more buffers than requested, so
	 * in that case we request two fewer.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* The SG table is embedded in an op ctx:
		 * sg[0] is the output,
		 * sg[1] the input.
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* The op status has been reported, so free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

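	/* For SNOW 3G and ZUC the auth length/offset are expressed in bits;
	 * they must be byte aligned, and are converted to bytes below.
	 */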
	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
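	/* Exclude the digest area from the last data segment; the digest
	 * itself is carried in a dedicated SG entry.
	 */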
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
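	/* The output can be larger than the input (e.g. IPsec encap adds
	 * tunnel headers), so give SEC the full available buffer space.
	 */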
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
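			/* If this mbuf arrived via an event-mode dequeue
			 * (seqn != 0), piggyback the DQRR consume onto this
			 * enqueue (DCA) instead of consuming it separately.
			 */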
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

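			/* auth_hdr_len/auth_tail_len are the authenticated-
			 * only bytes before and after the ciphered region;
			 * they reach SEC through the FD cmd (DPOVRD) below.
			 */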
			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
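			/* Compound frame: the FD points at the two-entry SG
			 * table built above (sg[0] output, sg[1] input).
			 */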
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd.cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, the per-packet HFN is stored in
			 * the mbuf priv area after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
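	/* The per-queue-pair context pool backs dpaa_sec_alloc_ctx(); it is
	 * created once and reused across device reconfigurations.
	 */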
1953 	if (!qp->ctx_pool) {
1954 		qp->ctx_pool = rte_mempool_create((const char *)str,
1955 							CTX_POOL_NUM_BUFS,
1956 							CTX_POOL_BUF_SIZE,
1957 							CTX_POOL_CACHE_SIZE, 0,
1958 							NULL, NULL, NULL, NULL,
1959 							SOCKET_ID_ANY, 0);
1960 		if (!qp->ctx_pool) {
1961 			DPAA_SEC_ERR("%s create failed\n", str);
1962 			return -ENOMEM;
1963 		}
1964 	} else
1965 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1966 				dev->data->dev_id, qp_id);
1967 	dev->data->queue_pairs[qp_id] = qp;
1968 
1969 	return 0;
1970 }
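
/*
 * Illustrative sketch (not part of the PMD): device and queue pair setup as
 * an application would drive it. The function and the two session mempools
 * are hypothetical; note that qp_conf is accepted but unused by this PMD,
 * which sizes its per-qp ctx_pool internally (see above).
 */
#if 0
static int
example_qp_setup(uint8_t dev_id, struct rte_mempool *sess_mp,
		 struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_config conf = {
		.socket_id = (int)rte_socket_id(),
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;
	return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					      (int)rte_socket_id());
}
#endif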
1971 
1972 /** Returns the size of session structure */
1973 static unsigned int
1974 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1975 {
1976 	PMD_INIT_FUNC_TRACE();
1977 
1978 	return sizeof(dpaa_sec_session);
1979 }
1980 
1981 static int
1982 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1983 		     struct rte_crypto_sym_xform *xform,
1984 		     dpaa_sec_session *session)
1985 {
1986 	session->ctxt = DPAA_SEC_CIPHER;
1987 	session->cipher_alg = xform->cipher.algo;
1988 	session->iv.length = xform->cipher.iv.length;
1989 	session->iv.offset = xform->cipher.iv.offset;
1990 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1991 					       RTE_CACHE_LINE_SIZE);
1992 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1993 		DPAA_SEC_ERR("No Memory for cipher key");
1994 		return -ENOMEM;
1995 	}
1996 	session->cipher_key.length = xform->cipher.key.length;
1997 
1998 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1999 	       xform->cipher.key.length);
2000 	switch (xform->cipher.algo) {
2001 	case RTE_CRYPTO_CIPHER_AES_CBC:
2002 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2003 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2004 		break;
2005 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2006 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2007 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2008 		break;
2009 	case RTE_CRYPTO_CIPHER_AES_CTR:
2010 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2011 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2012 		break;
2013 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2014 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2015 		break;
2016 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2017 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2018 		break;
2019 	default:
2020 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2021 			      xform->cipher.algo);
2022 		return -ENOTSUP;
2023 	}
2024 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2025 			DIR_ENC : DIR_DEC;
2026 
2027 	return 0;
2028 }
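
/*
 * Illustrative sketch (not part of the PMD): a cipher-only transform of the
 * shape consumed by dpaa_sec_cipher_init(). The key is a placeholder, and
 * the IV offset assumes the common layout with the IV placed in the op's
 * private area directly after the sym op.
 */
#if 0
static struct rte_crypto_sym_xform
example_cipher_xform(uint8_t *key /* 16-byte placeholder key */)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 16,
			},
		},
	};
	return xform;
}
#endif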
2029 
2030 static int
2031 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2032 		   struct rte_crypto_sym_xform *xform,
2033 		   dpaa_sec_session *session)
2034 {
2035 	session->ctxt = DPAA_SEC_AUTH;
2036 	session->auth_alg = xform->auth.algo;
2037 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2038 					     RTE_CACHE_LINE_SIZE);
2039 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2040 		DPAA_SEC_ERR("No Memory for auth key");
2041 		return -ENOMEM;
2042 	}
2043 	session->auth_key.length = xform->auth.key.length;
2044 	session->digest_length = xform->auth.digest_length;
2045 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2046 		session->iv.offset = xform->auth.iv.offset;
2047 		session->iv.length = xform->auth.iv.length;
2048 	}
2049 
2050 	memcpy(session->auth_key.data, xform->auth.key.data,
2051 	       xform->auth.key.length);
2052 
2053 	switch (xform->auth.algo) {
2054 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2055 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2056 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2057 		break;
2058 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2059 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2060 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2061 		break;
2062 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2063 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2064 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2065 		break;
2066 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2067 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2068 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2069 		break;
2070 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2071 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2072 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2073 		break;
2074 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2075 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2076 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2077 		break;
2078 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2079 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2080 		session->auth_key.algmode = OP_ALG_AAI_F9;
2081 		break;
2082 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2083 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2084 		session->auth_key.algmode = OP_ALG_AAI_F9;
2085 		break;
2086 	default:
2087 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2088 			      xform->auth.algo);
2089 		return -ENOTSUP;
2090 	}
2091 
2092 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2093 			DIR_ENC : DIR_DEC;
2094 
2095 	return 0;
2096 }
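
/*
 * Illustrative sketch (not part of the PMD): an auth-only transform for
 * dpaa_sec_auth_init(). The key length and digest size are the usual
 * SHA1-HMAC values but are assumptions of this example; for SNOW3G/ZUC
 * integrity the auth.iv field would also need filling (see the NULL-cipher
 * branch above).
 */
#if 0
static struct rte_crypto_sym_xform
example_auth_xform(uint8_t *key /* 20-byte placeholder HMAC key */)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = key, .length = 20 },
			.digest_length = 20,
		},
	};
	return xform;
}
#endif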
2097 
2098 static int
2099 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2100 		   struct rte_crypto_sym_xform *xform,
2101 		   dpaa_sec_session *session)
2102 {
2103 
2104 	struct rte_crypto_cipher_xform *cipher_xform;
2105 	struct rte_crypto_auth_xform *auth_xform;
2106 
2107 	session->ctxt = DPAA_SEC_CIPHER_HASH;
2108 	if (session->auth_cipher_text) {
2109 		cipher_xform = &xform->cipher;
2110 		auth_xform = &xform->next->auth;
2111 	} else {
2112 		cipher_xform = &xform->next->cipher;
2113 		auth_xform = &xform->auth;
2114 	}
2115 
2116 	/* Set IV parameters */
2117 	session->iv.offset = cipher_xform->iv.offset;
2118 	session->iv.length = cipher_xform->iv.length;
2119 
2120 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2121 					       RTE_CACHE_LINE_SIZE);
2122 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2123 		DPAA_SEC_ERR("No Memory for cipher key");
2124 		return -ENOMEM;
2125 	}
2126 	session->cipher_key.length = cipher_xform->key.length;
2127 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2128 					     RTE_CACHE_LINE_SIZE);
2129 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2130 		DPAA_SEC_ERR("No Memory for auth key");
2131 		return -ENOMEM;
2132 	}
2133 	session->auth_key.length = auth_xform->key.length;
2134 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2135 	       cipher_xform->key.length);
2136 	memcpy(session->auth_key.data, auth_xform->key.data,
2137 	       auth_xform->key.length);
2138 
2139 	session->digest_length = auth_xform->digest_length;
2140 	session->auth_alg = auth_xform->algo;
2141 
2142 	switch (auth_xform->algo) {
2143 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2144 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2145 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2146 		break;
2147 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2148 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2149 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2150 		break;
2151 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2152 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2153 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2154 		break;
2155 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2156 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2157 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2158 		break;
2159 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2160 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2161 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2162 		break;
2163 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2164 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2165 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2166 		break;
2167 	default:
2168 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2169 			      auth_xform->algo);
2170 		return -ENOTSUP;
2171 	}
2172 
2173 	session->cipher_alg = cipher_xform->algo;
2174 
2175 	switch (cipher_xform->algo) {
2176 	case RTE_CRYPTO_CIPHER_AES_CBC:
2177 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2178 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2179 		break;
2180 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2181 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2182 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2183 		break;
2184 	case RTE_CRYPTO_CIPHER_AES_CTR:
2185 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2186 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2187 		break;
2188 	default:
2189 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2190 			      cipher_xform->algo);
2191 		return -ENOTSUP;
2192 	}
2193 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2194 				DIR_ENC : DIR_DEC;
2195 	return 0;
2196 }
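
/*
 * Illustrative sketch (not part of the PMD): chaining cipher and auth
 * xforms for dpaa_sec_chain_init(). Listing the cipher first with ENCRYPT
 * gives the encrypt-then-generate ordering that
 * dpaa_sec_set_session_parameters() accepts; both xforms are assumed to be
 * filled in as in the sketches above.
 */
#if 0
static void
example_chain(struct rte_crypto_sym_xform *cipher_xform,
	      struct rte_crypto_sym_xform *auth_xform)
{
	cipher_xform->next = auth_xform;	/* cipher first, then auth */
	auth_xform->next = NULL;
}
#endif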
2197 
2198 static int
2199 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2200 		   struct rte_crypto_sym_xform *xform,
2201 		   dpaa_sec_session *session)
2202 {
2203 	session->aead_alg = xform->aead.algo;
2204 	session->ctxt = DPAA_SEC_AEAD;
2205 	session->iv.length = xform->aead.iv.length;
2206 	session->iv.offset = xform->aead.iv.offset;
2207 	session->auth_only_len = xform->aead.aad_length;
2208 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2209 					     RTE_CACHE_LINE_SIZE);
2210 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2211 		DPAA_SEC_ERR("No Memory for aead key");
2212 		return -ENOMEM;
2213 	}
2214 	session->aead_key.length = xform->aead.key.length;
2215 	session->digest_length = xform->aead.digest_length;
2216 
2217 	memcpy(session->aead_key.data, xform->aead.key.data,
2218 	       xform->aead.key.length);
2219 
2220 	switch (session->aead_alg) {
2221 	case RTE_CRYPTO_AEAD_AES_GCM:
2222 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2223 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2224 		break;
2225 	default:
2226 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2227 		return -ENOTSUP;
2228 	}
2229 
2230 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2231 			DIR_ENC : DIR_DEC;
2232 
2233 	return 0;
2234 }
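
/*
 * Illustrative sketch (not part of the PMD): an AES-GCM AEAD transform for
 * dpaa_sec_aead_init(). The 12-byte IV, 16-byte tag and 16-byte AAD are the
 * usual GCM choices but are assumptions of this example, as is the IV
 * offset (same layout as the cipher sketch above).
 */
#if 0
static struct rte_crypto_sym_xform
example_aead_xform(uint8_t *key /* 16-byte placeholder key */)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 12,
			},
			.digest_length = 16,
			.aad_length = 16,
		},
	};
	return xform;
}
#endif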
2235 
2236 static struct qman_fq *
2237 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2238 {
2239 	unsigned int i;
2240 
2241 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2242 		if (qi->inq_attach[i] == 0) {
2243 			qi->inq_attach[i] = 1;
2244 			return &qi->inq[i];
2245 		}
2246 	}
2247 	DPAA_SEC_WARN("All sessions in use (%u)", qi->max_nb_sessions);
2248 
2249 	return NULL;
2250 }
2251 
2252 static int
2253 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2254 {
2255 	unsigned int i;
2256 
2257 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2258 		if (&qi->inq[i] == fq) {
2259 			if (qman_retire_fq(fq, NULL) != 0)
2260 				DPAA_SEC_WARN("Queue could not be retired");
2261 			qman_oos_fq(fq);
2262 			qi->inq_attach[i] = 0;
2263 			return 0;
2264 		}
2265 	}
2266 	return -1;
2267 }
2268 
2269 static int
2270 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2271 {
2272 	int ret;
2273 
2274 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2275 	ret = dpaa_sec_prep_cdb(sess);
2276 	if (ret) {
2277 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2278 		return ret;
2279 	}
2280 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2281 		ret = rte_dpaa_portal_init((void *)0);
2282 		if (ret) {
2283 			DPAA_SEC_ERR("Failure in affining portal");
2284 			return ret;
2285 		}
2286 	}
2287 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2288 			       rte_dpaa_mem_vtop(&sess->cdb),
2289 			       qman_fq_fqid(&qp->outq));
2290 	if (ret)
2291 		DPAA_SEC_ERR("Unable to init sec queue");
2292 
2293 	return ret;
2294 }
2295 
2296 static inline void
2297 free_session_data(dpaa_sec_session *s)
2298 {
2299 	if (is_aead(s))
2300 		rte_free(s->aead_key.data);
2301 	else {
2302 		rte_free(s->auth_key.data);
2303 		rte_free(s->cipher_key.data);
2304 	}
2305 	memset(s, 0, sizeof(dpaa_sec_session));
2306 }
2307 
2308 static int
2309 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2310 			    struct rte_crypto_sym_xform *xform,	void *sess)
2311 {
2312 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2313 	dpaa_sec_session *session = sess;
2314 	uint32_t i;
2315 	int ret;
2316 
2317 	PMD_INIT_FUNC_TRACE();
2318 
2319 	if (unlikely(sess == NULL)) {
2320 		DPAA_SEC_ERR("invalid session struct");
2321 		return -EINVAL;
2322 	}
2323 	memset(session, 0, sizeof(dpaa_sec_session));
2324 
2325 	/* Default IV length = 0 */
2326 	session->iv.length = 0;
2327 
2328 	/* Cipher Only */
2329 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2330 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2331 		ret = dpaa_sec_cipher_init(dev, xform, session);
2332 
2333 	/* Authentication Only */
2334 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2335 		   xform->next == NULL) {
2336 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2337 		session->ctxt = DPAA_SEC_AUTH;
2338 		ret = dpaa_sec_auth_init(dev, xform, session);
2339 
2340 	/* Cipher then Authenticate */
2341 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2342 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2343 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2344 			session->auth_cipher_text = 1;
2345 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2346 				ret = dpaa_sec_auth_init(dev, xform, session);
2347 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2348 				ret = dpaa_sec_cipher_init(dev, xform, session);
2349 			else
2350 				ret = dpaa_sec_chain_init(dev, xform, session);
2351 		} else {
2352 			DPAA_SEC_ERR("Not supported: Cipher decrypt followed by Auth");
2353 			return -ENOTSUP;
2354 		}
2355 	/* Authenticate then Cipher */
2356 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2357 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2358 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2359 			session->auth_cipher_text = 0;
2360 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2361 				ret = dpaa_sec_cipher_init(dev, xform, session);
2362 			else if (xform->next->cipher.algo
2363 					== RTE_CRYPTO_CIPHER_NULL)
2364 				ret = dpaa_sec_auth_init(dev, xform, session);
2365 			else
2366 				ret = dpaa_sec_chain_init(dev, xform, session);
2367 		} else {
2368 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2369 			return -ENOTSUP;
2370 		}
2371 
2372 	/* AEAD operation for AES-GCM kind of Algorithms */
2373 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2374 		   xform->next == NULL) {
2375 		ret = dpaa_sec_aead_init(dev, xform, session);
2376 
2377 	} else {
2378 		DPAA_SEC_ERR("Invalid crypto type");
2379 		return -EINVAL;
2380 	}
2381 	if (ret) {
2382 		DPAA_SEC_ERR("unable to init session");
2383 		goto err1;
2384 	}
2385 
2386 	rte_spinlock_lock(&internals->lock);
2387 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2388 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2389 		if (session->inq[i] == NULL) {
2390 			DPAA_SEC_ERR("unable to attach sec queue");
2391 			rte_spinlock_unlock(&internals->lock);
2392 			ret = -EBUSY;
2393 			goto err1;
2394 		}
2395 	}
2396 	rte_spinlock_unlock(&internals->lock);
2397 
2398 	return 0;
2399 
2400 err1:
2401 	free_session_data(session);
2402 	return ret;
2403 }
2404 
2405 static int
2406 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2407 		struct rte_crypto_sym_xform *xform,
2408 		struct rte_cryptodev_sym_session *sess,
2409 		struct rte_mempool *mempool)
2410 {
2411 	void *sess_private_data;
2412 	int ret;
2413 
2414 	PMD_INIT_FUNC_TRACE();
2415 
2416 	if (rte_mempool_get(mempool, &sess_private_data)) {
2417 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2418 		return -ENOMEM;
2419 	}
2420 
2421 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2422 	if (ret != 0) {
2423 		DPAA_SEC_ERR("failed to configure session parameters");
2424 
2425 		/* Return session to mempool */
2426 		rte_mempool_put(mempool, sess_private_data);
2427 		return ret;
2428 	}
2429 
2430 	set_sym_session_private_data(sess, dev->driver_id,
2431 			sess_private_data);
2432 
2433 
2434 	return 0;
2435 }
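
/*
 * Illustrative sketch (not part of the PMD): the application-side session
 * creation that lands in dpaa_sec_sym_session_configure(). The function and
 * both mempools are hypothetical; the xform is assumed prepared as in the
 * sketches above.
 */
#if 0
static struct rte_cryptodev_sym_session *
example_session_create(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		       struct rte_mempool *sess_mp,
		       struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	/* Pulls driver-private data from sess_priv_mp and calls this PMD's
	 * sym_session_configure op.
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
					   sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
#endif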
2436 
2437 static inline void
2438 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2439 {
2440 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2441 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2442 	uint8_t i;
2443 
2444 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2445 		if (s->inq[i])
2446 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2447 		s->inq[i] = NULL;
2448 		s->qp[i] = NULL;
2449 	}
2450 	free_session_data(s);
2451 	rte_mempool_put(sess_mp, (void *)s);
2452 }
2453 
2454 /** Clear the memory of session so it doesn't leave key material behind */
2455 static void
2456 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2457 		struct rte_cryptodev_sym_session *sess)
2458 {
2459 	PMD_INIT_FUNC_TRACE();
2460 	uint8_t index = dev->driver_id;
2461 	void *sess_priv = get_sym_session_private_data(sess, index);
2462 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2463 
2464 	if (sess_priv) {
2465 		free_session_memory(dev, s);
2466 		set_sym_session_private_data(sess, index, NULL);
2467 	}
2468 }
2469 
2470 #ifdef RTE_LIBRTE_SECURITY
2471 static int
2472 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2473 			struct rte_security_ipsec_xform *ipsec_xform,
2474 			dpaa_sec_session *session)
2475 {
2476 	PMD_INIT_FUNC_TRACE();
2477 
2478 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2479 					       RTE_CACHE_LINE_SIZE);
2480 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2481 		DPAA_SEC_ERR("No Memory for aead key");
2482 		return -ENOMEM;
2483 	}
2484 	memcpy(session->aead_key.data, aead_xform->key.data,
2485 	       aead_xform->key.length);
2486 
2487 	session->digest_length = aead_xform->digest_length;
2488 	session->aead_key.length = aead_xform->key.length;
2489 
2490 	switch (aead_xform->algo) {
2491 	case RTE_CRYPTO_AEAD_AES_GCM:
2492 		switch (session->digest_length) {
2493 		case 8:
2494 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2495 			break;
2496 		case 12:
2497 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2498 			break;
2499 		case 16:
2500 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2501 			break;
2502 		default:
2503 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2504 				     session->digest_length);
2505 			return -EINVAL;
2506 		}
2507 		if (session->dir == DIR_ENC) {
2508 			memcpy(session->encap_pdb.gcm.salt,
2509 				(uint8_t *)&(ipsec_xform->salt), 4);
2510 		} else {
2511 			memcpy(session->decap_pdb.gcm.salt,
2512 				(uint8_t *)&(ipsec_xform->salt), 4);
2513 		}
2514 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2515 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2516 		break;
2517 	default:
2518 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2519 			      aead_xform->algo);
2520 		return -ENOTSUP;
2521 	}
2522 	return 0;
2523 }
2524 
2525 static int
2526 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2527 	struct rte_crypto_auth_xform *auth_xform,
2528 	struct rte_security_ipsec_xform *ipsec_xform,
2529 	dpaa_sec_session *session)
2530 {
2531 	if (cipher_xform) {
2532 		session->cipher_key.data = rte_zmalloc(NULL,
2533 						       cipher_xform->key.length,
2534 						       RTE_CACHE_LINE_SIZE);
2535 		if (session->cipher_key.data == NULL &&
2536 				cipher_xform->key.length > 0) {
2537 			DPAA_SEC_ERR("No Memory for cipher key");
2538 			return -ENOMEM;
2539 		}
2540 
2541 		session->cipher_key.length = cipher_xform->key.length;
2542 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2543 				cipher_xform->key.length);
2544 		session->cipher_alg = cipher_xform->algo;
2545 	} else {
2546 		session->cipher_key.data = NULL;
2547 		session->cipher_key.length = 0;
2548 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2549 	}
2550 
2551 	if (auth_xform) {
2552 		session->auth_key.data = rte_zmalloc(NULL,
2553 						auth_xform->key.length,
2554 						RTE_CACHE_LINE_SIZE);
2555 		if (session->auth_key.data == NULL &&
2556 				auth_xform->key.length > 0) {
2557 			DPAA_SEC_ERR("No Memory for auth key");
2558 			return -ENOMEM;
2559 		}
2560 		session->auth_key.length = auth_xform->key.length;
2561 		memcpy(session->auth_key.data, auth_xform->key.data,
2562 				auth_xform->key.length);
2563 		session->auth_alg = auth_xform->algo;
2564 		session->digest_length = auth_xform->digest_length;
2565 	} else {
2566 		session->auth_key.data = NULL;
2567 		session->auth_key.length = 0;
2568 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2569 	}
2570 
2571 	switch (session->auth_alg) {
2572 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2573 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2574 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2575 		break;
2576 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2577 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2578 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2579 		break;
2580 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2581 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2582 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2583 		if (session->digest_length != 16)
2584 			DPAA_SEC_WARN(
2585 			"Using sha256-hmac with a truncated digest is "
2586 			"non-standard; it will not work with lookaside proto");
2587 		break;
2588 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2589 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2590 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2591 		break;
2592 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2593 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2594 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2595 		break;
2596 	case RTE_CRYPTO_AUTH_AES_CMAC:
2597 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2598 		break;
2599 	case RTE_CRYPTO_AUTH_NULL:
2600 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2601 		break;
2602 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2603 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2604 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2605 	case RTE_CRYPTO_AUTH_SHA1:
2606 	case RTE_CRYPTO_AUTH_SHA256:
2607 	case RTE_CRYPTO_AUTH_SHA512:
2608 	case RTE_CRYPTO_AUTH_SHA224:
2609 	case RTE_CRYPTO_AUTH_SHA384:
2610 	case RTE_CRYPTO_AUTH_MD5:
2611 	case RTE_CRYPTO_AUTH_AES_GMAC:
2612 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2613 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2614 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2615 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2616 			      session->auth_alg);
2617 		return -ENOTSUP;
2618 	default:
2619 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2620 			      session->auth_alg);
2621 		return -ENOTSUP;
2622 	}
2623 
2624 	switch (session->cipher_alg) {
2625 	case RTE_CRYPTO_CIPHER_AES_CBC:
2626 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2627 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2628 		break;
2629 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2630 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2631 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2632 		break;
2633 	case RTE_CRYPTO_CIPHER_AES_CTR:
2634 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2635 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2636 		if (session->dir == DIR_ENC) {
2637 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2638 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2639 		} else {
2640 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2641 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2642 		}
2643 		break;
2644 	case RTE_CRYPTO_CIPHER_NULL:
2645 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2646 		break;
2647 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2648 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2649 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2650 	case RTE_CRYPTO_CIPHER_AES_ECB:
2651 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2652 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2653 			      session->cipher_alg);
2654 		return -ENOTSUP;
2655 	default:
2656 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2657 			      session->cipher_alg);
2658 		return -ENOTSUP;
2659 	}
2660 
2661 	return 0;
2662 }
2663 
2664 static int
2665 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2666 			   struct rte_security_session_conf *conf,
2667 			   void *sess)
2668 {
2669 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2670 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2671 	struct rte_crypto_auth_xform *auth_xform = NULL;
2672 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2673 	struct rte_crypto_aead_xform *aead_xform = NULL;
2674 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2675 	uint32_t i;
2676 	int ret;
2677 
2678 	PMD_INIT_FUNC_TRACE();
2679 
2680 	memset(session, 0, sizeof(dpaa_sec_session));
2681 	session->proto_alg = conf->protocol;
2682 	session->ctxt = DPAA_SEC_IPSEC;
2683 
2684 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2685 		session->dir = DIR_ENC;
2686 	else
2687 		session->dir = DIR_DEC;
2688 
2689 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2690 		cipher_xform = &conf->crypto_xform->cipher;
2691 		if (conf->crypto_xform->next)
2692 			auth_xform = &conf->crypto_xform->next->auth;
2693 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2694 					ipsec_xform, session);
2695 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2696 		auth_xform = &conf->crypto_xform->auth;
2697 		if (conf->crypto_xform->next)
2698 			cipher_xform = &conf->crypto_xform->next->cipher;
2699 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2700 					ipsec_xform, session);
2701 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2702 		aead_xform = &conf->crypto_xform->aead;
2703 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2704 					ipsec_xform, session);
2705 	} else {
2706 		DPAA_SEC_ERR("XFORM not specified");
2707 		ret = -EINVAL;
2708 		goto out;
2709 	}
2710 	if (ret) {
2711 		DPAA_SEC_ERR("Failed to process xform");
2712 		goto out;
2713 	}
2714 
2715 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2716 		if (ipsec_xform->tunnel.type ==
2717 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2718 			session->ip4_hdr.ip_v = IPVERSION;
2719 			session->ip4_hdr.ip_hl = 5;
2720 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2721 						sizeof(session->ip4_hdr));
2722 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2723 			session->ip4_hdr.ip_id = 0;
2724 			session->ip4_hdr.ip_off = 0;
2725 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2726 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2727 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2728 					IPPROTO_ESP : IPPROTO_AH;
2729 			session->ip4_hdr.ip_sum = 0;
2730 			session->ip4_hdr.ip_src =
2731 					ipsec_xform->tunnel.ipv4.src_ip;
2732 			session->ip4_hdr.ip_dst =
2733 					ipsec_xform->tunnel.ipv4.dst_ip;
2734 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2735 						(void *)&session->ip4_hdr,
2736 						sizeof(struct ip));
2737 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2738 		} else if (ipsec_xform->tunnel.type ==
2739 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2740 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2741 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2742 				((ipsec_xform->tunnel.ipv6.dscp <<
2743 					RTE_IPV6_HDR_TC_SHIFT) &
2744 					RTE_IPV6_HDR_TC_MASK) |
2745 				((ipsec_xform->tunnel.ipv6.flabel <<
2746 					RTE_IPV6_HDR_FL_SHIFT) &
2747 					RTE_IPV6_HDR_FL_MASK));
2748 			/* Payload length will be updated by HW */
2749 			session->ip6_hdr.payload_len = 0;
2750 			session->ip6_hdr.hop_limits =
2751 					ipsec_xform->tunnel.ipv6.hlimit;
2752 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2753 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2754 					IPPROTO_ESP : IPPROTO_AH;
2755 			memcpy(&session->ip6_hdr.src_addr,
2756 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2757 			memcpy(&session->ip6_hdr.dst_addr,
2758 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2759 			session->encap_pdb.ip_hdr_len =
2760 						sizeof(struct rte_ipv6_hdr);
2761 		}
2762 		session->encap_pdb.options =
2763 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2764 			PDBOPTS_ESP_OIHI_PDB_INL |
2765 			PDBOPTS_ESP_IVSRC |
2766 			PDBHMO_ESP_ENCAP_DTTL |
2767 			PDBHMO_ESP_SNR;
2768 		if (ipsec_xform->options.esn)
2769 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2770 		session->encap_pdb.spi = ipsec_xform->spi;
2771 
2772 	} else if (ipsec_xform->direction ==
2773 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2774 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2775 			session->decap_pdb.options = sizeof(struct ip) << 16;
2776 		else
2777 			session->decap_pdb.options =
2778 					sizeof(struct rte_ipv6_hdr) << 16;
2779 		if (ipsec_xform->options.esn)
2780 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2781 		if (ipsec_xform->replay_win_sz) {
2782 			uint32_t win_sz;
2783 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2784 
2785 			switch (win_sz) {
2786 			case 1:
2787 			case 2:
2788 			case 4:
2789 			case 8:
2790 			case 16:
2791 			case 32:
2792 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2793 				break;
2794 			case 64:
2795 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2796 				break;
2797 			default:
2798 				session->decap_pdb.options |=
2799 							PDBOPTS_ESP_ARS128;
2800 			}
2801 		}
2802 	} else
2803 		goto out;
2804 	rte_spinlock_lock(&internals->lock);
2805 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2806 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2807 		if (session->inq[i] == NULL) {
2808 			DPAA_SEC_ERR("unable to attach sec queue");
2809 			rte_spinlock_unlock(&internals->lock);
2810 			goto out;
2811 		}
2812 	}
2813 	rte_spinlock_unlock(&internals->lock);
2814 
2815 	return 0;
2816 out:
2817 	free_session_data(session);
2818 	return -1;
2819 }
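
/*
 * Illustrative sketch (not part of the PMD): a lookaside-protocol IPsec
 * session conf of the shape handled by dpaa_sec_set_ipsec_session(). SPI,
 * tunnel addresses and the crypto xform are placeholder assumptions.
 */
#if 0
static struct rte_security_session *
example_ipsec_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		      struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,			/* placeholder */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4 = { .ttl = 64 },	/* addresses elided */
			},
		},
		.crypto_xform = xform,	/* e.g. cipher + auth chain */
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}
#endif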
2820 
2821 static int
2822 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2823 			  struct rte_security_session_conf *conf,
2824 			  void *sess)
2825 {
2826 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2827 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2828 	struct rte_crypto_auth_xform *auth_xform = NULL;
2829 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2830 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2831 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2832 	uint32_t i;
2833 	int ret;
2834 
2835 	PMD_INIT_FUNC_TRACE();
2836 
2837 	memset(session, 0, sizeof(dpaa_sec_session));
2838 
2839 	/* find xfrm types */
2840 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2841 		cipher_xform = &xform->cipher;
2842 		if (xform->next != NULL)
2843 			auth_xform = &xform->next->auth;
2844 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2845 		auth_xform = &xform->auth;
2846 		if (xform->next != NULL)
2847 			cipher_xform = &xform->next->cipher;
2848 	} else {
2849 		DPAA_SEC_ERR("Invalid crypto type");
2850 		return -EINVAL;
2851 	}
2852 
2853 	session->proto_alg = conf->protocol;
2854 	session->ctxt = DPAA_SEC_PDCP;
2855 
2856 	if (cipher_xform) {
2857 		switch (cipher_xform->algo) {
2858 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2859 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2860 			break;
2861 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2862 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2863 			break;
2864 		case RTE_CRYPTO_CIPHER_AES_CTR:
2865 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2866 			break;
2867 		case RTE_CRYPTO_CIPHER_NULL:
2868 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2869 			break;
2870 		default:
2871 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2872 				      cipher_xform->algo);
2873 			return -EINVAL;
2874 		}
2875 
2876 		session->cipher_key.data = rte_zmalloc(NULL,
2877 					       cipher_xform->key.length,
2878 					       RTE_CACHE_LINE_SIZE);
2879 		if (session->cipher_key.data == NULL &&
2880 				cipher_xform->key.length > 0) {
2881 			DPAA_SEC_ERR("No Memory for cipher key");
2882 			return -ENOMEM;
2883 		}
2884 		session->cipher_key.length = cipher_xform->key.length;
2885 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2886 			cipher_xform->key.length);
2887 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2888 					DIR_ENC : DIR_DEC;
2889 		session->cipher_alg = cipher_xform->algo;
2890 	} else {
2891 		session->cipher_key.data = NULL;
2892 		session->cipher_key.length = 0;
2893 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2894 		session->dir = DIR_ENC;
2895 	}
2896 
2897 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2898 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2899 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2900 			DPAA_SEC_ERR(
2901 				"PDCP Seq Num size should be 5/12 bits for cmode");
2902 			ret = -EINVAL;
2903 			goto out;
2904 		}
2905 	}
2906 
2907 	if (auth_xform) {
2908 		switch (auth_xform->algo) {
2909 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2910 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2911 			break;
2912 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
2913 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2914 			break;
2915 		case RTE_CRYPTO_AUTH_AES_CMAC:
2916 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2917 			break;
2918 		case RTE_CRYPTO_AUTH_NULL:
2919 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2920 			break;
2921 		default:
2922 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2923 				      auth_xform->algo);
2924 			rte_free(session->cipher_key.data);
2925 			return -EINVAL;
2926 		}
2927 		session->auth_key.data = rte_zmalloc(NULL,
2928 						     auth_xform->key.length,
2929 						     RTE_CACHE_LINE_SIZE);
2930 		if (!session->auth_key.data &&
2931 		    auth_xform->key.length > 0) {
2932 			DPAA_SEC_ERR("No Memory for auth key");
2933 			rte_free(session->cipher_key.data);
2934 			return -ENOMEM;
2935 		}
2936 		session->auth_key.length = auth_xform->key.length;
2937 		memcpy(session->auth_key.data, auth_xform->key.data,
2938 		       auth_xform->key.length);
2939 		session->auth_alg = auth_xform->algo;
2940 	} else {
2941 		session->auth_key.data = NULL;
2942 		session->auth_key.length = 0;
2943 		session->auth_alg = 0;
2944 	}
2945 	session->pdcp.domain = pdcp_xform->domain;
2946 	session->pdcp.bearer = pdcp_xform->bearer;
2947 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2948 	session->pdcp.sn_size = pdcp_xform->sn_size;
2949 	session->pdcp.hfn = pdcp_xform->hfn;
2950 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2951 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2952 	session->pdcp.hfn_ovd_offset = cipher_xform ? cipher_xform->iv.offset : 0;
2953 
2954 	rte_spinlock_lock(&dev_priv->lock);
2955 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2956 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2957 		if (session->inq[i] == NULL) {
2958 			DPAA_SEC_ERR("unable to attach sec queue");
2959 			rte_spinlock_unlock(&dev_priv->lock);
2960 			ret = -EBUSY;
2961 			goto out;
2962 		}
2963 	}
2964 	rte_spinlock_unlock(&dev_priv->lock);
2965 	return 0;
2966 out:
2967 	rte_free(session->auth_key.data);
2968 	rte_free(session->cipher_key.data);
2969 	memset(session, 0, sizeof(dpaa_sec_session));
2970 	return ret;
2971 }
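
/*
 * Illustrative sketch (not part of the PMD): a PDCP data-plane session conf
 * for dpaa_sec_set_pdcp_session(). Bearer, HFN values and the crypto xform
 * are placeholder assumptions.
 */
#if 0
static struct rte_security_session *
example_pdcp_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		     struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.bearer = 0x1,
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
			.hfn = 0x1,
			.hfn_threshold = 0x70c0a,
			.hfn_ovrd = 0,
		},
		.crypto_xform = xform,	/* e.g. SNOW3G UEA2 cipher */
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}
#endif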
2972 
2973 static int
2974 dpaa_sec_security_session_create(void *dev,
2975 				 struct rte_security_session_conf *conf,
2976 				 struct rte_security_session *sess,
2977 				 struct rte_mempool *mempool)
2978 {
2979 	void *sess_private_data;
2980 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2981 	int ret;
2982 
2983 	if (rte_mempool_get(mempool, &sess_private_data)) {
2984 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2985 		return -ENOMEM;
2986 	}
2987 
2988 	switch (conf->protocol) {
2989 	case RTE_SECURITY_PROTOCOL_IPSEC:
2990 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2991 				sess_private_data);
2992 		break;
2993 	case RTE_SECURITY_PROTOCOL_PDCP:
2994 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
2995 				sess_private_data);
2996 		break;
2997 	case RTE_SECURITY_PROTOCOL_MACSEC:
2998 		return -ENOTSUP;
2999 	default:
3000 		return -EINVAL;
3001 	}
3002 	if (ret != 0) {
3003 		DPAA_SEC_ERR("failed to configure session parameters");
3004 		/* Return session to mempool */
3005 		rte_mempool_put(mempool, sess_private_data);
3006 		return ret;
3007 	}
3008 
3009 	set_sec_session_private_data(sess, sess_private_data);
3010 
3011 	return ret;
3012 }
3013 
3014 /** Clear the memory of session so it doesn't leave key material behind */
3015 static int
3016 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3017 		struct rte_security_session *sess)
3018 {
3019 	PMD_INIT_FUNC_TRACE();
3020 	void *sess_priv = get_sec_session_private_data(sess);
3021 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3022 
3023 	if (sess_priv) {
3024 		free_session_memory((struct rte_cryptodev *)dev, s);
3025 		set_sec_session_private_data(sess, NULL);
3026 	}
3027 	return 0;
3028 }
3029 #endif
3030 static int
3031 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3032 		       struct rte_cryptodev_config *config __rte_unused)
3033 {
3034 	PMD_INIT_FUNC_TRACE();
3035 
3036 	return 0;
3037 }
3038 
3039 static int
3040 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3041 {
3042 	PMD_INIT_FUNC_TRACE();
3043 	return 0;
3044 }
3045 
3046 static void
3047 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3048 {
3049 	PMD_INIT_FUNC_TRACE();
3050 }
3051 
3052 static int
3053 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3054 {
3055 	PMD_INIT_FUNC_TRACE();
3056 
3057 	if (dev == NULL)
3058 		return -ENODEV;
3059 
3060 	return 0;
3061 }
3062 
3063 static void
3064 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3065 		       struct rte_cryptodev_info *info)
3066 {
3067 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3068 
3069 	PMD_INIT_FUNC_TRACE();
3070 	if (info != NULL) {
3071 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3072 		info->feature_flags = dev->feature_flags;
3073 		info->capabilities = dpaa_sec_capabilities;
3074 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3075 		info->driver_id = cryptodev_driver_id;
3076 	}
3077 }
3078 
3079 static enum qman_cb_dqrr_result
3080 dpaa_sec_process_parallel_event(void *event,
3081 			struct qman_portal *qm __always_unused,
3082 			struct qman_fq *outq,
3083 			const struct qm_dqrr_entry *dqrr,
3084 			void **bufs)
3085 {
3086 	const struct qm_fd *fd;
3087 	struct dpaa_sec_job *job;
3088 	struct dpaa_sec_op_ctx *ctx;
3089 	struct rte_event *ev = (struct rte_event *)event;
3090 
3091 	fd = &dqrr->fd;
3092 
3093 	/* sg is embedded in an op ctx,
3094 	 * sg[0] is for output
3095 	 * sg[1] for input
3096 	 */
3097 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3098 
3099 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3100 	ctx->fd_status = fd->status;
3101 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3102 		struct qm_sg_entry *sg_out;
3103 		uint32_t len;
3104 
3105 		sg_out = &job->sg[0];
3106 		hw_sg_to_cpu(sg_out);
3107 		len = sg_out->length;
3108 		ctx->op->sym->m_src->pkt_len = len;
3109 		ctx->op->sym->m_src->data_len = len;
3110 	}
3111 	if (!ctx->fd_status) {
3112 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3113 	} else {
3114 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3115 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3116 	}
3117 	ev->event_ptr = (void *)ctx->op;
3118 
3119 	ev->flow_id = outq->ev.flow_id;
3120 	ev->sub_event_type = outq->ev.sub_event_type;
3121 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3122 	ev->op = RTE_EVENT_OP_NEW;
3123 	ev->sched_type = outq->ev.sched_type;
3124 	ev->queue_id = outq->ev.queue_id;
3125 	ev->priority = outq->ev.priority;
3126 	*bufs = (void *)ctx->op;
3127 
3128 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3129 
3130 	return qman_cb_dqrr_consume;
3131 }
3132 
3133 static enum qman_cb_dqrr_result
3134 dpaa_sec_process_atomic_event(void *event,
3135 			struct qman_portal *qm __rte_unused,
3136 			struct qman_fq *outq,
3137 			const struct qm_dqrr_entry *dqrr,
3138 			void **bufs)
3139 {
3140 	u8 index;
3141 	const struct qm_fd *fd;
3142 	struct dpaa_sec_job *job;
3143 	struct dpaa_sec_op_ctx *ctx;
3144 	struct rte_event *ev = (struct rte_event *)event;
3145 
3146 	fd = &dqrr->fd;
3147 
3148 	/* sg is embedded in an op ctx,
3149 	 * sg[0] is for output
3150 	 * sg[1] for input
3151 	 */
3152 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3153 
3154 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3155 	ctx->fd_status = fd->status;
3156 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3157 		struct qm_sg_entry *sg_out;
3158 		uint32_t len;
3159 
3160 		sg_out = &job->sg[0];
3161 		hw_sg_to_cpu(sg_out);
3162 		len = sg_out->length;
3163 		ctx->op->sym->m_src->pkt_len = len;
3164 		ctx->op->sym->m_src->data_len = len;
3165 	}
3166 	if (!ctx->fd_status) {
3167 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3168 	} else {
3169 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3170 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3171 	}
3172 	ev->event_ptr = (void *)ctx->op;
3173 	ev->flow_id = outq->ev.flow_id;
3174 	ev->sub_event_type = outq->ev.sub_event_type;
3175 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3176 	ev->op = RTE_EVENT_OP_NEW;
3177 	ev->sched_type = outq->ev.sched_type;
3178 	ev->queue_id = outq->ev.queue_id;
3179 	ev->priority = outq->ev.priority;
3180 
3181 	/* Save active dqrr entries */
3182 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3183 	DPAA_PER_LCORE_DQRR_SIZE++;
3184 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3185 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3186 	ev->impl_opaque = index + 1;
3187 	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3188 	*bufs = (void *)ctx->op;
3189 
3190 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3191 
3192 	return qman_cb_dqrr_defer;
3193 }
3194 
3195 int
3196 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3197 		int qp_id,
3198 		uint16_t ch_id,
3199 		const struct rte_event *event)
3200 {
3201 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3202 	struct qm_mcc_initfq opts = {0};
3203 
3204 	int ret;
3205 
3206 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3207 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3208 	opts.fqd.dest.channel = ch_id;
3209 
3210 	switch (event->sched_type) {
3211 	case RTE_SCHED_TYPE_ATOMIC:
3212 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3213 		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3214 		 * configuration when HOLD_ACTIVE is set
3215 		 */
3216 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3217 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3218 		break;
3219 	case RTE_SCHED_TYPE_ORDERED:
3220 		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3221 		return -ENOTSUP;
3222 	default:
3223 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3224 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3225 		break;
3226 	}
3227 
3228 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3229 	if (unlikely(ret)) {
3230 		DPAA_SEC_ERR("unable to init caam source fq!");
3231 		return ret;
3232 	}
3233 
3234 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3235 
3236 	return 0;
3237 }
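
/*
 * Illustrative sketch (not part of the PMD): binding SEC qp 0 to an event
 * queue, as the DPAA event crypto adapter is expected to do. ch_id is the
 * DPAA channel of the target event port; all values are placeholders.
 */
#if 0
static int
example_eventq_bind(const struct rte_cryptodev *cdev, uint16_t ch_id)
{
	struct rte_event ev = {
		.queue_id = 0,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	/* Completions for qp 0 are now delivered as events through
	 * dpaa_sec_process_atomic_event() instead of dequeue_burst().
	 */
	return dpaa_sec_eventq_attach(cdev, 0, ch_id, &ev);
}
#endif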
3238 
3239 int
3240 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3241 			int qp_id)
3242 {
3243 	struct qm_mcc_initfq opts = {0};
3244 	int ret;
3245 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3246 
3247 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3248 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3249 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3250 	qp->outq.cb.ern  = ern_sec_fq_handler;
3251 	qman_retire_fq(&qp->outq, NULL);
3252 	qman_oos_fq(&qp->outq);
3253 	ret = qman_init_fq(&qp->outq, 0, &opts);
3254 	if (ret)
3255 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3256 	qp->outq.cb.dqrr = NULL;
3257 
3258 	return ret;
3259 }
3260 
3261 static struct rte_cryptodev_ops crypto_ops = {
3262 	.dev_configure	      = dpaa_sec_dev_configure,
3263 	.dev_start	      = dpaa_sec_dev_start,
3264 	.dev_stop	      = dpaa_sec_dev_stop,
3265 	.dev_close	      = dpaa_sec_dev_close,
3266 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3267 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3268 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3269 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3270 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3271 	.sym_session_clear        = dpaa_sec_sym_session_clear
3272 };
3273 
3274 #ifdef RTE_LIBRTE_SECURITY
3275 static const struct rte_security_capability *
3276 dpaa_sec_capabilities_get(void *device __rte_unused)
3277 {
3278 	return dpaa_sec_security_cap;
3279 }
3280 
3281 static const struct rte_security_ops dpaa_sec_security_ops = {
3282 	.session_create = dpaa_sec_security_session_create,
3283 	.session_update = NULL,
3284 	.session_stats_get = NULL,
3285 	.session_destroy = dpaa_sec_security_session_destroy,
3286 	.set_pkt_metadata = NULL,
3287 	.capabilities_get = dpaa_sec_capabilities_get
3288 };
3289 #endif
3290 static int
3291 dpaa_sec_uninit(struct rte_cryptodev *dev)
3292 {
3293 	struct dpaa_sec_dev_private *internals;
3294 
3295 	if (dev == NULL)
3296 		return -ENODEV;
3297 
3298 	internals = dev->data->dev_private;
3299 	rte_free(dev->security_ctx);
3300 
3301 	rte_free(internals);
3302 
3303 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3304 		      dev->data->name, rte_socket_id());
3305 
3306 	return 0;
3307 }
3308 
3309 static int
3310 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3311 {
3312 	struct dpaa_sec_dev_private *internals;
3313 #ifdef RTE_LIBRTE_SECURITY
3314 	struct rte_security_ctx *security_instance;
3315 #endif
3316 	struct dpaa_sec_qp *qp;
3317 	uint32_t i, flags;
3318 	int ret;
3319 
3320 	PMD_INIT_FUNC_TRACE();
3321 
3322 	cryptodev->driver_id = cryptodev_driver_id;
3323 	cryptodev->dev_ops = &crypto_ops;
3324 
3325 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3326 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3327 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3328 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3329 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3330 			RTE_CRYPTODEV_FF_SECURITY |
3331 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3332 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3333 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3334 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3335 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3336 
3337 	internals = cryptodev->data->dev_private;
3338 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3339 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3340 
3341 	/*
3342 	 * For secondary processes, we don't initialise any further, as the
3343 	 * primary process has already done this work; the device private
3344 	 * data allocated above lives in shared memory and is reused here.
3345 	 */
3346 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3347 		DPAA_SEC_WARN("Device already initialized by primary process");
3348 		return 0;
3349 	}
3350 #ifdef RTE_LIBRTE_SECURITY
3351 	/* Initialize security_ctx only for primary process*/
3352 	security_instance = rte_malloc("rte_security_instances_ops",
3353 				sizeof(struct rte_security_ctx), 0);
3354 	if (security_instance == NULL)
3355 		return -ENOMEM;
3356 	security_instance->device = (void *)cryptodev;
3357 	security_instance->ops = &dpaa_sec_security_ops;
3358 	security_instance->sess_cnt = 0;
3359 	cryptodev->security_ctx = security_instance;
3360 #endif
3361 	rte_spinlock_init(&internals->lock);
3362 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3363 		/* init qman fq for queue pair */
3364 		qp = &internals->qps[i];
3365 		ret = dpaa_sec_init_tx(&qp->outq);
3366 		if (ret) {
3367 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3368 			goto init_error;
3369 		}
3370 	}
3371 
3372 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3373 		QMAN_FQ_FLAG_TO_DCPORTAL;
3374 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3375 		/* create rx qman fq for sessions*/
3376 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3377 		if (unlikely(ret != 0)) {
3378 			DPAA_SEC_ERR("sec qman_create_fq failed");
3379 			goto init_error;
3380 		}
3381 	}
3382 
3383 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3384 	return 0;
3385 
3386 init_error:
3387 	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3388 
3389 	rte_free(cryptodev->security_ctx);
3390 	return -EFAULT;
3391 }
3392 
3393 static int
3394 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3395 				struct rte_dpaa_device *dpaa_dev)
3396 {
3397 	struct rte_cryptodev *cryptodev;
3398 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3399 
3400 	int retval;
3401 
3402 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3403 
3404 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3405 	if (cryptodev == NULL)
3406 		return -ENOMEM;
3407 
3408 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3409 		cryptodev->data->dev_private = rte_zmalloc_socket(
3410 					"cryptodev private structure",
3411 					sizeof(struct dpaa_sec_dev_private),
3412 					RTE_CACHE_LINE_SIZE,
3413 					rte_socket_id());
3414 
3415 		if (cryptodev->data->dev_private == NULL)
3416 			rte_panic("Cannot allocate memory for private "
3417 					"device data");
3418 	}
3419 
3420 	dpaa_dev->crypto_dev = cryptodev;
3421 	cryptodev->device = &dpaa_dev->device;
3422 
3423 	/* init user callbacks */
3424 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3425 
3426 	/* if sec device version is not configured */
3427 	if (!rta_get_sec_era()) {
3428 		const struct device_node *caam_node;
3429 
3430 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3431 			const uint32_t *prop = of_get_property(caam_node,
3432 					"fsl,sec-era",
3433 					NULL);
3434 			if (prop) {
3435 				rta_set_sec_era(
3436 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3437 				break;
3438 			}
3439 		}
3440 	}
3441 
3442 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3443 		retval = rte_dpaa_portal_init((void *)1);
3444 		if (retval) {
3445 			DPAA_SEC_ERR("Unable to initialize portal");
3446 			goto out;
3447 		}
3448 	}
3449 
3450 	/* Invoke PMD device initialization function */
3451 	retval = dpaa_sec_dev_init(cryptodev);
3452 	if (retval == 0)
3453 		return 0;
3454 
3455 	retval = -ENXIO;
3456 out:
3457 	/* In case of error, cleanup is done */
3458 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3459 		rte_free(cryptodev->data->dev_private);
3460 
3461 	rte_cryptodev_pmd_release_device(cryptodev);
3462 
3463 	return retval;
3464 }
3465 
3466 static int
3467 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3468 {
3469 	struct rte_cryptodev *cryptodev;
3470 	int ret;
3471 
3472 	cryptodev = dpaa_dev->crypto_dev;
3473 	if (cryptodev == NULL)
3474 		return -ENODEV;
3475 
3476 	ret = dpaa_sec_uninit(cryptodev);
3477 	if (ret)
3478 		return ret;
3479 
3480 	return rte_cryptodev_pmd_destroy(cryptodev);
3481 }
3482 
3483 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3484 	.drv_type = FSL_DPAA_CRYPTO,
3485 	.driver = {
3486 		.name = "DPAA SEC PMD"
3487 	},
3488 	.probe = cryptodev_dpaa_sec_probe,
3489 	.remove = cryptodev_dpaa_sec_remove,
3490 };
3491 
3492 static struct cryptodev_driver dpaa_sec_crypto_drv;
3493 
3494 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3495 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3496 		cryptodev_driver_id);
3497 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3498