xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 33bcaae5f85ad805ee287bee42013e61a1cff6fa)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27 
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31 
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37 
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_event.h>
41 #include <dpaa_sec_log.h>
42 #include <dpaax_iova_table.h>
43 
44 enum rta_sec_era rta_sec_era;
45 
46 int dpaa_logtype_sec;
47 
48 static uint8_t cryptodev_driver_id;
49 
50 static __thread struct rte_crypto_op **dpaa_sec_ops;
51 static __thread int dpaa_sec_op_nb;
52 
53 static int
54 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
55 
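/* set the crypto op status based on the SEC frame descriptor status */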
56 static inline void
57 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
58 {
59 	if (!ctx->fd_status) {
60 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
61 	} else {
62 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
63 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
64 	}
65 }
66 
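/* allocate an op context (with embedded SG table) from the session's per-lcore ctx pool */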
67 static inline struct dpaa_sec_op_ctx *
68 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
69 {
70 	struct dpaa_sec_op_ctx *ctx;
71 	int i, retval;
72 
73 	retval = rte_mempool_get(
74 			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
75 			(void **)(&ctx));
76 	if (!ctx || retval) {
77 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78 		return NULL;
79 	}
80 	/*
81 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
82 	 * One call to dcbz_64() clears 64 bytes, so it is called once per
83 	 * four entries to clear all the SG entries. dpaa_sec_alloc_ctx() is
84 	 * called for each packet, so memset() would be costlier than dcbz_64().
85 	 */
86 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
87 		dcbz_64(&ctx->job.sg[i]);
88 
89 	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
90 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
91 
92 	return ctx;
93 }
94 
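/* convert a virtual address to an IOVA, updating the IOVA table for the containing memseg */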
95 static inline rte_iova_t
96 dpaa_mem_vtop(void *vaddr)
97 {
98 	const struct rte_memseg *ms;
99 
100 	ms = rte_mem_virt2memseg(vaddr, NULL);
101 	if (ms) {
102 		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
103 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
104 	}
105 	return (size_t)NULL;
106 }
107 
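/* convert an IOVA back to a virtual address, using the IOVA table fast path when possible */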
108 static inline void *
109 dpaa_mem_ptov(rte_iova_t paddr)
110 {
111 	void *va;
112 
113 	va = (void *)dpaax_iova_table_get_va(paddr);
114 	if (likely(va))
115 		return va;
116 
117 	return rte_mem_iova2virt(paddr);
118 }
119 
120 static void
121 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
122 		   struct qman_fq *fq,
123 		   const struct qm_mr_entry *msg)
124 {
125 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
126 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
127 }
128 
129 /* Initialize the queue with the CAAM channel as destination so that
130  * all packets enqueued on this queue are dispatched to CAAM
131  */
132 static int
133 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
134 		 uint32_t fqid_out)
135 {
136 	struct qm_mcc_initfq fq_opts;
137 	uint32_t flags;
138 	int ret = -1;
139 
140 	/* Clear FQ options */
141 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
142 
143 	flags = QMAN_INITFQ_FLAG_SCHED;
144 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
145 			  QM_INITFQ_WE_CONTEXTB;
146 
147 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
148 	fq_opts.fqd.context_b = fqid_out;
149 	fq_opts.fqd.dest.channel = qm_channel_caam;
150 	fq_opts.fqd.dest.wq = 0;
151 
152 	fq_in->cb.ern  = ern_sec_fq_handler;
153 
154 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
155 
156 	ret = qman_init_fq(fq_in, flags, &fq_opts);
157 	if (unlikely(ret != 0))
158 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
159 
160 	return ret;
161 }
162 
163 /* frames are enqueued on in_fq and CAAM puts the crypto result into out_fq */
164 static enum qman_cb_dqrr_result
165 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
166 		  struct qman_fq *fq __always_unused,
167 		  const struct qm_dqrr_entry *dqrr)
168 {
169 	const struct qm_fd *fd;
170 	struct dpaa_sec_job *job;
171 	struct dpaa_sec_op_ctx *ctx;
172 
173 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
174 		return qman_cb_dqrr_defer;
175 
176 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
177 		return qman_cb_dqrr_consume;
178 
179 	fd = &dqrr->fd;
180 	/* sg is embedded in an op ctx,
181 	 * sg[0] is for output,
182 	 * sg[1] is for input
183 	 */
184 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
185 
186 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
187 	ctx->fd_status = fd->status;
188 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
189 		struct qm_sg_entry *sg_out;
190 		uint32_t len;
191 		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
192 				ctx->op->sym->m_src : ctx->op->sym->m_dst;
193 
194 		sg_out = &job->sg[0];
195 		hw_sg_to_cpu(sg_out);
196 		len = sg_out->length;
197 		mbuf->pkt_len = len;
198 		while (mbuf->next != NULL) {
199 			len -= mbuf->data_len;
200 			mbuf = mbuf->next;
201 		}
202 		mbuf->data_len = len;
203 	}
204 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
205 	dpaa_sec_op_ending(ctx);
206 
207 	return qman_cb_dqrr_consume;
208 }
209 
210 /* CAAM results are put into this queue */
211 static int
212 dpaa_sec_init_tx(struct qman_fq *fq)
213 {
214 	int ret;
215 	struct qm_mcc_initfq opts;
216 	uint32_t flags;
217 
218 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
219 		QMAN_FQ_FLAG_DYNAMIC_FQID;
220 
221 	ret = qman_create_fq(0, flags, fq);
222 	if (unlikely(ret)) {
223 		DPAA_SEC_ERR("qman_create_fq failed");
224 		return ret;
225 	}
226 
227 	memset(&opts, 0, sizeof(opts));
228 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
229 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
230 
231 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
232 
233 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
234 	fq->cb.ern  = ern_sec_fq_handler;
235 
236 	ret = qman_init_fq(fq, 0, &opts);
237 	if (unlikely(ret)) {
238 		DPAA_SEC_ERR("unable to init caam source fq!");
239 		return ret;
240 	}
241 
242 	return ret;
243 }
244 
245 static inline int is_encode(dpaa_sec_session *ses)
246 {
247 	return ses->dir == DIR_ENC;
248 }
249 
250 static inline int is_decode(dpaa_sec_session *ses)
251 {
252 	return ses->dir == DIR_DEC;
253 }
254 
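/* prepare pdcp command block of the session */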
255 static int
256 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
257 {
258 	struct alginfo authdata = {0}, cipherdata = {0};
259 	struct sec_cdb *cdb = &ses->cdb;
260 	struct alginfo *p_authdata = NULL;
261 	int32_t shared_desc_len = 0;
262 	int err;
263 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
264 	int swap = false;
265 #else
266 	int swap = true;
267 #endif
268 
269 	cipherdata.key = (size_t)ses->cipher_key.data;
270 	cipherdata.keylen = ses->cipher_key.length;
271 	cipherdata.key_enc_flags = 0;
272 	cipherdata.key_type = RTA_DATA_IMM;
273 	cipherdata.algtype = ses->cipher_key.alg;
274 	cipherdata.algmode = ses->cipher_key.algmode;
275 
276 	cdb->sh_desc[0] = cipherdata.keylen;
277 	cdb->sh_desc[1] = 0;
278 	cdb->sh_desc[2] = 0;
279 
280 	if (ses->auth_alg) {
281 		authdata.key = (size_t)ses->auth_key.data;
282 		authdata.keylen = ses->auth_key.length;
283 		authdata.key_enc_flags = 0;
284 		authdata.key_type = RTA_DATA_IMM;
285 		authdata.algtype = ses->auth_key.alg;
286 		authdata.algmode = ses->auth_key.algmode;
287 
288 		p_authdata = &authdata;
289 
290 		cdb->sh_desc[1] = authdata.keylen;
291 	}
292 
293 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
294 			       MIN_JOB_DESC_SIZE,
295 			       (unsigned int *)cdb->sh_desc,
296 			       &cdb->sh_desc[2], 2);
297 	if (err < 0) {
298 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
299 		return err;
300 	}
301 
302 	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
303 		cipherdata.key =
304 			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
305 		cipherdata.key_type = RTA_DATA_PTR;
306 	}
307 	if (!(cdb->sh_desc[2] & (1 << 1)) &&  authdata.keylen) {
308 		authdata.key =
309 			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
310 		authdata.key_type = RTA_DATA_PTR;
311 	}
312 
313 	cdb->sh_desc[0] = 0;
314 	cdb->sh_desc[1] = 0;
315 	cdb->sh_desc[2] = 0;
316 
317 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
318 		if (ses->dir == DIR_ENC)
319 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
320 					cdb->sh_desc, 1, swap,
321 					ses->pdcp.hfn,
322 					ses->pdcp.sn_size,
323 					ses->pdcp.bearer,
324 					ses->pdcp.pkt_dir,
325 					ses->pdcp.hfn_threshold,
326 					&cipherdata, &authdata,
327 					0);
328 		else if (ses->dir == DIR_DEC)
329 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
330 					cdb->sh_desc, 1, swap,
331 					ses->pdcp.hfn,
332 					ses->pdcp.sn_size,
333 					ses->pdcp.bearer,
334 					ses->pdcp.pkt_dir,
335 					ses->pdcp.hfn_threshold,
336 					&cipherdata, &authdata,
337 					0);
338 	} else {
339 		if (ses->dir == DIR_ENC)
340 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
341 					cdb->sh_desc, 1, swap,
342 					ses->pdcp.sn_size,
343 					ses->pdcp.hfn,
344 					ses->pdcp.bearer,
345 					ses->pdcp.pkt_dir,
346 					ses->pdcp.hfn_threshold,
347 					&cipherdata, p_authdata, 0);
348 		else if (ses->dir == DIR_DEC)
349 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
350 					cdb->sh_desc, 1, swap,
351 					ses->pdcp.sn_size,
352 					ses->pdcp.hfn,
353 					ses->pdcp.bearer,
354 					ses->pdcp.pkt_dir,
355 					ses->pdcp.hfn_threshold,
356 					&cipherdata, p_authdata, 0);
357 	}
358 
359 	return shared_desc_len;
360 }
361 
362 /* prepare ipsec proto command block of the session */
363 static int
364 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
365 {
366 	struct alginfo cipherdata = {0}, authdata = {0};
367 	struct sec_cdb *cdb = &ses->cdb;
368 	int32_t shared_desc_len = 0;
369 	int err;
370 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
371 	int swap = false;
372 #else
373 	int swap = true;
374 #endif
375 
376 	cipherdata.key = (size_t)ses->cipher_key.data;
377 	cipherdata.keylen = ses->cipher_key.length;
378 	cipherdata.key_enc_flags = 0;
379 	cipherdata.key_type = RTA_DATA_IMM;
380 	cipherdata.algtype = ses->cipher_key.alg;
381 	cipherdata.algmode = ses->cipher_key.algmode;
382 
383 	authdata.key = (size_t)ses->auth_key.data;
384 	authdata.keylen = ses->auth_key.length;
385 	authdata.key_enc_flags = 0;
386 	authdata.key_type = RTA_DATA_IMM;
387 	authdata.algtype = ses->auth_key.alg;
388 	authdata.algmode = ses->auth_key.algmode;
389 
390 	cdb->sh_desc[0] = cipherdata.keylen;
391 	cdb->sh_desc[1] = authdata.keylen;
392 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
393 			       MIN_JOB_DESC_SIZE,
394 			       (unsigned int *)cdb->sh_desc,
395 			       &cdb->sh_desc[2], 2);
396 
397 	if (err < 0) {
398 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
399 		return err;
400 	}
401 	if (cdb->sh_desc[2] & 1)
402 		cipherdata.key_type = RTA_DATA_IMM;
403 	else {
404 		cipherdata.key = (size_t)dpaa_mem_vtop(
405 					(void *)(size_t)cipherdata.key);
406 		cipherdata.key_type = RTA_DATA_PTR;
407 	}
408 	if (cdb->sh_desc[2] & (1<<1))
409 		authdata.key_type = RTA_DATA_IMM;
410 	else {
411 		authdata.key = (size_t)dpaa_mem_vtop(
412 					(void *)(size_t)authdata.key);
413 		authdata.key_type = RTA_DATA_PTR;
414 	}
415 
416 	cdb->sh_desc[0] = 0;
417 	cdb->sh_desc[1] = 0;
418 	cdb->sh_desc[2] = 0;
419 	if (ses->dir == DIR_ENC) {
420 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
421 				cdb->sh_desc,
422 				true, swap, SHR_SERIAL,
423 				&ses->encap_pdb,
424 				(uint8_t *)&ses->ip4_hdr,
425 				&cipherdata, &authdata);
426 	} else if (ses->dir == DIR_DEC) {
427 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
428 				cdb->sh_desc,
429 				true, swap, SHR_SERIAL,
430 				&ses->decap_pdb,
431 				&cipherdata, &authdata);
432 	}
433 	return shared_desc_len;
434 }
435 
436 /* prepare command block of the session */
437 static int
438 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
439 {
440 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
441 	int32_t shared_desc_len = 0;
442 	struct sec_cdb *cdb = &ses->cdb;
443 	int err;
444 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
445 	int swap = false;
446 #else
447 	int swap = true;
448 #endif
449 
450 	memset(cdb, 0, sizeof(struct sec_cdb));
451 
452 	switch (ses->ctxt) {
453 	case DPAA_SEC_IPSEC:
454 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
455 		break;
456 	case DPAA_SEC_PDCP:
457 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
458 		break;
459 	case DPAA_SEC_CIPHER:
460 		alginfo_c.key = (size_t)ses->cipher_key.data;
461 		alginfo_c.keylen = ses->cipher_key.length;
462 		alginfo_c.key_enc_flags = 0;
463 		alginfo_c.key_type = RTA_DATA_IMM;
464 		alginfo_c.algtype = ses->cipher_key.alg;
465 		alginfo_c.algmode = ses->cipher_key.algmode;
466 
467 		switch (ses->cipher_alg) {
468 		case RTE_CRYPTO_CIPHER_AES_CBC:
469 		case RTE_CRYPTO_CIPHER_3DES_CBC:
470 		case RTE_CRYPTO_CIPHER_AES_CTR:
471 		case RTE_CRYPTO_CIPHER_3DES_CTR:
472 			shared_desc_len = cnstr_shdsc_blkcipher(
473 					cdb->sh_desc, true,
474 					swap, SHR_NEVER, &alginfo_c,
475 					NULL,
476 					ses->iv.length,
477 					ses->dir);
478 			break;
479 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
480 			shared_desc_len = cnstr_shdsc_snow_f8(
481 					cdb->sh_desc, true, swap,
482 					&alginfo_c,
483 					ses->dir);
484 			break;
485 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
486 			shared_desc_len = cnstr_shdsc_zuce(
487 					cdb->sh_desc, true, swap,
488 					&alginfo_c,
489 					ses->dir);
490 			break;
491 		default:
492 			DPAA_SEC_ERR("unsupported cipher alg %d",
493 				     ses->cipher_alg);
494 			return -ENOTSUP;
495 		}
496 		break;
497 	case DPAA_SEC_AUTH:
498 		alginfo_a.key = (size_t)ses->auth_key.data;
499 		alginfo_a.keylen = ses->auth_key.length;
500 		alginfo_a.key_enc_flags = 0;
501 		alginfo_a.key_type = RTA_DATA_IMM;
502 		alginfo_a.algtype = ses->auth_key.alg;
503 		alginfo_a.algmode = ses->auth_key.algmode;
504 		switch (ses->auth_alg) {
505 		case RTE_CRYPTO_AUTH_MD5_HMAC:
506 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
507 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
508 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
509 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
510 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
511 			shared_desc_len = cnstr_shdsc_hmac(
512 						cdb->sh_desc, true,
513 						swap, SHR_NEVER, &alginfo_a,
514 						!ses->dir,
515 						ses->digest_length);
516 			break;
517 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
518 			shared_desc_len = cnstr_shdsc_snow_f9(
519 						cdb->sh_desc, true, swap,
520 						&alginfo_a,
521 						!ses->dir,
522 						ses->digest_length);
523 			break;
524 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
525 			shared_desc_len = cnstr_shdsc_zuca(
526 						cdb->sh_desc, true, swap,
527 						&alginfo_a,
528 						!ses->dir,
529 						ses->digest_length);
530 			break;
531 		default:
532 			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
533 		}
534 		break;
535 	case DPAA_SEC_AEAD:
536 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
537 			DPAA_SEC_ERR("not supported aead alg");
538 			return -ENOTSUP;
539 		}
540 		alginfo.key = (size_t)ses->aead_key.data;
541 		alginfo.keylen = ses->aead_key.length;
542 		alginfo.key_enc_flags = 0;
543 		alginfo.key_type = RTA_DATA_IMM;
544 		alginfo.algtype = ses->aead_key.alg;
545 		alginfo.algmode = ses->aead_key.algmode;
546 
547 		if (ses->dir == DIR_ENC)
548 			shared_desc_len = cnstr_shdsc_gcm_encap(
549 					cdb->sh_desc, true, swap, SHR_NEVER,
550 					&alginfo,
551 					ses->iv.length,
552 					ses->digest_length);
553 		else
554 			shared_desc_len = cnstr_shdsc_gcm_decap(
555 					cdb->sh_desc, true, swap, SHR_NEVER,
556 					&alginfo,
557 					ses->iv.length,
558 					ses->digest_length);
559 		break;
560 	case DPAA_SEC_CIPHER_HASH:
561 		alginfo_c.key = (size_t)ses->cipher_key.data;
562 		alginfo_c.keylen = ses->cipher_key.length;
563 		alginfo_c.key_enc_flags = 0;
564 		alginfo_c.key_type = RTA_DATA_IMM;
565 		alginfo_c.algtype = ses->cipher_key.alg;
566 		alginfo_c.algmode = ses->cipher_key.algmode;
567 
568 		alginfo_a.key = (size_t)ses->auth_key.data;
569 		alginfo_a.keylen = ses->auth_key.length;
570 		alginfo_a.key_enc_flags = 0;
571 		alginfo_a.key_type = RTA_DATA_IMM;
572 		alginfo_a.algtype = ses->auth_key.alg;
573 		alginfo_a.algmode = ses->auth_key.algmode;
574 
575 		cdb->sh_desc[0] = alginfo_c.keylen;
576 		cdb->sh_desc[1] = alginfo_a.keylen;
577 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
578 				       MIN_JOB_DESC_SIZE,
579 				       (unsigned int *)cdb->sh_desc,
580 				       &cdb->sh_desc[2], 2);
581 
582 		if (err < 0) {
583 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
584 			return err;
585 		}
586 		if (cdb->sh_desc[2] & 1)
587 			alginfo_c.key_type = RTA_DATA_IMM;
588 		else {
589 			alginfo_c.key = (size_t)dpaa_mem_vtop(
590 						(void *)(size_t)alginfo_c.key);
591 			alginfo_c.key_type = RTA_DATA_PTR;
592 		}
593 		if (cdb->sh_desc[2] & (1<<1))
594 			alginfo_a.key_type = RTA_DATA_IMM;
595 		else {
596 			alginfo_a.key = (size_t)dpaa_mem_vtop(
597 						(void *)(size_t)alginfo_a.key);
598 			alginfo_a.key_type = RTA_DATA_PTR;
599 		}
600 		cdb->sh_desc[0] = 0;
601 		cdb->sh_desc[1] = 0;
602 		cdb->sh_desc[2] = 0;
603 		/* Auth_only_len is set to 0 here; it is overwritten
604 		 * in the FD for each packet.
605 		 */
606 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
607 				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
608 				ses->iv.length,
609 				ses->digest_length, ses->dir);
610 		break;
611 	case DPAA_SEC_HASH_CIPHER:
612 	default:
613 		DPAA_SEC_ERR("error: Unsupported session");
614 		return -ENOTSUP;
615 	}
616 
617 	if (shared_desc_len < 0) {
618 		DPAA_SEC_ERR("error in preparing command block");
619 		return shared_desc_len;
620 	}
621 
622 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
623 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
624 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
625 
626 	return 0;
627 }
628 
629 /* qp is lockless; it must be accessed by only one thread */
630 static int
631 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
632 {
633 	struct qman_fq *fq;
634 	unsigned int pkts = 0;
635 	int num_rx_bufs, ret;
636 	struct qm_dqrr_entry *dq;
637 	uint32_t vdqcr_flags = 0;
638 
639 	fq = &qp->outq;
640 	/*
641 	 * For requests of fewer than four buffers, we provide the exact
642 	 * number of buffers by setting the QM_VDQCR_EXACT flag. Otherwise
643 	 * the flag is not set; without it, up to two more buffers than
644 	 * requested can be returned, so we request two fewer in that case.
645 	 */
646 	if (nb_ops < 4) {
647 		vdqcr_flags = QM_VDQCR_EXACT;
648 		num_rx_bufs = nb_ops;
649 	} else {
650 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
651 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
652 	}
653 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
654 	if (ret)
655 		return 0;
656 
657 	do {
658 		const struct qm_fd *fd;
659 		struct dpaa_sec_job *job;
660 		struct dpaa_sec_op_ctx *ctx;
661 		struct rte_crypto_op *op;
662 
663 		dq = qman_dequeue(fq);
664 		if (!dq)
665 			continue;
666 
667 		fd = &dq->fd;
668 		/* sg is embedded in an op ctx,
669 		 * sg[0] is for output,
670 		 * sg[1] is for input
671 		 */
672 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
673 
674 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
675 		ctx->fd_status = fd->status;
676 		op = ctx->op;
677 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
678 			struct qm_sg_entry *sg_out;
679 			uint32_t len;
680 			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
681 						op->sym->m_src : op->sym->m_dst;
682 
683 			sg_out = &job->sg[0];
684 			hw_sg_to_cpu(sg_out);
685 			len = sg_out->length;
686 			mbuf->pkt_len = len;
687 			while (mbuf->next != NULL) {
688 				len -= mbuf->data_len;
689 				mbuf = mbuf->next;
690 			}
691 			mbuf->data_len = len;
692 		}
693 		if (!ctx->fd_status) {
694 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
695 		} else {
696 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
697 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
698 		}
699 		ops[pkts++] = op;
700 
701 		/* report op status to sym->op and then free the ctx memory */
702 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
703 
704 		qman_dqrr_consume(fq, dq);
705 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
706 
707 	return pkts;
708 }
709 
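/* build compound frame for an auth-only op on a scatter-gather mbuf */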
710 static inline struct dpaa_sec_job *
711 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
712 {
713 	struct rte_crypto_sym_op *sym = op->sym;
714 	struct rte_mbuf *mbuf = sym->m_src;
715 	struct dpaa_sec_job *cf;
716 	struct dpaa_sec_op_ctx *ctx;
717 	struct qm_sg_entry *sg, *out_sg, *in_sg;
718 	phys_addr_t start_addr;
719 	uint8_t *old_digest, extra_segs;
720 	int data_len, data_offset;
721 
722 	data_len = sym->auth.data.length;
723 	data_offset = sym->auth.data.offset;
724 
725 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
726 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
727 		if ((data_len & 7) || (data_offset & 7)) {
728 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
729 			return NULL;
730 		}
731 
732 		data_len = data_len >> 3;
733 		data_offset = data_offset >> 3;
734 	}
735 
736 	if (is_decode(ses))
737 		extra_segs = 3;
738 	else
739 		extra_segs = 2;
740 
741 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
742 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
743 				MAX_SG_ENTRIES);
744 		return NULL;
745 	}
746 	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
747 	if (!ctx)
748 		return NULL;
749 
750 	cf = &ctx->job;
751 	ctx->op = op;
752 	old_digest = ctx->digest;
753 
754 	/* output */
755 	out_sg = &cf->sg[0];
756 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
757 	out_sg->length = ses->digest_length;
758 	cpu_to_hw_sg(out_sg);
759 
760 	/* input */
761 	in_sg = &cf->sg[1];
762 	/* need to extend the input to a compound frame */
763 	in_sg->extension = 1;
764 	in_sg->final = 1;
765 	in_sg->length = data_len;
766 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
767 
768 	/* 1st seg */
769 	sg = in_sg + 1;
770 
771 	if (ses->iv.length) {
772 		uint8_t *iv_ptr;
773 
774 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
775 						   ses->iv.offset);
776 
777 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
778 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
779 			sg->length = 12;
780 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
781 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
782 			sg->length = 8;
783 		} else {
784 			sg->length = ses->iv.length;
785 		}
786 		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
787 		in_sg->length += sg->length;
788 		cpu_to_hw_sg(sg);
789 		sg++;
790 	}
791 
792 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
793 	sg->offset = data_offset;
794 
795 	if (data_len <= (mbuf->data_len - data_offset)) {
796 		sg->length = data_len;
797 	} else {
798 		sg->length = mbuf->data_len - data_offset;
799 
800 		/* remaining i/p segs */
801 		while ((data_len = data_len - sg->length) &&
802 		       (mbuf = mbuf->next)) {
803 			cpu_to_hw_sg(sg);
804 			sg++;
805 			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
806 			if (data_len > mbuf->data_len)
807 				sg->length = mbuf->data_len;
808 			else
809 				sg->length = data_len;
810 		}
811 	}
812 
813 	if (is_decode(ses)) {
814 		/* Digest verification case */
815 		cpu_to_hw_sg(sg);
816 		sg++;
817 		rte_memcpy(old_digest, sym->auth.digest.data,
818 				ses->digest_length);
819 		start_addr = dpaa_mem_vtop(old_digest);
820 		qm_sg_entry_set64(sg, start_addr);
821 		sg->length = ses->digest_length;
822 		in_sg->length += ses->digest_length;
823 	}
824 	sg->final = 1;
825 	cpu_to_hw_sg(sg);
826 	cpu_to_hw_sg(in_sg);
827 
828 	return cf;
829 }
830 
831 /**
832  * packet looks like:
833  *		|<----data_len------->|
834  *    |ip_header|ah_header|icv|payload|
835  *              ^
836  *		|
837  *	   mbuf->pkt.data
838  */
839 static inline struct dpaa_sec_job *
840 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
841 {
842 	struct rte_crypto_sym_op *sym = op->sym;
843 	struct rte_mbuf *mbuf = sym->m_src;
844 	struct dpaa_sec_job *cf;
845 	struct dpaa_sec_op_ctx *ctx;
846 	struct qm_sg_entry *sg, *in_sg;
847 	rte_iova_t start_addr;
848 	uint8_t *old_digest;
849 	int data_len, data_offset;
850 
851 	data_len = sym->auth.data.length;
852 	data_offset = sym->auth.data.offset;
853 
854 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
855 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
856 		if ((data_len & 7) || (data_offset & 7)) {
857 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
858 			return NULL;
859 		}
860 
861 		data_len = data_len >> 3;
862 		data_offset = data_offset >> 3;
863 	}
864 
865 	ctx = dpaa_sec_alloc_ctx(ses, 4);
866 	if (!ctx)
867 		return NULL;
868 
869 	cf = &ctx->job;
870 	ctx->op = op;
871 	old_digest = ctx->digest;
872 
873 	start_addr = rte_pktmbuf_iova(mbuf);
874 	/* output */
875 	sg = &cf->sg[0];
876 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
877 	sg->length = ses->digest_length;
878 	cpu_to_hw_sg(sg);
879 
880 	/* input */
881 	in_sg = &cf->sg[1];
882 	/* need to extend the input to a compound frame */
883 	in_sg->extension = 1;
884 	in_sg->final = 1;
885 	in_sg->length = data_len;
886 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
887 	sg = &cf->sg[2];
888 
889 	if (ses->iv.length) {
890 		uint8_t *iv_ptr;
891 
892 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
893 						   ses->iv.offset);
894 
895 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
896 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
897 			sg->length = 12;
898 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
899 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
900 			sg->length = 8;
901 		} else {
902 			sg->length = ses->iv.length;
903 		}
904 		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
905 		in_sg->length += sg->length;
906 		cpu_to_hw_sg(sg);
907 		sg++;
908 	}
909 
910 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
911 	sg->offset = data_offset;
912 	sg->length = data_len;
913 
914 	if (is_decode(ses)) {
915 		/* Digest verification case */
916 		cpu_to_hw_sg(sg);
917 		/* hash result or digest, save digest first */
918 		rte_memcpy(old_digest, sym->auth.digest.data,
919 				ses->digest_length);
920 		/* let the hardware verify the digest */
921 		start_addr = dpaa_mem_vtop(old_digest);
922 		sg++;
923 		qm_sg_entry_set64(sg, start_addr);
924 		sg->length = ses->digest_length;
925 		in_sg->length += ses->digest_length;
926 	}
927 	sg->final = 1;
928 	cpu_to_hw_sg(sg);
929 	cpu_to_hw_sg(in_sg);
930 
931 	return cf;
932 }
933 
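/* build compound frame for a cipher-only op on a scatter-gather mbuf */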
934 static inline struct dpaa_sec_job *
935 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
936 {
937 	struct rte_crypto_sym_op *sym = op->sym;
938 	struct dpaa_sec_job *cf;
939 	struct dpaa_sec_op_ctx *ctx;
940 	struct qm_sg_entry *sg, *out_sg, *in_sg;
941 	struct rte_mbuf *mbuf;
942 	uint8_t req_segs;
943 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
944 			ses->iv.offset);
945 	int data_len, data_offset;
946 
947 	data_len = sym->cipher.data.length;
948 	data_offset = sym->cipher.data.offset;
949 
950 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
951 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
952 		if ((data_len & 7) || (data_offset & 7)) {
953 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
954 			return NULL;
955 		}
956 
957 		data_len = data_len >> 3;
958 		data_offset = data_offset >> 3;
959 	}
960 
961 	if (sym->m_dst) {
962 		mbuf = sym->m_dst;
963 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
964 	} else {
965 		mbuf = sym->m_src;
966 		req_segs = mbuf->nb_segs * 2 + 3;
967 	}
968 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
969 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
970 				MAX_SG_ENTRIES);
971 		return NULL;
972 	}
973 
974 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
975 	if (!ctx)
976 		return NULL;
977 
978 	cf = &ctx->job;
979 	ctx->op = op;
980 
981 	/* output */
982 	out_sg = &cf->sg[0];
983 	out_sg->extension = 1;
984 	out_sg->length = data_len;
985 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
986 	cpu_to_hw_sg(out_sg);
987 
988 	/* 1st seg */
989 	sg = &cf->sg[2];
990 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
991 	sg->length = mbuf->data_len - data_offset;
992 	sg->offset = data_offset;
993 
994 	/* Successive segs */
995 	mbuf = mbuf->next;
996 	while (mbuf) {
997 		cpu_to_hw_sg(sg);
998 		sg++;
999 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1000 		sg->length = mbuf->data_len;
1001 		mbuf = mbuf->next;
1002 	}
1003 	sg->final = 1;
1004 	cpu_to_hw_sg(sg);
1005 
1006 	/* input */
1007 	mbuf = sym->m_src;
1008 	in_sg = &cf->sg[1];
1009 	in_sg->extension = 1;
1010 	in_sg->final = 1;
1011 	in_sg->length = data_len + ses->iv.length;
1012 
1013 	sg++;
1014 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1015 	cpu_to_hw_sg(in_sg);
1016 
1017 	/* IV */
1018 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1019 	sg->length = ses->iv.length;
1020 	cpu_to_hw_sg(sg);
1021 
1022 	/* 1st seg */
1023 	sg++;
1024 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1025 	sg->length = mbuf->data_len - data_offset;
1026 	sg->offset = data_offset;
1027 
1028 	/* Successive segs */
1029 	mbuf = mbuf->next;
1030 	while (mbuf) {
1031 		cpu_to_hw_sg(sg);
1032 		sg++;
1033 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1034 		sg->length = mbuf->data_len;
1035 		mbuf = mbuf->next;
1036 	}
1037 	sg->final = 1;
1038 	cpu_to_hw_sg(sg);
1039 
1040 	return cf;
1041 }
1042 
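/* build compound frame for a cipher-only op on a contiguous mbuf */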
1043 static inline struct dpaa_sec_job *
1044 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1045 {
1046 	struct rte_crypto_sym_op *sym = op->sym;
1047 	struct dpaa_sec_job *cf;
1048 	struct dpaa_sec_op_ctx *ctx;
1049 	struct qm_sg_entry *sg;
1050 	rte_iova_t src_start_addr, dst_start_addr;
1051 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1052 			ses->iv.offset);
1053 	int data_len, data_offset;
1054 
1055 	data_len = sym->cipher.data.length;
1056 	data_offset = sym->cipher.data.offset;
1057 
1058 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1059 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1060 		if ((data_len & 7) || (data_offset & 7)) {
1061 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1062 			return NULL;
1063 		}
1064 
1065 		data_len = data_len >> 3;
1066 		data_offset = data_offset >> 3;
1067 	}
1068 
1069 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1070 	if (!ctx)
1071 		return NULL;
1072 
1073 	cf = &ctx->job;
1074 	ctx->op = op;
1075 
1076 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1077 
1078 	if (sym->m_dst)
1079 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1080 	else
1081 		dst_start_addr = src_start_addr;
1082 
1083 	/* output */
1084 	sg = &cf->sg[0];
1085 	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1086 	sg->length = data_len + ses->iv.length;
1087 	cpu_to_hw_sg(sg);
1088 
1089 	/* input */
1090 	sg = &cf->sg[1];
1091 
1092 	/* need to extend the input to a compound frame */
1093 	sg->extension = 1;
1094 	sg->final = 1;
1095 	sg->length = data_len + ses->iv.length;
1096 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1097 	cpu_to_hw_sg(sg);
1098 
1099 	sg = &cf->sg[2];
1100 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1101 	sg->length = ses->iv.length;
1102 	cpu_to_hw_sg(sg);
1103 
1104 	sg++;
1105 	qm_sg_entry_set64(sg, src_start_addr + data_offset);
1106 	sg->length = data_len;
1107 	sg->final = 1;
1108 	cpu_to_hw_sg(sg);
1109 
1110 	return cf;
1111 }
1112 
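/* build compound frame for an AEAD (GCM) op on a scatter-gather mbuf */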
1113 static inline struct dpaa_sec_job *
1114 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1115 {
1116 	struct rte_crypto_sym_op *sym = op->sym;
1117 	struct dpaa_sec_job *cf;
1118 	struct dpaa_sec_op_ctx *ctx;
1119 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1120 	struct rte_mbuf *mbuf;
1121 	uint8_t req_segs;
1122 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1123 			ses->iv.offset);
1124 
1125 	if (sym->m_dst) {
1126 		mbuf = sym->m_dst;
1127 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1128 	} else {
1129 		mbuf = sym->m_src;
1130 		req_segs = mbuf->nb_segs * 2 + 4;
1131 	}
1132 
1133 	if (ses->auth_only_len)
1134 		req_segs++;
1135 
1136 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1137 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1138 				MAX_SG_ENTRIES);
1139 		return NULL;
1140 	}
1141 
1142 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1143 	if (!ctx)
1144 		return NULL;
1145 
1146 	cf = &ctx->job;
1147 	ctx->op = op;
1148 
1149 	rte_prefetch0(cf->sg);
1150 
1151 	/* output */
1152 	out_sg = &cf->sg[0];
1153 	out_sg->extension = 1;
1154 	if (is_encode(ses))
1155 		out_sg->length = sym->aead.data.length + ses->digest_length;
1156 	else
1157 		out_sg->length = sym->aead.data.length;
1158 
1159 	/* output sg entries */
1160 	sg = &cf->sg[2];
1161 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1162 	cpu_to_hw_sg(out_sg);
1163 
1164 	/* 1st seg */
1165 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1166 	sg->length = mbuf->data_len - sym->aead.data.offset;
1167 	sg->offset = sym->aead.data.offset;
1168 
1169 	/* Successive segs */
1170 	mbuf = mbuf->next;
1171 	while (mbuf) {
1172 		cpu_to_hw_sg(sg);
1173 		sg++;
1174 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1175 		sg->length = mbuf->data_len;
1176 		mbuf = mbuf->next;
1177 	}
1178 	sg->length -= ses->digest_length;
1179 
1180 	if (is_encode(ses)) {
1181 		cpu_to_hw_sg(sg);
1182 		/* set auth output */
1183 		sg++;
1184 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1185 		sg->length = ses->digest_length;
1186 	}
1187 	sg->final = 1;
1188 	cpu_to_hw_sg(sg);
1189 
1190 	/* input */
1191 	mbuf = sym->m_src;
1192 	in_sg = &cf->sg[1];
1193 	in_sg->extension = 1;
1194 	in_sg->final = 1;
1195 	if (is_encode(ses))
1196 		in_sg->length = ses->iv.length + sym->aead.data.length
1197 							+ ses->auth_only_len;
1198 	else
1199 		in_sg->length = ses->iv.length + sym->aead.data.length
1200 				+ ses->auth_only_len + ses->digest_length;
1201 
1202 	/* input sg entries */
1203 	sg++;
1204 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1205 	cpu_to_hw_sg(in_sg);
1206 
1207 	/* 1st seg IV */
1208 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1209 	sg->length = ses->iv.length;
1210 	cpu_to_hw_sg(sg);
1211 
1212 	/* 2nd seg auth only */
1213 	if (ses->auth_only_len) {
1214 		sg++;
1215 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1216 		sg->length = ses->auth_only_len;
1217 		cpu_to_hw_sg(sg);
1218 	}
1219 
1220 	/* 3rd seg */
1221 	sg++;
1222 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1223 	sg->length = mbuf->data_len - sym->aead.data.offset;
1224 	sg->offset = sym->aead.data.offset;
1225 
1226 	/* Successive segs */
1227 	mbuf = mbuf->next;
1228 	while (mbuf) {
1229 		cpu_to_hw_sg(sg);
1230 		sg++;
1231 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1232 		sg->length = mbuf->data_len;
1233 		mbuf = mbuf->next;
1234 	}
1235 
1236 	if (is_decode(ses)) {
1237 		cpu_to_hw_sg(sg);
1238 		sg++;
1239 		memcpy(ctx->digest, sym->aead.digest.data,
1240 			ses->digest_length);
1241 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1242 		sg->length = ses->digest_length;
1243 	}
1244 	sg->final = 1;
1245 	cpu_to_hw_sg(sg);
1246 
1247 	return cf;
1248 }
1249 
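/* build compound frame for an AEAD (GCM) op on a contiguous mbuf */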
1250 static inline struct dpaa_sec_job *
1251 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1252 {
1253 	struct rte_crypto_sym_op *sym = op->sym;
1254 	struct dpaa_sec_job *cf;
1255 	struct dpaa_sec_op_ctx *ctx;
1256 	struct qm_sg_entry *sg;
1257 	uint32_t length = 0;
1258 	rte_iova_t src_start_addr, dst_start_addr;
1259 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1260 			ses->iv.offset);
1261 
1262 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1263 
1264 	if (sym->m_dst)
1265 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1266 	else
1267 		dst_start_addr = src_start_addr;
1268 
1269 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1270 	if (!ctx)
1271 		return NULL;
1272 
1273 	cf = &ctx->job;
1274 	ctx->op = op;
1275 
1276 	/* input */
1277 	rte_prefetch0(cf->sg);
1278 	sg = &cf->sg[2];
1279 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1280 	if (is_encode(ses)) {
1281 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1282 		sg->length = ses->iv.length;
1283 		length += sg->length;
1284 		cpu_to_hw_sg(sg);
1285 
1286 		sg++;
1287 		if (ses->auth_only_len) {
1288 			qm_sg_entry_set64(sg,
1289 					  dpaa_mem_vtop(sym->aead.aad.data));
1290 			sg->length = ses->auth_only_len;
1291 			length += sg->length;
1292 			cpu_to_hw_sg(sg);
1293 			sg++;
1294 		}
1295 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1296 		sg->length = sym->aead.data.length;
1297 		length += sg->length;
1298 		sg->final = 1;
1299 		cpu_to_hw_sg(sg);
1300 	} else {
1301 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1302 		sg->length = ses->iv.length;
1303 		length += sg->length;
1304 		cpu_to_hw_sg(sg);
1305 
1306 		sg++;
1307 		if (ses->auth_only_len) {
1308 			qm_sg_entry_set64(sg,
1309 					  dpaa_mem_vtop(sym->aead.aad.data));
1310 			sg->length = ses->auth_only_len;
1311 			length += sg->length;
1312 			cpu_to_hw_sg(sg);
1313 			sg++;
1314 		}
1315 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1316 		sg->length = sym->aead.data.length;
1317 		length += sg->length;
1318 		cpu_to_hw_sg(sg);
1319 
1320 		memcpy(ctx->digest, sym->aead.digest.data,
1321 		       ses->digest_length);
1322 		sg++;
1323 
1324 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1325 		sg->length = ses->digest_length;
1326 		length += sg->length;
1327 		sg->final = 1;
1328 		cpu_to_hw_sg(sg);
1329 	}
1330 	/* input compound frame */
1331 	cf->sg[1].length = length;
1332 	cf->sg[1].extension = 1;
1333 	cf->sg[1].final = 1;
1334 	cpu_to_hw_sg(&cf->sg[1]);
1335 
1336 	/* output */
1337 	sg++;
1338 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1339 	qm_sg_entry_set64(sg,
1340 		dst_start_addr + sym->aead.data.offset);
1341 	sg->length = sym->aead.data.length;
1342 	length = sg->length;
1343 	if (is_encode(ses)) {
1344 		cpu_to_hw_sg(sg);
1345 		/* set auth output */
1346 		sg++;
1347 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1348 		sg->length = ses->digest_length;
1349 		length += sg->length;
1350 	}
1351 	sg->final = 1;
1352 	cpu_to_hw_sg(sg);
1353 
1354 	/* output compound frame */
1355 	cf->sg[0].length = length;
1356 	cf->sg[0].extension = 1;
1357 	cpu_to_hw_sg(&cf->sg[0]);
1358 
1359 	return cf;
1360 }
1361 
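/* build compound frame for a cipher + auth op on a scatter-gather mbuf */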
1362 static inline struct dpaa_sec_job *
1363 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1364 {
1365 	struct rte_crypto_sym_op *sym = op->sym;
1366 	struct dpaa_sec_job *cf;
1367 	struct dpaa_sec_op_ctx *ctx;
1368 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1369 	struct rte_mbuf *mbuf;
1370 	uint8_t req_segs;
1371 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1372 			ses->iv.offset);
1373 
1374 	if (sym->m_dst) {
1375 		mbuf = sym->m_dst;
1376 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1377 	} else {
1378 		mbuf = sym->m_src;
1379 		req_segs = mbuf->nb_segs * 2 + 4;
1380 	}
1381 
1382 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1383 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1384 				MAX_SG_ENTRIES);
1385 		return NULL;
1386 	}
1387 
1388 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1389 	if (!ctx)
1390 		return NULL;
1391 
1392 	cf = &ctx->job;
1393 	ctx->op = op;
1394 
1395 	rte_prefetch0(cf->sg);
1396 
1397 	/* output */
1398 	out_sg = &cf->sg[0];
1399 	out_sg->extension = 1;
1400 	if (is_encode(ses))
1401 		out_sg->length = sym->auth.data.length + ses->digest_length;
1402 	else
1403 		out_sg->length = sym->auth.data.length;
1404 
1405 	/* output sg entries */
1406 	sg = &cf->sg[2];
1407 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1408 	cpu_to_hw_sg(out_sg);
1409 
1410 	/* 1st seg */
1411 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1412 	sg->length = mbuf->data_len - sym->auth.data.offset;
1413 	sg->offset = sym->auth.data.offset;
1414 
1415 	/* Successive segs */
1416 	mbuf = mbuf->next;
1417 	while (mbuf) {
1418 		cpu_to_hw_sg(sg);
1419 		sg++;
1420 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1421 		sg->length = mbuf->data_len;
1422 		mbuf = mbuf->next;
1423 	}
1424 	sg->length -= ses->digest_length;
1425 
1426 	if (is_encode(ses)) {
1427 		cpu_to_hw_sg(sg);
1428 		/* set auth output */
1429 		sg++;
1430 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1431 		sg->length = ses->digest_length;
1432 	}
1433 	sg->final = 1;
1434 	cpu_to_hw_sg(sg);
1435 
1436 	/* input */
1437 	mbuf = sym->m_src;
1438 	in_sg = &cf->sg[1];
1439 	in_sg->extension = 1;
1440 	in_sg->final = 1;
1441 	if (is_encode(ses))
1442 		in_sg->length = ses->iv.length + sym->auth.data.length;
1443 	else
1444 		in_sg->length = ses->iv.length + sym->auth.data.length
1445 						+ ses->digest_length;
1446 
1447 	/* input sg entries */
1448 	sg++;
1449 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1450 	cpu_to_hw_sg(in_sg);
1451 
1452 	/* 1st seg IV */
1453 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1454 	sg->length = ses->iv.length;
1455 	cpu_to_hw_sg(sg);
1456 
1457 	/* 2nd seg */
1458 	sg++;
1459 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1460 	sg->length = mbuf->data_len - sym->auth.data.offset;
1461 	sg->offset = sym->auth.data.offset;
1462 
1463 	/* Successive segs */
1464 	mbuf = mbuf->next;
1465 	while (mbuf) {
1466 		cpu_to_hw_sg(sg);
1467 		sg++;
1468 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1469 		sg->length = mbuf->data_len;
1470 		mbuf = mbuf->next;
1471 	}
1472 
1473 	sg->length -= ses->digest_length;
1474 	if (is_decode(ses)) {
1475 		cpu_to_hw_sg(sg);
1476 		sg++;
1477 		memcpy(ctx->digest, sym->auth.digest.data,
1478 			ses->digest_length);
1479 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1480 		sg->length = ses->digest_length;
1481 	}
1482 	sg->final = 1;
1483 	cpu_to_hw_sg(sg);
1484 
1485 	return cf;
1486 }
1487 
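/* build compound frame for a cipher + auth op on a contiguous mbuf */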
1488 static inline struct dpaa_sec_job *
1489 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1490 {
1491 	struct rte_crypto_sym_op *sym = op->sym;
1492 	struct dpaa_sec_job *cf;
1493 	struct dpaa_sec_op_ctx *ctx;
1494 	struct qm_sg_entry *sg;
1495 	rte_iova_t src_start_addr, dst_start_addr;
1496 	uint32_t length = 0;
1497 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1498 			ses->iv.offset);
1499 
1500 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1501 	if (sym->m_dst)
1502 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1503 	else
1504 		dst_start_addr = src_start_addr;
1505 
1506 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1507 	if (!ctx)
1508 		return NULL;
1509 
1510 	cf = &ctx->job;
1511 	ctx->op = op;
1512 
1513 	/* input */
1514 	rte_prefetch0(cf->sg);
1515 	sg = &cf->sg[2];
1516 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1517 	if (is_encode(ses)) {
1518 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1519 		sg->length = ses->iv.length;
1520 		length += sg->length;
1521 		cpu_to_hw_sg(sg);
1522 
1523 		sg++;
1524 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1525 		sg->length = sym->auth.data.length;
1526 		length += sg->length;
1527 		sg->final = 1;
1528 		cpu_to_hw_sg(sg);
1529 	} else {
1530 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1531 		sg->length = ses->iv.length;
1532 		length += sg->length;
1533 		cpu_to_hw_sg(sg);
1534 
1535 		sg++;
1536 
1537 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1538 		sg->length = sym->auth.data.length;
1539 		length += sg->length;
1540 		cpu_to_hw_sg(sg);
1541 
1542 		memcpy(ctx->digest, sym->auth.digest.data,
1543 		       ses->digest_length);
1544 		sg++;
1545 
1546 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1547 		sg->length = ses->digest_length;
1548 		length += sg->length;
1549 		sg->final = 1;
1550 		cpu_to_hw_sg(sg);
1551 	}
1552 	/* input compound frame */
1553 	cf->sg[1].length = length;
1554 	cf->sg[1].extension = 1;
1555 	cf->sg[1].final = 1;
1556 	cpu_to_hw_sg(&cf->sg[1]);
1557 
1558 	/* output */
1559 	sg++;
1560 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1561 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1562 	sg->length = sym->cipher.data.length;
1563 	length = sg->length;
1564 	if (is_encode(ses)) {
1565 		cpu_to_hw_sg(sg);
1566 		/* set auth output */
1567 		sg++;
1568 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1569 		sg->length = ses->digest_length;
1570 		length += sg->length;
1571 	}
1572 	sg->final = 1;
1573 	cpu_to_hw_sg(sg);
1574 
1575 	/* output compound frame */
1576 	cf->sg[0].length = length;
1577 	cf->sg[0].extension = 1;
1578 	cpu_to_hw_sg(&cf->sg[0]);
1579 
1580 	return cf;
1581 }
1582 
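/* build compound frame for a lookaside protocol (IPsec/PDCP) op on a contiguous mbuf */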
1583 static inline struct dpaa_sec_job *
1584 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1585 {
1586 	struct rte_crypto_sym_op *sym = op->sym;
1587 	struct dpaa_sec_job *cf;
1588 	struct dpaa_sec_op_ctx *ctx;
1589 	struct qm_sg_entry *sg;
1590 	phys_addr_t src_start_addr, dst_start_addr;
1591 
1592 	ctx = dpaa_sec_alloc_ctx(ses, 2);
1593 	if (!ctx)
1594 		return NULL;
1595 	cf = &ctx->job;
1596 	ctx->op = op;
1597 
1598 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1599 
1600 	if (sym->m_dst)
1601 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1602 	else
1603 		dst_start_addr = src_start_addr;
1604 
1605 	/* input */
1606 	sg = &cf->sg[1];
1607 	qm_sg_entry_set64(sg, src_start_addr);
1608 	sg->length = sym->m_src->pkt_len;
1609 	sg->final = 1;
1610 	cpu_to_hw_sg(sg);
1611 
1612 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1613 	/* output */
1614 	sg = &cf->sg[0];
1615 	qm_sg_entry_set64(sg, dst_start_addr);
1616 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1617 	cpu_to_hw_sg(sg);
1618 
1619 	return cf;
1620 }
1621 
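/* build compound frame for a lookaside protocol (IPsec/PDCP) op on a scatter-gather mbuf */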
1622 static inline struct dpaa_sec_job *
1623 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1624 {
1625 	struct rte_crypto_sym_op *sym = op->sym;
1626 	struct dpaa_sec_job *cf;
1627 	struct dpaa_sec_op_ctx *ctx;
1628 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1629 	struct rte_mbuf *mbuf;
1630 	uint8_t req_segs;
1631 	uint32_t in_len = 0, out_len = 0;
1632 
1633 	if (sym->m_dst)
1634 		mbuf = sym->m_dst;
1635 	else
1636 		mbuf = sym->m_src;
1637 
1638 	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1639 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1640 		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1641 				MAX_SG_ENTRIES);
1642 		return NULL;
1643 	}
1644 
1645 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1646 	if (!ctx)
1647 		return NULL;
1648 	cf = &ctx->job;
1649 	ctx->op = op;
1650 	/* output */
1651 	out_sg = &cf->sg[0];
1652 	out_sg->extension = 1;
1653 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1654 
1655 	/* 1st seg */
1656 	sg = &cf->sg[2];
1657 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1658 	sg->offset = 0;
1659 
1660 	/* Successive segs */
1661 	while (mbuf->next) {
1662 		sg->length = mbuf->data_len;
1663 		out_len += sg->length;
1664 		mbuf = mbuf->next;
1665 		cpu_to_hw_sg(sg);
1666 		sg++;
1667 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1668 		sg->offset = 0;
1669 	}
1670 	sg->length = mbuf->buf_len - mbuf->data_off;
1671 	out_len += sg->length;
1672 	sg->final = 1;
1673 	cpu_to_hw_sg(sg);
1674 
1675 	out_sg->length = out_len;
1676 	cpu_to_hw_sg(out_sg);
1677 
1678 	/* input */
1679 	mbuf = sym->m_src;
1680 	in_sg = &cf->sg[1];
1681 	in_sg->extension = 1;
1682 	in_sg->final = 1;
1683 	in_len = mbuf->data_len;
1684 
1685 	sg++;
1686 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1687 
1688 	/* 1st seg */
1689 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1690 	sg->length = mbuf->data_len;
1691 	sg->offset = 0;
1692 
1693 	/* Successive segs */
1694 	mbuf = mbuf->next;
1695 	while (mbuf) {
1696 		cpu_to_hw_sg(sg);
1697 		sg++;
1698 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1699 		sg->length = mbuf->data_len;
1700 		sg->offset = 0;
1701 		in_len += sg->length;
1702 		mbuf = mbuf->next;
1703 	}
1704 	sg->final = 1;
1705 	cpu_to_hw_sg(sg);
1706 
1707 	in_sg->length = in_len;
1708 	cpu_to_hw_sg(in_sg);
1709 
1710 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1711 
1712 	return cf;
1713 }
1714 
1715 static uint16_t
1716 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1717 		       uint16_t nb_ops)
1718 {
1719 	/* Transmit the frames to the given device and queue pair */
1720 	uint32_t loop;
1721 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1722 	uint16_t num_tx = 0;
1723 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1724 	uint32_t frames_to_send;
1725 	struct rte_crypto_op *op;
1726 	struct dpaa_sec_job *cf;
1727 	dpaa_sec_session *ses;
1728 	uint16_t auth_hdr_len, auth_tail_len;
1729 	uint32_t index, flags[DPAA_SEC_BURST] = {0};
1730 	struct qman_fq *inq[DPAA_SEC_BURST];
1731 
1732 	while (nb_ops) {
1733 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1734 				DPAA_SEC_BURST : nb_ops;
1735 		for (loop = 0; loop < frames_to_send; loop++) {
1736 			op = *(ops++);
1737 			if (op->sym->m_src->seqn != 0) {
1738 				index = op->sym->m_src->seqn - 1;
1739 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1740 					/* QM_EQCR_DCA_IDXMASK = 0x0f */
1741 					flags[loop] = ((index & 0x0f) << 8);
1742 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1743 					DPAA_PER_LCORE_DQRR_SIZE--;
1744 					DPAA_PER_LCORE_DQRR_HELD &=
1745 								~(1 << index);
1746 				}
1747 			}
1748 
1749 			switch (op->sess_type) {
1750 			case RTE_CRYPTO_OP_WITH_SESSION:
1751 				ses = (dpaa_sec_session *)
1752 					get_sym_session_private_data(
1753 							op->sym->session,
1754 							cryptodev_driver_id);
1755 				break;
1756 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1757 				ses = (dpaa_sec_session *)
1758 					get_sec_session_private_data(
1759 							op->sym->sec_session);
1760 				break;
1761 			default:
1762 				DPAA_SEC_DP_ERR(
1763 					"sessionless crypto op not supported");
1764 				frames_to_send = loop;
1765 				nb_ops = loop;
1766 				goto send_pkts;
1767 			}
1768 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1769 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1770 					frames_to_send = loop;
1771 					nb_ops = loop;
1772 					goto send_pkts;
1773 				}
1774 			} else if (unlikely(ses->qp[rte_lcore_id() %
1775 						MAX_DPAA_CORES] != qp)) {
1776 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1777 					" New qp = %p\n",
1778 					ses->qp[rte_lcore_id() %
1779 					MAX_DPAA_CORES], qp);
1780 				frames_to_send = loop;
1781 				nb_ops = loop;
1782 				goto send_pkts;
1783 			}
1784 
1785 			auth_hdr_len = op->sym->auth.data.length -
1786 						op->sym->cipher.data.length;
1787 			auth_tail_len = 0;
1788 
1789 			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1790 				  ((op->sym->m_dst == NULL) ||
1791 				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1792 				switch (ses->ctxt) {
1793 				case DPAA_SEC_PDCP:
1794 				case DPAA_SEC_IPSEC:
1795 					cf = build_proto(op, ses);
1796 					break;
1797 				case DPAA_SEC_AUTH:
1798 					cf = build_auth_only(op, ses);
1799 					break;
1800 				case DPAA_SEC_CIPHER:
1801 					cf = build_cipher_only(op, ses);
1802 					break;
1803 				case DPAA_SEC_AEAD:
1804 					cf = build_cipher_auth_gcm(op, ses);
1805 					auth_hdr_len = ses->auth_only_len;
1806 					break;
1807 				case DPAA_SEC_CIPHER_HASH:
1808 					auth_hdr_len =
1809 						op->sym->cipher.data.offset
1810 						- op->sym->auth.data.offset;
1811 					auth_tail_len =
1812 						op->sym->auth.data.length
1813 						- op->sym->cipher.data.length
1814 						- auth_hdr_len;
1815 					cf = build_cipher_auth(op, ses);
1816 					break;
1817 				default:
1818 					DPAA_SEC_DP_ERR("not supported ops");
1819 					frames_to_send = loop;
1820 					nb_ops = loop;
1821 					goto send_pkts;
1822 				}
1823 			} else {
1824 				switch (ses->ctxt) {
1825 				case DPAA_SEC_PDCP:
1826 				case DPAA_SEC_IPSEC:
1827 					cf = build_proto_sg(op, ses);
1828 					break;
1829 				case DPAA_SEC_AUTH:
1830 					cf = build_auth_only_sg(op, ses);
1831 					break;
1832 				case DPAA_SEC_CIPHER:
1833 					cf = build_cipher_only_sg(op, ses);
1834 					break;
1835 				case DPAA_SEC_AEAD:
1836 					cf = build_cipher_auth_gcm_sg(op, ses);
1837 					auth_hdr_len = ses->auth_only_len;
1838 					break;
1839 				case DPAA_SEC_CIPHER_HASH:
1840 					auth_hdr_len =
1841 						op->sym->cipher.data.offset
1842 						- op->sym->auth.data.offset;
1843 					auth_tail_len =
1844 						op->sym->auth.data.length
1845 						- op->sym->cipher.data.length
1846 						- auth_hdr_len;
1847 					cf = build_cipher_auth_sg(op, ses);
1848 					break;
1849 				default:
1850 					DPAA_SEC_DP_ERR("not supported ops");
1851 					frames_to_send = loop;
1852 					nb_ops = loop;
1853 					goto send_pkts;
1854 				}
1855 			}
1856 			if (unlikely(!cf)) {
1857 				frames_to_send = loop;
1858 				nb_ops = loop;
1859 				goto send_pkts;
1860 			}
1861 
1862 			fd = &fds[loop];
1863 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1864 			fd->opaque_addr = 0;
1865 			fd->cmd = 0;
1866 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1867 			fd->_format1 = qm_fd_compound;
1868 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1869 
1870 			/* Auth_only_len is set to 0 in the descriptor and is
1871 			 * overwritten here in fd.cmd, which updates the
1872 			 * DPOVRD register.
1873 			 */
1874 			if (auth_hdr_len || auth_tail_len) {
1875 				fd->cmd = 0x80000000;
1876 				fd->cmd |=
1877 					((auth_tail_len << 16) | auth_hdr_len);
1878 			}
1879 
1880 			/* For PDCP, the per-packet HFN is stored in the
1881 			 * mbuf private area after sym_op.
1882 			 */
1883 			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1884 				fd->cmd = 0x80000000 |
1885 					*((uint32_t *)((uint8_t *)op +
1886 					ses->pdcp.hfn_ovd_offset));
1887 				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1888 					*((uint32_t *)((uint8_t *)op +
1889 					ses->pdcp.hfn_ovd_offset)),
1890 					ses->pdcp.hfn_ovd);
1891 			}
1892 
1893 		}
1894 send_pkts:
1895 		loop = 0;
1896 		while (loop < frames_to_send) {
1897 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1898 					&flags[loop], frames_to_send - loop);
1899 		}
1900 		nb_ops -= frames_to_send;
1901 		num_tx += frames_to_send;
1902 	}
1903 
1904 	dpaa_qp->tx_pkts += num_tx;
1905 	dpaa_qp->tx_errs += nb_ops - num_tx;
1906 
1907 	return num_tx;
1908 }
1909 
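/* dequeue processed crypto ops from the queue pair's out queue */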
1910 static uint16_t
1911 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1912 		       uint16_t nb_ops)
1913 {
1914 	uint16_t num_rx;
1915 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1916 
1917 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1918 
1919 	dpaa_qp->rx_pkts += num_rx;
1920 	dpaa_qp->rx_errs += nb_ops - num_rx;
1921 
1922 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1923 
1924 	return num_rx;
1925 }
1926 
1927 /** Release queue pair */
1928 static int
1929 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1930 			    uint16_t qp_id)
1931 {
1932 	struct dpaa_sec_dev_private *internals;
1933 	struct dpaa_sec_qp *qp = NULL;
1934 
1935 	PMD_INIT_FUNC_TRACE();
1936 
1937 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1938 
1939 	internals = dev->data->dev_private;
1940 	if (qp_id >= internals->max_nb_queue_pairs) {
1941 		DPAA_SEC_ERR("Max supported qpid %d",
1942 			     internals->max_nb_queue_pairs);
1943 		return -EINVAL;
1944 	}
1945 
1946 	qp = &internals->qps[qp_id];
1947 	rte_mempool_free(qp->ctx_pool);
1948 	qp->internals = NULL;
1949 	dev->data->queue_pairs[qp_id] = NULL;
1950 
1951 	return 0;
1952 }
1953 
1954 /** Setup a queue pair */
1955 static int
1956 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1957 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1958 		__rte_unused int socket_id)
1959 {
1960 	struct dpaa_sec_dev_private *internals;
1961 	struct dpaa_sec_qp *qp = NULL;
1962 	char str[20];
1963 
1964 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1965 
1966 	internals = dev->data->dev_private;
1967 	if (qp_id >= internals->max_nb_queue_pairs) {
1968 		DPAA_SEC_ERR("Max supported qpid %d",
1969 			     internals->max_nb_queue_pairs);
1970 		return -EINVAL;
1971 	}
1972 
1973 	qp = &internals->qps[qp_id];
1974 	qp->internals = internals;
1975 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1976 			dev->data->dev_id, qp_id);
1977 	if (!qp->ctx_pool) {
1978 		qp->ctx_pool = rte_mempool_create((const char *)str,
1979 							CTX_POOL_NUM_BUFS,
1980 							CTX_POOL_BUF_SIZE,
1981 							CTX_POOL_CACHE_SIZE, 0,
1982 							NULL, NULL, NULL, NULL,
1983 							SOCKET_ID_ANY, 0);
1984 		if (!qp->ctx_pool) {
1985 			DPAA_SEC_ERR("%s create failed\n", str);
1986 			return -ENOMEM;
1987 		}
1988 	} else
1989 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1990 				dev->data->dev_id, qp_id);
1991 	dev->data->queue_pairs[qp_id] = qp;
1992 
1993 	return 0;
1994 }
1995 
1996 /** Return the number of allocated queue pairs */
1997 static uint32_t
1998 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1999 {
2000 	PMD_INIT_FUNC_TRACE();
2001 
2002 	return dev->data->nb_queue_pairs;
2003 }
2004 
2005 /** Returns the size of session structure */
2006 static unsigned int
2007 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2008 {
2009 	PMD_INIT_FUNC_TRACE();
2010 
2011 	return sizeof(dpaa_sec_session);
2012 }
2013 
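/* Fill in cipher-only session parameters: copy the key and map the
 * rte_crypto cipher algorithm onto the CAAM ALGSEL/AAI values used when
 * building the shared descriptor.
 */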
2014 static int
2015 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2016 		     struct rte_crypto_sym_xform *xform,
2017 		     dpaa_sec_session *session)
2018 {
2019 	session->cipher_alg = xform->cipher.algo;
2020 	session->iv.length = xform->cipher.iv.length;
2021 	session->iv.offset = xform->cipher.iv.offset;
2022 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2023 					       RTE_CACHE_LINE_SIZE);
2024 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2025 		DPAA_SEC_ERR("No Memory for cipher key");
2026 		return -ENOMEM;
2027 	}
2028 	session->cipher_key.length = xform->cipher.key.length;
2029 
2030 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2031 	       xform->cipher.key.length);
2032 	switch (xform->cipher.algo) {
2033 	case RTE_CRYPTO_CIPHER_AES_CBC:
2034 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2035 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2036 		break;
2037 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2038 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2039 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2040 		break;
2041 	case RTE_CRYPTO_CIPHER_AES_CTR:
2042 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2043 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2044 		break;
2045 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2046 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2047 		break;
2048 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2049 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2050 		break;
2051 	default:
2052 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2053 			      xform->cipher.algo);
2054 		rte_free(session->cipher_key.data);
2055 		return -1;
2056 	}
2057 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2058 			DIR_ENC : DIR_DEC;
2059 
2060 	return 0;
2061 }
2062 
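/* Fill in auth-only session parameters: copy the key, record the digest
 * length and map the auth algorithm onto CAAM ALGSEL/AAI values.
 */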
2063 static int
2064 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2065 		   struct rte_crypto_sym_xform *xform,
2066 		   dpaa_sec_session *session)
2067 {
2068 	session->auth_alg = xform->auth.algo;
2069 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2070 					     RTE_CACHE_LINE_SIZE);
2071 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2072 		DPAA_SEC_ERR("No Memory for auth key");
2073 		return -ENOMEM;
2074 	}
2075 	session->auth_key.length = xform->auth.key.length;
2076 	session->digest_length = xform->auth.digest_length;
2077 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2078 		session->iv.offset = xform->auth.iv.offset;
2079 		session->iv.length = xform->auth.iv.length;
2080 	}
2081 
2082 	memcpy(session->auth_key.data, xform->auth.key.data,
2083 	       xform->auth.key.length);
2084 
2085 	switch (xform->auth.algo) {
2086 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2087 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2088 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2089 		break;
2090 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2091 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2092 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2093 		break;
2094 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2095 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2096 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2097 		break;
2098 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2099 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2100 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2101 		break;
2102 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2103 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2104 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2105 		break;
2106 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2107 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2108 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2109 		break;
2110 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2111 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2112 		session->auth_key.algmode = OP_ALG_AAI_F9;
2113 		break;
2114 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2115 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2116 		session->auth_key.algmode = OP_ALG_AAI_F9;
2117 		break;
2118 	default:
2119 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2120 			      xform->auth.algo);
2121 		rte_free(session->auth_key.data);
2122 		return -1;
2123 	}
2124 
2125 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2126 			DIR_ENC : DIR_DEC;
2127 
2128 	return 0;
2129 }
2130 
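/* Fill in parameters for a chained cipher+auth session. The order of the
 * two xforms depends on auth_cipher_text, which the caller sets from the
 * xform chain before invoking this helper.
 */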
2131 static int
2132 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2133 		   struct rte_crypto_sym_xform *xform,
2134 		   dpaa_sec_session *session)
2135 {
2136 
2137 	struct rte_crypto_cipher_xform *cipher_xform;
2138 	struct rte_crypto_auth_xform *auth_xform;
2139 
2140 	if (session->auth_cipher_text) {
2141 		cipher_xform = &xform->cipher;
2142 		auth_xform = &xform->next->auth;
2143 	} else {
2144 		cipher_xform = &xform->next->cipher;
2145 		auth_xform = &xform->auth;
2146 	}
2147 
2148 	/* Set IV parameters */
2149 	session->iv.offset = cipher_xform->iv.offset;
2150 	session->iv.length = cipher_xform->iv.length;
2151 
2152 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2153 					       RTE_CACHE_LINE_SIZE);
2154 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2155 		DPAA_SEC_ERR("No Memory for cipher key");
2156 		return -ENOMEM;
2157 	}
2158 	session->cipher_key.length = cipher_xform->key.length;
2159 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2160 					     RTE_CACHE_LINE_SIZE);
2161 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2162 		DPAA_SEC_ERR("No Memory for auth key");
2163 		rte_free(session->cipher_key.data);
2164 		return -ENOMEM;
2165 	}
2166 	session->auth_key.length = auth_xform->key.length;
2167 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2168 	       cipher_xform->key.length);
2169 	memcpy(session->auth_key.data, auth_xform->key.data,
2170 	       auth_xform->key.length);
2171 
2172 	session->digest_length = auth_xform->digest_length;
2173 	session->auth_alg = auth_xform->algo;
2174 
2175 	switch (auth_xform->algo) {
2176 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2177 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2178 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2179 		break;
2180 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2181 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2182 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2183 		break;
2184 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2185 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2186 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2187 		break;
2188 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2189 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2190 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2191 		break;
2192 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2193 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2194 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2195 		break;
2196 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2197 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2198 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2199 		break;
2200 	default:
2201 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2202 			      auth_xform->algo);
2203 		goto error_out;
2204 	}
2205 
2206 	session->cipher_alg = cipher_xform->algo;
2207 
2208 	switch (cipher_xform->algo) {
2209 	case RTE_CRYPTO_CIPHER_AES_CBC:
2210 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2211 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2212 		break;
2213 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2214 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2215 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2216 		break;
2217 	case RTE_CRYPTO_CIPHER_AES_CTR:
2218 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2219 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2220 		break;
2221 	default:
2222 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2223 			      cipher_xform->algo);
2224 		goto error_out;
2225 	}
2226 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2227 				DIR_ENC : DIR_DEC;
2228 	return 0;
2229 
2230 error_out:
2231 	rte_free(session->cipher_key.data);
2232 	rte_free(session->auth_key.data);
2233 	return -1;
2234 }
2235 
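/* Fill in AEAD (AES-GCM) session parameters: IV, AAD length, key and
 * digest length.
 */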
2236 static int
2237 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2238 		   struct rte_crypto_sym_xform *xform,
2239 		   dpaa_sec_session *session)
2240 {
2241 	session->aead_alg = xform->aead.algo;
2242 	session->ctxt = DPAA_SEC_AEAD;
2243 	session->iv.length = xform->aead.iv.length;
2244 	session->iv.offset = xform->aead.iv.offset;
2245 	session->auth_only_len = xform->aead.aad_length;
2246 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2247 					     RTE_CACHE_LINE_SIZE);
2248 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2249 		DPAA_SEC_ERR("No Memory for aead key\n");
2250 		return -ENOMEM;
2251 	}
2252 	session->aead_key.length = xform->aead.key.length;
2253 	session->digest_length = xform->aead.digest_length;
2254 
2255 	memcpy(session->aead_key.data, xform->aead.key.data,
2256 	       xform->aead.key.length);
2257 
2258 	switch (session->aead_alg) {
2259 	case RTE_CRYPTO_AEAD_AES_GCM:
2260 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2261 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2262 		break;
2263 	default:
2264 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2265 		rte_free(session->aead_key.data);
2266 		return -ENOTSUP;
2267 	}
2268 
2269 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2270 			DIR_ENC : DIR_DEC;
2271 
2272 	return 0;
2273 }
2274 
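/* Reserve a free SEC input frame queue from the device-wide pool; one FQ
 * is taken per core for each session. Callers hold the device private
 * lock.
 */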
2275 static struct qman_fq *
2276 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2277 {
2278 	unsigned int i;
2279 
2280 	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2281 		if (qi->inq_attach[i] == 0) {
2282 			qi->inq_attach[i] = 1;
2283 			return &qi->inq[i];
2284 		}
2285 	}
2286 	DPAA_SEC_WARN("All sessions in use, max %u", qi->max_nb_sessions);
2287 
2288 	return NULL;
2289 }
2290 
2291 static int
2292 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2293 {
2294 	unsigned int i;
2295 
2296 	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2297 		if (&qi->inq[i] == fq) {
2298 			qman_retire_fq(fq, NULL);
2299 			qman_oos_fq(fq);
2300 			qi->inq_attach[i] = 0;
2301 			return 0;
2302 		}
2303 	}
2304 	return -1;
2305 }
2306 
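/* Bind a session to a queue pair on the current lcore: build the shared
 * descriptor (CDB) and schedule the session's input FQ towards the SEC
 * block, with the QP's output FQ as the response queue.
 */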
2307 static int
2308 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2309 {
2310 	int ret;
2311 
2312 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2313 	ret = dpaa_sec_prep_cdb(sess);
2314 	if (ret) {
2315 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2316 		return -1;
2317 	}
2318 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2319 		ret = rte_dpaa_portal_init((void *)0);
2320 		if (ret) {
2321 			DPAA_SEC_ERR("Failure in affining portal");
2322 			return ret;
2323 		}
2324 	}
2325 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2326 			       dpaa_mem_vtop(&sess->cdb),
2327 			       qman_fq_fqid(&qp->outq));
2328 	if (ret)
2329 		DPAA_SEC_ERR("Unable to init sec queue");
2330 
2331 	return ret;
2332 }
2333 
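/* Parse the symmetric xform chain (cipher-only, auth-only, cipher+auth or
 * AEAD), fill the session accordingly and reserve one input FQ per core.
 */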
2334 static int
2335 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2336 			    struct rte_crypto_sym_xform *xform,	void *sess)
2337 {
2338 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2339 	dpaa_sec_session *session = sess;
2340 	uint32_t i;
2341 
2342 	PMD_INIT_FUNC_TRACE();
2343 
2344 	if (unlikely(sess == NULL)) {
2345 		DPAA_SEC_ERR("invalid session struct");
2346 		return -EINVAL;
2347 	}
2348 	memset(session, 0, sizeof(dpaa_sec_session));
2349 
2350 	/* Default IV length = 0 */
2351 	session->iv.length = 0;
2352 
2353 	/* Cipher Only */
2354 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2355 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2356 		session->ctxt = DPAA_SEC_CIPHER;
2357 		dpaa_sec_cipher_init(dev, xform, session);
2358 
2359 	/* Authentication Only */
2360 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2361 		   xform->next == NULL) {
2362 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2363 		session->ctxt = DPAA_SEC_AUTH;
2364 		dpaa_sec_auth_init(dev, xform, session);
2365 
2366 	/* Cipher then Authenticate */
2367 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2368 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2369 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2370 			session->ctxt = DPAA_SEC_CIPHER_HASH;
2371 			session->auth_cipher_text = 1;
2372 			dpaa_sec_chain_init(dev, xform, session);
2373 		} else {
2374 			DPAA_SEC_ERR("Not supported: Cipher decrypt followed by Auth");
2375 			return -EINVAL;
2376 		}
2377 	/* Authenticate then Cipher */
2378 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2379 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2380 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2381 			session->ctxt = DPAA_SEC_CIPHER_HASH;
2382 			session->auth_cipher_text = 0;
2383 			dpaa_sec_chain_init(dev, xform, session);
2384 		} else {
2385 			DPAA_SEC_ERR("Not supported: Auth followed by Cipher encrypt");
2386 			return -EINVAL;
2387 		}
2388 
2389 	/* AEAD operation for AES-GCM kind of Algorithms */
2390 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2391 		   xform->next == NULL) {
2392 		dpaa_sec_aead_init(dev, xform, session);
2393 
2394 	} else {
2395 		DPAA_SEC_ERR("Invalid crypto type");
2396 		return -EINVAL;
2397 	}
2398 	rte_spinlock_lock(&internals->lock);
2399 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2400 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2401 		if (session->inq[i] == NULL) {
2402 			DPAA_SEC_ERR("unable to attach sec queue");
2403 			rte_spinlock_unlock(&internals->lock);
2404 			goto err1;
2405 		}
2406 	}
2407 	rte_spinlock_unlock(&internals->lock);
2408 
2409 	return 0;
2410 
2411 err1:
2412 	rte_free(session->cipher_key.data);
2413 	rte_free(session->auth_key.data);
2414 	memset(session, 0, sizeof(dpaa_sec_session));
2415 
2416 	return -EINVAL;
2417 }
2418 
2419 static int
2420 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2421 		struct rte_crypto_sym_xform *xform,
2422 		struct rte_cryptodev_sym_session *sess,
2423 		struct rte_mempool *mempool)
2424 {
2425 	void *sess_private_data;
2426 	int ret;
2427 
2428 	PMD_INIT_FUNC_TRACE();
2429 
2430 	if (rte_mempool_get(mempool, &sess_private_data)) {
2431 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2432 		return -ENOMEM;
2433 	}
2434 
2435 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2436 	if (ret != 0) {
2437 		DPAA_SEC_ERR("failed to configure session parameters");
2438 
2439 		/* Return session to mempool */
2440 		rte_mempool_put(mempool, sess_private_data);
2441 		return ret;
2442 	}
2443 
2444 	set_sym_session_private_data(sess, dev->driver_id,
2445 			sess_private_data);
2446 
2447 
2448 	return 0;
2449 }
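/*
 * Illustrative caller flow (not part of this driver): with the session
 * header and session private-data mempools created by the application, a
 * symmetric session is typically set up as
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_priv_mp);
 *
 * sess_mp, sess_priv_mp, dev_id and xform are application-side names used
 * only for illustration.
 */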
2450 
2451 static inline void
2452 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2453 {
2454 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2455 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2456 	uint8_t i;
2457 
2458 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2459 		if (s->inq[i])
2460 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2461 		s->inq[i] = NULL;
2462 		s->qp[i] = NULL;
2463 	}
2464 	rte_free(s->cipher_key.data);
2465 	rte_free(s->auth_key.data);
2466 	memset(s, 0, sizeof(dpaa_sec_session));
2467 	rte_mempool_put(sess_mp, (void *)s);
2468 }
2469 
2470 /** Clear the memory of session so it doesn't leave key material behind */
2471 static void
2472 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2473 		struct rte_cryptodev_sym_session *sess)
2474 {
2475 	PMD_INIT_FUNC_TRACE();
2476 	uint8_t index = dev->driver_id;
2477 	void *sess_priv = get_sym_session_private_data(sess, index);
2478 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2479 
2480 	if (sess_priv) {
2481 		free_session_memory(dev, s);
2482 		set_sym_session_private_data(sess, index, NULL);
2483 	}
2484 }
2485 
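/* Build an IPsec (lookaside protocol offload) session: keys and CAAM
 * protocol selectors, plus the encap PDB with a template IPv4/IPv6 tunnel
 * header for egress, or the decap PDB for ingress.
 */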
2486 static int
2487 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2488 			   struct rte_security_session_conf *conf,
2489 			   void *sess)
2490 {
2491 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2492 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2493 	struct rte_crypto_auth_xform *auth_xform = NULL;
2494 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2495 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2496 	uint32_t i;
2497 
2498 	PMD_INIT_FUNC_TRACE();
2499 
2500 	memset(session, 0, sizeof(dpaa_sec_session));
2501 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2502 		cipher_xform = &conf->crypto_xform->cipher;
2503 		if (conf->crypto_xform->next)
2504 			auth_xform = &conf->crypto_xform->next->auth;
2505 	} else {
2506 		auth_xform = &conf->crypto_xform->auth;
2507 		if (conf->crypto_xform->next)
2508 			cipher_xform = &conf->crypto_xform->next->cipher;
2509 	}
2510 	session->proto_alg = conf->protocol;
2511 	session->ctxt = DPAA_SEC_IPSEC;
2512 
2513 	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2514 		session->cipher_key.data = rte_zmalloc(NULL,
2515 						       cipher_xform->key.length,
2516 						       RTE_CACHE_LINE_SIZE);
2517 		if (session->cipher_key.data == NULL &&
2518 				cipher_xform->key.length > 0) {
2519 			DPAA_SEC_ERR("No Memory for cipher key");
2520 			return -ENOMEM;
2521 		}
2522 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2523 				cipher_xform->key.length);
2524 		session->cipher_key.length = cipher_xform->key.length;
2525 
2526 		switch (cipher_xform->algo) {
2527 		case RTE_CRYPTO_CIPHER_NULL:
2528 			session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2529 			break;
2530 		case RTE_CRYPTO_CIPHER_AES_CBC:
2531 			session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2532 			session->cipher_key.algmode = OP_ALG_AAI_CBC;
2533 			break;
2534 		case RTE_CRYPTO_CIPHER_3DES_CBC:
2535 			session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2536 			session->cipher_key.algmode = OP_ALG_AAI_CBC;
2537 			break;
2538 		case RTE_CRYPTO_CIPHER_AES_CTR:
2539 			session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2540 			session->cipher_key.algmode = OP_ALG_AAI_CTR;
2541 			break;
2542 		default:
2543 			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2544 				cipher_xform->algo);
2545 			goto out;
2546 		}
2547 		session->cipher_alg = cipher_xform->algo;
2548 	} else {
2549 		session->cipher_key.data = NULL;
2550 		session->cipher_key.length = 0;
2551 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2552 	}
2553 
2554 	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2555 		session->auth_key.data = rte_zmalloc(NULL,
2556 						auth_xform->key.length,
2557 						RTE_CACHE_LINE_SIZE);
2558 		if (session->auth_key.data == NULL &&
2559 				auth_xform->key.length > 0) {
2560 			DPAA_SEC_ERR("No Memory for auth key");
2561 			rte_free(session->cipher_key.data);
2562 			return -ENOMEM;
2563 		}
2564 		memcpy(session->auth_key.data, auth_xform->key.data,
2565 				auth_xform->key.length);
2566 		session->auth_key.length = auth_xform->key.length;
2567 
2568 		switch (auth_xform->algo) {
2569 		case RTE_CRYPTO_AUTH_NULL:
2570 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2571 			session->digest_length = 0;
2572 			break;
2573 		case RTE_CRYPTO_AUTH_MD5_HMAC:
2574 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2575 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2576 			break;
2577 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
2578 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2579 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2580 			break;
2581 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
2582 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160;
2583 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2584 			break;
2585 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
2586 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2587 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2588 			break;
2589 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
2590 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2591 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2592 			break;
2593 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
2594 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2595 			session->auth_key.algmode = OP_ALG_AAI_HMAC;
2596 			break;
2597 		default:
2598 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2599 				auth_xform->algo);
2600 			goto out;
2601 		}
2602 		session->auth_alg = auth_xform->algo;
2603 	} else {
2604 		session->auth_key.data = NULL;
2605 		session->auth_key.length = 0;
2606 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2607 	}
2608 
2609 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2610 		if (ipsec_xform->tunnel.type ==
2611 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2612 			memset(&session->encap_pdb, 0,
2613 				sizeof(struct ipsec_encap_pdb) +
2614 				sizeof(session->ip4_hdr));
2615 			session->ip4_hdr.ip_v = IPVERSION;
2616 			session->ip4_hdr.ip_hl = 5;
2617 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2618 						sizeof(session->ip4_hdr));
2619 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2620 			session->ip4_hdr.ip_id = 0;
2621 			session->ip4_hdr.ip_off = 0;
2622 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2623 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2624 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2625 					IPPROTO_ESP : IPPROTO_AH;
2626 			session->ip4_hdr.ip_sum = 0;
2627 			session->ip4_hdr.ip_src =
2628 					ipsec_xform->tunnel.ipv4.src_ip;
2629 			session->ip4_hdr.ip_dst =
2630 					ipsec_xform->tunnel.ipv4.dst_ip;
2631 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2632 						(void *)&session->ip4_hdr,
2633 						sizeof(struct ip));
2634 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2635 		} else if (ipsec_xform->tunnel.type ==
2636 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2637 			memset(&session->encap_pdb, 0,
2638 				sizeof(struct ipsec_encap_pdb) +
2639 				sizeof(session->ip6_hdr));
2640 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2641 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2642 				((ipsec_xform->tunnel.ipv6.dscp <<
2643 					RTE_IPV6_HDR_TC_SHIFT) &
2644 					RTE_IPV6_HDR_TC_MASK) |
2645 				((ipsec_xform->tunnel.ipv6.flabel <<
2646 					RTE_IPV6_HDR_FL_SHIFT) &
2647 					RTE_IPV6_HDR_FL_MASK));
2648 			/* Payload length will be updated by HW */
2649 			session->ip6_hdr.payload_len = 0;
2650 			session->ip6_hdr.hop_limits =
2651 					ipsec_xform->tunnel.ipv6.hlimit;
2652 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2653 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2654 					IPPROTO_ESP : IPPROTO_AH;
2655 			memcpy(&session->ip6_hdr.src_addr,
2656 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2657 			memcpy(&session->ip6_hdr.dst_addr,
2658 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2659 			session->encap_pdb.ip_hdr_len =
2660 						sizeof(struct rte_ipv6_hdr);
2661 		}
2662 		session->encap_pdb.options =
2663 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2664 			PDBOPTS_ESP_OIHI_PDB_INL |
2665 			PDBOPTS_ESP_IVSRC |
2666 			PDBHMO_ESP_ENCAP_DTTL |
2667 			PDBHMO_ESP_SNR;
2668 		if (ipsec_xform->options.esn)
2669 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2670 		session->encap_pdb.spi = ipsec_xform->spi;
2671 		session->dir = DIR_ENC;
2672 	} else if (ipsec_xform->direction ==
2673 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2674 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2675 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2676 			session->decap_pdb.options = sizeof(struct ip) << 16;
2677 		else
2678 			session->decap_pdb.options =
2679 					sizeof(struct rte_ipv6_hdr) << 16;
2680 		if (ipsec_xform->options.esn)
2681 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2682 		session->dir = DIR_DEC;
2683 	} else
2684 		goto out;
2685 	rte_spinlock_lock(&internals->lock);
2686 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2687 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2688 		if (session->inq[i] == NULL) {
2689 			DPAA_SEC_ERR("unable to attach sec queue");
2690 			rte_spinlock_unlock(&internals->lock);
2691 			goto out;
2692 		}
2693 	}
2694 	rte_spinlock_unlock(&internals->lock);
2695 
2696 	return 0;
2697 out:
2698 	rte_free(session->auth_key.data);
2699 	rte_free(session->cipher_key.data);
2700 	memset(session, 0, sizeof(dpaa_sec_session));
2701 	return -1;
2702 }
2703 
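/* Build a PDCP protocol offload session: map the cipher/auth algorithms to
 * the CAAM PDCP cipher and auth types, copy the keys and record bearer,
 * packet direction, SN size and HFN settings from the PDCP xform.
 */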
2704 static int
2705 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2706 			  struct rte_security_session_conf *conf,
2707 			  void *sess)
2708 {
2709 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2710 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2711 	struct rte_crypto_auth_xform *auth_xform = NULL;
2712 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2713 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2714 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2715 	uint32_t i;
2716 
2717 	PMD_INIT_FUNC_TRACE();
2718 
2719 	memset(session, 0, sizeof(dpaa_sec_session));
2720 
2721 	/* find xfrm types */
2722 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2723 		cipher_xform = &xform->cipher;
2724 		if (xform->next != NULL)
2725 			auth_xform = &xform->next->auth;
2726 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2727 		auth_xform = &xform->auth;
2728 		if (xform->next != NULL)
2729 			cipher_xform = &xform->next->cipher;
2730 	} else {
2731 		DPAA_SEC_ERR("Invalid crypto type");
2732 		return -EINVAL;
2733 	}
2734 
2735 	session->proto_alg = conf->protocol;
2736 	session->ctxt = DPAA_SEC_PDCP;
2737 
2738 	if (cipher_xform) {
2739 		switch (cipher_xform->algo) {
2740 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2741 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2742 			break;
2743 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2744 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2745 			break;
2746 		case RTE_CRYPTO_CIPHER_AES_CTR:
2747 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2748 			break;
2749 		case RTE_CRYPTO_CIPHER_NULL:
2750 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2751 			break;
2752 		default:
2753 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2754 				      cipher_xform->algo);
2755 			return -1;
2756 		}
2757 
2758 		session->cipher_key.data = rte_zmalloc(NULL,
2759 					       cipher_xform->key.length,
2760 					       RTE_CACHE_LINE_SIZE);
2761 		if (session->cipher_key.data == NULL &&
2762 				cipher_xform->key.length > 0) {
2763 			DPAA_SEC_ERR("No Memory for cipher key");
2764 			return -ENOMEM;
2765 		}
2766 		session->cipher_key.length = cipher_xform->key.length;
2767 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2768 			cipher_xform->key.length);
2769 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2770 					DIR_ENC : DIR_DEC;
2771 		session->cipher_alg = cipher_xform->algo;
2772 	} else {
2773 		session->cipher_key.data = NULL;
2774 		session->cipher_key.length = 0;
2775 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2776 		session->dir = DIR_ENC;
2777 	}
2778 
2779 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2780 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2781 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2782 			DPAA_SEC_ERR(
2783 				"PDCP Seq Num size should be 5/12 bits for cmode");
2784 			goto out;
2785 		}
2786 	}
2787 
2788 	if (auth_xform) {
2789 		switch (auth_xform->algo) {
2790 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2791 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2792 			break;
2793 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
2794 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2795 			break;
2796 		case RTE_CRYPTO_AUTH_AES_CMAC:
2797 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2798 			break;
2799 		case RTE_CRYPTO_AUTH_NULL:
2800 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2801 			break;
2802 		default:
2803 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2804 				      auth_xform->algo);
2805 			rte_free(session->cipher_key.data);
2806 			return -1;
2807 		}
2808 		session->auth_key.data = rte_zmalloc(NULL,
2809 						     auth_xform->key.length,
2810 						     RTE_CACHE_LINE_SIZE);
2811 		if (!session->auth_key.data &&
2812 		    auth_xform->key.length > 0) {
2813 			DPAA_SEC_ERR("No Memory for auth key");
2814 			rte_free(session->cipher_key.data);
2815 			return -ENOMEM;
2816 		}
2817 		session->auth_key.length = auth_xform->key.length;
2818 		memcpy(session->auth_key.data, auth_xform->key.data,
2819 		       auth_xform->key.length);
2820 		session->auth_alg = auth_xform->algo;
2821 	} else {
2822 		session->auth_key.data = NULL;
2823 		session->auth_key.length = 0;
2824 		session->auth_alg = 0;
2825 	}
2826 	session->pdcp.domain = pdcp_xform->domain;
2827 	session->pdcp.bearer = pdcp_xform->bearer;
2828 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2829 	session->pdcp.sn_size = pdcp_xform->sn_size;
2830 	session->pdcp.hfn = pdcp_xform->hfn;
2831 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2832 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	if (cipher_xform)
2833 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2834 
2835 	rte_spinlock_lock(&dev_priv->lock);
2836 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2837 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2838 		if (session->inq[i] == NULL) {
2839 			DPAA_SEC_ERR("unable to attach sec queue");
2840 			rte_spinlock_unlock(&dev_priv->lock);
2841 			goto out;
2842 		}
2843 	}
2844 	rte_spinlock_unlock(&dev_priv->lock);
2845 	return 0;
2846 out:
2847 	rte_free(session->auth_key.data);
2848 	rte_free(session->cipher_key.data);
2849 	memset(session, 0, sizeof(dpaa_sec_session));
2850 	return -1;
2851 }
2852 
2853 static int
2854 dpaa_sec_security_session_create(void *dev,
2855 				 struct rte_security_session_conf *conf,
2856 				 struct rte_security_session *sess,
2857 				 struct rte_mempool *mempool)
2858 {
2859 	void *sess_private_data;
2860 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2861 	int ret;
2862 
2863 	if (rte_mempool_get(mempool, &sess_private_data)) {
2864 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2865 		return -ENOMEM;
2866 	}
2867 
2868 	switch (conf->protocol) {
2869 	case RTE_SECURITY_PROTOCOL_IPSEC:
2870 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2871 				sess_private_data);
2872 		break;
2873 	case RTE_SECURITY_PROTOCOL_PDCP:
2874 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
2875 				sess_private_data);
2876 		break;
2877 	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
2878 		return -ENOTSUP;
2879 	default:
		rte_mempool_put(mempool, sess_private_data);
2880 		return -EINVAL;
2881 	}
2882 	if (ret != 0) {
2883 		DPAA_SEC_ERR("failed to configure session parameters");
2884 		/* Return session to mempool */
2885 		rte_mempool_put(mempool, sess_private_data);
2886 		return ret;
2887 	}
2888 
2889 	set_sec_session_private_data(sess, sess_private_data);
2890 
2891 	return ret;
2892 }
2893 
2894 /** Clear the memory of session so it doesn't leave key material behind */
2895 static int
2896 dpaa_sec_security_session_destroy(void *dev,
2897 		struct rte_security_session *sess)
2898 {
2899 	PMD_INIT_FUNC_TRACE();
2900 	void *sess_priv = get_sec_session_private_data(sess);
2901 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2902 
2903 	if (sess_priv) {
2904 		free_session_memory((struct rte_cryptodev *)dev, s);
2905 		set_sec_session_private_data(sess, NULL);
2906 	}
2907 	return 0;
2908 }
2909 
2910 static int
2911 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2912 		       struct rte_cryptodev_config *config __rte_unused)
2913 {
2914 	PMD_INIT_FUNC_TRACE();
2915 
2916 	return 0;
2917 }
2918 
2919 static int
2920 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2921 {
2922 	PMD_INIT_FUNC_TRACE();
2923 	return 0;
2924 }
2925 
2926 static void
2927 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2928 {
2929 	PMD_INIT_FUNC_TRACE();
2930 }
2931 
2932 static int
2933 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2934 {
2935 	PMD_INIT_FUNC_TRACE();
2936 
2937 	if (dev == NULL)
2938 		return -ENODEV;
2939 
2940 	return 0;
2941 }
2942 
2943 static void
2944 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2945 		       struct rte_cryptodev_info *info)
2946 {
2947 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2948 
2949 	PMD_INIT_FUNC_TRACE();
2950 	if (info != NULL) {
2951 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2952 		info->feature_flags = dev->feature_flags;
2953 		info->capabilities = dpaa_sec_capabilities;
2954 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2955 		info->driver_id = cryptodev_driver_id;
2956 	}
2957 }
2958 
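/* DQRR callback used when a queue pair is bound to an eventdev queue with
 * parallel scheduling: translate the SEC response into an rte_event
 * carrying the completed crypto op and consume the DQRR entry.
 */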
2959 static enum qman_cb_dqrr_result
2960 dpaa_sec_process_parallel_event(void *event,
2961 			struct qman_portal *qm __always_unused,
2962 			struct qman_fq *outq,
2963 			const struct qm_dqrr_entry *dqrr,
2964 			void **bufs)
2965 {
2966 	const struct qm_fd *fd;
2967 	struct dpaa_sec_job *job;
2968 	struct dpaa_sec_op_ctx *ctx;
2969 	struct rte_event *ev = (struct rte_event *)event;
2970 
2971 	fd = &dqrr->fd;
2972 
2973 	/* sg is embedded in an op ctx,
2974 	 * sg[0] is for output
2975 	 * sg[1] for input
2976 	 */
2977 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
2978 
2979 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
2980 	ctx->fd_status = fd->status;
2981 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
2982 		struct qm_sg_entry *sg_out;
2983 		uint32_t len;
2984 
2985 		sg_out = &job->sg[0];
2986 		hw_sg_to_cpu(sg_out);
2987 		len = sg_out->length;
2988 		ctx->op->sym->m_src->pkt_len = len;
2989 		ctx->op->sym->m_src->data_len = len;
2990 	}
2991 	if (!ctx->fd_status) {
2992 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2993 	} else {
2994 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
2995 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2996 	}
2997 	ev->event_ptr = (void *)ctx->op;
2998 
2999 	ev->flow_id = outq->ev.flow_id;
3000 	ev->sub_event_type = outq->ev.sub_event_type;
3001 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3002 	ev->op = RTE_EVENT_OP_NEW;
3003 	ev->sched_type = outq->ev.sched_type;
3004 	ev->queue_id = outq->ev.queue_id;
3005 	ev->priority = outq->ev.priority;
3006 	*bufs = (void *)ctx->op;
3007 
3008 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3009 
3010 	return qman_cb_dqrr_consume;
3011 }
3012 
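/* Same as the parallel handler, but for atomic scheduling: the DQRR entry
 * is held (deferred) and its index recorded, so the flow remains locked to
 * this core until the application releases the event.
 */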
3013 static enum qman_cb_dqrr_result
3014 dpaa_sec_process_atomic_event(void *event,
3015 			struct qman_portal *qm __rte_unused,
3016 			struct qman_fq *outq,
3017 			const struct qm_dqrr_entry *dqrr,
3018 			void **bufs)
3019 {
3020 	u8 index;
3021 	const struct qm_fd *fd;
3022 	struct dpaa_sec_job *job;
3023 	struct dpaa_sec_op_ctx *ctx;
3024 	struct rte_event *ev = (struct rte_event *)event;
3025 
3026 	fd = &dqrr->fd;
3027 
3028 	/* sg is embedded in an op ctx,
3029 	 * sg[0] is for output
3030 	 * sg[1] for input
3031 	 */
3032 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3033 
3034 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3035 	ctx->fd_status = fd->status;
3036 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3037 		struct qm_sg_entry *sg_out;
3038 		uint32_t len;
3039 
3040 		sg_out = &job->sg[0];
3041 		hw_sg_to_cpu(sg_out);
3042 		len = sg_out->length;
3043 		ctx->op->sym->m_src->pkt_len = len;
3044 		ctx->op->sym->m_src->data_len = len;
3045 	}
3046 	if (!ctx->fd_status) {
3047 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3048 	} else {
3049 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3050 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3051 	}
3052 	ev->event_ptr = (void *)ctx->op;
3053 	ev->flow_id = outq->ev.flow_id;
3054 	ev->sub_event_type = outq->ev.sub_event_type;
3055 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3056 	ev->op = RTE_EVENT_OP_NEW;
3057 	ev->sched_type = outq->ev.sched_type;
3058 	ev->queue_id = outq->ev.queue_id;
3059 	ev->priority = outq->ev.priority;
3060 
3061 	/* Save active dqrr entries */
3062 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3063 	DPAA_PER_LCORE_DQRR_SIZE++;
3064 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3065 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3066 	ev->impl_opaque = index + 1;
3067 	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3068 	*bufs = (void *)ctx->op;
3069 
3070 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3071 
3072 	return qman_cb_dqrr_defer;
3073 }
3074 
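/* Attach a queue pair's output FQ to an eventdev channel: pick the
 * parallel or atomic DQRR callback based on the event's sched_type and
 * re-initialise the FQ towards the given channel.
 */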
3075 int
3076 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3077 		int qp_id,
3078 		uint16_t ch_id,
3079 		const struct rte_event *event)
3080 {
3081 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3082 	struct qm_mcc_initfq opts = {0};
3083 
3084 	int ret;
3085 
3086 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3087 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3088 	opts.fqd.dest.channel = ch_id;
3089 
3090 	switch (event->sched_type) {
3091 	case RTE_SCHED_TYPE_ATOMIC:
3092 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3093 		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is not needed
3094 		 * when HOLD_ACTIVE is set.
3095 		 */
3096 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3097 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3098 		break;
3099 	case RTE_SCHED_TYPE_ORDERED:
3100 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3101 		return -1;
3102 	default:
3103 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3104 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3105 		break;
3106 	}
3107 
3108 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3109 	if (unlikely(ret)) {
3110 		DPAA_SEC_ERR("unable to init caam source fq!");
3111 		return ret;
3112 	}
3113 
3114 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3115 
3116 	return 0;
3117 }
3118 
3119 int
3120 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3121 			int qp_id)
3122 {
3123 	struct qm_mcc_initfq opts = {0};
3124 	int ret;
3125 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3126 
3127 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3128 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3129 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3130 	qp->outq.cb.ern  = ern_sec_fq_handler;
3131 	qman_retire_fq(&qp->outq, NULL);
3132 	qman_oos_fq(&qp->outq);
3133 	ret = qman_init_fq(&qp->outq, 0, &opts);
3134 	if (ret)
3135 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3136 	qp->outq.cb.dqrr = NULL;
3137 
3138 	return ret;
3139 }
3140 
3141 static struct rte_cryptodev_ops crypto_ops = {
3142 	.dev_configure	      = dpaa_sec_dev_configure,
3143 	.dev_start	      = dpaa_sec_dev_start,
3144 	.dev_stop	      = dpaa_sec_dev_stop,
3145 	.dev_close	      = dpaa_sec_dev_close,
3146 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3147 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3148 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3149 	.queue_pair_count     = dpaa_sec_queue_pair_count,
3150 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3151 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3152 	.sym_session_clear        = dpaa_sec_sym_session_clear
3153 };
3154 
3155 static const struct rte_security_capability *
3156 dpaa_sec_capabilities_get(void *device __rte_unused)
3157 {
3158 	return dpaa_sec_security_cap;
3159 }
3160 
3161 static const struct rte_security_ops dpaa_sec_security_ops = {
3162 	.session_create = dpaa_sec_security_session_create,
3163 	.session_update = NULL,
3164 	.session_stats_get = NULL,
3165 	.session_destroy = dpaa_sec_security_session_destroy,
3166 	.set_pkt_metadata = NULL,
3167 	.capabilities_get = dpaa_sec_capabilities_get
3168 };
3169 
3170 static int
3171 dpaa_sec_uninit(struct rte_cryptodev *dev)
3172 {
3173 	struct dpaa_sec_dev_private *internals;
3174 
3175 	if (dev == NULL)
3176 		return -ENODEV;
3177 
3178 	internals = dev->data->dev_private;
3179 	rte_free(dev->security_ctx);
3180 
3181 	rte_free(internals);
3182 
3183 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3184 		      dev->data->name, rte_socket_id());
3185 
3186 	return 0;
3187 }
3188 
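/* Per-device initialisation: set the ops and burst functions and, in the
 * primary process only, create the security context, the per-queue-pair
 * response FQs and the pool of session input FQs.
 */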
3189 static int
3190 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3191 {
3192 	struct dpaa_sec_dev_private *internals;
3193 	struct rte_security_ctx *security_instance;
3194 	struct dpaa_sec_qp *qp;
3195 	uint32_t i, flags;
3196 	int ret;
3197 
3198 	PMD_INIT_FUNC_TRACE();
3199 
3200 	cryptodev->driver_id = cryptodev_driver_id;
3201 	cryptodev->dev_ops = &crypto_ops;
3202 
3203 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3204 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3205 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3206 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3207 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3208 			RTE_CRYPTODEV_FF_SECURITY |
3209 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3210 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3211 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3212 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3213 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3214 
3215 	internals = cryptodev->data->dev_private;
3216 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3217 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3218 
3219 	/*
3220 	 * For secondary processes, we don't initialise any further, as the
3221 	 * primary process has already done this work; the burst function
3222 	 * pointers set above are all that a secondary needs.
3223 	 */
3224 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3225 		DPAA_SEC_WARN("Device already init by primary process");
3226 		return 0;
3227 	}
3228 
3229 	/* Initialize security_ctx only for primary process*/
3230 	security_instance = rte_malloc("rte_security_instances_ops",
3231 				sizeof(struct rte_security_ctx), 0);
3232 	if (security_instance == NULL)
3233 		return -ENOMEM;
3234 	security_instance->device = (void *)cryptodev;
3235 	security_instance->ops = &dpaa_sec_security_ops;
3236 	security_instance->sess_cnt = 0;
3237 	cryptodev->security_ctx = security_instance;
3238 
3239 	rte_spinlock_init(&internals->lock);
3240 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3241 		/* init qman fq for queue pair */
3242 		qp = &internals->qps[i];
3243 		ret = dpaa_sec_init_tx(&qp->outq);
3244 		if (ret) {
3245 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3246 			goto init_error;
3247 		}
3248 	}
3249 
3250 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3251 		QMAN_FQ_FLAG_TO_DCPORTAL;
3252 	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
3253 		/* create rx qman fq for sessions*/
3254 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3255 		if (unlikely(ret != 0)) {
3256 			DPAA_SEC_ERR("sec qman_create_fq failed");
3257 			goto init_error;
3258 		}
3259 	}
3260 
3261 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3262 	return 0;
3263 
3264 init_error:
3265 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3266 
3267 	dpaa_sec_uninit(cryptodev);
3268 	return -EFAULT;
3269 }
3270 
3271 static int
3272 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3273 				struct rte_dpaa_device *dpaa_dev)
3274 {
3275 	struct rte_cryptodev *cryptodev;
3276 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3277 
3278 	int retval;
3279 
3280 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3281 
3282 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3283 	if (cryptodev == NULL)
3284 		return -ENOMEM;
3285 
3286 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3287 		cryptodev->data->dev_private = rte_zmalloc_socket(
3288 					"cryptodev private structure",
3289 					sizeof(struct dpaa_sec_dev_private),
3290 					RTE_CACHE_LINE_SIZE,
3291 					rte_socket_id());
3292 
3293 		if (cryptodev->data->dev_private == NULL)
3294 			rte_panic("Cannot allocate memory for private "
3295 					"device data");
3296 	}
3297 
3298 	dpaa_dev->crypto_dev = cryptodev;
3299 	cryptodev->device = &dpaa_dev->device;
3300 
3301 	/* init user callbacks */
3302 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3303 
3304 	/* if sec device version is not configured */
3305 	if (!rta_get_sec_era()) {
3306 		const struct device_node *caam_node;
3307 
3308 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3309 			const uint32_t *prop = of_get_property(caam_node,
3310 					"fsl,sec-era",
3311 					NULL);
3312 			if (prop) {
3313 				rta_set_sec_era(
3314 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3315 				break;
3316 			}
3317 		}
3318 	}
3319 
3320 	/* Invoke PMD device initialization function */
3321 	retval = dpaa_sec_dev_init(cryptodev);
3322 	if (retval == 0)
3323 		return 0;
3324 
3325 	/* In case of error, cleanup is done */
3326 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3327 		rte_free(cryptodev->data->dev_private);
3328 
3329 	rte_cryptodev_pmd_release_device(cryptodev);
3330 
3331 	return -ENXIO;
3332 }
3333 
3334 static int
3335 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3336 {
3337 	struct rte_cryptodev *cryptodev;
3338 	int ret;
3339 
3340 	cryptodev = dpaa_dev->crypto_dev;
3341 	if (cryptodev == NULL)
3342 		return -ENODEV;
3343 
3344 	ret = dpaa_sec_uninit(cryptodev);
3345 	if (ret)
3346 		return ret;
3347 
3348 	return rte_cryptodev_pmd_destroy(cryptodev);
3349 }
3350 
3351 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3352 	.drv_type = FSL_DPAA_CRYPTO,
3353 	.driver = {
3354 		.name = "DPAA SEC PMD"
3355 	},
3356 	.probe = cryptodev_dpaa_sec_probe,
3357 	.remove = cryptodev_dpaa_sec_remove,
3358 };
3359 
3360 static struct cryptodev_driver dpaa_sec_crypto_drv;
3361 
3362 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3363 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3364 		cryptodev_driver_id);
3365 
3366 RTE_INIT(dpaa_sec_init_log)
3367 {
3368 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
3369 	if (dpaa_logtype_sec >= 0)
3370 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
3371 }
3372