/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, i.e. four entries per
	 * call. Since dpaa_sec_alloc_ctx() runs for every packet,
	 * dcbz_64() is cheaper here than memset().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

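/*
 * Address translation helpers: dpaa_mem_vtop() resolves a virtual address
 * to an IOVA via the enclosing memseg and records the mapping in the
 * dpaax PA-VA table, so that dpaa_mem_ptov() below can usually resolve
 * the reverse direction with a fast table lookup instead of a full
 * rte_mem_iova2virt() walk.
 */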
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets enqueued on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* The SG table is embedded in the op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

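/* Session-type predicates used to select the descriptor builder for an op */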
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}

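/*
 * Build the PDCP shared descriptor for the session: select the SNOW/ZUC/
 * AES/NULL cipher and integrity algorithms, decide whether the keys can
 * be inlined in the descriptor, and construct the control-plane or
 * user-plane encap/decap descriptor via the RTA helpers.
 */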
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      ses->cipher_alg);
		return -1;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

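	/*
	 * rta_inline_query() reads the key lengths staged in sh_desc[0]
	 * and sh_desc[1] and returns a bitmask in sh_desc[2] telling which
	 * keys fit inline in the shared descriptor; keys that do not fit
	 * are passed by reference (RTA_DATA_PTR) below instead.
	 */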
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}

/* prepare the IPsec protocol command block for the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, SHR_NEVER, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* auth_only_len is set to 0 here; the per-packet value
		 * overwrites it through the FD at enqueue time.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers we set QM_VDQCR_EXACT and
	 * get exactly the number requested. Otherwise the flag is left
	 * unset, in which case the dequeue may return up to two buffers
	 * more than requested, so we ask for two fewer in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* The SG table is embedded in the op ctx:
		 * sg[0] is for output,
		 * sg[1] is for input.
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

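/*
 * Build a compound FD for an auth-only op over a scattered mbuf:
 * sg[0] receives the digest, sg[1] is an extension entry pointing at
 * an input SG table built from the mbuf segments (plus a copy of the
 * expected digest when verifying, so the hardware can check it).
 */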
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *           |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 *           ^
 *           |
 *      mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

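/*
 * Build a compound FD for a cipher-only op over scattered mbufs: the
 * output SG table covers the destination segments, and the input SG
 * table carries the IV followed by the source segments.
 */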
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

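/*
 * Build a compound FD for an AEAD (GCM) op over scattered mbufs: input is
 * IV + optional AAD + payload (+ the saved digest when decrypting), and
 * output is the payload (+ the digest when encrypting).
 */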
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

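/*
 * Build a compound FD for a chained cipher+auth op over scattered mbufs:
 * output covers the authenticated range (+ digest when encrypting), input
 * is IV + the authenticated range (+ the saved digest when decrypting).
 */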
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

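/*
 * For protocol (IPsec/PDCP) offload the whole packet is handed to SEC:
 * the input SG covers the full packet and the output SG is given the
 * remaining buffer space, since the protocol transform can grow the
 * frame (e.g. tunnel header and trailer on IPsec encap).
 */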
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}

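/*
 * Burst enqueue entry point, wired to the cryptodev enqueue op. A minimal
 * caller sketch (illustrative only; dev_id and qp_id are placeholders):
 *
 *	struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *	uint16_t n;
 *
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, DPAA_SEC_BURST);
 *	... later, poll results with rte_cryptodev_dequeue_burst() on the
 *	same queue pair ...
 */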
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	/* remember the requested count; nb_ops is consumed by the loop */
	uint16_t nb_ops_in = nb_ops;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len, index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
					cf = build_proto_sg(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* auth_only_len is built as 0 in the descriptor;
			 * here it is overwritten per packet via fd->cmd,
			 * which updates the DPOVRD register.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;

			/* For PDCP, the per-packet HFN is stored in the
			 * mbuf private area, after sym_op.
			 */
			if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd,
					is_proto_pdcp(ses));
			}

		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	/* count against the requested total; nb_ops is zero by now */
	dpaa_qp->tx_errs += nb_ops_in - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
2051 	       xform->auth.key.length);
2052 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2053 			DIR_ENC : DIR_DEC;
2054 
2055 	return 0;
2056 }
2057 
2058 static int
2059 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2060 		   struct rte_crypto_sym_xform *xform,
2061 		   dpaa_sec_session *session)
2062 {
2063 	session->aead_alg = xform->aead.algo;
2064 	session->iv.length = xform->aead.iv.length;
2065 	session->iv.offset = xform->aead.iv.offset;
2066 	session->auth_only_len = xform->aead.aad_length;
2067 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2068 					     RTE_CACHE_LINE_SIZE);
2069 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2070 		DPAA_SEC_ERR("No Memory for aead key\n");
2071 		return -ENOMEM;
2072 	}
2073 	session->aead_key.length = xform->aead.key.length;
2074 	session->digest_length = xform->aead.digest_length;
2075 
2076 	memcpy(session->aead_key.data, xform->aead.key.data,
2077 	       xform->aead.key.length);
2078 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2079 			DIR_ENC : DIR_DEC;
2080 
2081 	return 0;
2082 }
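
/*
 * The three init helpers above copy the session key material into
 * zeroed, cache-line-aligned buffers (rte_zmalloc), presumably so the
 * SEC engine can reference them when the shared descriptor is built,
 * and derive the data-path direction (DIR_ENC/DIR_DEC) from the xform
 * operation type.
 */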
2083 
2084 static struct qman_fq *
2085 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2086 {
2087 	unsigned int i;
2088 
2089 	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2090 		if (qi->inq_attach[i] == 0) {
2091 			qi->inq_attach[i] = 1;
2092 			return &qi->inq[i];
2093 		}
2094 	}
2095 	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2096 
2097 	return NULL;
2098 }
2099 
2100 static int
2101 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2102 {
2103 	unsigned int i;
2104 
2105 	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2106 		if (&qi->inq[i] == fq) {
2107 			qman_retire_fq(fq, NULL);
2108 			qman_oos_fq(fq);
2109 			qi->inq_attach[i] = 0;
2110 			return 0;
2111 		}
2112 	}
2113 	return -1;
2114 }
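
/*
 * dpaa_sec_attach_rxq()/dpaa_sec_detach_rxq() manage a fixed pool of
 * pre-created ingress frame queues (one slot per session per core,
 * tracked in inq_attach[]). Attach is serialized by the caller with
 * internals->lock; detach also retires the FQ and takes it
 * out-of-service before freeing the slot.
 */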
2115 
2116 static int
2117 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2118 {
2119 	int ret;
2120 
2121 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2122 	ret = dpaa_sec_prep_cdb(sess);
2123 	if (ret) {
2124 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2125 		return -1;
2126 	}
2127 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2128 		ret = rte_dpaa_portal_init((void *)0);
2129 		if (ret) {
2130 			DPAA_SEC_ERR("Failure in affining portal");
2131 			return ret;
2132 		}
2133 	}
2134 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2135 			       dpaa_mem_vtop(&sess->cdb),
2136 			       qman_fq_fqid(&qp->outq));
2137 	if (ret)
2138 		DPAA_SEC_ERR("Unable to init sec queue");
2139 
2140 	return ret;
2141 }
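
/*
 * Session-to-queue-pair binding: record the qp for the current lcore,
 * (re)build the shared descriptor (CDB), affine the calling thread to
 * a portal if it is not already, and point the session's ingress FQ at
 * the qp's egress FQ.
 */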
2142 
2143 static int
2144 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2145 			    struct rte_crypto_sym_xform *xform, void *sess)
2146 {
2147 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2148 	dpaa_sec_session *session = sess;
2149 	uint32_t i;
2150 
2151 	PMD_INIT_FUNC_TRACE();
2152 
2153 	if (unlikely(sess == NULL)) {
2154 		DPAA_SEC_ERR("invalid session struct");
2155 		return -EINVAL;
2156 	}
2157 	memset(session, 0, sizeof(dpaa_sec_session));
2158 
2159 	/* Default IV length = 0 */
2160 	session->iv.length = 0;
2161 
2162 	/* Cipher Only */
2163 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2164 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2165 		dpaa_sec_cipher_init(dev, xform, session);
2166 
2167 	/* Authentication Only */
2168 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2169 		   xform->next == NULL) {
2170 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2171 		dpaa_sec_auth_init(dev, xform, session);
2172 
2173 	/* Cipher then Authenticate */
2174 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2175 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2176 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2177 			dpaa_sec_cipher_init(dev, xform, session);
2178 			dpaa_sec_auth_init(dev, xform->next, session);
2179 		} else {
2180 			DPAA_SEC_ERR("Not supported: Cipher then Auth for decryption");
2181 			return -EINVAL;
2182 		}
2183 
2184 	/* Authenticate then Cipher */
2185 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2186 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2187 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2188 			dpaa_sec_auth_init(dev, xform, session);
2189 			dpaa_sec_cipher_init(dev, xform->next, session);
2190 		} else {
2191 			DPAA_SEC_ERR("Not supported: Auth then Cipher for encryption");
2192 			return -EINVAL;
2193 		}
2194 
2195 	/* AEAD operation for AES-GCM kind of Algorithms */
2196 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2197 		   xform->next == NULL) {
2198 		dpaa_sec_aead_init(dev, xform, session);
2199 
2200 	} else {
2201 		DPAA_SEC_ERR("Invalid crypto type");
2202 		return -EINVAL;
2203 	}
2204 	rte_spinlock_lock(&internals->lock);
2205 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2206 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2207 		if (session->inq[i] == NULL) {
2208 			DPAA_SEC_ERR("unable to attach sec queue");
2209 			rte_spinlock_unlock(&internals->lock);
2210 			goto err1;
2211 		}
2212 	}
2213 	rte_spinlock_unlock(&internals->lock);
2214 
2215 	return 0;
2216 
2217 err1:
2218 	rte_free(session->cipher_key.data);
2219 	rte_free(session->auth_key.data);
2220 	memset(session, 0, sizeof(dpaa_sec_session));
2221 
2222 	return -EINVAL;
2223 }
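
/*
 * For reference, a cipher-then-auth chain accepted by the parser above
 * could be assembled roughly as follows (a sketch; the field values
 * shown are hypothetical):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.next = &auth,
 *	};
 *
 * The reverse order (auth first, then cipher) is only accepted for the
 * decrypt direction.
 */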
2224 
2225 static int
2226 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2227 		struct rte_crypto_sym_xform *xform,
2228 		struct rte_cryptodev_sym_session *sess,
2229 		struct rte_mempool *mempool)
2230 {
2231 	void *sess_private_data;
2232 	int ret;
2233 
2234 	PMD_INIT_FUNC_TRACE();
2235 
2236 	if (rte_mempool_get(mempool, &sess_private_data)) {
2237 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2238 		return -ENOMEM;
2239 	}
2240 
2241 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2242 	if (ret != 0) {
2243 		DPAA_SEC_ERR("failed to configure session parameters");
2244 
2245 		/* Return session to mempool */
2246 		rte_mempool_put(mempool, sess_private_data);
2247 		return ret;
2248 	}
2249 
2250 	set_sym_session_private_data(sess, dev->driver_id,
2251 			sess_private_data);
2252 
2253 
2254 	return 0;
2255 }
2256 
2257 static inline void
2258 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2259 {
2260 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2261 	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2262 	uint8_t i;
2263 
2264 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2265 		if (s->inq[i])
2266 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2267 		s->inq[i] = NULL;
2268 		s->qp[i] = NULL;
2269 	}
2270 	rte_free(s->cipher_key.data);
2271 	rte_free(s->auth_key.data);
2272 	memset(s, 0, sizeof(dpaa_sec_session));
2273 	rte_mempool_put(sess_mp, (void *)s);
2274 }
2275 
2276 /** Clear the memory of session so it doesn't leave key material behind */
2277 static void
2278 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2279 		struct rte_cryptodev_sym_session *sess)
2280 {
2281 	PMD_INIT_FUNC_TRACE();
2282 	uint8_t index = dev->driver_id;
2283 	void *sess_priv = get_sym_session_private_data(sess, index);
2284 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2285 
2286 	if (sess_priv) {
2287 		free_session_memory(dev, s);
2288 		set_sym_session_private_data(sess, index, NULL);
2289 	}
2290 }
2291 
2292 static int
2293 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2294 			   struct rte_security_session_conf *conf,
2295 			   void *sess)
2296 {
2297 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2298 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2299 	struct rte_crypto_auth_xform *auth_xform = NULL;
2300 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2301 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2302 	uint32_t i;
2303 
2304 	PMD_INIT_FUNC_TRACE();
2305 
2306 	memset(session, 0, sizeof(dpaa_sec_session));
2307 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2308 		cipher_xform = &conf->crypto_xform->cipher;
2309 		if (conf->crypto_xform->next)
2310 			auth_xform = &conf->crypto_xform->next->auth;
2311 	} else {
2312 		auth_xform = &conf->crypto_xform->auth;
2313 		if (conf->crypto_xform->next)
2314 			cipher_xform = &conf->crypto_xform->next->cipher;
2315 	}
2316 	session->proto_alg = conf->protocol;
2317 
2318 	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2319 		session->cipher_key.data = rte_zmalloc(NULL,
2320 						       cipher_xform->key.length,
2321 						       RTE_CACHE_LINE_SIZE);
2322 		if (session->cipher_key.data == NULL &&
2323 				cipher_xform->key.length > 0) {
2324 			DPAA_SEC_ERR("No Memory for cipher key");
2325 			return -ENOMEM;
2326 		}
2327 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2328 				cipher_xform->key.length);
2329 		session->cipher_key.length = cipher_xform->key.length;
2330 
2331 		switch (cipher_xform->algo) {
2332 		case RTE_CRYPTO_CIPHER_AES_CBC:
2333 		case RTE_CRYPTO_CIPHER_3DES_CBC:
2334 		case RTE_CRYPTO_CIPHER_AES_CTR:
2335 			break;
2336 		default:
2337 			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2338 				cipher_xform->algo);
2339 			goto out;
2340 		}
2341 		session->cipher_alg = cipher_xform->algo;
2342 	} else {
2343 		session->cipher_key.data = NULL;
2344 		session->cipher_key.length = 0;
2345 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2346 	}
2347 
2348 	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2349 		session->auth_key.data = rte_zmalloc(NULL,
2350 						auth_xform->key.length,
2351 						RTE_CACHE_LINE_SIZE);
2352 		if (session->auth_key.data == NULL &&
2353 				auth_xform->key.length > 0) {
2354 			DPAA_SEC_ERR("No Memory for auth key");
2355 			rte_free(session->cipher_key.data);
2356 			return -ENOMEM;
2357 		}
2358 		memcpy(session->auth_key.data, auth_xform->key.data,
2359 				auth_xform->key.length);
2360 		session->auth_key.length = auth_xform->key.length;
2361 
2362 		switch (auth_xform->algo) {
2363 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
2364 		case RTE_CRYPTO_AUTH_MD5_HMAC:
2365 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
2366 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
2367 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
2368 		case RTE_CRYPTO_AUTH_AES_CMAC:
2369 			break;
2370 		default:
2371 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2372 				auth_xform->algo);
2373 			goto out;
2374 		}
2375 		session->auth_alg = auth_xform->algo;
2376 	} else {
2377 		session->auth_key.data = NULL;
2378 		session->auth_key.length = 0;
2379 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2380 	}
2381 
2382 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2383 		if (ipsec_xform->tunnel.type ==
2384 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2385 			memset(&session->encap_pdb, 0,
2386 				sizeof(struct ipsec_encap_pdb) +
2387 				sizeof(session->ip4_hdr));
2388 			session->ip4_hdr.ip_v = IPVERSION;
2389 			session->ip4_hdr.ip_hl = 5;
2390 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2391 						sizeof(session->ip4_hdr));
2392 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2393 			session->ip4_hdr.ip_id = 0;
2394 			session->ip4_hdr.ip_off = 0;
2395 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2396 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2397 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2398 					IPPROTO_ESP : IPPROTO_AH;
2399 			session->ip4_hdr.ip_sum = 0;
2400 			session->ip4_hdr.ip_src =
2401 					ipsec_xform->tunnel.ipv4.src_ip;
2402 			session->ip4_hdr.ip_dst =
2403 					ipsec_xform->tunnel.ipv4.dst_ip;
2404 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2405 						(void *)&session->ip4_hdr,
2406 						sizeof(struct ip));
2407 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2408 		} else if (ipsec_xform->tunnel.type ==
2409 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2410 			memset(&session->encap_pdb, 0,
2411 				sizeof(struct ipsec_encap_pdb) +
2412 				sizeof(session->ip6_hdr));
2413 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2414 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2415 				((ipsec_xform->tunnel.ipv6.dscp <<
2416 					RTE_IPV6_HDR_TC_SHIFT) &
2417 					RTE_IPV6_HDR_TC_MASK) |
2418 				((ipsec_xform->tunnel.ipv6.flabel <<
2419 					RTE_IPV6_HDR_FL_SHIFT) &
2420 					RTE_IPV6_HDR_FL_MASK));
2421 			/* Payload length will be updated by HW */
2422 			session->ip6_hdr.payload_len = 0;
2423 			session->ip6_hdr.hop_limits =
2424 					ipsec_xform->tunnel.ipv6.hlimit;
2425 			session->ip6_hdr.proto = (ipsec_xform->proto ==
2426 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2427 					IPPROTO_ESP : IPPROTO_AH;
2428 			memcpy(&session->ip6_hdr.src_addr,
2429 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
2430 			memcpy(&session->ip6_hdr.dst_addr,
2431 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2432 			session->encap_pdb.ip_hdr_len =
2433 						sizeof(struct rte_ipv6_hdr);
2434 		}
2435 		session->encap_pdb.options =
2436 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2437 			PDBOPTS_ESP_OIHI_PDB_INL |
2438 			PDBOPTS_ESP_IVSRC |
2439 			PDBHMO_ESP_ENCAP_DTTL |
2440 			PDBHMO_ESP_SNR;
2441 		if (ipsec_xform->options.esn)
2442 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2443 		session->encap_pdb.spi = ipsec_xform->spi;
2444 		session->dir = DIR_ENC;
2445 	} else if (ipsec_xform->direction ==
2446 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2447 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2448 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2449 			session->decap_pdb.options = sizeof(struct ip) << 16;
2450 		else
2451 			session->decap_pdb.options =
2452 					sizeof(struct rte_ipv6_hdr) << 16;
2453 		if (ipsec_xform->options.esn)
2454 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2455 		session->dir = DIR_DEC;
2456 	} else
2457 		goto out;
2458 	rte_spinlock_lock(&internals->lock);
2459 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2460 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2461 		if (session->inq[i] == NULL) {
2462 			DPAA_SEC_ERR("unable to attach sec queue");
2463 			rte_spinlock_unlock(&internals->lock);
2464 			goto out;
2465 		}
2466 	}
2467 	rte_spinlock_unlock(&internals->lock);
2468 
2469 	return 0;
2470 out:
2471 	rte_free(session->auth_key.data);
2472 	rte_free(session->cipher_key.data);
2473 	memset(session, 0, sizeof(dpaa_sec_session));
2474 	return -1;
2475 }
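
/*
 * Summary of the IPsec setup above: for egress, a template IPv4/IPv6
 * tunnel header is prefilled in the session and inlined into the
 * encapsulation PDB (PDBOPTS_ESP_OIHI_PDB_INL) so SEC can prepend it
 * per packet; for ingress, only the outer header length to strip is
 * encoded in decap_pdb.options. ESN is flagged in either direction
 * when requested.
 */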
2476 
2477 static int
2478 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2479 			  struct rte_security_session_conf *conf,
2480 			  void *sess)
2481 {
2482 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2483 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2484 	struct rte_crypto_auth_xform *auth_xform = NULL;
2485 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2486 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2487 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2488 	uint32_t i;
2489 
2490 	PMD_INIT_FUNC_TRACE();
2491 
2492 	memset(session, 0, sizeof(dpaa_sec_session));
2493 
2494 	/* find xfrm types */
2495 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2496 		cipher_xform = &xform->cipher;
2497 		if (xform->next != NULL)
2498 			auth_xform = &xform->next->auth;
2499 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2500 		auth_xform = &xform->auth;
2501 		if (xform->next != NULL)
2502 			cipher_xform = &xform->next->cipher;
2503 	} else {
2504 		DPAA_SEC_ERR("Invalid crypto type");
2505 		return -EINVAL;
2506 	}
2507 
2508 	session->proto_alg = conf->protocol;
2509 	if (cipher_xform) {
2510 		session->cipher_key.data = rte_zmalloc(NULL,
2511 					       cipher_xform->key.length,
2512 					       RTE_CACHE_LINE_SIZE);
2513 		if (session->cipher_key.data == NULL &&
2514 				cipher_xform->key.length > 0) {
2515 			DPAA_SEC_ERR("No Memory for cipher key");
2516 			return -ENOMEM;
2517 		}
2518 		session->cipher_key.length = cipher_xform->key.length;
2519 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2520 			cipher_xform->key.length);
2521 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2522 					DIR_ENC : DIR_DEC;
2523 		session->cipher_alg = cipher_xform->algo;
2524 	} else {
2525 		session->cipher_key.data = NULL;
2526 		session->cipher_key.length = 0;
2527 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2528 		session->dir = DIR_ENC;
2529 	}
2530 
2531 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2532 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2533 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2534 			DPAA_SEC_ERR(
2535 				"PDCP Seq Num size should be 5/12 bits for cmode");
2536 			goto out;
2537 		}
2538 	}
2539 
2540 	if (auth_xform) {
2541 		session->auth_key.data = rte_zmalloc(NULL,
2542 						     auth_xform->key.length,
2543 						     RTE_CACHE_LINE_SIZE);
2544 		if (!session->auth_key.data &&
2545 		    auth_xform->key.length > 0) {
2546 			DPAA_SEC_ERR("No Memory for auth key");
2547 			rte_free(session->cipher_key.data);
2548 			return -ENOMEM;
2549 		}
2550 		session->auth_key.length = auth_xform->key.length;
2551 		memcpy(session->auth_key.data, auth_xform->key.data,
2552 		       auth_xform->key.length);
2553 		session->auth_alg = auth_xform->algo;
2554 	} else {
2555 		session->auth_key.data = NULL;
2556 		session->auth_key.length = 0;
2557 		session->auth_alg = 0;
2558 	}
2559 	session->pdcp.domain = pdcp_xform->domain;
2560 	session->pdcp.bearer = pdcp_xform->bearer;
2561 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2562 	session->pdcp.sn_size = pdcp_xform->sn_size;
2563 	session->pdcp.hfn = pdcp_xform->hfn;
2564 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2565 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2566 	/* cipher_xform may be absent for auth-only PDCP sessions */
	session->pdcp.hfn_ovd_offset =
			cipher_xform ? cipher_xform->iv.offset : 0;
2567 
2568 	rte_spinlock_lock(&dev_priv->lock);
2569 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2570 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2571 		if (session->inq[i] == NULL) {
2572 			DPAA_SEC_ERR("unable to attach sec queue");
2573 			rte_spinlock_unlock(&dev_priv->lock);
2574 			goto out;
2575 		}
2576 	}
2577 	rte_spinlock_unlock(&dev_priv->lock);
2578 	return 0;
2579 out:
2580 	rte_free(session->auth_key.data);
2581 	rte_free(session->cipher_key.data);
2582 	memset(session, 0, sizeof(dpaa_sec_session));
2583 	return -1;
2584 }
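
/*
 * PDCP HFN override flow: when hfn_ovrd is set, the per-packet HFN is
 * read on enqueue from the op at pdcp.hfn_ovd_offset (the cipher IV
 * offset is reused for this) and handed to SEC through fd->cmd, i.e.
 * the DPOVRD register, instead of the session HFN.
 */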
2585 
2586 static int
2587 dpaa_sec_security_session_create(void *dev,
2588 				 struct rte_security_session_conf *conf,
2589 				 struct rte_security_session *sess,
2590 				 struct rte_mempool *mempool)
2591 {
2592 	void *sess_private_data;
2593 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2594 	int ret;
2595 
2596 	if (rte_mempool_get(mempool, &sess_private_data)) {
2597 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2598 		return -ENOMEM;
2599 	}
2600 
2601 	switch (conf->protocol) {
2602 	case RTE_SECURITY_PROTOCOL_IPSEC:
2603 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2604 				sess_private_data);
2605 		break;
2606 	case RTE_SECURITY_PROTOCOL_PDCP:
2607 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
2608 				sess_private_data);
2609 		break;
2610 	case RTE_SECURITY_PROTOCOL_MACSEC:
2611 		ret = -ENOTSUP;
		break;
2612 	default:
2613 		ret = -EINVAL;
2614 	}
2615 	if (ret != 0) {
2616 		DPAA_SEC_ERR("failed to configure session parameters");
2617 		/* Return session to mempool */
2618 		rte_mempool_put(mempool, sess_private_data);
2619 		return ret;
2620 	}
2621 
2622 	set_sec_session_private_data(sess, sess_private_data);
2623 
2624 	return ret;
2625 }
2626 
2627 /** Clear the memory of session so it doesn't leave key material behind */
2628 static int
2629 dpaa_sec_security_session_destroy(void *dev,
2630 		struct rte_security_session *sess)
2631 {
2632 	PMD_INIT_FUNC_TRACE();
2633 	void *sess_priv = get_sec_session_private_data(sess);
2634 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2635 
2636 	if (sess_priv) {
2637 		free_session_memory((struct rte_cryptodev *)dev, s);
2638 		set_sec_session_private_data(sess, NULL);
2639 	}
2640 	return 0;
2641 }
2642 
2643 static int
2644 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2645 		       struct rte_cryptodev_config *config __rte_unused)
2646 {
2647 	PMD_INIT_FUNC_TRACE();
2648 
2649 	return 0;
2650 }
2651 
2652 static int
2653 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2654 {
2655 	PMD_INIT_FUNC_TRACE();
2656 	return 0;
2657 }
2658 
2659 static void
2660 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2661 {
2662 	PMD_INIT_FUNC_TRACE();
2663 }
2664 
2665 static int
2666 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2667 {
2668 	PMD_INIT_FUNC_TRACE();
2669 
2670 	if (dev == NULL)
2671 		return -ENODEV;
2672 
2673 	return 0;
2674 }
2675 
2676 static void
2677 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2678 		       struct rte_cryptodev_info *info)
2679 {
2680 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2681 
2682 	PMD_INIT_FUNC_TRACE();
2683 	if (info != NULL) {
2684 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2685 		info->feature_flags = dev->feature_flags;
2686 		info->capabilities = dpaa_sec_capabilities;
2687 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2688 		info->driver_id = cryptodev_driver_id;
2689 	}
2690 }
2691 
2692 static enum qman_cb_dqrr_result
2693 dpaa_sec_process_parallel_event(void *event,
2694 			struct qman_portal *qm __always_unused,
2695 			struct qman_fq *outq,
2696 			const struct qm_dqrr_entry *dqrr,
2697 			void **bufs)
2698 {
2699 	const struct qm_fd *fd;
2700 	struct dpaa_sec_job *job;
2701 	struct dpaa_sec_op_ctx *ctx;
2702 	struct rte_event *ev = (struct rte_event *)event;
2703 
2704 	fd = &dqrr->fd;
2705 
2706 	/* The sg table is embedded in an op ctx:
2707 	 * sg[0] is for output,
2708 	 * sg[1] is for input.
2709 	 */
2710 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
2711 
2712 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
2713 	ctx->fd_status = fd->status;
2714 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
2715 		struct qm_sg_entry *sg_out;
2716 		uint32_t len;
2717 
2718 		sg_out = &job->sg[0];
2719 		hw_sg_to_cpu(sg_out);
2720 		len = sg_out->length;
2721 		ctx->op->sym->m_src->pkt_len = len;
2722 		ctx->op->sym->m_src->data_len = len;
2723 	}
2724 	if (!ctx->fd_status) {
2725 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2726 	} else {
2727 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
2728 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2729 	}
2730 	ev->event_ptr = (void *)ctx->op;
2731 
2732 	ev->flow_id = outq->ev.flow_id;
2733 	ev->sub_event_type = outq->ev.sub_event_type;
2734 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
2735 	ev->op = RTE_EVENT_OP_NEW;
2736 	ev->sched_type = outq->ev.sched_type;
2737 	ev->queue_id = outq->ev.queue_id;
2738 	ev->priority = outq->ev.priority;
2739 	*bufs = (void *)ctx->op;
2740 
2741 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
2742 
2743 	return qman_cb_dqrr_consume;
2744 }
2745 
2746 static enum qman_cb_dqrr_result
2747 dpaa_sec_process_atomic_event(void *event,
2748 			struct qman_portal *qm __rte_unused,
2749 			struct qman_fq *outq,
2750 			const struct qm_dqrr_entry *dqrr,
2751 			void **bufs)
2752 {
2753 	u8 index;
2754 	const struct qm_fd *fd;
2755 	struct dpaa_sec_job *job;
2756 	struct dpaa_sec_op_ctx *ctx;
2757 	struct rte_event *ev = (struct rte_event *)event;
2758 
2759 	fd = &dqrr->fd;
2760 
2761 	/* The sg table is embedded in an op ctx:
2762 	 * sg[0] is for output,
2763 	 * sg[1] is for input.
2764 	 */
2765 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
2766 
2767 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
2768 	ctx->fd_status = fd->status;
2769 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
2770 		struct qm_sg_entry *sg_out;
2771 		uint32_t len;
2772 
2773 		sg_out = &job->sg[0];
2774 		hw_sg_to_cpu(sg_out);
2775 		len = sg_out->length;
2776 		ctx->op->sym->m_src->pkt_len = len;
2777 		ctx->op->sym->m_src->data_len = len;
2778 	}
2779 	if (!ctx->fd_status) {
2780 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2781 	} else {
2782 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
2783 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2784 	}
2785 	ev->event_ptr = (void *)ctx->op;
2786 	ev->flow_id = outq->ev.flow_id;
2787 	ev->sub_event_type = outq->ev.sub_event_type;
2788 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
2789 	ev->op = RTE_EVENT_OP_NEW;
2790 	ev->sched_type = outq->ev.sched_type;
2791 	ev->queue_id = outq->ev.queue_id;
2792 	ev->priority = outq->ev.priority;
2793 
2794 	/* Save active dqrr entries */
2795 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
2796 	DPAA_PER_LCORE_DQRR_SIZE++;
2797 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
2798 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
2799 	ev->impl_opaque = index + 1;
2800 	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
2801 	*bufs = (void *)ctx->op;
2802 
2803 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
2804 
2805 	return qman_cb_dqrr_defer;
2806 }
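
/*
 * Parallel vs. atomic delivery: the parallel handler above lets QMan
 * consume the DQRR entry immediately, while the atomic handler returns
 * qman_cb_dqrr_defer and records the held DQRR slot in the per-lcore
 * bookkeeping, stamping its index into the event and the mbuf so the
 * slot can be released once the event is processed.
 */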
2807 
2808 int
2809 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
2810 		int qp_id,
2811 		uint16_t ch_id,
2812 		const struct rte_event *event)
2813 {
2814 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
2815 	struct qm_mcc_initfq opts = {0};
2816 
2817 	int ret;
2818 
2819 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
2820 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
2821 	opts.fqd.dest.channel = ch_id;
2822 
2823 	switch (event->sched_type) {
2824 	case RTE_SCHED_TYPE_ATOMIC:
2825 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
2826 		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
2827 		 * configuration when HOLD_ACTIVE is set.
2828 		 */
2829 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
2830 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
2831 		break;
2832 	case RTE_SCHED_TYPE_ORDERED:
2833 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
2834 		return -1;
2835 	default:
2836 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
2837 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
2838 		break;
2839 	}
2840 
2841 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
2842 	if (unlikely(ret)) {
2843 		DPAA_SEC_ERR("unable to init caam source fq!");
2844 		return ret;
2845 	}
2846 
2847 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
2848 
2849 	return 0;
2850 }
2851 
2852 int
2853 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
2854 			int qp_id)
2855 {
2856 	struct qm_mcc_initfq opts = {0};
2857 	int ret;
2858 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
2859 
2860 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
2861 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
2862 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
2863 	qp->outq.cb.ern  = ern_sec_fq_handler;
2864 	qman_retire_fq(&qp->outq, NULL);
2865 	qman_oos_fq(&qp->outq);
2866 	ret = qman_init_fq(&qp->outq, 0, &opts);
2867 	if (ret)
2868 		DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
2869 	qp->outq.cb.dqrr = NULL;
2870 
2871 	return ret;
2872 }
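
/*
 * dpaa_sec_eventq_attach()/dpaa_sec_eventq_detach() are used by the
 * DPAA event driver: attach re-initializes the qp's egress FQ to
 * dequeue into an event portal channel with the requested schedule
 * type, while detach appears to restore the plain poll-mode callbacks.
 */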
2873 
2874 static struct rte_cryptodev_ops crypto_ops = {
2875 	.dev_configure	      = dpaa_sec_dev_configure,
2876 	.dev_start	      = dpaa_sec_dev_start,
2877 	.dev_stop	      = dpaa_sec_dev_stop,
2878 	.dev_close	      = dpaa_sec_dev_close,
2879 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2880 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2881 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2882 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2883 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
2884 	.sym_session_configure    = dpaa_sec_sym_session_configure,
2885 	.sym_session_clear        = dpaa_sec_sym_session_clear
2886 };
2887 
2888 static const struct rte_security_capability *
2889 dpaa_sec_capabilities_get(void *device __rte_unused)
2890 {
2891 	return dpaa_sec_security_cap;
2892 }
2893 
2894 static const struct rte_security_ops dpaa_sec_security_ops = {
2895 	.session_create = dpaa_sec_security_session_create,
2896 	.session_update = NULL,
2897 	.session_stats_get = NULL,
2898 	.session_destroy = dpaa_sec_security_session_destroy,
2899 	.set_pkt_metadata = NULL,
2900 	.capabilities_get = dpaa_sec_capabilities_get
2901 };
2902 
2903 static int
2904 dpaa_sec_uninit(struct rte_cryptodev *dev)
2905 {
2906 	struct dpaa_sec_dev_private *internals;
2907 
2908 	if (dev == NULL)
2909 		return -ENODEV;
2910 
2911 	internals = dev->data->dev_private;
2912 	rte_free(dev->security_ctx);
2913 
2914 	rte_free(internals);
2915 
2916 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2917 		      dev->data->name, rte_socket_id());
2918 
2919 	return 0;
2920 }
2921 
2922 static int
2923 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2924 {
2925 	struct dpaa_sec_dev_private *internals;
2926 	struct rte_security_ctx *security_instance;
2927 	struct dpaa_sec_qp *qp;
2928 	uint32_t i, flags;
2929 	int ret;
2930 
2931 	PMD_INIT_FUNC_TRACE();
2932 
2933 	cryptodev->driver_id = cryptodev_driver_id;
2934 	cryptodev->dev_ops = &crypto_ops;
2935 
2936 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2937 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2938 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2939 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2940 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2941 			RTE_CRYPTODEV_FF_SECURITY |
2942 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2943 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2944 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2945 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2946 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2947 
2948 	internals = cryptodev->data->dev_private;
2949 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2950 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2951 
2952 	/*
2953 	 * For secondary processes, we don't initialise any further, as the
2954 	 * primary has already done this work. Just warn and return.
2955 	 */
2957 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2958 		DPAA_SEC_WARN("Device already initialized by primary process");
2959 		return 0;
2960 	}
2961 
2962 	/* Initialize security_ctx only for primary process*/
2963 	security_instance = rte_malloc("rte_security_instances_ops",
2964 				sizeof(struct rte_security_ctx), 0);
2965 	if (security_instance == NULL)
2966 		return -ENOMEM;
2967 	security_instance->device = (void *)cryptodev;
2968 	security_instance->ops = &dpaa_sec_security_ops;
2969 	security_instance->sess_cnt = 0;
2970 	cryptodev->security_ctx = security_instance;
2971 
2972 	rte_spinlock_init(&internals->lock);
2973 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2974 		/* init qman fq for queue pair */
2975 		qp = &internals->qps[i];
2976 		ret = dpaa_sec_init_tx(&qp->outq);
2977 		if (ret) {
2978 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2979 			goto init_error;
2980 		}
2981 	}
2982 
2983 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2984 		QMAN_FQ_FLAG_TO_DCPORTAL;
2985 	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
2986 		/* create rx qman fq for sessions*/
2987 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2988 		if (unlikely(ret != 0)) {
2989 			DPAA_SEC_ERR("sec qman_create_fq failed");
2990 			goto init_error;
2991 		}
2992 	}
2993 
2994 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2995 	return 0;
2996 
2997 init_error:
2998 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2999 
3000 	dpaa_sec_uninit(cryptodev);
3001 	return -EFAULT;
3002 }
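
/*
 * Device bring-up above: the primary process allocates the security
 * context, initializes one egress FQ per queue pair and pre-creates
 * the full pool of dynamic-FQID ingress FQs that sessions attach to
 * later; secondary processes reuse the primary's state.
 */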
3003 
3004 static int
3005 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3006 				struct rte_dpaa_device *dpaa_dev)
3007 {
3008 	struct rte_cryptodev *cryptodev;
3009 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3010 
3011 	int retval;
3012 
3013 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3014 
3015 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3016 	if (cryptodev == NULL)
3017 		return -ENOMEM;
3018 
3019 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3020 		cryptodev->data->dev_private = rte_zmalloc_socket(
3021 					"cryptodev private structure",
3022 					sizeof(struct dpaa_sec_dev_private),
3023 					RTE_CACHE_LINE_SIZE,
3024 					rte_socket_id());
3025 
3026 		if (cryptodev->data->dev_private == NULL)
3027 			rte_panic("Cannot allocate memzone for private "
3028 					"device data");
3029 	}
3030 
3031 	dpaa_dev->crypto_dev = cryptodev;
3032 	cryptodev->device = &dpaa_dev->device;
3033 
3034 	/* init user callbacks */
3035 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3036 
3037 	/* if sec device version is not configured */
3038 	if (!rta_get_sec_era()) {
3039 		const struct device_node *caam_node;
3040 
3041 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3042 			const uint32_t *prop = of_get_property(caam_node,
3043 					"fsl,sec-era",
3044 					NULL);
3045 			if (prop) {
3046 				rta_set_sec_era(
3047 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3048 				break;
3049 			}
3050 		}
3051 	}
3052 
3053 	/* Invoke PMD device initialization function */
3054 	retval = dpaa_sec_dev_init(cryptodev);
3055 	if (retval == 0)
3056 		return 0;
3057 
3058 	/* In case of error, cleanup is done */
3059 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3060 		rte_free(cryptodev->data->dev_private);
3061 
3062 	rte_cryptodev_pmd_release_device(cryptodev);
3063 
3064 	return -ENXIO;
3065 }
3066 
3067 static int
3068 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3069 {
3070 	struct rte_cryptodev *cryptodev;
3071 	int ret;
3072 
3073 	cryptodev = dpaa_dev->crypto_dev;
3074 	if (cryptodev == NULL)
3075 		return -ENODEV;
3076 
3077 	ret = dpaa_sec_uninit(cryptodev);
3078 	if (ret)
3079 		return ret;
3080 
3081 	return rte_cryptodev_pmd_destroy(cryptodev);
3082 }
3083 
3084 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3085 	.drv_type = FSL_DPAA_CRYPTO,
3086 	.driver = {
3087 		.name = "DPAA SEC PMD"
3088 	},
3089 	.probe = cryptodev_dpaa_sec_probe,
3090 	.remove = cryptodev_dpaa_sec_remove,
3091 };
3092 
3093 static struct cryptodev_driver dpaa_sec_crypto_drv;
3094 
3095 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3096 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3097 		cryptodev_driver_id);
3098 
3099 RTE_INIT(dpaa_sec_init_log)
3100 {
3101 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
3102 	if (dpaa_logtype_sec >= 0)
3103 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
3104 }
3105