xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 53e6597643e47652af29baa24df7566fffbf8b0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2022 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <dev_driver.h>
23 #include <rte_io.h>
24 #include <rte_ip.h>
25 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
27 #include <rte_mbuf.h>
28 #include <rte_memcpy.h>
29 #include <rte_string_fns.h>
30 #include <rte_spinlock.h>
31 #include <rte_hexdump.h>
32 
33 #include <fsl_usd.h>
34 #include <fsl_qman.h>
35 #include <dpaa_of.h>
36 
37 /* RTA header files */
38 #include <desc/common.h>
39 #include <desc/algo.h>
40 #include <desc/ipsec.h>
41 #include <desc/pdcp.h>
42 #include <desc/sdap.h>
43 
44 #include <bus_dpaa_driver.h>
45 #include <dpaa_sec.h>
46 #include <dpaa_sec_event.h>
47 #include <dpaa_sec_log.h>
48 #include <dpaax_iova_table.h>
49 
50 #define DRIVER_DUMP_MODE "drv_dump_mode"
51 
52 /* DPAA_SEC_DP_DUMP levels */
53 enum dpaa_sec_dump_levels {
54 	DPAA_SEC_DP_NO_DUMP,
55 	DPAA_SEC_DP_ERR_DUMP,
56 	DPAA_SEC_DP_FULL_DUMP
57 };
58 
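/* Default data-path dump level on SEC errors: 0 = no dump, 1 = error log
 * only, 2 = full session/descriptor/mbuf dump. Expected to be overridden at
 * probe time via the DRIVER_DUMP_MODE ("drv_dump_mode") devargs.
 */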
59 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
60 
61 uint8_t dpaa_cryptodev_driver_id;
62 
63 static inline void
64 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
65 {
66 	if (!ctx->fd_status) {
67 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
68 	} else {
69 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
70 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
71 	}
72 }
73 
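/* Allocate a per-operation context (job descriptor plus SG table) from the
 * ctx_pool of the queue pair mapped to the current lcore and clear the first
 * sg_count SG entries. Returns NULL if the mempool is exhausted.
 */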
74 static inline struct dpaa_sec_op_ctx *
75 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
76 {
77 	struct dpaa_sec_op_ctx *ctx;
78 	int i, retval;
79 
80 	retval = rte_mempool_get(
81 			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
82 			(void **)(&ctx));
83 	if (!ctx || retval) {
84 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
85 		return NULL;
86 	}
87 	/*
88 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
89 	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
90 	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
91 	 * each packet; memset() is costlier than dcbz_64().
92 	 */
93 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
94 		dcbz_64(&ctx->job.sg[i]);
95 
96 	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
97 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
98 
99 	return ctx;
100 }
101 
102 static void
103 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
104 		   struct qman_fq *fq,
105 		   const struct qm_mr_entry *msg)
106 {
107 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
108 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
109 }
110 
111 /* Initialize the queue with the destination channel set to the CAAM channel
112  * so that all the packets in this queue can be dispatched to CAAM.
113  */
114 static int
115 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
116 		 uint32_t fqid_out)
117 {
118 	struct qm_mcc_initfq fq_opts;
119 	uint32_t flags;
120 	int ret = -1;
121 
122 	/* Clear FQ options */
123 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
124 
125 	flags = QMAN_INITFQ_FLAG_SCHED;
126 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
127 			  QM_INITFQ_WE_CONTEXTB;
128 
129 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
130 	fq_opts.fqd.context_b = fqid_out;
131 	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
132 	fq_opts.fqd.dest.wq = 0;
133 
134 	fq_in->cb.ern  = ern_sec_fq_handler;
135 
136 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
137 
138 	ret = qman_init_fq(fq_in, flags, &fq_opts);
139 	if (unlikely(ret != 0))
140 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
141 
142 	return ret;
143 }
144 
145 /* Frames are enqueued on in_fq and CAAM puts the crypto result into out_fq */
146 static enum qman_cb_dqrr_result
147 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
148 		  struct qman_fq *fq __always_unused,
149 		  const struct qm_dqrr_entry *dqrr)
150 {
151 	const struct qm_fd *fd;
152 	struct dpaa_sec_job *job;
153 	struct dpaa_sec_op_ctx *ctx;
154 
155 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
156 		return qman_cb_dqrr_consume;
157 
158 	fd = &dqrr->fd;
159 	/* sg is embedded in an op ctx,
160 	 * sg[0] is for output
161 	 * sg[1] for input
162 	 */
163 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
164 
165 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
166 	ctx->fd_status = fd->status;
167 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
168 		struct qm_sg_entry *sg_out;
169 		uint32_t len;
170 		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
171 				ctx->op->sym->m_src : ctx->op->sym->m_dst;
172 
173 		sg_out = &job->sg[0];
174 		hw_sg_to_cpu(sg_out);
175 		len = sg_out->length;
176 		mbuf->pkt_len = len;
177 		while (mbuf->next != NULL) {
178 			len -= mbuf->data_len;
179 			mbuf = mbuf->next;
180 		}
181 		mbuf->data_len = len;
182 	}
183 	dpaa_sec_op_ending(ctx);
184 
185 	return qman_cb_dqrr_consume;
186 }
187 
188 /* caam result is put into this queue */
189 static int
190 dpaa_sec_init_tx(struct qman_fq *fq)
191 {
192 	int ret;
193 	struct qm_mcc_initfq opts;
194 	uint32_t flags;
195 
196 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
197 		QMAN_FQ_FLAG_DYNAMIC_FQID;
198 
199 	ret = qman_create_fq(0, flags, fq);
200 	if (unlikely(ret)) {
201 		DPAA_SEC_ERR("qman_create_fq failed");
202 		return ret;
203 	}
204 
205 	memset(&opts, 0, sizeof(opts));
206 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
207 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
208 
209 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
210 
211 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
212 	fq->cb.ern  = ern_sec_fq_handler;
213 
214 	ret = qman_init_fq(fq, 0, &opts);
215 	if (unlikely(ret)) {
216 		DPAA_SEC_ERR("unable to init caam source fq!");
217 		return ret;
218 	}
219 
220 	return ret;
221 }
222 
223 static inline int is_aead(dpaa_sec_session *ses)
224 {
225 	return ((ses->cipher_alg == 0) &&
226 		(ses->auth_alg == 0) &&
227 		(ses->aead_alg != 0));
228 }
229 
230 static inline int is_encode(dpaa_sec_session *ses)
231 {
232 	return ses->dir == DIR_ENC;
233 }
234 
235 static inline int is_decode(dpaa_sec_session *ses)
236 {
237 	return ses->dir == DIR_DEC;
238 }
239 
240 #ifdef RTE_LIB_SECURITY
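/* prepare pdcp proto command block of the session */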
241 static int
242 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
243 {
244 	struct alginfo authdata = {0}, cipherdata = {0};
245 	struct sec_cdb *cdb = &ses->cdb;
246 	struct alginfo *p_authdata = NULL;
247 	int32_t shared_desc_len = 0;
248 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
249 	int swap = false;
250 #else
251 	int swap = true;
252 #endif
253 
254 	cipherdata.key = (size_t)ses->cipher_key.data;
255 	cipherdata.keylen = ses->cipher_key.length;
256 	cipherdata.key_enc_flags = 0;
257 	cipherdata.key_type = RTA_DATA_IMM;
258 	cipherdata.algtype = ses->cipher_key.alg;
259 	cipherdata.algmode = ses->cipher_key.algmode;
260 
261 	if (ses->auth_alg) {
262 		authdata.key = (size_t)ses->auth_key.data;
263 		authdata.keylen = ses->auth_key.length;
264 		authdata.key_enc_flags = 0;
265 		authdata.key_type = RTA_DATA_IMM;
266 		authdata.algtype = ses->auth_key.alg;
267 		authdata.algmode = ses->auth_key.algmode;
268 
269 		p_authdata = &authdata;
270 	}
271 
272 	if (ses->pdcp.sdap_enabled) {
273 		int nb_keys_to_inline =
274 				rta_inline_pdcp_sdap_query(authdata.algtype,
275 					cipherdata.algtype,
276 					ses->pdcp.sn_size,
277 					ses->pdcp.hfn_ovd);
278 		if (nb_keys_to_inline >= 1) {
279 			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
280 						(size_t)cipherdata.key);
281 			cipherdata.key_type = RTA_DATA_PTR;
282 		}
283 		if (nb_keys_to_inline >= 2) {
284 			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
285 						(size_t)authdata.key);
286 			authdata.key_type = RTA_DATA_PTR;
287 		}
288 	} else {
289 		if (rta_inline_pdcp_query(authdata.algtype,
290 					cipherdata.algtype,
291 					ses->pdcp.sn_size,
292 					ses->pdcp.hfn_ovd)) {
293 			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
294 						(size_t)cipherdata.key);
295 			cipherdata.key_type = RTA_DATA_PTR;
296 		}
297 	}
298 
299 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
300 		if (ses->dir == DIR_ENC)
301 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
302 					cdb->sh_desc, 1, swap,
303 					ses->pdcp.hfn,
304 					ses->pdcp.sn_size,
305 					ses->pdcp.bearer,
306 					ses->pdcp.pkt_dir,
307 					ses->pdcp.hfn_threshold,
308 					&cipherdata, &authdata);
309 		else if (ses->dir == DIR_DEC)
310 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
311 					cdb->sh_desc, 1, swap,
312 					ses->pdcp.hfn,
313 					ses->pdcp.sn_size,
314 					ses->pdcp.bearer,
315 					ses->pdcp.pkt_dir,
316 					ses->pdcp.hfn_threshold,
317 					&cipherdata, &authdata);
318 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
319 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
320 						     1, swap, &authdata);
321 	} else {
322 		if (ses->dir == DIR_ENC) {
323 			if (ses->pdcp.sdap_enabled)
324 				shared_desc_len =
325 					cnstr_shdsc_pdcp_sdap_u_plane_encap(
326 						cdb->sh_desc, 1, swap,
327 						ses->pdcp.sn_size,
328 						ses->pdcp.hfn,
329 						ses->pdcp.bearer,
330 						ses->pdcp.pkt_dir,
331 						ses->pdcp.hfn_threshold,
332 						&cipherdata, p_authdata);
333 			else
334 				shared_desc_len =
335 					cnstr_shdsc_pdcp_u_plane_encap(
336 						cdb->sh_desc, 1, swap,
337 						ses->pdcp.sn_size,
338 						ses->pdcp.hfn,
339 						ses->pdcp.bearer,
340 						ses->pdcp.pkt_dir,
341 						ses->pdcp.hfn_threshold,
342 						&cipherdata, p_authdata);
343 		} else if (ses->dir == DIR_DEC) {
344 			if (ses->pdcp.sdap_enabled)
345 				shared_desc_len =
346 					cnstr_shdsc_pdcp_sdap_u_plane_decap(
347 						cdb->sh_desc, 1, swap,
348 						ses->pdcp.sn_size,
349 						ses->pdcp.hfn,
350 						ses->pdcp.bearer,
351 						ses->pdcp.pkt_dir,
352 						ses->pdcp.hfn_threshold,
353 						&cipherdata, p_authdata);
354 			else
355 				shared_desc_len =
356 					cnstr_shdsc_pdcp_u_plane_decap(
357 						cdb->sh_desc, 1, swap,
358 						ses->pdcp.sn_size,
359 						ses->pdcp.hfn,
360 						ses->pdcp.bearer,
361 						ses->pdcp.pkt_dir,
362 						ses->pdcp.hfn_threshold,
363 						&cipherdata, p_authdata);
364 		}
365 	}
366 	return shared_desc_len;
367 }
368 
369 /* prepare ipsec proto command block of the session */
370 static int
371 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
372 {
373 	struct alginfo cipherdata = {0}, authdata = {0};
374 	struct sec_cdb *cdb = &ses->cdb;
375 	int32_t shared_desc_len = 0;
376 	int err;
377 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378 	int swap = false;
379 #else
380 	int swap = true;
381 #endif
382 
383 	cipherdata.key = (size_t)ses->cipher_key.data;
384 	cipherdata.keylen = ses->cipher_key.length;
385 	cipherdata.key_enc_flags = 0;
386 	cipherdata.key_type = RTA_DATA_IMM;
387 	cipherdata.algtype = ses->cipher_key.alg;
388 	cipherdata.algmode = ses->cipher_key.algmode;
389 
390 	if (ses->auth_key.length) {
391 		authdata.key = (size_t)ses->auth_key.data;
392 		authdata.keylen = ses->auth_key.length;
393 		authdata.key_enc_flags = 0;
394 		authdata.key_type = RTA_DATA_IMM;
395 		authdata.algtype = ses->auth_key.alg;
396 		authdata.algmode = ses->auth_key.algmode;
397 	}
398 
399 	cdb->sh_desc[0] = cipherdata.keylen;
400 	cdb->sh_desc[1] = authdata.keylen;
401 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
402 			       DESC_JOB_IO_LEN,
403 			       (unsigned int *)cdb->sh_desc,
404 			       &cdb->sh_desc[2], 2);
405 
406 	if (err < 0) {
407 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
408 		return err;
409 	}
410 	if (cdb->sh_desc[2] & 1)
411 		cipherdata.key_type = RTA_DATA_IMM;
412 	else {
413 		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
414 					(void *)(size_t)cipherdata.key);
415 		cipherdata.key_type = RTA_DATA_PTR;
416 	}
417 	if (cdb->sh_desc[2] & (1<<1))
418 		authdata.key_type = RTA_DATA_IMM;
419 	else {
420 		authdata.key = (size_t)rte_dpaa_mem_vtop(
421 					(void *)(size_t)authdata.key);
422 		authdata.key_type = RTA_DATA_PTR;
423 	}
424 
425 	cdb->sh_desc[0] = 0;
426 	cdb->sh_desc[1] = 0;
427 	cdb->sh_desc[2] = 0;
428 	if (ses->dir == DIR_ENC) {
429 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
430 				cdb->sh_desc,
431 				true, swap, SHR_SERIAL,
432 				&ses->encap_pdb,
433 				(uint8_t *)&ses->ip4_hdr,
434 				&cipherdata, &authdata);
435 	} else if (ses->dir == DIR_DEC) {
436 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
437 				cdb->sh_desc,
438 				true, swap, SHR_SERIAL,
439 				&ses->decap_pdb,
440 				&cipherdata, &authdata);
441 	}
442 	return shared_desc_len;
443 }
444 #endif
445 /* prepare command block of the session */
446 static int
447 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
448 {
449 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
450 	int32_t shared_desc_len = 0;
451 	struct sec_cdb *cdb = &ses->cdb;
452 	int err;
453 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
454 	int swap = false;
455 #else
456 	int swap = true;
457 #endif
458 
459 	memset(cdb, 0, sizeof(struct sec_cdb));
460 
461 	switch (ses->ctxt) {
462 #ifdef RTE_LIB_SECURITY
463 	case DPAA_SEC_IPSEC:
464 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
465 		break;
466 	case DPAA_SEC_PDCP:
467 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
468 		break;
469 #endif
470 	case DPAA_SEC_CIPHER:
471 		alginfo_c.key = (size_t)ses->cipher_key.data;
472 		alginfo_c.keylen = ses->cipher_key.length;
473 		alginfo_c.key_enc_flags = 0;
474 		alginfo_c.key_type = RTA_DATA_IMM;
475 		alginfo_c.algtype = ses->cipher_key.alg;
476 		alginfo_c.algmode = ses->cipher_key.algmode;
477 
478 		switch (ses->cipher_alg) {
479 		case RTE_CRYPTO_CIPHER_AES_CBC:
480 		case RTE_CRYPTO_CIPHER_3DES_CBC:
481 		case RTE_CRYPTO_CIPHER_DES_CBC:
482 		case RTE_CRYPTO_CIPHER_AES_CTR:
483 		case RTE_CRYPTO_CIPHER_3DES_CTR:
484 			shared_desc_len = cnstr_shdsc_blkcipher(
485 					cdb->sh_desc, true,
486 					swap, SHR_NEVER, &alginfo_c,
487 					ses->iv.length,
488 					ses->dir);
489 			break;
490 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
491 			shared_desc_len = cnstr_shdsc_snow_f8(
492 					cdb->sh_desc, true, swap,
493 					&alginfo_c,
494 					ses->dir);
495 			break;
496 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
497 			shared_desc_len = cnstr_shdsc_zuce(
498 					cdb->sh_desc, true, swap,
499 					&alginfo_c,
500 					ses->dir);
501 			break;
502 		default:
503 			DPAA_SEC_ERR("unsupported cipher alg %d",
504 				     ses->cipher_alg);
505 			return -ENOTSUP;
506 		}
507 		break;
508 	case DPAA_SEC_AUTH:
509 		alginfo_a.key = (size_t)ses->auth_key.data;
510 		alginfo_a.keylen = ses->auth_key.length;
511 		alginfo_a.key_enc_flags = 0;
512 		alginfo_a.key_type = RTA_DATA_IMM;
513 		alginfo_a.algtype = ses->auth_key.alg;
514 		alginfo_a.algmode = ses->auth_key.algmode;
515 		switch (ses->auth_alg) {
516 		case RTE_CRYPTO_AUTH_MD5:
517 		case RTE_CRYPTO_AUTH_SHA1:
518 		case RTE_CRYPTO_AUTH_SHA224:
519 		case RTE_CRYPTO_AUTH_SHA256:
520 		case RTE_CRYPTO_AUTH_SHA384:
521 		case RTE_CRYPTO_AUTH_SHA512:
522 			shared_desc_len = cnstr_shdsc_hash(
523 						cdb->sh_desc, true,
524 						swap, SHR_NEVER, &alginfo_a,
525 						!ses->dir,
526 						ses->digest_length);
527 			break;
528 		case RTE_CRYPTO_AUTH_MD5_HMAC:
529 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
530 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
531 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
532 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
533 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
534 			shared_desc_len = cnstr_shdsc_hmac(
535 						cdb->sh_desc, true,
536 						swap, SHR_NEVER, &alginfo_a,
537 						!ses->dir,
538 						ses->digest_length);
539 			break;
540 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
541 			shared_desc_len = cnstr_shdsc_snow_f9(
542 						cdb->sh_desc, true, swap,
543 						&alginfo_a,
544 						!ses->dir,
545 						ses->digest_length);
546 			break;
547 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
548 			shared_desc_len = cnstr_shdsc_zuca(
549 						cdb->sh_desc, true, swap,
550 						&alginfo_a,
551 						!ses->dir,
552 						ses->digest_length);
553 			break;
554 		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
555 		case RTE_CRYPTO_AUTH_AES_CMAC:
556 			shared_desc_len = cnstr_shdsc_aes_mac(
557 						cdb->sh_desc,
558 						true, swap, SHR_NEVER,
559 						&alginfo_a,
560 						!ses->dir,
561 						ses->digest_length);
562 			break;
563 		default:
564 			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
565 		}
566 		break;
567 	case DPAA_SEC_AEAD:
568 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
569 			DPAA_SEC_ERR("not supported aead alg");
570 			return -ENOTSUP;
571 		}
572 		alginfo.key = (size_t)ses->aead_key.data;
573 		alginfo.keylen = ses->aead_key.length;
574 		alginfo.key_enc_flags = 0;
575 		alginfo.key_type = RTA_DATA_IMM;
576 		alginfo.algtype = ses->aead_key.alg;
577 		alginfo.algmode = ses->aead_key.algmode;
578 
579 		if (ses->dir == DIR_ENC)
580 			shared_desc_len = cnstr_shdsc_gcm_encap(
581 					cdb->sh_desc, true, swap, SHR_NEVER,
582 					&alginfo,
583 					ses->iv.length,
584 					ses->digest_length);
585 		else
586 			shared_desc_len = cnstr_shdsc_gcm_decap(
587 					cdb->sh_desc, true, swap, SHR_NEVER,
588 					&alginfo,
589 					ses->iv.length,
590 					ses->digest_length);
591 		break;
592 	case DPAA_SEC_CIPHER_HASH:
593 		alginfo_c.key = (size_t)ses->cipher_key.data;
594 		alginfo_c.keylen = ses->cipher_key.length;
595 		alginfo_c.key_enc_flags = 0;
596 		alginfo_c.key_type = RTA_DATA_IMM;
597 		alginfo_c.algtype = ses->cipher_key.alg;
598 		alginfo_c.algmode = ses->cipher_key.algmode;
599 
600 		alginfo_a.key = (size_t)ses->auth_key.data;
601 		alginfo_a.keylen = ses->auth_key.length;
602 		alginfo_a.key_enc_flags = 0;
603 		alginfo_a.key_type = RTA_DATA_IMM;
604 		alginfo_a.algtype = ses->auth_key.alg;
605 		alginfo_a.algmode = ses->auth_key.algmode;
606 
607 		cdb->sh_desc[0] = alginfo_c.keylen;
608 		cdb->sh_desc[1] = alginfo_a.keylen;
609 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
610 				       DESC_JOB_IO_LEN,
611 				       (unsigned int *)cdb->sh_desc,
612 				       &cdb->sh_desc[2], 2);
613 
614 		if (err < 0) {
615 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
616 			return err;
617 		}
618 		if (cdb->sh_desc[2] & 1)
619 			alginfo_c.key_type = RTA_DATA_IMM;
620 		else {
621 			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
622 						(void *)(size_t)alginfo_c.key);
623 			alginfo_c.key_type = RTA_DATA_PTR;
624 		}
625 		if (cdb->sh_desc[2] & (1<<1))
626 			alginfo_a.key_type = RTA_DATA_IMM;
627 		else {
628 			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
629 						(void *)(size_t)alginfo_a.key);
630 			alginfo_a.key_type = RTA_DATA_PTR;
631 		}
632 		cdb->sh_desc[0] = 0;
633 		cdb->sh_desc[1] = 0;
634 		cdb->sh_desc[2] = 0;
635 		/* Auth_only_len is set as 0 here and it will be
636 		 * overwritten in fd for each packet.
637 		 */
638 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
639 				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
640 				ses->iv.length,
641 				ses->digest_length, ses->dir);
642 		break;
643 	case DPAA_SEC_HASH_CIPHER:
644 	default:
645 		DPAA_SEC_ERR("error: Unsupported session");
646 		return -ENOTSUP;
647 	}
648 
649 	if (shared_desc_len < 0) {
650 		DPAA_SEC_ERR("error in preparing command block");
651 		return shared_desc_len;
652 	}
653 
654 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
655 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
656 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
657 
658 	return 0;
659 }
660 
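/* Dump session parameters, the shared descriptor, the source/destination
 * mbufs and the queue state of a failed operation; called when the dump
 * level requests a full dump.
 */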
661 static void
662 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
663 {
664 	struct dpaa_sec_job *job = &ctx->job;
665 	struct rte_crypto_op *op = ctx->op;
666 	dpaa_sec_session *sess = NULL;
667 	struct sec_cdb c_cdb, *cdb;
668 	uint8_t bufsize;
669 	struct rte_crypto_sym_op *sym_op;
670 	struct qm_sg_entry sg[2];
671 
672 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
673 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
674 #ifdef RTE_LIB_SECURITY
675 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
676 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
677 #endif
678 	if (sess == NULL) {
679 		printf("session is NULL\n");
680 		goto mbuf_dump;
681 	}
682 
683 	cdb = &sess->cdb;
684 	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
685 #ifdef RTE_LIB_SECURITY
686 	printf("\nsession protocol type = %d\n", sess->proto_alg);
687 #endif
688 	printf("\n****************************************\n"
689 		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
690 		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
691 		"\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
692 		"\tCipher algmode:\t%d\n", sess->ctxt,
693 		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
694 		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
695 		(uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
696 		sess->cipher_key.algmode);
697 	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
698 			sess->cipher_key.length);
699 	rte_hexdump(stdout, "auth key", sess->auth_key.data,
700 			sess->auth_key.length);
701 	printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
702 		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
703 		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
704 		"\taead cipher text:\t%d\n",
705 		(uint64_t)sess->auth_key.length, sess->auth_key.alg,
706 		sess->auth_key.algmode,
707 		sess->iv.length, sess->iv.offset,
708 		sess->digest_length, sess->auth_only_len,
709 		sess->auth_cipher_text);
710 #ifdef RTE_LIB_SECURITY
711 	printf("PDCP session params:\n"
712 		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
713 		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
714 		"\t%d\n\thfn:\t\t%d\n"
715 		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
716 		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
717 		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
718 		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
719 		sess->pdcp.hfn_threshold);
720 #endif
721 	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
722 	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
723 	bufsize = c_cdb.sh_hdr.hi.field.idlen;
724 
725 	printf("cdb = %p\n\n", cdb);
726 	printf("Descriptor size = %d\n", bufsize);
727 	int m;
728 	for (m = 0; m < bufsize; m++)
729 		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
730 
731 	printf("\n");
732 mbuf_dump:
733 	sym_op = op->sym;
734 	if (sym_op->m_src) {
735 		printf("Source mbuf:\n");
736 		rte_pktmbuf_dump(stdout, sym_op->m_src,
737 				 sym_op->m_src->data_len);
738 	}
739 	if (sym_op->m_dst) {
740 		printf("Destination mbuf:\n");
741 		rte_pktmbuf_dump(stdout, sym_op->m_dst,
742 				 sym_op->m_dst->data_len);
743 	}
744 
745 	printf("Session address = %p\ncipher offset: %d, length: %d\n"
746 		"auth offset: %d, length: %d\naead offset: %d, length: %d\n",
747 		sym_op->session, sym_op->cipher.data.offset,
748 		sym_op->cipher.data.length,
749 		sym_op->auth.data.offset, sym_op->auth.data.length,
750 		sym_op->aead.data.offset, sym_op->aead.data.length);
751 	printf("\n");
752 
753 	printf("******************************************************\n");
754 	printf("ctx info:\n");
755 	printf("job->sg[0] output info:\n");
756 	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
757 	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
758 		"\n\tbpid = %d\n\toffset = %d\n",
759 		(uint64_t)sg[0].addr, sg[0].length, sg[0].final,
760 		sg[0].extension, sg[0].bpid, sg[0].offset);
761 	printf("\njob->sg[1] input info:\n");
762 	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
763 	hw_sg_to_cpu(&sg[1]);
764 	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
765 		"\n\tbpid = %d\n\toffset = %d\n",
766 		(uint64_t)sg[1].addr, sg[1].length, sg[1].final,
767 		sg[1].extension, sg[1].bpid, sg[1].offset);
768 
769 	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
770 	if (ctx->ctx_pool)
771 		printf("ctx pool available counts = %d\n",
772 			rte_mempool_avail_count(ctx->ctx_pool));
773 
774 	printf("\nop pool addr = %p\n", op->mempool);
775 	if (op->mempool)
776 		printf("op pool available counts = %d\n",
777 			rte_mempool_avail_count(op->mempool));
778 
779 	printf("********************************************************\n");
780 	printf("Queue data:\n");
781 	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
782 		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
783 	       "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
784 		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
785 		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
786 		qp->rx_errs, qp->tx_errs);
787 }
788 
789 /* qp is lockless, should be accessed by only one thread */
790 static int
791 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
792 {
793 	struct qman_fq *fq;
794 	unsigned int pkts = 0;
795 	int num_rx_bufs, ret;
796 	struct qm_dqrr_entry *dq;
797 	uint32_t vdqcr_flags = 0;
798 
799 	fq = &qp->outq;
800 	/*
801 	 * For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
802 	 * flag and provide the exact number of buffers.
803 	 * Without QM_VDQCR_EXACT, up to two more buffers than requested may be
804 	 * returned, so we request two fewer in that case.
805 	 */
806 	if (nb_ops < 4) {
807 		vdqcr_flags = QM_VDQCR_EXACT;
808 		num_rx_bufs = nb_ops;
809 	} else {
810 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
811 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
812 	}
813 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
814 	if (ret)
815 		return 0;
816 
817 	do {
818 		const struct qm_fd *fd;
819 		struct dpaa_sec_job *job;
820 		struct dpaa_sec_op_ctx *ctx;
821 		struct rte_crypto_op *op;
822 
823 		dq = qman_dequeue(fq);
824 		if (!dq)
825 			continue;
826 
827 		fd = &dq->fd;
828 		/* sg is embedded in an op ctx,
829 		 * sg[0] is for output
830 		 * sg[1] for input
831 		 */
832 		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
833 
834 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
835 		ctx->fd_status = fd->status;
836 		op = ctx->op;
837 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
838 			struct qm_sg_entry *sg_out;
839 			uint32_t len;
840 			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
841 						op->sym->m_src : op->sym->m_dst;
842 
843 			sg_out = &job->sg[0];
844 			hw_sg_to_cpu(sg_out);
845 			len = sg_out->length;
846 			mbuf->pkt_len = len;
847 			while (mbuf->next != NULL) {
848 				len -= mbuf->data_len;
849 				mbuf = mbuf->next;
850 			}
851 			mbuf->data_len = len;
852 		}
853 		if (!ctx->fd_status) {
854 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
855 		} else {
856 			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
857 				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
858 						  ctx->fd_status);
859 				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
860 					dpaa_sec_dump(ctx, qp);
861 			}
862 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
863 		}
864 		ops[pkts++] = op;
865 
866 		/* report op status to sym->op and then free the ctx memory */
867 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
868 
869 		qman_dqrr_consume(fq, dq);
870 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
871 
872 	return pkts;
873 }
874 
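/* Build a compound frame for an auth-only operation on a multi-segment mbuf:
 * sg[0] points at the digest buffer, sg[1] is extended into an SG list holding
 * the optional IV, the data segments and, for verification, a copy of the
 * expected digest.
 */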
875 static inline struct dpaa_sec_job *
876 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
877 {
878 	struct rte_crypto_sym_op *sym = op->sym;
879 	struct rte_mbuf *mbuf = sym->m_src;
880 	struct dpaa_sec_job *cf;
881 	struct dpaa_sec_op_ctx *ctx;
882 	struct qm_sg_entry *sg, *out_sg, *in_sg;
883 	phys_addr_t start_addr;
884 	uint8_t *old_digest, extra_segs;
885 	int data_len, data_offset;
886 
887 	data_len = sym->auth.data.length;
888 	data_offset = sym->auth.data.offset;
889 
890 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
891 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
892 		if ((data_len & 7) || (data_offset & 7)) {
893 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
894 			return NULL;
895 		}
896 
897 		data_len = data_len >> 3;
898 		data_offset = data_offset >> 3;
899 	}
900 
901 	if (is_decode(ses))
902 		extra_segs = 3;
903 	else
904 		extra_segs = 2;
905 
906 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
907 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
908 				MAX_SG_ENTRIES);
909 		return NULL;
910 	}
911 	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
912 	if (!ctx)
913 		return NULL;
914 
915 	cf = &ctx->job;
916 	ctx->op = op;
917 	old_digest = ctx->digest;
918 
919 	/* output */
920 	out_sg = &cf->sg[0];
921 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
922 	out_sg->length = ses->digest_length;
923 	cpu_to_hw_sg(out_sg);
924 
925 	/* input */
926 	in_sg = &cf->sg[1];
927 	/* need to extend the input to a compound frame */
928 	in_sg->extension = 1;
929 	in_sg->final = 1;
930 	in_sg->length = data_len;
931 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
932 
933 	/* 1st seg */
934 	sg = in_sg + 1;
935 
936 	if (ses->iv.length) {
937 		uint8_t *iv_ptr;
938 
939 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
940 						   ses->iv.offset);
941 
942 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
943 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
944 			sg->length = 12;
945 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
946 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
947 			sg->length = 8;
948 		} else {
949 			sg->length = ses->iv.length;
950 		}
951 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
952 		in_sg->length += sg->length;
953 		cpu_to_hw_sg(sg);
954 		sg++;
955 	}
956 
957 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
958 	sg->offset = data_offset;
959 
960 	if (data_len <= (mbuf->data_len - data_offset)) {
961 		sg->length = data_len;
962 	} else {
963 		sg->length = mbuf->data_len - data_offset;
964 
965 		/* remaining i/p segs */
966 		while ((data_len = data_len - sg->length) &&
967 		       (mbuf = mbuf->next)) {
968 			cpu_to_hw_sg(sg);
969 			sg++;
970 			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
971 			if (data_len > mbuf->data_len)
972 				sg->length = mbuf->data_len;
973 			else
974 				sg->length = data_len;
975 		}
976 	}
977 
978 	if (is_decode(ses)) {
979 		/* Digest verification case */
980 		cpu_to_hw_sg(sg);
981 		sg++;
982 		rte_memcpy(old_digest, sym->auth.digest.data,
983 				ses->digest_length);
984 		start_addr = rte_dpaa_mem_vtop(old_digest);
985 		qm_sg_entry_set64(sg, start_addr);
986 		sg->length = ses->digest_length;
987 		in_sg->length += ses->digest_length;
988 	}
989 	sg->final = 1;
990 	cpu_to_hw_sg(sg);
991 	cpu_to_hw_sg(in_sg);
992 
993 	return cf;
994 }
995 
996 /**
997  * packet looks like:
998  *		|<----data_len------->|
999  *    |ip_header|ah_header|icv|payload|
1000  *              ^
1001  *		|
1002  *	   mbuf->pkt.data
1003  */
1004 static inline struct dpaa_sec_job *
1005 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1006 {
1007 	struct rte_crypto_sym_op *sym = op->sym;
1008 	struct rte_mbuf *mbuf = sym->m_src;
1009 	struct dpaa_sec_job *cf;
1010 	struct dpaa_sec_op_ctx *ctx;
1011 	struct qm_sg_entry *sg, *in_sg;
1012 	rte_iova_t start_addr;
1013 	uint8_t *old_digest;
1014 	int data_len, data_offset;
1015 
1016 	data_len = sym->auth.data.length;
1017 	data_offset = sym->auth.data.offset;
1018 
1019 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1020 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1021 		if ((data_len & 7) || (data_offset & 7)) {
1022 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
1023 			return NULL;
1024 		}
1025 
1026 		data_len = data_len >> 3;
1027 		data_offset = data_offset >> 3;
1028 	}
1029 
1030 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1031 	if (!ctx)
1032 		return NULL;
1033 
1034 	cf = &ctx->job;
1035 	ctx->op = op;
1036 	old_digest = ctx->digest;
1037 
1038 	start_addr = rte_pktmbuf_iova(mbuf);
1039 	/* output */
1040 	sg = &cf->sg[0];
1041 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1042 	sg->length = ses->digest_length;
1043 	cpu_to_hw_sg(sg);
1044 
1045 	/* input */
1046 	in_sg = &cf->sg[1];
1047 	/* need to extend the input to a compound frame */
1048 	in_sg->extension = 1;
1049 	in_sg->final = 1;
1050 	in_sg->length = data_len;
1051 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1052 	sg = &cf->sg[2];
1053 
1054 	if (ses->iv.length) {
1055 		uint8_t *iv_ptr;
1056 
1057 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1058 						   ses->iv.offset);
1059 
1060 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1061 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1062 			sg->length = 12;
1063 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1064 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1065 			sg->length = 8;
1066 		} else {
1067 			sg->length = ses->iv.length;
1068 		}
1069 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
1070 		in_sg->length += sg->length;
1071 		cpu_to_hw_sg(sg);
1072 		sg++;
1073 	}
1074 
1075 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1076 	sg->offset = data_offset;
1077 	sg->length = data_len;
1078 
1079 	if (is_decode(ses)) {
1080 		/* Digest verification case */
1081 		cpu_to_hw_sg(sg);
1082 		/* hash result or digest, save digest first */
1083 		rte_memcpy(old_digest, sym->auth.digest.data,
1084 				ses->digest_length);
1085 		/* let's check digest by hw */
1086 		start_addr = rte_dpaa_mem_vtop(old_digest);
1087 		sg++;
1088 		qm_sg_entry_set64(sg, start_addr);
1089 		sg->length = ses->digest_length;
1090 		in_sg->length += ses->digest_length;
1091 	}
1092 	sg->final = 1;
1093 	cpu_to_hw_sg(sg);
1094 	cpu_to_hw_sg(in_sg);
1095 
1096 	return cf;
1097 }
1098 
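/* Build a compound frame for a cipher-only operation on multi-segment mbufs:
 * the output SG list covers the destination data segments, the input SG list
 * carries the IV followed by the source data segments.
 */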
1099 static inline struct dpaa_sec_job *
1100 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1101 {
1102 	struct rte_crypto_sym_op *sym = op->sym;
1103 	struct dpaa_sec_job *cf;
1104 	struct dpaa_sec_op_ctx *ctx;
1105 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1106 	struct rte_mbuf *mbuf;
1107 	uint8_t req_segs;
1108 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1109 			ses->iv.offset);
1110 	int data_len, data_offset;
1111 
1112 	data_len = sym->cipher.data.length;
1113 	data_offset = sym->cipher.data.offset;
1114 
1115 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1116 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1117 		if ((data_len & 7) || (data_offset & 7)) {
1118 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1119 			return NULL;
1120 		}
1121 
1122 		data_len = data_len >> 3;
1123 		data_offset = data_offset >> 3;
1124 	}
1125 
1126 	if (sym->m_dst) {
1127 		mbuf = sym->m_dst;
1128 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1129 	} else {
1130 		mbuf = sym->m_src;
1131 		req_segs = mbuf->nb_segs * 2 + 3;
1132 	}
1133 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1134 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1135 				MAX_SG_ENTRIES);
1136 		return NULL;
1137 	}
1138 
1139 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1140 	if (!ctx)
1141 		return NULL;
1142 
1143 	cf = &ctx->job;
1144 	ctx->op = op;
1145 
1146 	/* output */
1147 	out_sg = &cf->sg[0];
1148 	out_sg->extension = 1;
1149 	out_sg->length = data_len;
1150 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1151 	cpu_to_hw_sg(out_sg);
1152 
1153 	/* 1st seg */
1154 	sg = &cf->sg[2];
1155 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1156 	sg->length = mbuf->data_len - data_offset;
1157 	sg->offset = data_offset;
1158 
1159 	/* Successive segs */
1160 	mbuf = mbuf->next;
1161 	while (mbuf) {
1162 		cpu_to_hw_sg(sg);
1163 		sg++;
1164 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1165 		sg->length = mbuf->data_len;
1166 		mbuf = mbuf->next;
1167 	}
1168 	sg->final = 1;
1169 	cpu_to_hw_sg(sg);
1170 
1171 	/* input */
1172 	mbuf = sym->m_src;
1173 	in_sg = &cf->sg[1];
1174 	in_sg->extension = 1;
1175 	in_sg->final = 1;
1176 	in_sg->length = data_len + ses->iv.length;
1177 
1178 	sg++;
1179 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1180 	cpu_to_hw_sg(in_sg);
1181 
1182 	/* IV */
1183 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1184 	sg->length = ses->iv.length;
1185 	cpu_to_hw_sg(sg);
1186 
1187 	/* 1st seg */
1188 	sg++;
1189 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1190 	sg->length = mbuf->data_len - data_offset;
1191 	sg->offset = data_offset;
1192 
1193 	/* Successive segs */
1194 	mbuf = mbuf->next;
1195 	while (mbuf) {
1196 		cpu_to_hw_sg(sg);
1197 		sg++;
1198 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1199 		sg->length = mbuf->data_len;
1200 		mbuf = mbuf->next;
1201 	}
1202 	sg->final = 1;
1203 	cpu_to_hw_sg(sg);
1204 
1205 	return cf;
1206 }
1207 
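/* Build a compound frame for a cipher-only operation on contiguous mbufs:
 * the output entry points at the destination data area, the input is an SG
 * list of the IV followed by the source data.
 */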
1208 static inline struct dpaa_sec_job *
1209 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1210 {
1211 	struct rte_crypto_sym_op *sym = op->sym;
1212 	struct dpaa_sec_job *cf;
1213 	struct dpaa_sec_op_ctx *ctx;
1214 	struct qm_sg_entry *sg;
1215 	rte_iova_t src_start_addr, dst_start_addr;
1216 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1217 			ses->iv.offset);
1218 	int data_len, data_offset;
1219 
1220 	data_len = sym->cipher.data.length;
1221 	data_offset = sym->cipher.data.offset;
1222 
1223 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1224 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1225 		if ((data_len & 7) || (data_offset & 7)) {
1226 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1227 			return NULL;
1228 		}
1229 
1230 		data_len = data_len >> 3;
1231 		data_offset = data_offset >> 3;
1232 	}
1233 
1234 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1235 	if (!ctx)
1236 		return NULL;
1237 
1238 	cf = &ctx->job;
1239 	ctx->op = op;
1240 
1241 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1242 
1243 	if (sym->m_dst)
1244 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1245 	else
1246 		dst_start_addr = src_start_addr;
1247 
1248 	/* output */
1249 	sg = &cf->sg[0];
1250 	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1251 	sg->length = data_len + ses->iv.length;
1252 	cpu_to_hw_sg(sg);
1253 
1254 	/* input */
1255 	sg = &cf->sg[1];
1256 
1257 	/* need to extend the input to a compound frame */
1258 	sg->extension = 1;
1259 	sg->final = 1;
1260 	sg->length = data_len + ses->iv.length;
1261 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1262 	cpu_to_hw_sg(sg);
1263 
1264 	sg = &cf->sg[2];
1265 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1266 	sg->length = ses->iv.length;
1267 	cpu_to_hw_sg(sg);
1268 
1269 	sg++;
1270 	qm_sg_entry_set64(sg, src_start_addr + data_offset);
1271 	sg->length = data_len;
1272 	sg->final = 1;
1273 	cpu_to_hw_sg(sg);
1274 
1275 	return cf;
1276 }
1277 
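/* Build a compound frame for an AEAD (GCM) operation on multi-segment mbufs:
 * the input SG list carries the IV, optional AAD, the data segments and, for
 * decryption, the expected digest; the output covers the destination data
 * and, for encryption, the digest.
 */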
1278 static inline struct dpaa_sec_job *
1279 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1280 {
1281 	struct rte_crypto_sym_op *sym = op->sym;
1282 	struct dpaa_sec_job *cf;
1283 	struct dpaa_sec_op_ctx *ctx;
1284 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1285 	struct rte_mbuf *mbuf;
1286 	uint8_t req_segs;
1287 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1288 			ses->iv.offset);
1289 
1290 	if (sym->m_dst) {
1291 		mbuf = sym->m_dst;
1292 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1293 	} else {
1294 		mbuf = sym->m_src;
1295 		req_segs = mbuf->nb_segs * 2 + 4;
1296 	}
1297 
1298 	if (ses->auth_only_len)
1299 		req_segs++;
1300 
1301 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1302 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1303 				MAX_SG_ENTRIES);
1304 		return NULL;
1305 	}
1306 
1307 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1308 	if (!ctx)
1309 		return NULL;
1310 
1311 	cf = &ctx->job;
1312 	ctx->op = op;
1313 
1314 	rte_prefetch0(cf->sg);
1315 
1316 	/* output */
1317 	out_sg = &cf->sg[0];
1318 	out_sg->extension = 1;
1319 	if (is_encode(ses))
1320 		out_sg->length = sym->aead.data.length + ses->digest_length;
1321 	else
1322 		out_sg->length = sym->aead.data.length;
1323 
1324 	/* output sg entries */
1325 	sg = &cf->sg[2];
1326 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1327 	cpu_to_hw_sg(out_sg);
1328 
1329 	/* 1st seg */
1330 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1331 	sg->length = mbuf->data_len - sym->aead.data.offset;
1332 	sg->offset = sym->aead.data.offset;
1333 
1334 	/* Successive segs */
1335 	mbuf = mbuf->next;
1336 	while (mbuf) {
1337 		cpu_to_hw_sg(sg);
1338 		sg++;
1339 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1340 		sg->length = mbuf->data_len;
1341 		mbuf = mbuf->next;
1342 	}
1343 	sg->length -= ses->digest_length;
1344 
1345 	if (is_encode(ses)) {
1346 		cpu_to_hw_sg(sg);
1347 		/* set auth output */
1348 		sg++;
1349 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1350 		sg->length = ses->digest_length;
1351 	}
1352 	sg->final = 1;
1353 	cpu_to_hw_sg(sg);
1354 
1355 	/* input */
1356 	mbuf = sym->m_src;
1357 	in_sg = &cf->sg[1];
1358 	in_sg->extension = 1;
1359 	in_sg->final = 1;
1360 	if (is_encode(ses))
1361 		in_sg->length = ses->iv.length + sym->aead.data.length
1362 							+ ses->auth_only_len;
1363 	else
1364 		in_sg->length = ses->iv.length + sym->aead.data.length
1365 				+ ses->auth_only_len + ses->digest_length;
1366 
1367 	/* input sg entries */
1368 	sg++;
1369 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1370 	cpu_to_hw_sg(in_sg);
1371 
1372 	/* 1st seg IV */
1373 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1374 	sg->length = ses->iv.length;
1375 	cpu_to_hw_sg(sg);
1376 
1377 	/* 2nd seg auth only */
1378 	if (ses->auth_only_len) {
1379 		sg++;
1380 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1381 		sg->length = ses->auth_only_len;
1382 		cpu_to_hw_sg(sg);
1383 	}
1384 
1385 	/* 3rd seg */
1386 	sg++;
1387 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1388 	sg->length = mbuf->data_len - sym->aead.data.offset;
1389 	sg->offset = sym->aead.data.offset;
1390 
1391 	/* Successive segs */
1392 	mbuf = mbuf->next;
1393 	while (mbuf) {
1394 		cpu_to_hw_sg(sg);
1395 		sg++;
1396 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1397 		sg->length = mbuf->data_len;
1398 		mbuf = mbuf->next;
1399 	}
1400 
1401 	if (is_decode(ses)) {
1402 		cpu_to_hw_sg(sg);
1403 		sg++;
1404 		memcpy(ctx->digest, sym->aead.digest.data,
1405 			ses->digest_length);
1406 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1407 		sg->length = ses->digest_length;
1408 	}
1409 	sg->final = 1;
1410 	cpu_to_hw_sg(sg);
1411 
1412 	return cf;
1413 }
1414 
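/* Build a compound frame for an AEAD (GCM) operation on contiguous mbufs. */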
1415 static inline struct dpaa_sec_job *
1416 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1417 {
1418 	struct rte_crypto_sym_op *sym = op->sym;
1419 	struct dpaa_sec_job *cf;
1420 	struct dpaa_sec_op_ctx *ctx;
1421 	struct qm_sg_entry *sg;
1422 	uint32_t length = 0;
1423 	rte_iova_t src_start_addr, dst_start_addr;
1424 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1425 			ses->iv.offset);
1426 
1427 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1428 
1429 	if (sym->m_dst)
1430 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1431 	else
1432 		dst_start_addr = src_start_addr;
1433 
1434 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1435 	if (!ctx)
1436 		return NULL;
1437 
1438 	cf = &ctx->job;
1439 	ctx->op = op;
1440 
1441 	/* input */
1442 	rte_prefetch0(cf->sg);
1443 	sg = &cf->sg[2];
1444 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1445 	if (is_encode(ses)) {
1446 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1447 		sg->length = ses->iv.length;
1448 		length += sg->length;
1449 		cpu_to_hw_sg(sg);
1450 
1451 		sg++;
1452 		if (ses->auth_only_len) {
1453 			qm_sg_entry_set64(sg,
1454 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1455 			sg->length = ses->auth_only_len;
1456 			length += sg->length;
1457 			cpu_to_hw_sg(sg);
1458 			sg++;
1459 		}
1460 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1461 		sg->length = sym->aead.data.length;
1462 		length += sg->length;
1463 		sg->final = 1;
1464 		cpu_to_hw_sg(sg);
1465 	} else {
1466 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1467 		sg->length = ses->iv.length;
1468 		length += sg->length;
1469 		cpu_to_hw_sg(sg);
1470 
1471 		sg++;
1472 		if (ses->auth_only_len) {
1473 			qm_sg_entry_set64(sg,
1474 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1475 			sg->length = ses->auth_only_len;
1476 			length += sg->length;
1477 			cpu_to_hw_sg(sg);
1478 			sg++;
1479 		}
1480 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1481 		sg->length = sym->aead.data.length;
1482 		length += sg->length;
1483 		cpu_to_hw_sg(sg);
1484 
1485 		memcpy(ctx->digest, sym->aead.digest.data,
1486 		       ses->digest_length);
1487 		sg++;
1488 
1489 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1490 		sg->length = ses->digest_length;
1491 		length += sg->length;
1492 		sg->final = 1;
1493 		cpu_to_hw_sg(sg);
1494 	}
1495 	/* input compound frame */
1496 	cf->sg[1].length = length;
1497 	cf->sg[1].extension = 1;
1498 	cf->sg[1].final = 1;
1499 	cpu_to_hw_sg(&cf->sg[1]);
1500 
1501 	/* output */
1502 	sg++;
1503 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1504 	qm_sg_entry_set64(sg,
1505 		dst_start_addr + sym->aead.data.offset);
1506 	sg->length = sym->aead.data.length;
1507 	length = sg->length;
1508 	if (is_encode(ses)) {
1509 		cpu_to_hw_sg(sg);
1510 		/* set auth output */
1511 		sg++;
1512 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1513 		sg->length = ses->digest_length;
1514 		length += sg->length;
1515 	}
1516 	sg->final = 1;
1517 	cpu_to_hw_sg(sg);
1518 
1519 	/* output compound frame */
1520 	cf->sg[0].length = length;
1521 	cf->sg[0].extension = 1;
1522 	cpu_to_hw_sg(&cf->sg[0]);
1523 
1524 	return cf;
1525 }
1526 
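/* Build a compound frame for a chained cipher+auth operation on multi-segment
 * mbufs; the per-packet auth-only length is conveyed separately through the
 * FD, as noted in dpaa_sec_prep_cdb().
 */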
1527 static inline struct dpaa_sec_job *
1528 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1529 {
1530 	struct rte_crypto_sym_op *sym = op->sym;
1531 	struct dpaa_sec_job *cf;
1532 	struct dpaa_sec_op_ctx *ctx;
1533 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1534 	struct rte_mbuf *mbuf;
1535 	uint8_t req_segs;
1536 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1537 			ses->iv.offset);
1538 
1539 	if (sym->m_dst) {
1540 		mbuf = sym->m_dst;
1541 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1542 	} else {
1543 		mbuf = sym->m_src;
1544 		req_segs = mbuf->nb_segs * 2 + 4;
1545 	}
1546 
1547 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1548 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1549 				MAX_SG_ENTRIES);
1550 		return NULL;
1551 	}
1552 
1553 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1554 	if (!ctx)
1555 		return NULL;
1556 
1557 	cf = &ctx->job;
1558 	ctx->op = op;
1559 
1560 	rte_prefetch0(cf->sg);
1561 
1562 	/* output */
1563 	out_sg = &cf->sg[0];
1564 	out_sg->extension = 1;
1565 	if (is_encode(ses))
1566 		out_sg->length = sym->auth.data.length + ses->digest_length;
1567 	else
1568 		out_sg->length = sym->auth.data.length;
1569 
1570 	/* output sg entries */
1571 	sg = &cf->sg[2];
1572 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1573 	cpu_to_hw_sg(out_sg);
1574 
1575 	/* 1st seg */
1576 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1577 	sg->length = mbuf->data_len - sym->auth.data.offset;
1578 	sg->offset = sym->auth.data.offset;
1579 
1580 	/* Successive segs */
1581 	mbuf = mbuf->next;
1582 	while (mbuf) {
1583 		cpu_to_hw_sg(sg);
1584 		sg++;
1585 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1586 		sg->length = mbuf->data_len;
1587 		mbuf = mbuf->next;
1588 	}
1589 	sg->length -= ses->digest_length;
1590 
1591 	if (is_encode(ses)) {
1592 		cpu_to_hw_sg(sg);
1593 		/* set auth output */
1594 		sg++;
1595 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1596 		sg->length = ses->digest_length;
1597 	}
1598 	sg->final = 1;
1599 	cpu_to_hw_sg(sg);
1600 
1601 	/* input */
1602 	mbuf = sym->m_src;
1603 	in_sg = &cf->sg[1];
1604 	in_sg->extension = 1;
1605 	in_sg->final = 1;
1606 	if (is_encode(ses))
1607 		in_sg->length = ses->iv.length + sym->auth.data.length;
1608 	else
1609 		in_sg->length = ses->iv.length + sym->auth.data.length
1610 						+ ses->digest_length;
1611 
1612 	/* input sg entries */
1613 	sg++;
1614 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1615 	cpu_to_hw_sg(in_sg);
1616 
1617 	/* 1st seg IV */
1618 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1619 	sg->length = ses->iv.length;
1620 	cpu_to_hw_sg(sg);
1621 
1622 	/* 2nd seg */
1623 	sg++;
1624 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1625 	sg->length = mbuf->data_len - sym->auth.data.offset;
1626 	sg->offset = sym->auth.data.offset;
1627 
1628 	/* Successive segs */
1629 	mbuf = mbuf->next;
1630 	while (mbuf) {
1631 		cpu_to_hw_sg(sg);
1632 		sg++;
1633 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1634 		sg->length = mbuf->data_len;
1635 		mbuf = mbuf->next;
1636 	}
1637 
1638 	sg->length -= ses->digest_length;
1639 	if (is_decode(ses)) {
1640 		cpu_to_hw_sg(sg);
1641 		sg++;
1642 		memcpy(ctx->digest, sym->auth.digest.data,
1643 			ses->digest_length);
1644 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1645 		sg->length = ses->digest_length;
1646 	}
1647 	sg->final = 1;
1648 	cpu_to_hw_sg(sg);
1649 
1650 	return cf;
1651 }
1652 
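/* Build a compound frame for a chained cipher+auth operation on contiguous
 * mbufs.
 */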
1653 static inline struct dpaa_sec_job *
1654 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1655 {
1656 	struct rte_crypto_sym_op *sym = op->sym;
1657 	struct dpaa_sec_job *cf;
1658 	struct dpaa_sec_op_ctx *ctx;
1659 	struct qm_sg_entry *sg;
1660 	rte_iova_t src_start_addr, dst_start_addr;
1661 	uint32_t length = 0;
1662 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1663 			ses->iv.offset);
1664 
1665 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1666 	if (sym->m_dst)
1667 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1668 	else
1669 		dst_start_addr = src_start_addr;
1670 
1671 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1672 	if (!ctx)
1673 		return NULL;
1674 
1675 	cf = &ctx->job;
1676 	ctx->op = op;
1677 
1678 	/* input */
1679 	rte_prefetch0(cf->sg);
1680 	sg = &cf->sg[2];
1681 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1682 	if (is_encode(ses)) {
1683 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1684 		sg->length = ses->iv.length;
1685 		length += sg->length;
1686 		cpu_to_hw_sg(sg);
1687 
1688 		sg++;
1689 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1690 		sg->length = sym->auth.data.length;
1691 		length += sg->length;
1692 		sg->final = 1;
1693 		cpu_to_hw_sg(sg);
1694 	} else {
1695 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1696 		sg->length = ses->iv.length;
1697 		length += sg->length;
1698 		cpu_to_hw_sg(sg);
1699 
1700 		sg++;
1701 
1702 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1703 		sg->length = sym->auth.data.length;
1704 		length += sg->length;
1705 		cpu_to_hw_sg(sg);
1706 
1707 		memcpy(ctx->digest, sym->auth.digest.data,
1708 		       ses->digest_length);
1709 		sg++;
1710 
1711 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1712 		sg->length = ses->digest_length;
1713 		length += sg->length;
1714 		sg->final = 1;
1715 		cpu_to_hw_sg(sg);
1716 	}
1717 	/* input compound frame */
1718 	cf->sg[1].length = length;
1719 	cf->sg[1].extension = 1;
1720 	cf->sg[1].final = 1;
1721 	cpu_to_hw_sg(&cf->sg[1]);
1722 
1723 	/* output */
1724 	sg++;
1725 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1726 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1727 	sg->length = sym->cipher.data.length;
1728 	length = sg->length;
1729 	if (is_encode(ses)) {
1730 		cpu_to_hw_sg(sg);
1731 		/* set auth output */
1732 		sg++;
1733 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1734 		sg->length = ses->digest_length;
1735 		length += sg->length;
1736 	}
1737 	sg->final = 1;
1738 	cpu_to_hw_sg(sg);
1739 
1740 	/* output compound frame */
1741 	cf->sg[0].length = length;
1742 	cf->sg[0].extension = 1;
1743 	cpu_to_hw_sg(&cf->sg[0]);
1744 
1745 	return cf;
1746 }
1747 
1748 #ifdef RTE_LIB_SECURITY
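/* Build a compound frame for protocol (IPsec/PDCP) offload on contiguous
 * mbufs: the whole packet is passed as input and the full destination buffer
 * is made available for the protocol-processed output.
 */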
1749 static inline struct dpaa_sec_job *
1750 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1751 {
1752 	struct rte_crypto_sym_op *sym = op->sym;
1753 	struct dpaa_sec_job *cf;
1754 	struct dpaa_sec_op_ctx *ctx;
1755 	struct qm_sg_entry *sg;
1756 	phys_addr_t src_start_addr, dst_start_addr;
1757 
1758 	ctx = dpaa_sec_alloc_ctx(ses, 2);
1759 	if (!ctx)
1760 		return NULL;
1761 	cf = &ctx->job;
1762 	ctx->op = op;
1763 
1764 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1765 
1766 	if (sym->m_dst)
1767 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1768 	else
1769 		dst_start_addr = src_start_addr;
1770 
1771 	/* input */
1772 	sg = &cf->sg[1];
1773 	qm_sg_entry_set64(sg, src_start_addr);
1774 	sg->length = sym->m_src->pkt_len;
1775 	sg->final = 1;
1776 	cpu_to_hw_sg(sg);
1777 
1778 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1779 	/* output */
1780 	sg = &cf->sg[0];
1781 	qm_sg_entry_set64(sg, dst_start_addr);
1782 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1783 	cpu_to_hw_sg(sg);
1784 
1785 	return cf;
1786 }
1787 
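/* Build a compound frame for protocol (IPsec/PDCP) offload on multi-segment
 * mbufs.
 */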
1788 static inline struct dpaa_sec_job *
1789 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1790 {
1791 	struct rte_crypto_sym_op *sym = op->sym;
1792 	struct dpaa_sec_job *cf;
1793 	struct dpaa_sec_op_ctx *ctx;
1794 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1795 	struct rte_mbuf *mbuf;
1796 	uint8_t req_segs;
1797 	uint32_t in_len = 0, out_len = 0;
1798 
1799 	if (sym->m_dst)
1800 		mbuf = sym->m_dst;
1801 	else
1802 		mbuf = sym->m_src;
1803 
1804 	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1805 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1806 		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1807 				MAX_SG_ENTRIES);
1808 		return NULL;
1809 	}
1810 
1811 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1812 	if (!ctx)
1813 		return NULL;
1814 	cf = &ctx->job;
1815 	ctx->op = op;
1816 	/* output */
1817 	out_sg = &cf->sg[0];
1818 	out_sg->extension = 1;
1819 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1820 
1821 	/* 1st seg */
1822 	sg = &cf->sg[2];
1823 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1824 	sg->offset = 0;
1825 
1826 	/* Successive segs */
1827 	while (mbuf->next) {
1828 		sg->length = mbuf->data_len;
1829 		out_len += sg->length;
1830 		mbuf = mbuf->next;
1831 		cpu_to_hw_sg(sg);
1832 		sg++;
1833 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1834 		sg->offset = 0;
1835 	}
1836 	sg->length = mbuf->buf_len - mbuf->data_off;
1837 	out_len += sg->length;
1838 	sg->final = 1;
1839 	cpu_to_hw_sg(sg);
1840 
1841 	out_sg->length = out_len;
1842 	cpu_to_hw_sg(out_sg);
1843 
1844 	/* input */
1845 	mbuf = sym->m_src;
1846 	in_sg = &cf->sg[1];
1847 	in_sg->extension = 1;
1848 	in_sg->final = 1;
1849 	in_len = mbuf->data_len;
1850 
1851 	sg++;
1852 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1853 
1854 	/* 1st seg */
1855 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1856 	sg->length = mbuf->data_len;
1857 	sg->offset = 0;
1858 
1859 	/* Successive segs */
1860 	mbuf = mbuf->next;
1861 	while (mbuf) {
1862 		cpu_to_hw_sg(sg);
1863 		sg++;
1864 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1865 		sg->length = mbuf->data_len;
1866 		sg->offset = 0;
1867 		in_len += sg->length;
1868 		mbuf = mbuf->next;
1869 	}
1870 	sg->final = 1;
1871 	cpu_to_hw_sg(sg);
1872 
1873 	in_sg->length = in_len;
1874 	cpu_to_hw_sg(in_sg);
1875 
1876 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1877 
1878 	return cf;
1879 }
1880 #endif
1881 
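/* Enqueue a burst of crypto ops: for each op, build a job according to the
 * session context type (proto, auth, cipher, AEAD or cipher+auth) and prepare
 * its frame descriptor for enqueue on the SEC input queue attached to the
 * session for the current lcore.
 */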
1882 static uint16_t
1883 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1884 		       uint16_t nb_ops)
1885 {
1886 	/* Function to transmit the frames to the given device and queue pair */
1887 	uint32_t loop;
1888 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1889 	uint16_t num_tx = 0;
1890 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1891 	uint32_t frames_to_send;
1892 	struct rte_crypto_op *op;
1893 	struct dpaa_sec_job *cf;
1894 	dpaa_sec_session *ses;
1895 	uint16_t auth_hdr_len, auth_tail_len;
1896 	uint32_t index, flags[DPAA_SEC_BURST] = {0};
1897 	struct qman_fq *inq[DPAA_SEC_BURST];
1898 
1899 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1900 		if (rte_dpaa_portal_init((void *)0)) {
1901 			DPAA_SEC_ERR("Failure in affining portal");
1902 			return 0;
1903 		}
1904 	}
1905 
1906 	while (nb_ops) {
1907 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1908 				DPAA_SEC_BURST : nb_ops;
1909 		for (loop = 0; loop < frames_to_send; loop++) {
1910 			op = *(ops++);
1911 			if (*dpaa_seqn(op->sym->m_src) != 0) {
1912 				index = *dpaa_seqn(op->sym->m_src) - 1;
1913 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1914 					/* QM_EQCR_DCA_IDXMASK = 0x0f */
1915 					flags[loop] = ((index & 0x0f) << 8);
1916 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1917 					DPAA_PER_LCORE_DQRR_SIZE--;
1918 					DPAA_PER_LCORE_DQRR_HELD &=
1919 								~(1 << index);
1920 				}
1921 			}
1922 
1923 			switch (op->sess_type) {
1924 			case RTE_CRYPTO_OP_WITH_SESSION:
1925 				ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1926 				break;
1927 #ifdef RTE_LIB_SECURITY
1928 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1929 				ses = SECURITY_GET_SESS_PRIV(op->sym->session);
1930 				break;
1931 #endif
1932 			default:
1933 				DPAA_SEC_DP_ERR(
1934 					"sessionless crypto op not supported");
1935 				frames_to_send = loop;
1936 				nb_ops = loop;
1937 				goto send_pkts;
1938 			}
1939 
1940 			if (!ses) {
1941 				DPAA_SEC_DP_ERR("session not available");
1942 				frames_to_send = loop;
1943 				nb_ops = loop;
1944 				goto send_pkts;
1945 			}
1946 
1947 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1948 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1949 					frames_to_send = loop;
1950 					nb_ops = loop;
1951 					goto send_pkts;
1952 				}
1953 			} else if (unlikely(ses->qp[rte_lcore_id() %
1954 						MAX_DPAA_CORES] != qp)) {
1955 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1956 					" New qp = %p\n",
1957 					ses->qp[rte_lcore_id() %
1958 					MAX_DPAA_CORES], qp);
1959 				frames_to_send = loop;
1960 				nb_ops = loop;
1961 				goto send_pkts;
1962 			}
1963 
1964 			auth_hdr_len = op->sym->auth.data.length -
1965 						op->sym->cipher.data.length;
1966 			auth_tail_len = 0;
1967 
1968 			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1969 				  ((op->sym->m_dst == NULL) ||
1970 				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1971 				switch (ses->ctxt) {
1972 #ifdef RTE_LIB_SECURITY
1973 				case DPAA_SEC_PDCP:
1974 				case DPAA_SEC_IPSEC:
1975 					cf = build_proto(op, ses);
1976 					break;
1977 #endif
1978 				case DPAA_SEC_AUTH:
1979 					cf = build_auth_only(op, ses);
1980 					break;
1981 				case DPAA_SEC_CIPHER:
1982 					cf = build_cipher_only(op, ses);
1983 					break;
1984 				case DPAA_SEC_AEAD:
1985 					cf = build_cipher_auth_gcm(op, ses);
1986 					auth_hdr_len = ses->auth_only_len;
1987 					break;
1988 				case DPAA_SEC_CIPHER_HASH:
1989 					auth_hdr_len =
1990 						op->sym->cipher.data.offset
1991 						- op->sym->auth.data.offset;
1992 					auth_tail_len =
1993 						op->sym->auth.data.length
1994 						- op->sym->cipher.data.length
1995 						- auth_hdr_len;
1996 					cf = build_cipher_auth(op, ses);
1997 					break;
1998 				default:
1999 					DPAA_SEC_DP_ERR("not supported ops");
2000 					frames_to_send = loop;
2001 					nb_ops = loop;
2002 					goto send_pkts;
2003 				}
2004 			} else {
2005 				switch (ses->ctxt) {
2006 #ifdef RTE_LIB_SECURITY
2007 				case DPAA_SEC_PDCP:
2008 				case DPAA_SEC_IPSEC:
2009 					cf = build_proto_sg(op, ses);
2010 					break;
2011 #endif
2012 				case DPAA_SEC_AUTH:
2013 					cf = build_auth_only_sg(op, ses);
2014 					break;
2015 				case DPAA_SEC_CIPHER:
2016 					cf = build_cipher_only_sg(op, ses);
2017 					break;
2018 				case DPAA_SEC_AEAD:
2019 					cf = build_cipher_auth_gcm_sg(op, ses);
2020 					auth_hdr_len = ses->auth_only_len;
2021 					break;
2022 				case DPAA_SEC_CIPHER_HASH:
2023 					auth_hdr_len =
2024 						op->sym->cipher.data.offset
2025 						- op->sym->auth.data.offset;
2026 					auth_tail_len =
2027 						op->sym->auth.data.length
2028 						- op->sym->cipher.data.length
2029 						- auth_hdr_len;
2030 					cf = build_cipher_auth_sg(op, ses);
2031 					break;
2032 				default:
2033 					DPAA_SEC_DP_ERR("not supported ops");
2034 					frames_to_send = loop;
2035 					nb_ops = loop;
2036 					goto send_pkts;
2037 				}
2038 			}
2039 			if (unlikely(!cf)) {
2040 				frames_to_send = loop;
2041 				nb_ops = loop;
2042 				goto send_pkts;
2043 			}
2044 
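			/*
			 * Build a compound frame descriptor: the FD points at
			 * the job's SG table, where sg[0] describes the output
			 * buffer and sg[1] the input buffer.
			 */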
2045 			fd = &fds[loop];
2046 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2047 			fd->opaque_addr = 0;
2048 			fd->cmd = 0;
2049 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2050 			fd->_format1 = qm_fd_compound;
2051 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
2052 
2053 			/* Auth_only_len is set to 0 in the descriptor and is
2054 			 * overwritten here through fd.cmd, which updates the
2055 			 * DPOVRD register.
2056 			 */
2057 			if (auth_hdr_len || auth_tail_len) {
2058 				fd->cmd = 0x80000000;
2059 				fd->cmd |=
2060 					((auth_tail_len << 16) | auth_hdr_len);
2061 			}
2062 
2063 #ifdef RTE_LIB_SECURITY
2064 			/* For PDCP, the per-packet HFN is read from the crypto op
2065 			 * at hfn_ovd_offset, i.e. from the op private data after the sym_op.
2066 			 */
2067 			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2068 				fd->cmd = 0x80000000 |
2069 					*((uint32_t *)((uint8_t *)op +
2070 					ses->pdcp.hfn_ovd_offset));
2071 				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2072 					*((uint32_t *)((uint8_t *)op +
2073 					ses->pdcp.hfn_ovd_offset)),
2074 					ses->pdcp.hfn_ovd);
2075 			}
2076 #endif
2077 		}
2078 send_pkts:
2079 		loop = 0;
2080 		while (loop < frames_to_send) {
2081 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2082 					&flags[loop], frames_to_send - loop);
2083 		}
2084 		nb_ops -= frames_to_send;
2085 		num_tx += frames_to_send;
2086 	}
2087 
2088 	dpaa_qp->tx_pkts += num_tx;
2089 	dpaa_qp->tx_errs += nb_ops - num_tx;
2090 
2091 	return num_tx;
2092 }
2093 
2094 static uint16_t
2095 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2096 		       uint16_t nb_ops)
2097 {
2098 	uint16_t num_rx;
2099 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2100 
2101 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2102 		if (rte_dpaa_portal_init((void *)0)) {
2103 			DPAA_SEC_ERR("Failure in affining portal");
2104 			return 0;
2105 		}
2106 	}
2107 
2108 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2109 
2110 	dpaa_qp->rx_pkts += num_rx;
2111 	dpaa_qp->rx_errs += nb_ops - num_rx;
2112 
2113 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2114 
2115 	return num_rx;
2116 }
2117 
2118 /** Release queue pair */
2119 static int
2120 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2121 			    uint16_t qp_id)
2122 {
2123 	struct dpaa_sec_dev_private *internals;
2124 	struct dpaa_sec_qp *qp = NULL;
2125 
2126 	PMD_INIT_FUNC_TRACE();
2127 
2128 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2129 
2130 	internals = dev->data->dev_private;
2131 	if (qp_id >= internals->max_nb_queue_pairs) {
2132 		DPAA_SEC_ERR("Max supported qpid %d",
2133 			     internals->max_nb_queue_pairs);
2134 		return -EINVAL;
2135 	}
2136 
2137 	qp = &internals->qps[qp_id];
2138 	rte_mempool_free(qp->ctx_pool);
2139 	qp->internals = NULL;
2140 	dev->data->queue_pairs[qp_id] = NULL;
2141 
2142 	return 0;
2143 }
2144 
2145 /** Setup a queue pair */
2146 static int
2147 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2148 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2149 		__rte_unused int socket_id)
2150 {
2151 	struct dpaa_sec_dev_private *internals;
2152 	struct dpaa_sec_qp *qp = NULL;
2153 	char str[20];
2154 
2155 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2156 
2157 	internals = dev->data->dev_private;
2158 	if (qp_id >= internals->max_nb_queue_pairs) {
2159 		DPAA_SEC_ERR("Max supported qpid %d",
2160 			     internals->max_nb_queue_pairs);
2161 		return -EINVAL;
2162 	}
2163 
2164 	qp = &internals->qps[qp_id];
2165 	qp->internals = internals;
2166 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2167 			dev->data->dev_id, qp_id);
2168 	if (!qp->ctx_pool) {
2169 		qp->ctx_pool = rte_mempool_create((const char *)str,
2170 							CTX_POOL_NUM_BUFS,
2171 							CTX_POOL_BUF_SIZE,
2172 							CTX_POOL_CACHE_SIZE, 0,
2173 							NULL, NULL, NULL, NULL,
2174 							SOCKET_ID_ANY, 0);
2175 		if (!qp->ctx_pool) {
2176 			DPAA_SEC_ERR("%s create failed\n", str);
2177 			return -ENOMEM;
2178 		}
2179 	} else
2180 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2181 				dev->data->dev_id, qp_id);
2182 	dev->data->queue_pairs[qp_id] = qp;
2183 
2184 	return 0;
2185 }
2186 
2187 /** Returns the size of session structure */
2188 static unsigned int
2189 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2190 {
2191 	PMD_INIT_FUNC_TRACE();
2192 
2193 	return sizeof(dpaa_sec_session);
2194 }
2195 
2196 static int
2197 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2198 		     struct rte_crypto_sym_xform *xform,
2199 		     dpaa_sec_session *session)
2200 {
2201 	session->ctxt = DPAA_SEC_CIPHER;
2202 	session->cipher_alg = xform->cipher.algo;
2203 	session->iv.length = xform->cipher.iv.length;
2204 	session->iv.offset = xform->cipher.iv.offset;
2205 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2206 					       RTE_CACHE_LINE_SIZE);
2207 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2208 		DPAA_SEC_ERR("No Memory for cipher key");
2209 		return -ENOMEM;
2210 	}
2211 	session->cipher_key.length = xform->cipher.key.length;
2212 
2213 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2214 	       xform->cipher.key.length);
2215 	switch (xform->cipher.algo) {
2216 	case RTE_CRYPTO_CIPHER_AES_CBC:
2217 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2218 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2219 		break;
2220 	case RTE_CRYPTO_CIPHER_DES_CBC:
2221 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2222 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2223 		break;
2224 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2225 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2226 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2227 		break;
2228 	case RTE_CRYPTO_CIPHER_AES_CTR:
2229 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2230 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2231 		break;
2232 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2233 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2234 		break;
2235 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2236 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2237 		break;
2238 	default:
2239 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2240 			      xform->cipher.algo);
2241 		return -ENOTSUP;
2242 	}
2243 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2244 			DIR_ENC : DIR_DEC;
2245 
2246 	return 0;
2247 }
2248 
2249 static int
2250 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2251 		   struct rte_crypto_sym_xform *xform,
2252 		   dpaa_sec_session *session)
2253 {
2254 	session->ctxt = DPAA_SEC_AUTH;
2255 	session->auth_alg = xform->auth.algo;
2256 	session->auth_key.length = xform->auth.key.length;
2257 	if (xform->auth.key.length) {
2258 		session->auth_key.data =
2259 				rte_zmalloc(NULL, xform->auth.key.length,
2260 					     RTE_CACHE_LINE_SIZE);
2261 		if (session->auth_key.data == NULL) {
2262 			DPAA_SEC_ERR("No Memory for auth key");
2263 			return -ENOMEM;
2264 		}
2265 		memcpy(session->auth_key.data, xform->auth.key.data,
2266 				xform->auth.key.length);
2267 
2268 	}
2269 	session->digest_length = xform->auth.digest_length;
2270 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2271 		session->iv.offset = xform->auth.iv.offset;
2272 		session->iv.length = xform->auth.iv.length;
2273 	}
2274 
2275 	switch (xform->auth.algo) {
2276 	case RTE_CRYPTO_AUTH_SHA1:
2277 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2278 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2279 		break;
2280 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2281 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2282 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2283 		break;
2284 	case RTE_CRYPTO_AUTH_MD5:
2285 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2286 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2287 		break;
2288 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2289 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2290 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2291 		break;
2292 	case RTE_CRYPTO_AUTH_SHA224:
2293 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2294 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2295 		break;
2296 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2297 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2298 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2299 		break;
2300 	case RTE_CRYPTO_AUTH_SHA256:
2301 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2302 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2303 		break;
2304 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2305 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2306 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2307 		break;
2308 	case RTE_CRYPTO_AUTH_SHA384:
2309 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2310 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2311 		break;
2312 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2313 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2314 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2315 		break;
2316 	case RTE_CRYPTO_AUTH_SHA512:
2317 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2318 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2319 		break;
2320 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2321 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2322 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2323 		break;
2324 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2325 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2326 		session->auth_key.algmode = OP_ALG_AAI_F9;
2327 		break;
2328 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2329 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2330 		session->auth_key.algmode = OP_ALG_AAI_F9;
2331 		break;
2332 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2333 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2334 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2335 		break;
2336 	case RTE_CRYPTO_AUTH_AES_CMAC:
2337 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2338 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2339 		break;
2340 	default:
2341 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2342 			      xform->auth.algo);
2343 		return -ENOTSUP;
2344 	}
2345 
2346 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2347 			DIR_ENC : DIR_DEC;
2348 
2349 	return 0;
2350 }
2351 
2352 static int
2353 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2354 		   struct rte_crypto_sym_xform *xform,
2355 		   dpaa_sec_session *session)
2356 {
2357 
2358 	struct rte_crypto_cipher_xform *cipher_xform;
2359 	struct rte_crypto_auth_xform *auth_xform;
2360 
2361 	session->ctxt = DPAA_SEC_CIPHER_HASH;
2362 	if (session->auth_cipher_text) {
2363 		cipher_xform = &xform->cipher;
2364 		auth_xform = &xform->next->auth;
2365 	} else {
2366 		cipher_xform = &xform->next->cipher;
2367 		auth_xform = &xform->auth;
2368 	}
2369 
2370 	/* Set IV parameters */
2371 	session->iv.offset = cipher_xform->iv.offset;
2372 	session->iv.length = cipher_xform->iv.length;
2373 
2374 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2375 					       RTE_CACHE_LINE_SIZE);
2376 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2377 		DPAA_SEC_ERR("No Memory for cipher key");
2378 		return -ENOMEM;
2379 	}
2380 	session->cipher_key.length = cipher_xform->key.length;
2381 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2382 					     RTE_CACHE_LINE_SIZE);
2383 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2384 		DPAA_SEC_ERR("No Memory for auth key");
2385 		return -ENOMEM;
2386 	}
2387 	session->auth_key.length = auth_xform->key.length;
2388 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2389 	       cipher_xform->key.length);
2390 	memcpy(session->auth_key.data, auth_xform->key.data,
2391 	       auth_xform->key.length);
2392 
2393 	session->digest_length = auth_xform->digest_length;
2394 	session->auth_alg = auth_xform->algo;
2395 
2396 	switch (auth_xform->algo) {
2397 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2398 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2399 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2400 		break;
2401 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2402 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2403 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2404 		break;
2405 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2406 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2407 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2408 		break;
2409 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2410 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2411 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2412 		break;
2413 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2414 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2415 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2416 		break;
2417 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2418 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2419 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2420 		break;
2421 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2422 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2423 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2424 		break;
2425 	case RTE_CRYPTO_AUTH_AES_CMAC:
2426 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2427 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2428 		break;
2429 	default:
2430 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2431 			      auth_xform->algo);
2432 		return -ENOTSUP;
2433 	}
2434 
2435 	session->cipher_alg = cipher_xform->algo;
2436 
2437 	switch (cipher_xform->algo) {
2438 	case RTE_CRYPTO_CIPHER_AES_CBC:
2439 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2440 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2441 		break;
2442 	case RTE_CRYPTO_CIPHER_DES_CBC:
2443 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2444 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2445 		break;
2446 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2447 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2448 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2449 		break;
2450 	case RTE_CRYPTO_CIPHER_AES_CTR:
2451 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2452 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2453 		break;
2454 	default:
2455 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2456 			      cipher_xform->algo);
2457 		return -ENOTSUP;
2458 	}
2459 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2460 				DIR_ENC : DIR_DEC;
2461 	return 0;
2462 }
2463 
2464 static int
2465 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2466 		   struct rte_crypto_sym_xform *xform,
2467 		   dpaa_sec_session *session)
2468 {
2469 	session->aead_alg = xform->aead.algo;
2470 	session->ctxt = DPAA_SEC_AEAD;
2471 	session->iv.length = xform->aead.iv.length;
2472 	session->iv.offset = xform->aead.iv.offset;
2473 	session->auth_only_len = xform->aead.aad_length;
2474 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2475 					     RTE_CACHE_LINE_SIZE);
2476 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2477 		DPAA_SEC_ERR("No Memory for aead key\n");
2478 		return -ENOMEM;
2479 	}
2480 	session->aead_key.length = xform->aead.key.length;
2481 	session->digest_length = xform->aead.digest_length;
2482 
2483 	memcpy(session->aead_key.data, xform->aead.key.data,
2484 	       xform->aead.key.length);
2485 
2486 	switch (session->aead_alg) {
2487 	case RTE_CRYPTO_AEAD_AES_GCM:
2488 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2489 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2490 		break;
2491 	default:
2492 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2493 		return -ENOTSUP;
2494 	}
2495 
2496 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2497 			DIR_ENC : DIR_DEC;
2498 
2499 	return 0;
2500 }
2501 
2502 static struct qman_fq *
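/*
 * Reserve a free RX frame queue from the device-wide pool; each session
 * attaches one such queue per DPAA core.
 */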
2503 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2504 {
2505 	unsigned int i;
2506 
2507 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2508 		if (qi->inq_attach[i] == 0) {
2509 			qi->inq_attach[i] = 1;
2510 			return &qi->inq[i];
2511 		}
2512 	}
2513 	DPAA_SEC_WARN("All sessions in use (%u)", qi->max_nb_sessions);
2514 
2515 	return NULL;
2516 }
2517 
2518 static int
2519 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2520 {
2521 	unsigned int i;
2522 
2523 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2524 		if (&qi->inq[i] == fq) {
2525 			if (qman_retire_fq(fq, NULL) != 0)
2526 				DPAA_SEC_DEBUG("Queue is not retired\n");
2527 			qman_oos_fq(fq);
2528 			qi->inq_attach[i] = 0;
2529 			return 0;
2530 		}
2531 	}
2532 	return -1;
2533 }
2534 
2535 int
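/*
 * Bind a session to a queue pair: initialise the session's per-lcore RX
 * frame queue so that frames referencing this session's CDB are dispatched
 * to SEC and the results come back on the queue pair's out FQ.
 */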
2536 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2537 {
2538 	int ret;
2539 
2540 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2541 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2542 		ret = rte_dpaa_portal_init((void *)0);
2543 		if (ret) {
2544 			DPAA_SEC_ERR("Failure in affining portal");
2545 			return ret;
2546 		}
2547 	}
2548 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2549 			       rte_dpaa_mem_vtop(&sess->cdb),
2550 			       qman_fq_fqid(&qp->outq));
2551 	if (ret)
2552 		DPAA_SEC_ERR("Unable to init sec queue");
2553 
2554 	return ret;
2555 }
2556 
2557 static inline void
2558 free_session_data(dpaa_sec_session *s)
2559 {
2560 	if (is_aead(s))
2561 		rte_free(s->aead_key.data);
2562 	else {
2563 		rte_free(s->auth_key.data);
2564 		rte_free(s->cipher_key.data);
2565 	}
2566 	memset(s, 0, sizeof(dpaa_sec_session));
2567 }
2568 
2569 static int
2570 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2571 			    struct rte_crypto_sym_xform *xform,	void *sess)
2572 {
2573 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2574 	dpaa_sec_session *session = sess;
2575 	uint32_t i;
2576 	int ret;
2577 
2578 	PMD_INIT_FUNC_TRACE();
2579 
2580 	if (unlikely(sess == NULL)) {
2581 		DPAA_SEC_ERR("invalid session struct");
2582 		return -EINVAL;
2583 	}
2584 	memset(session, 0, sizeof(dpaa_sec_session));
2585 
2586 	/* Default IV length = 0 */
2587 	session->iv.length = 0;
2588 
2589 	/* Cipher Only */
2590 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2591 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2592 		ret = dpaa_sec_cipher_init(dev, xform, session);
2593 
2594 	/* Authentication Only */
2595 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2596 		   xform->next == NULL) {
2597 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2598 		session->ctxt = DPAA_SEC_AUTH;
2599 		ret = dpaa_sec_auth_init(dev, xform, session);
2600 
2601 	/* Cipher then Authenticate */
2602 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2603 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2604 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2605 			session->auth_cipher_text = 1;
2606 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2607 				ret = dpaa_sec_auth_init(dev, xform, session);
2608 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2609 				ret = dpaa_sec_cipher_init(dev, xform, session);
2610 			else
2611 				ret = dpaa_sec_chain_init(dev, xform, session);
2612 		} else {
2613 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2614 			return -ENOTSUP;
2615 		}
2616 	/* Authenticate then Cipher */
2617 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2618 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2619 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2620 			session->auth_cipher_text = 0;
2621 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2622 				ret = dpaa_sec_cipher_init(dev, xform, session);
2623 			else if (xform->next->cipher.algo
2624 					== RTE_CRYPTO_CIPHER_NULL)
2625 				ret = dpaa_sec_auth_init(dev, xform, session);
2626 			else
2627 				ret = dpaa_sec_chain_init(dev, xform, session);
2628 		} else {
2629 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2630 			return -ENOTSUP;
2631 		}
2632 
2633 	/* AEAD operation for AES-GCM kind of Algorithms */
2634 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2635 		   xform->next == NULL) {
2636 		ret = dpaa_sec_aead_init(dev, xform, session);
2637 
2638 	} else {
2639 		DPAA_SEC_ERR("Invalid crypto type");
2640 		return -EINVAL;
2641 	}
2642 	if (ret) {
2643 		DPAA_SEC_ERR("unable to init session");
2644 		goto err1;
2645 	}
2646 
2647 	rte_spinlock_lock(&internals->lock);
2648 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2649 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2650 		if (session->inq[i] == NULL) {
2651 			DPAA_SEC_ERR("unable to attach sec queue");
2652 			rte_spinlock_unlock(&internals->lock);
2653 			ret = -EBUSY;
2654 			goto err1;
2655 		}
2656 	}
2657 	rte_spinlock_unlock(&internals->lock);
2658 
2659 	return 0;
2660 
2661 err1:
2662 	free_session_data(session);
2663 	return ret;
2664 }
2665 
2666 static int
2667 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2668 		struct rte_crypto_sym_xform *xform,
2669 		struct rte_cryptodev_sym_session *sess)
2670 {
2671 	void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
2672 	int ret;
2673 
2674 	PMD_INIT_FUNC_TRACE();
2675 
2676 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2677 	if (ret != 0) {
2678 		DPAA_SEC_ERR("failed to configure session parameters");
2679 		return ret;
2680 	}
2681 
2682 	ret = dpaa_sec_prep_cdb(sess_private_data);
2683 	if (ret) {
2684 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2685 		return ret;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
2691 static inline void
2692 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2693 {
2694 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2695 	uint8_t i;
2696 
2697 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2698 		if (s->inq[i])
2699 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2700 		s->inq[i] = NULL;
2701 		s->qp[i] = NULL;
2702 	}
2703 	free_session_data(s);
2704 }
2705 
2706 /** Clear the memory of session so it doesn't leave key material behind */
2707 static void
2708 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2709 		struct rte_cryptodev_sym_session *sess)
2710 {
2711 	PMD_INIT_FUNC_TRACE();
2712 	void *sess_priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
2713 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2714 
2715 	free_session_memory(dev, s);
2716 }
2717 
2718 #ifdef RTE_LIB_SECURITY
2719 static int
2720 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2721 			struct rte_security_ipsec_xform *ipsec_xform,
2722 			dpaa_sec_session *session)
2723 {
2724 	PMD_INIT_FUNC_TRACE();
2725 
2726 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2727 					       RTE_CACHE_LINE_SIZE);
2728 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2729 		DPAA_SEC_ERR("No Memory for aead key");
2730 		return -ENOMEM;
2731 	}
2732 	memcpy(session->aead_key.data, aead_xform->key.data,
2733 	       aead_xform->key.length);
2734 
2735 	session->digest_length = aead_xform->digest_length;
2736 	session->aead_key.length = aead_xform->key.length;
2737 
2738 	switch (aead_xform->algo) {
2739 	case RTE_CRYPTO_AEAD_AES_GCM:
2740 		switch (session->digest_length) {
2741 		case 8:
2742 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2743 			break;
2744 		case 12:
2745 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2746 			break;
2747 		case 16:
2748 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2749 			break;
2750 		default:
2751 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2752 				     session->digest_length);
2753 			return -EINVAL;
2754 		}
2755 		if (session->dir == DIR_ENC) {
2756 			memcpy(session->encap_pdb.gcm.salt,
2757 				(uint8_t *)&(ipsec_xform->salt), 4);
2758 		} else {
2759 			memcpy(session->decap_pdb.gcm.salt,
2760 				(uint8_t *)&(ipsec_xform->salt), 4);
2761 		}
2762 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2763 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2764 		break;
2765 	default:
2766 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2767 			      aead_xform->algo);
2768 		return -ENOTSUP;
2769 	}
2770 	return 0;
2771 }
2772 
2773 static int
2774 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2775 	struct rte_crypto_auth_xform *auth_xform,
2776 	struct rte_security_ipsec_xform *ipsec_xform,
2777 	dpaa_sec_session *session)
2778 {
2779 	if (cipher_xform) {
2780 		session->cipher_key.data = rte_zmalloc(NULL,
2781 						       cipher_xform->key.length,
2782 						       RTE_CACHE_LINE_SIZE);
2783 		if (session->cipher_key.data == NULL &&
2784 				cipher_xform->key.length > 0) {
2785 			DPAA_SEC_ERR("No Memory for cipher key");
2786 			return -ENOMEM;
2787 		}
2788 
2789 		session->cipher_key.length = cipher_xform->key.length;
2790 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2791 				cipher_xform->key.length);
2792 		session->cipher_alg = cipher_xform->algo;
2793 	} else {
2794 		session->cipher_key.data = NULL;
2795 		session->cipher_key.length = 0;
2796 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2797 	}
2798 
2799 	if (auth_xform) {
2800 		session->auth_key.data = rte_zmalloc(NULL,
2801 						auth_xform->key.length,
2802 						RTE_CACHE_LINE_SIZE);
2803 		if (session->auth_key.data == NULL &&
2804 				auth_xform->key.length > 0) {
2805 			DPAA_SEC_ERR("No Memory for auth key");
2806 			return -ENOMEM;
2807 		}
2808 		session->auth_key.length = auth_xform->key.length;
2809 		memcpy(session->auth_key.data, auth_xform->key.data,
2810 				auth_xform->key.length);
2811 		session->auth_alg = auth_xform->algo;
2812 		session->digest_length = auth_xform->digest_length;
2813 	} else {
2814 		session->auth_key.data = NULL;
2815 		session->auth_key.length = 0;
2816 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2817 	}
2818 
2819 	switch (session->auth_alg) {
2820 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2821 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2822 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2823 		break;
2824 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2825 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2826 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2827 		break;
2828 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2829 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2830 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2831 		if (session->digest_length != 16)
2832 			DPAA_SEC_WARN(
2833 			"+++Using sha256-hmac truncated len is non-standard,"
2834 			"it will not work with lookaside proto");
2835 		break;
2836 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2837 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2838 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2839 		break;
2840 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2841 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2842 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2843 		break;
2844 	case RTE_CRYPTO_AUTH_AES_CMAC:
2845 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2846 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2847 		break;
2848 	case RTE_CRYPTO_AUTH_NULL:
2849 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2850 		break;
2851 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2852 		session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2853 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2854 		break;
2855 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2856 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2857 	case RTE_CRYPTO_AUTH_SHA1:
2858 	case RTE_CRYPTO_AUTH_SHA256:
2859 	case RTE_CRYPTO_AUTH_SHA512:
2860 	case RTE_CRYPTO_AUTH_SHA224:
2861 	case RTE_CRYPTO_AUTH_SHA384:
2862 	case RTE_CRYPTO_AUTH_MD5:
2863 	case RTE_CRYPTO_AUTH_AES_GMAC:
2864 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2865 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2866 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2867 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2868 			      session->auth_alg);
2869 		return -ENOTSUP;
2870 	default:
2871 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2872 			      session->auth_alg);
2873 		return -ENOTSUP;
2874 	}
2875 
2876 	switch (session->cipher_alg) {
2877 	case RTE_CRYPTO_CIPHER_AES_CBC:
2878 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2879 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2880 		break;
2881 	case RTE_CRYPTO_CIPHER_DES_CBC:
2882 		session->cipher_key.alg = OP_PCL_IPSEC_DES;
2883 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2884 		break;
2885 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2886 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2887 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2888 		break;
2889 	case RTE_CRYPTO_CIPHER_AES_CTR:
2890 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2891 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2892 		if (session->dir == DIR_ENC) {
2893 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2894 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2895 		} else {
2896 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2897 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2898 		}
2899 		break;
2900 	case RTE_CRYPTO_CIPHER_NULL:
2901 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2902 		break;
2903 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2904 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2905 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2906 	case RTE_CRYPTO_CIPHER_AES_ECB:
2907 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2908 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2909 			      session->cipher_alg);
2910 		return -ENOTSUP;
2911 	default:
2912 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2913 			      session->cipher_alg);
2914 		return -ENOTSUP;
2915 	}
2916 
2917 	return 0;
2918 }
2919 
2920 static int
2921 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2922 			   struct rte_security_session_conf *conf,
2923 			   void *sess)
2924 {
2925 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2926 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2927 	struct rte_crypto_auth_xform *auth_xform = NULL;
2928 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2929 	struct rte_crypto_aead_xform *aead_xform = NULL;
2930 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2931 	uint32_t i;
2932 	int ret;
2933 
2934 	PMD_INIT_FUNC_TRACE();
2935 
2936 	memset(session, 0, sizeof(dpaa_sec_session));
2937 	session->proto_alg = conf->protocol;
2938 	session->ctxt = DPAA_SEC_IPSEC;
2939 
2940 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
2941 	    ipsec_xform->life.bytes_soft_limit != 0 ||
2942 	    ipsec_xform->life.packets_hard_limit != 0 ||
2943 	    ipsec_xform->life.packets_soft_limit != 0)
2944 		return -ENOTSUP;
2945 
2946 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2947 		session->dir = DIR_ENC;
2948 	else
2949 		session->dir = DIR_DEC;
2950 
2951 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2952 		cipher_xform = &conf->crypto_xform->cipher;
2953 		if (conf->crypto_xform->next)
2954 			auth_xform = &conf->crypto_xform->next->auth;
2955 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2956 					ipsec_xform, session);
2957 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2958 		auth_xform = &conf->crypto_xform->auth;
2959 		if (conf->crypto_xform->next)
2960 			cipher_xform = &conf->crypto_xform->next->cipher;
2961 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2962 					ipsec_xform, session);
2963 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2964 		aead_xform = &conf->crypto_xform->aead;
2965 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2966 					ipsec_xform, session);
2967 	} else {
2968 		DPAA_SEC_ERR("XFORM not specified");
2969 		ret = -EINVAL;
2970 		goto out;
2971 	}
2972 	if (ret) {
2973 		DPAA_SEC_ERR("Failed to process xform");
2974 		goto out;
2975 	}
2976 
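	/*
	 * For egress SAs the outer IP header is pre-built here and carried
	 * inline in the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL), so the SEC
	 * engine prepends it to every encapsulated packet.
	 */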
2977 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2978 		if (ipsec_xform->tunnel.type ==
2979 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2980 			session->ip4_hdr.ip_v = IPVERSION;
2981 			session->ip4_hdr.ip_hl = 5;
2982 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2983 						sizeof(session->ip4_hdr));
2984 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2985 			session->ip4_hdr.ip_id = 0;
2986 			session->ip4_hdr.ip_off = 0;
2987 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2988 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2989 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2990 					IPPROTO_ESP : IPPROTO_AH;
2991 			session->ip4_hdr.ip_sum = 0;
2992 			session->ip4_hdr.ip_src =
2993 					ipsec_xform->tunnel.ipv4.src_ip;
2994 			session->ip4_hdr.ip_dst =
2995 					ipsec_xform->tunnel.ipv4.dst_ip;
2996 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2997 						(void *)&session->ip4_hdr,
2998 						sizeof(struct ip));
2999 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3000 		} else if (ipsec_xform->tunnel.type ==
3001 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3002 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3003 				DPAA_IPv6_DEFAULT_VTC_FLOW |
3004 				((ipsec_xform->tunnel.ipv6.dscp <<
3005 					RTE_IPV6_HDR_TC_SHIFT) &
3006 					RTE_IPV6_HDR_TC_MASK) |
3007 				((ipsec_xform->tunnel.ipv6.flabel <<
3008 					RTE_IPV6_HDR_FL_SHIFT) &
3009 					RTE_IPV6_HDR_FL_MASK));
3010 			/* Payload length will be updated by HW */
3011 			session->ip6_hdr.payload_len = 0;
3012 			session->ip6_hdr.hop_limits =
3013 					ipsec_xform->tunnel.ipv6.hlimit;
3014 			session->ip6_hdr.proto = (ipsec_xform->proto ==
3015 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3016 					IPPROTO_ESP : IPPROTO_AH;
3017 			memcpy(&session->ip6_hdr.src_addr,
3018 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
3019 			memcpy(&session->ip6_hdr.dst_addr,
3020 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
3021 			session->encap_pdb.ip_hdr_len =
3022 						sizeof(struct rte_ipv6_hdr);
3023 		}
3024 
3025 		session->encap_pdb.options =
3026 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3027 			PDBOPTS_ESP_OIHI_PDB_INL |
3028 			PDBOPTS_ESP_IVSRC |
3029 			PDBHMO_ESP_SNR;
3030 		if (ipsec_xform->options.dec_ttl)
3031 			session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3032 		if (ipsec_xform->options.esn)
3033 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3034 		session->encap_pdb.spi = ipsec_xform->spi;
3035 
3036 	} else if (ipsec_xform->direction ==
3037 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3038 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3039 			session->decap_pdb.options = sizeof(struct ip) << 16;
3040 		else
3041 			session->decap_pdb.options =
3042 					sizeof(struct rte_ipv6_hdr) << 16;
3043 		if (ipsec_xform->options.esn)
3044 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
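		/*
		 * Round the requested anti-replay window up to the nearest
		 * SEC-supported size (32, 64 or 128 bit window).
		 */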
3045 		if (ipsec_xform->replay_win_sz) {
3046 			uint32_t win_sz;
3047 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3048 
3049 			switch (win_sz) {
3050 			case 1:
3051 			case 2:
3052 			case 4:
3053 			case 8:
3054 			case 16:
3055 			case 32:
3056 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3057 				break;
3058 			case 64:
3059 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3060 				break;
3061 			default:
3062 				session->decap_pdb.options |=
3063 							PDBOPTS_ESP_ARS128;
3064 			}
3065 		}
3066 	} else
3067 		goto out;
3068 	rte_spinlock_lock(&internals->lock);
3069 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3070 		session->inq[i] = dpaa_sec_attach_rxq(internals);
3071 		if (session->inq[i] == NULL) {
3072 			DPAA_SEC_ERR("unable to attach sec queue");
3073 			rte_spinlock_unlock(&internals->lock);
3074 			goto out;
3075 		}
3076 	}
3077 	rte_spinlock_unlock(&internals->lock);
3078 
3079 	return 0;
3080 out:
3081 	free_session_data(session);
3082 	return -1;
3083 }
3084 
3085 static int
3086 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3087 			  struct rte_security_session_conf *conf,
3088 			  void *sess)
3089 {
3090 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3091 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3092 	struct rte_crypto_auth_xform *auth_xform = NULL;
3093 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3094 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
3095 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3096 	uint32_t i;
3097 	int ret;
3098 
3099 	PMD_INIT_FUNC_TRACE();
3100 
3101 	memset(session, 0, sizeof(dpaa_sec_session));
3102 
3103 	/* find xform types */
3104 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3105 		cipher_xform = &xform->cipher;
3106 		if (xform->next != NULL &&
3107 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3108 			auth_xform = &xform->next->auth;
3109 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3110 		auth_xform = &xform->auth;
3111 		if (xform->next != NULL &&
3112 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3113 			cipher_xform = &xform->next->cipher;
3114 	} else {
3115 		DPAA_SEC_ERR("Invalid crypto type");
3116 		return -EINVAL;
3117 	}
3118 
3119 	session->proto_alg = conf->protocol;
3120 	session->ctxt = DPAA_SEC_PDCP;
3121 
3122 	if (cipher_xform) {
3123 		switch (cipher_xform->algo) {
3124 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3125 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3126 			break;
3127 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3128 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3129 			break;
3130 		case RTE_CRYPTO_CIPHER_AES_CTR:
3131 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3132 			break;
3133 		case RTE_CRYPTO_CIPHER_NULL:
3134 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3135 			break;
3136 		default:
3137 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3138 				      cipher_xform->algo);
3139 			return -EINVAL;
3140 		}
3141 
3142 		session->cipher_key.data = rte_zmalloc(NULL,
3143 					       cipher_xform->key.length,
3144 					       RTE_CACHE_LINE_SIZE);
3145 		if (session->cipher_key.data == NULL &&
3146 				cipher_xform->key.length > 0) {
3147 			DPAA_SEC_ERR("No Memory for cipher key");
3148 			return -ENOMEM;
3149 		}
3150 		session->cipher_key.length = cipher_xform->key.length;
3151 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3152 			cipher_xform->key.length);
3153 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3154 					DIR_ENC : DIR_DEC;
3155 		session->cipher_alg = cipher_xform->algo;
3156 	} else {
3157 		session->cipher_key.data = NULL;
3158 		session->cipher_key.length = 0;
3159 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3160 		session->dir = DIR_ENC;
3161 	}
3162 
3163 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3164 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3165 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3166 			DPAA_SEC_ERR(
3167 				"PDCP Seq Num size should be 5/12 bits for cmode");
3168 			ret = -EINVAL;
3169 			goto out;
3170 		}
3171 	}
3172 
3173 	if (auth_xform) {
3174 		switch (auth_xform->algo) {
3175 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3176 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3177 			break;
3178 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3179 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3180 			break;
3181 		case RTE_CRYPTO_AUTH_AES_CMAC:
3182 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3183 			break;
3184 		case RTE_CRYPTO_AUTH_NULL:
3185 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3186 			break;
3187 		default:
3188 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3189 				      auth_xform->algo);
3190 			rte_free(session->cipher_key.data);
3191 			return -EINVAL;
3192 		}
3193 		session->auth_key.data = rte_zmalloc(NULL,
3194 						     auth_xform->key.length,
3195 						     RTE_CACHE_LINE_SIZE);
3196 		if (!session->auth_key.data &&
3197 		    auth_xform->key.length > 0) {
3198 			DPAA_SEC_ERR("No Memory for auth key");
3199 			rte_free(session->cipher_key.data);
3200 			return -ENOMEM;
3201 		}
3202 		session->auth_key.length = auth_xform->key.length;
3203 		memcpy(session->auth_key.data, auth_xform->key.data,
3204 		       auth_xform->key.length);
3205 		session->auth_alg = auth_xform->algo;
3206 	} else {
3207 		session->auth_key.data = NULL;
3208 		session->auth_key.length = 0;
3209 		session->auth_alg = 0;
3210 	}
3211 	session->pdcp.domain = pdcp_xform->domain;
3212 	session->pdcp.bearer = pdcp_xform->bearer;
3213 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3214 	session->pdcp.sn_size = pdcp_xform->sn_size;
3215 	session->pdcp.hfn = pdcp_xform->hfn;
3216 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3217 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3218 	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
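	/*
	 * With HFN override enabled, the enqueue path reads the per-packet HFN
	 * word from the crypto op at the cipher IV offset (see the PDCP case
	 * in dpaa_sec_enqueue_burst()).
	 */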
3219 	if (cipher_xform)
3220 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3221 
3222 	rte_spinlock_lock(&dev_priv->lock);
3223 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3224 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3225 		if (session->inq[i] == NULL) {
3226 			DPAA_SEC_ERR("unable to attach sec queue");
3227 			rte_spinlock_unlock(&dev_priv->lock);
3228 			ret = -EBUSY;
3229 			goto out;
3230 		}
3231 	}
3232 	rte_spinlock_unlock(&dev_priv->lock);
3233 	return 0;
3234 out:
3235 	rte_free(session->auth_key.data);
3236 	rte_free(session->cipher_key.data);
3237 	memset(session, 0, sizeof(dpaa_sec_session));
3238 	return ret;
3239 }
3240 
3241 static int
3242 dpaa_sec_security_session_create(void *dev,
3243 				 struct rte_security_session_conf *conf,
3244 				 struct rte_security_session *sess)
3245 {
3246 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3247 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3248 	int ret;
3249 
3250 	switch (conf->protocol) {
3251 	case RTE_SECURITY_PROTOCOL_IPSEC:
3252 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
3253 				sess_private_data);
3254 		break;
3255 	case RTE_SECURITY_PROTOCOL_PDCP:
3256 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
3257 				sess_private_data);
3258 		break;
3259 	case RTE_SECURITY_PROTOCOL_MACSEC:
3260 		return -ENOTSUP;
3261 	default:
3262 		return -EINVAL;
3263 	}
3264 	if (ret != 0) {
3265 		DPAA_SEC_ERR("failed to configure session parameters");
3266 		return ret;
3267 	}
3268 
3269 	ret = dpaa_sec_prep_cdb(sess_private_data);
3270 	if (ret) {
3271 		DPAA_SEC_ERR("Unable to prepare sec cdb");
3272 		return ret;
3273 	}
3274 
3275 	return ret;
3276 }
3277 
3278 /** Clear the memory of session so it doesn't leave key material behind */
3279 static int
3280 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3281 		struct rte_security_session *sess)
3282 {
3283 	PMD_INIT_FUNC_TRACE();
3284 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
3285 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3286 
3287 	if (sess_priv) {
3288 		free_session_memory((struct rte_cryptodev *)dev, s);
3289 	}
3290 	return 0;
3291 }
3292 
3293 static unsigned int
3294 dpaa_sec_security_session_get_size(void *device __rte_unused)
3295 {
3296 	return sizeof(dpaa_sec_session);
3297 }
3298 
3299 #endif
3300 static int
3301 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3302 		       struct rte_cryptodev_config *config __rte_unused)
3303 {
3304 	PMD_INIT_FUNC_TRACE();
3305 
3306 	return 0;
3307 }
3308 
3309 static int
3310 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3311 {
3312 	PMD_INIT_FUNC_TRACE();
3313 	return 0;
3314 }
3315 
3316 static void
3317 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3318 {
3319 	PMD_INIT_FUNC_TRACE();
3320 }
3321 
3322 static int
3323 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3324 {
3325 	PMD_INIT_FUNC_TRACE();
3326 
3327 	if (dev == NULL)
3328 		return -EINVAL;
3329 
3330 	return 0;
3331 }
3332 
3333 static void
3334 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3335 		       struct rte_cryptodev_info *info)
3336 {
3337 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3338 
3339 	PMD_INIT_FUNC_TRACE();
3340 	if (info != NULL) {
3341 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3342 		info->feature_flags = dev->feature_flags;
3343 		info->capabilities = dpaa_sec_capabilities;
3344 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3345 		info->driver_id = dpaa_cryptodev_driver_id;
3346 	}
3347 }
3348 
3349 static enum qman_cb_dqrr_result
3350 dpaa_sec_process_parallel_event(void *event,
3351 			struct qman_portal *qm __always_unused,
3352 			struct qman_fq *outq,
3353 			const struct qm_dqrr_entry *dqrr,
3354 			void **bufs)
3355 {
3356 	const struct qm_fd *fd;
3357 	struct dpaa_sec_job *job;
3358 	struct dpaa_sec_op_ctx *ctx;
3359 	struct rte_event *ev = (struct rte_event *)event;
3360 
3361 	fd = &dqrr->fd;
3362 
3363 	/* The SG table is embedded in an op ctx:
3364 	 * sg[0] describes the output buffer,
3365 	 * sg[1] describes the input buffer.
3366 	 */
3367 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3368 
3369 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3370 	ctx->fd_status = fd->status;
3371 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3372 		struct qm_sg_entry *sg_out;
3373 		uint32_t len;
3374 
3375 		sg_out = &job->sg[0];
3376 		hw_sg_to_cpu(sg_out);
3377 		len = sg_out->length;
3378 		ctx->op->sym->m_src->pkt_len = len;
3379 		ctx->op->sym->m_src->data_len = len;
3380 	}
3381 	if (!ctx->fd_status) {
3382 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3383 	} else {
3384 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3385 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3386 	}
3387 	ev->event_ptr = (void *)ctx->op;
3388 
3389 	ev->flow_id = outq->ev.flow_id;
3390 	ev->sub_event_type = outq->ev.sub_event_type;
3391 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3392 	ev->op = RTE_EVENT_OP_NEW;
3393 	ev->sched_type = outq->ev.sched_type;
3394 	ev->queue_id = outq->ev.queue_id;
3395 	ev->priority = outq->ev.priority;
3396 	*bufs = (void *)ctx->op;
3397 
3398 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3399 
3400 	return qman_cb_dqrr_consume;
3401 }
3402 
3403 static enum qman_cb_dqrr_result
3404 dpaa_sec_process_atomic_event(void *event,
3405 			struct qman_portal *qm __rte_unused,
3406 			struct qman_fq *outq,
3407 			const struct qm_dqrr_entry *dqrr,
3408 			void **bufs)
3409 {
3410 	u8 index;
3411 	const struct qm_fd *fd;
3412 	struct dpaa_sec_job *job;
3413 	struct dpaa_sec_op_ctx *ctx;
3414 	struct rte_event *ev = (struct rte_event *)event;
3415 
3416 	fd = &dqrr->fd;
3417 
3418 	/* The SG table is embedded in an op ctx:
3419 	 * sg[0] describes the output buffer,
3420 	 * sg[1] describes the input buffer.
3421 	 */
3422 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3423 
3424 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3425 	ctx->fd_status = fd->status;
3426 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3427 		struct qm_sg_entry *sg_out;
3428 		uint32_t len;
3429 
3430 		sg_out = &job->sg[0];
3431 		hw_sg_to_cpu(sg_out);
3432 		len = sg_out->length;
3433 		ctx->op->sym->m_src->pkt_len = len;
3434 		ctx->op->sym->m_src->data_len = len;
3435 	}
3436 	if (!ctx->fd_status) {
3437 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3438 	} else {
3439 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3440 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3441 	}
3442 	ev->event_ptr = (void *)ctx->op;
3443 	ev->flow_id = outq->ev.flow_id;
3444 	ev->sub_event_type = outq->ev.sub_event_type;
3445 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3446 	ev->op = RTE_EVENT_OP_NEW;
3447 	ev->sched_type = outq->ev.sched_type;
3448 	ev->queue_id = outq->ev.queue_id;
3449 	ev->priority = outq->ev.priority;
3450 
3451 	/* Save active dqrr entries */
3452 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3453 	DPAA_PER_LCORE_DQRR_SIZE++;
3454 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3455 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3456 	ev->impl_opaque = index + 1;
3457 	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3458 	*bufs = (void *)ctx->op;
3459 
3460 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3461 
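	/*
	 * Defer DQRR consumption: the held entry is released later, via DCA,
	 * when the op is enqueued back to SEC from dpaa_sec_enqueue_burst().
	 */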
3462 	return qman_cb_dqrr_defer;
3463 }
3464 
3465 int
3466 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3467 		int qp_id,
3468 		uint16_t ch_id,
3469 		const struct rte_event *event)
3470 {
3471 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3472 	struct qm_mcc_initfq opts = {0};
3473 
3474 	int ret;
3475 
3476 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3477 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3478 	opts.fqd.dest.channel = ch_id;
3479 
3480 	switch (event->sched_type) {
3481 	case RTE_SCHED_TYPE_ATOMIC:
3482 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3483 		/* Reset the FQCTRL_AVOIDBLOCK bit as it is an unnecessary
3484 		 * configuration with the HOLD_ACTIVE setting
3485 		 */
3486 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3487 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3488 		break;
3489 	case RTE_SCHED_TYPE_ORDERED:
3490 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3491 		return -ENOTSUP;
3492 	default:
3493 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3494 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3495 		break;
3496 	}
3497 
3498 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3499 	if (unlikely(ret)) {
3500 		DPAA_SEC_ERR("unable to init caam source fq!");
3501 		return ret;
3502 	}
3503 
3504 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3505 
3506 	return 0;
3507 }
3508 
3509 int
3510 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3511 			int qp_id)
3512 {
3513 	struct qm_mcc_initfq opts = {0};
3514 	int ret;
3515 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3516 
3517 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3518 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3519 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3520 	qp->outq.cb.ern  = ern_sec_fq_handler;
3521 	qman_retire_fq(&qp->outq, NULL);
3522 	qman_oos_fq(&qp->outq);
3523 	ret = qman_init_fq(&qp->outq, 0, &opts);
3524 	if (ret)
3525 		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3526 	qp->outq.cb.dqrr = NULL;
3527 
3528 	return ret;
3529 }
3530 
3531 static struct rte_cryptodev_ops crypto_ops = {
3532 	.dev_configure	      = dpaa_sec_dev_configure,
3533 	.dev_start	      = dpaa_sec_dev_start,
3534 	.dev_stop	      = dpaa_sec_dev_stop,
3535 	.dev_close	      = dpaa_sec_dev_close,
3536 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3537 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3538 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3539 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3540 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3541 	.sym_session_clear        = dpaa_sec_sym_session_clear,
3542 	/* Raw data-path API related operations */
3543 	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3544 	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3545 };
3546 
3547 #ifdef RTE_LIB_SECURITY
3548 static const struct rte_security_capability *
3549 dpaa_sec_capabilities_get(void *device __rte_unused)
3550 {
3551 	return dpaa_sec_security_cap;
3552 }
3553 
3554 static const struct rte_security_ops dpaa_sec_security_ops = {
3555 	.session_create = dpaa_sec_security_session_create,
3556 	.session_update = NULL,
3557 	.session_get_size = dpaa_sec_security_session_get_size,
3558 	.session_stats_get = NULL,
3559 	.session_destroy = dpaa_sec_security_session_destroy,
3560 	.set_pkt_metadata = NULL,
3561 	.capabilities_get = dpaa_sec_capabilities_get
3562 };
3563 #endif
3564 static int
3565 dpaa_sec_uninit(struct rte_cryptodev *dev)
3566 {
3567 	struct dpaa_sec_dev_private *internals;
3568 
3569 	if (dev == NULL)
3570 		return -ENODEV;
3571 
3572 	internals = dev->data->dev_private;
3573 	rte_free(dev->security_ctx);
3574 
3575 	rte_free(internals);
3576 
3577 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3578 		      dev->data->name, rte_socket_id());
3579 
3580 	return 0;
3581 }
3582 
3583 static int
3584 check_devargs_handler(__rte_unused const char *key, const char *value,
3585 		      __rte_unused void *opaque)
3586 {
3587 	dpaa_sec_dp_dump = atoi(value);
3588 	if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3589 		DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
3590 			      "supported, changing to FULL error prints\n");
3591 		dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
3592 	}
3593 
3594 	return 0;
3595 }
3596 
3597 static void
3598 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3599 {
3600 	struct rte_kvargs *kvlist;
3601 
3602 	if (!devargs)
3603 		return;
3604 
3605 	kvlist = rte_kvargs_parse(devargs->args, NULL);
3606 	if (!kvlist)
3607 		return;
3608 
3609 	if (!rte_kvargs_count(kvlist, key)) {
3610 		rte_kvargs_free(kvlist);
3611 		return;
3612 	}
3613 
3614 	rte_kvargs_process(kvlist, key,
3615 				check_devargs_handler, NULL);
3616 	rte_kvargs_free(kvlist);
3617 }
3618 
3619 static int
3620 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3621 {
3622 	struct dpaa_sec_dev_private *internals;
3623 #ifdef RTE_LIB_SECURITY
3624 	struct rte_security_ctx *security_instance;
3625 #endif
3626 	struct dpaa_sec_qp *qp;
3627 	uint32_t i, flags;
3628 	int ret;
3629 	void *cmd_map;
3630 	int map_fd = -1;
3631 
3632 	PMD_INIT_FUNC_TRACE();
3633 
3634 	internals = cryptodev->data->dev_private;
3635 	map_fd = open("/dev/mem", O_RDWR);
3636 	if (unlikely(map_fd < 0)) {
3637 		DPAA_SEC_ERR("Unable to open (/dev/mem)");
3638 		return map_fd;
3639 	}
3640 	internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
3641 			    MAP_SHARED, map_fd, SEC_BASE_ADDR);
3642 	if (internals->sec_hw == MAP_FAILED) {
3643 		DPAA_SEC_ERR("Memory map failed");
3644 		close(map_fd);
3645 		return -EINVAL;
3646 	}
3647 	cmd_map = (uint8_t *)internals->sec_hw +
3648 		  (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG;
3649 	if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN))
3650 		/* enable QI interface */
3651 		rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map);
3652 
3653 	ret = munmap(internals->sec_hw, MAP_SIZE);
3654 	if (ret)
3655 		DPAA_SEC_WARN("munmap of SEC registers failed");
3656 
3657 	close(map_fd);
3658 	cryptodev->driver_id = dpaa_cryptodev_driver_id;
3659 	cryptodev->dev_ops = &crypto_ops;
3660 
3661 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3662 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3663 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3664 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3665 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3666 			RTE_CRYPTODEV_FF_SECURITY |
3667 			RTE_CRYPTODEV_FF_SYM_RAW_DP |
3668 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3669 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3670 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3671 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3672 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3673 
3674 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3675 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3676 
3677 	/*
3678 	 * For secondary processes, don't initialise anything further as the
3679 	 * primary has already done this work; only the per-process ops and
3680 	 * burst function pointers set above are needed here.
3681 	 */
3682 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3683 		DPAA_SEC_WARN("Device already initialized by primary process");
3684 		return 0;
3685 	}
3686 #ifdef RTE_LIB_SECURITY
3687 	/* Initialize security_ctx only for primary process */
3688 	security_instance = rte_malloc("rte_security_instances_ops",
3689 				sizeof(struct rte_security_ctx), 0);
3690 	if (security_instance == NULL)
3691 		return -ENOMEM;
3692 	security_instance->device = (void *)cryptodev;
3693 	security_instance->ops = &dpaa_sec_security_ops;
3694 	security_instance->sess_cnt = 0;
3695 	cryptodev->security_ctx = security_instance;
3696 #endif
3697 	rte_spinlock_init(&internals->lock);
3698 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3699 		/* init qman fq for queue pair */
3700 		qp = &internals->qps[i];
3701 		ret = dpaa_sec_init_tx(&qp->outq);
3702 		if (ret) {
3703 			DPAA_SEC_ERR("config of TX queue pair %d failed", i);
3704 			goto init_error;
3705 		}
3706 	}
3707 
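	/*
	 * Pre-create the RX frame queues used by sessions: dynamically
	 * allocated FQIDs, delivered to the SEC direct-connect portal.
	 */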
3708 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3709 		QMAN_FQ_FLAG_TO_DCPORTAL;
3710 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3711 		/* create RX qman FQs for sessions */
3712 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3713 		if (unlikely(ret != 0)) {
3714 			DPAA_SEC_ERR("sec qman_create_fq failed");
3715 			goto init_error;
3716 		}
3717 	}
3718 
3719 	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3720 
3721 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3722 	return 0;
3723 
3724 init_error:
3725 	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3726 
3727 	rte_free(cryptodev->security_ctx);
3728 	return -EFAULT;
3729 }
3730 
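/*
 * DPAA bus probe: allocate a cryptodev, resolve the SEC era from the device
 * tree if it is not already set, ensure a QMAN portal is initialised for
 * this thread and then run the PMD-specific device initialisation.
 */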
3731 static int
3732 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3733 				struct rte_dpaa_device *dpaa_dev)
3734 {
3735 	struct rte_cryptodev *cryptodev;
3736 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3737 
3738 	int retval;
3739 
3740 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3741 		return 0;
3742 
3743 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3744 
3745 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3746 	if (cryptodev == NULL)
3747 		return -ENOMEM;
3748 
3749 	cryptodev->data->dev_private = rte_zmalloc_socket(
3750 				"cryptodev private structure",
3751 				sizeof(struct dpaa_sec_dev_private),
3752 				RTE_CACHE_LINE_SIZE,
3753 				rte_socket_id());
3754 
3755 	if (cryptodev->data->dev_private == NULL)
3756 		rte_panic("Cannot allocate memory for private "
3757 				"device data");
3758 
3759 	dpaa_dev->crypto_dev = cryptodev;
3760 	cryptodev->device = &dpaa_dev->device;
3761 
3762 	/* init user callbacks */
3763 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3764 
3765 	/* If the SEC era is not yet configured, read it from the device tree */
3766 	if (!rta_get_sec_era()) {
3767 		const struct device_node *caam_node;
3768 
3769 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3770 			const uint32_t *prop = of_get_property(caam_node,
3771 					"fsl,sec-era",
3772 					NULL);
3773 			if (prop) {
3774 				rta_set_sec_era(
3775 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3776 				break;
3777 			}
3778 		}
3779 	}
3780 
3781 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3782 		retval = rte_dpaa_portal_init((void *)1);
3783 		if (retval) {
3784 			DPAA_SEC_ERR("Unable to initialize portal");
3785 			goto out;
3786 		}
3787 	}
3788 
3789 	/* Invoke PMD device initialization function */
3790 	retval = dpaa_sec_dev_init(cryptodev);
3791 	if (retval == 0) {
3792 		rte_cryptodev_pmd_probing_finish(cryptodev);
3793 		return 0;
3794 	}
3795 
3796 	retval = -ENXIO;
3797 out:
3798 	/* Clean up on error */
3799 	rte_free(cryptodev->data->dev_private);
3800 
3801 	rte_cryptodev_pmd_release_device(cryptodev);
3802 
3803 	return retval;
3804 }
3805 
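/* DPAA bus remove: uninitialise and destroy the cryptodev */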
3806 static int
3807 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3808 {
3809 	struct rte_cryptodev *cryptodev;
3810 	int ret;
3811 
3812 	cryptodev = dpaa_dev->crypto_dev;
3813 	if (cryptodev == NULL)
3814 		return -ENODEV;
3815 
3816 	ret = dpaa_sec_uninit(cryptodev);
3817 	if (ret)
3818 		return ret;
3819 
3820 	return rte_cryptodev_pmd_destroy(cryptodev);
3821 }
3822 
3823 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3824 	.drv_type = FSL_DPAA_CRYPTO,
3825 	.driver = {
3826 		.name = "DPAA SEC PMD"
3827 	},
3828 	.probe = cryptodev_dpaa_sec_probe,
3829 	.remove = cryptodev_dpaa_sec_remove,
3830 };
3831 
3832 static struct cryptodev_driver dpaa_sec_crypto_drv;
3833 
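/*
 * Register the PMD with the DPAA bus and the cryptodev framework, document
 * the drv_dump_mode devarg and register the driver's log type.
 */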
3834 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3835 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3836 		dpaa_cryptodev_driver_id);
3837 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3838 		DRIVER_DUMP_MODE "=<int>");
3839 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3840