1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2022 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <dev_driver.h>
21 #include <rte_io.h>
22 #include <rte_ip.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_mbuf.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
29 #include <rte_hexdump.h>
30 
31 #include <fsl_usd.h>
32 #include <fsl_qman.h>
33 #include <dpaa_of.h>
34 
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41 
42 #include <bus_dpaa_driver.h>
43 #include <dpaa_sec.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
47 
48 #define DRIVER_DUMP_MODE "drv_dump_mode"
49 
50 /* DPAA_SEC_DP_DUMP levels */
51 enum dpaa_sec_dump_levels {
52 	DPAA_SEC_DP_NO_DUMP,
53 	DPAA_SEC_DP_ERR_DUMP,
54 	DPAA_SEC_DP_FULL_DUMP
55 };
56 
57 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
58 
59 uint8_t dpaa_cryptodev_driver_id;
60 
61 static inline void
62 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
63 {
64 	if (!ctx->fd_status) {
65 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
66 	} else {
67 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
68 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
69 	}
70 }
71 
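/* Allocate a per-operation context from the session's per-lcore context
 * pool and clear only the SG entries needed for this job (dcbz_64 is
 * cheaper than a full memset on the per-packet path).
 */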
72 static inline struct dpaa_sec_op_ctx *
73 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
74 {
75 	struct dpaa_sec_op_ctx *ctx;
76 	int i, retval;
77 
78 	retval = rte_mempool_get(
79 			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
80 			(void **)(&ctx));
81 	if (!ctx || retval) {
82 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
83 		return NULL;
84 	}
85 	/*
86 	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
87 	 * One call to dcbz_64() clears 64 bytes, so it is called once per
88 	 * four entries to clear all the SG entries. Since dpaa_sec_alloc_ctx()
89 	 * is called for each packet, memset() would be costlier than dcbz_64().
90 	 */
91 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
92 		dcbz_64(&ctx->job.sg[i]);
93 
94 	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
95 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
96 
97 	return ctx;
98 }
99 
100 static void
101 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
102 		   struct qman_fq *fq,
103 		   const struct qm_mr_entry *msg)
104 {
105 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
106 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
107 }
108 
109 /* Initialize the queue with the destination channel set to the CAAM
110  * channel so that all packets in this queue are dispatched to CAAM.
111  */
112 static int
113 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
114 		 uint32_t fqid_out)
115 {
116 	struct qm_mcc_initfq fq_opts;
117 	uint32_t flags;
118 	int ret = -1;
119 
120 	/* Clear FQ options */
121 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
122 
123 	flags = QMAN_INITFQ_FLAG_SCHED;
124 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
125 			  QM_INITFQ_WE_CONTEXTB;
126 
127 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
128 	fq_opts.fqd.context_b = fqid_out;
129 	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
130 	fq_opts.fqd.dest.wq = 0;
131 
132 	fq_in->cb.ern  = ern_sec_fq_handler;
133 
134 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
135 
136 	ret = qman_init_fq(fq_in, flags, &fq_opts);
137 	if (unlikely(ret != 0))
138 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
139 
140 	return ret;
141 }
142 
143 /* Jobs are enqueued on in_fq and CAAM puts the crypto result on out_fq */
144 static enum qman_cb_dqrr_result
145 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
146 		  struct qman_fq *fq __always_unused,
147 		  const struct qm_dqrr_entry *dqrr)
148 {
149 	const struct qm_fd *fd;
150 	struct dpaa_sec_job *job;
151 	struct dpaa_sec_op_ctx *ctx;
152 
153 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
154 		return qman_cb_dqrr_consume;
155 
156 	fd = &dqrr->fd;
157 	/* sg is embedded in an op ctx,
158 	 * sg[0] is for output
159 	 * sg[1] for input
160 	 */
161 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
162 
163 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
164 	ctx->fd_status = fd->status;
165 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
166 		struct qm_sg_entry *sg_out;
167 		uint32_t len;
168 		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
169 				ctx->op->sym->m_src : ctx->op->sym->m_dst;
170 
171 		sg_out = &job->sg[0];
172 		hw_sg_to_cpu(sg_out);
173 		len = sg_out->length;
174 		mbuf->pkt_len = len;
175 		while (mbuf->next != NULL) {
176 			len -= mbuf->data_len;
177 			mbuf = mbuf->next;
178 		}
179 		mbuf->data_len = len;
180 	}
181 	dpaa_sec_op_ending(ctx);
182 
183 	return qman_cb_dqrr_consume;
184 }
185 
186 /* CAAM results are put into this queue */
187 static int
188 dpaa_sec_init_tx(struct qman_fq *fq)
189 {
190 	int ret;
191 	struct qm_mcc_initfq opts;
192 	uint32_t flags;
193 
194 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
195 		QMAN_FQ_FLAG_DYNAMIC_FQID;
196 
197 	ret = qman_create_fq(0, flags, fq);
198 	if (unlikely(ret)) {
199 		DPAA_SEC_ERR("qman_create_fq failed");
200 		return ret;
201 	}
202 
203 	memset(&opts, 0, sizeof(opts));
204 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
205 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
206 
207 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
208 
209 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
210 	fq->cb.ern  = ern_sec_fq_handler;
211 
212 	ret = qman_init_fq(fq, 0, &opts);
213 	if (unlikely(ret)) {
214 		DPAA_SEC_ERR("unable to init caam source fq!");
215 		return ret;
216 	}
217 
218 	return ret;
219 }
220 
221 static inline int is_aead(dpaa_sec_session *ses)
222 {
223 	return ((ses->cipher_alg == 0) &&
224 		(ses->auth_alg == 0) &&
225 		(ses->aead_alg != 0));
226 }
227 
228 static inline int is_encode(dpaa_sec_session *ses)
229 {
230 	return ses->dir == DIR_ENC;
231 }
232 
233 static inline int is_decode(dpaa_sec_session *ses)
234 {
235 	return ses->dir == DIR_DEC;
236 }
237 
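/* Prepare the PDCP shared descriptor (control plane, user plane or
 * short MAC) for the session. Based on the RTA inline query, the cipher
 * and auth keys are either embedded in the descriptor or referenced by
 * physical address.
 */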
238 static int
239 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
240 {
241 	struct alginfo authdata = {0}, cipherdata = {0};
242 	struct sec_cdb *cdb = &ses->cdb;
243 	struct alginfo *p_authdata = NULL;
244 	int32_t shared_desc_len = 0;
245 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
246 	int swap = false;
247 #else
248 	int swap = true;
249 #endif
250 
251 	cipherdata.key = (size_t)ses->cipher_key.data;
252 	cipherdata.keylen = ses->cipher_key.length;
253 	cipherdata.key_enc_flags = 0;
254 	cipherdata.key_type = RTA_DATA_IMM;
255 	cipherdata.algtype = ses->cipher_key.alg;
256 	cipherdata.algmode = ses->cipher_key.algmode;
257 
258 	if (ses->auth_alg) {
259 		authdata.key = (size_t)ses->auth_key.data;
260 		authdata.keylen = ses->auth_key.length;
261 		authdata.key_enc_flags = 0;
262 		authdata.key_type = RTA_DATA_IMM;
263 		authdata.algtype = ses->auth_key.alg;
264 		authdata.algmode = ses->auth_key.algmode;
265 
266 		p_authdata = &authdata;
267 	}
268 
269 	if (ses->pdcp.sdap_enabled) {
270 		int nb_keys_to_inline =
271 				rta_inline_pdcp_sdap_query(authdata.algtype,
272 					cipherdata.algtype,
273 					ses->pdcp.sn_size,
274 					ses->pdcp.hfn_ovd);
275 		if (nb_keys_to_inline >= 1) {
276 			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
277 						(size_t)cipherdata.key);
278 			cipherdata.key_type = RTA_DATA_PTR;
279 		}
280 		if (nb_keys_to_inline >= 2) {
281 			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
282 						(size_t)authdata.key);
283 			authdata.key_type = RTA_DATA_PTR;
284 		}
285 	} else {
286 		if (rta_inline_pdcp_query(authdata.algtype,
287 					cipherdata.algtype,
288 					ses->pdcp.sn_size,
289 					ses->pdcp.hfn_ovd)) {
290 			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
291 						(size_t)cipherdata.key);
292 			cipherdata.key_type = RTA_DATA_PTR;
293 		}
294 	}
295 
296 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
297 		if (ses->dir == DIR_ENC)
298 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
299 					cdb->sh_desc, 1, swap,
300 					ses->pdcp.hfn,
301 					ses->pdcp.sn_size,
302 					ses->pdcp.bearer,
303 					ses->pdcp.pkt_dir,
304 					ses->pdcp.hfn_threshold,
305 					&cipherdata, &authdata);
306 		else if (ses->dir == DIR_DEC)
307 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
308 					cdb->sh_desc, 1, swap,
309 					ses->pdcp.hfn,
310 					ses->pdcp.sn_size,
311 					ses->pdcp.bearer,
312 					ses->pdcp.pkt_dir,
313 					ses->pdcp.hfn_threshold,
314 					&cipherdata, &authdata);
315 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
316 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
317 						     1, swap, &authdata);
318 	} else {
319 		if (ses->dir == DIR_ENC) {
320 			if (ses->pdcp.sdap_enabled)
321 				shared_desc_len =
322 					cnstr_shdsc_pdcp_sdap_u_plane_encap(
323 						cdb->sh_desc, 1, swap,
324 						ses->pdcp.sn_size,
325 						ses->pdcp.hfn,
326 						ses->pdcp.bearer,
327 						ses->pdcp.pkt_dir,
328 						ses->pdcp.hfn_threshold,
329 						&cipherdata, p_authdata);
330 			else
331 				shared_desc_len =
332 					cnstr_shdsc_pdcp_u_plane_encap(
333 						cdb->sh_desc, 1, swap,
334 						ses->pdcp.sn_size,
335 						ses->pdcp.hfn,
336 						ses->pdcp.bearer,
337 						ses->pdcp.pkt_dir,
338 						ses->pdcp.hfn_threshold,
339 						&cipherdata, p_authdata);
340 		} else if (ses->dir == DIR_DEC) {
341 			if (ses->pdcp.sdap_enabled)
342 				shared_desc_len =
343 					cnstr_shdsc_pdcp_sdap_u_plane_decap(
344 						cdb->sh_desc, 1, swap,
345 						ses->pdcp.sn_size,
346 						ses->pdcp.hfn,
347 						ses->pdcp.bearer,
348 						ses->pdcp.pkt_dir,
349 						ses->pdcp.hfn_threshold,
350 						&cipherdata, p_authdata);
351 			else
352 				shared_desc_len =
353 					cnstr_shdsc_pdcp_u_plane_decap(
354 						cdb->sh_desc, 1, swap,
355 						ses->pdcp.sn_size,
356 						ses->pdcp.hfn,
357 						ses->pdcp.bearer,
358 						ses->pdcp.pkt_dir,
359 						ses->pdcp.hfn_threshold,
360 						&cipherdata, p_authdata);
361 		}
362 	}
363 	return shared_desc_len;
364 }
365 
366 /* prepare ipsec proto command block of the session */
367 static int
368 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
369 {
370 	struct alginfo cipherdata = {0}, authdata = {0};
371 	struct sec_cdb *cdb = &ses->cdb;
372 	int32_t shared_desc_len = 0;
373 	int err;
374 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
375 	int swap = false;
376 #else
377 	int swap = true;
378 #endif
379 
380 	cipherdata.key = (size_t)ses->cipher_key.data;
381 	cipherdata.keylen = ses->cipher_key.length;
382 	cipherdata.key_enc_flags = 0;
383 	cipherdata.key_type = RTA_DATA_IMM;
384 	cipherdata.algtype = ses->cipher_key.alg;
385 	cipherdata.algmode = ses->cipher_key.algmode;
386 
387 	if (ses->auth_key.length) {
388 		authdata.key = (size_t)ses->auth_key.data;
389 		authdata.keylen = ses->auth_key.length;
390 		authdata.key_enc_flags = 0;
391 		authdata.key_type = RTA_DATA_IMM;
392 		authdata.algtype = ses->auth_key.alg;
393 		authdata.algmode = ses->auth_key.algmode;
394 	}
395 
396 	cdb->sh_desc[0] = cipherdata.keylen;
397 	cdb->sh_desc[1] = authdata.keylen;
398 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
399 			       DESC_JOB_IO_LEN,
400 			       (unsigned int *)cdb->sh_desc,
401 			       &cdb->sh_desc[2], 2);
402 
403 	if (err < 0) {
404 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
405 		return err;
406 	}
407 	if (cdb->sh_desc[2] & 1)
408 		cipherdata.key_type = RTA_DATA_IMM;
409 	else {
410 		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
411 					(void *)(size_t)cipherdata.key);
412 		cipherdata.key_type = RTA_DATA_PTR;
413 	}
414 	if (cdb->sh_desc[2] & (1<<1))
415 		authdata.key_type = RTA_DATA_IMM;
416 	else {
417 		authdata.key = (size_t)rte_dpaa_mem_vtop(
418 					(void *)(size_t)authdata.key);
419 		authdata.key_type = RTA_DATA_PTR;
420 	}
421 
422 	cdb->sh_desc[0] = 0;
423 	cdb->sh_desc[1] = 0;
424 	cdb->sh_desc[2] = 0;
425 	if (ses->dir == DIR_ENC) {
426 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
427 				cdb->sh_desc,
428 				true, swap, SHR_SERIAL,
429 				&ses->encap_pdb,
430 				(uint8_t *)&ses->ip4_hdr,
431 				&cipherdata, &authdata);
432 	} else if (ses->dir == DIR_DEC) {
433 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
434 				cdb->sh_desc,
435 				true, swap, SHR_SERIAL,
436 				&ses->decap_pdb,
437 				&cipherdata, &authdata);
438 	}
439 	return shared_desc_len;
440 }
441 
442 /* prepare command block of the session */
443 static int
444 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
445 {
446 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
447 	int32_t shared_desc_len = 0;
448 	struct sec_cdb *cdb = &ses->cdb;
449 	int err;
450 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
451 	int swap = false;
452 #else
453 	int swap = true;
454 #endif
455 
456 	memset(cdb, 0, sizeof(struct sec_cdb));
457 
458 	switch (ses->ctxt) {
459 	case DPAA_SEC_IPSEC:
460 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
461 		break;
462 	case DPAA_SEC_PDCP:
463 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
464 		break;
465 	case DPAA_SEC_CIPHER:
466 		alginfo_c.key = (size_t)ses->cipher_key.data;
467 		alginfo_c.keylen = ses->cipher_key.length;
468 		alginfo_c.key_enc_flags = 0;
469 		alginfo_c.key_type = RTA_DATA_IMM;
470 		alginfo_c.algtype = ses->cipher_key.alg;
471 		alginfo_c.algmode = ses->cipher_key.algmode;
472 
473 		switch (ses->cipher_alg) {
474 		case RTE_CRYPTO_CIPHER_AES_CBC:
475 		case RTE_CRYPTO_CIPHER_3DES_CBC:
476 		case RTE_CRYPTO_CIPHER_DES_CBC:
477 		case RTE_CRYPTO_CIPHER_AES_CTR:
478 		case RTE_CRYPTO_CIPHER_3DES_CTR:
479 			shared_desc_len = cnstr_shdsc_blkcipher(
480 					cdb->sh_desc, true,
481 					swap, SHR_NEVER, &alginfo_c,
482 					ses->iv.length,
483 					ses->dir);
484 			break;
485 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
486 			shared_desc_len = cnstr_shdsc_snow_f8(
487 					cdb->sh_desc, true, swap,
488 					&alginfo_c,
489 					ses->dir);
490 			break;
491 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
492 			shared_desc_len = cnstr_shdsc_zuce(
493 					cdb->sh_desc, true, swap,
494 					&alginfo_c,
495 					ses->dir);
496 			break;
497 		default:
498 			DPAA_SEC_ERR("unsupported cipher alg %d",
499 				     ses->cipher_alg);
500 			return -ENOTSUP;
501 		}
502 		break;
503 	case DPAA_SEC_AUTH:
504 		alginfo_a.key = (size_t)ses->auth_key.data;
505 		alginfo_a.keylen = ses->auth_key.length;
506 		alginfo_a.key_enc_flags = 0;
507 		alginfo_a.key_type = RTA_DATA_IMM;
508 		alginfo_a.algtype = ses->auth_key.alg;
509 		alginfo_a.algmode = ses->auth_key.algmode;
510 		switch (ses->auth_alg) {
511 		case RTE_CRYPTO_AUTH_MD5:
512 		case RTE_CRYPTO_AUTH_SHA1:
513 		case RTE_CRYPTO_AUTH_SHA224:
514 		case RTE_CRYPTO_AUTH_SHA256:
515 		case RTE_CRYPTO_AUTH_SHA384:
516 		case RTE_CRYPTO_AUTH_SHA512:
517 			shared_desc_len = cnstr_shdsc_hash(
518 						cdb->sh_desc, true,
519 						swap, SHR_NEVER, &alginfo_a,
520 						!ses->dir,
521 						ses->digest_length);
522 			break;
523 		case RTE_CRYPTO_AUTH_MD5_HMAC:
524 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
525 		case RTE_CRYPTO_AUTH_SHA224_HMAC:
526 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
527 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
528 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
529 			shared_desc_len = cnstr_shdsc_hmac(
530 						cdb->sh_desc, true,
531 						swap, SHR_NEVER, &alginfo_a,
532 						!ses->dir,
533 						ses->digest_length);
534 			break;
535 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
536 			shared_desc_len = cnstr_shdsc_snow_f9(
537 						cdb->sh_desc, true, swap,
538 						&alginfo_a,
539 						!ses->dir,
540 						ses->digest_length);
541 			break;
542 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
543 			shared_desc_len = cnstr_shdsc_zuca(
544 						cdb->sh_desc, true, swap,
545 						&alginfo_a,
546 						!ses->dir,
547 						ses->digest_length);
548 			break;
549 		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
550 		case RTE_CRYPTO_AUTH_AES_CMAC:
551 			shared_desc_len = cnstr_shdsc_aes_mac(
552 						cdb->sh_desc,
553 						true, swap, SHR_NEVER,
554 						&alginfo_a,
555 						!ses->dir,
556 						ses->digest_length);
557 			break;
558 		default:
559 			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
560 		}
561 		break;
562 	case DPAA_SEC_AEAD:
563 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
564 			DPAA_SEC_ERR("not supported aead alg");
565 			return -ENOTSUP;
566 		}
567 		alginfo.key = (size_t)ses->aead_key.data;
568 		alginfo.keylen = ses->aead_key.length;
569 		alginfo.key_enc_flags = 0;
570 		alginfo.key_type = RTA_DATA_IMM;
571 		alginfo.algtype = ses->aead_key.alg;
572 		alginfo.algmode = ses->aead_key.algmode;
573 
574 		if (ses->dir == DIR_ENC)
575 			shared_desc_len = cnstr_shdsc_gcm_encap(
576 					cdb->sh_desc, true, swap, SHR_NEVER,
577 					&alginfo,
578 					ses->iv.length,
579 					ses->digest_length);
580 		else
581 			shared_desc_len = cnstr_shdsc_gcm_decap(
582 					cdb->sh_desc, true, swap, SHR_NEVER,
583 					&alginfo,
584 					ses->iv.length,
585 					ses->digest_length);
586 		break;
587 	case DPAA_SEC_CIPHER_HASH:
588 		alginfo_c.key = (size_t)ses->cipher_key.data;
589 		alginfo_c.keylen = ses->cipher_key.length;
590 		alginfo_c.key_enc_flags = 0;
591 		alginfo_c.key_type = RTA_DATA_IMM;
592 		alginfo_c.algtype = ses->cipher_key.alg;
593 		alginfo_c.algmode = ses->cipher_key.algmode;
594 
595 		alginfo_a.key = (size_t)ses->auth_key.data;
596 		alginfo_a.keylen = ses->auth_key.length;
597 		alginfo_a.key_enc_flags = 0;
598 		alginfo_a.key_type = RTA_DATA_IMM;
599 		alginfo_a.algtype = ses->auth_key.alg;
600 		alginfo_a.algmode = ses->auth_key.algmode;
601 
602 		cdb->sh_desc[0] = alginfo_c.keylen;
603 		cdb->sh_desc[1] = alginfo_a.keylen;
604 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
605 				       DESC_JOB_IO_LEN,
606 				       (unsigned int *)cdb->sh_desc,
607 				       &cdb->sh_desc[2], 2);
608 
609 		if (err < 0) {
610 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
611 			return err;
612 		}
613 		if (cdb->sh_desc[2] & 1)
614 			alginfo_c.key_type = RTA_DATA_IMM;
615 		else {
616 			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
617 						(void *)(size_t)alginfo_c.key);
618 			alginfo_c.key_type = RTA_DATA_PTR;
619 		}
620 		if (cdb->sh_desc[2] & (1<<1))
621 			alginfo_a.key_type = RTA_DATA_IMM;
622 		else {
623 			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
624 						(void *)(size_t)alginfo_a.key);
625 			alginfo_a.key_type = RTA_DATA_PTR;
626 		}
627 		cdb->sh_desc[0] = 0;
628 		cdb->sh_desc[1] = 0;
629 		cdb->sh_desc[2] = 0;
630 		/* Auth_only_len is set to 0 here; it will be
631 		 * overwritten in the FD for each packet.
632 		 */
633 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
634 				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
635 				ses->iv.length,
636 				ses->digest_length, ses->dir);
637 		break;
638 	case DPAA_SEC_HASH_CIPHER:
639 	default:
640 		DPAA_SEC_ERR("error: Unsupported session");
641 		return -ENOTSUP;
642 	}
643 
644 	if (shared_desc_len < 0) {
645 		DPAA_SEC_ERR("error in preparing command block");
646 		return shared_desc_len;
647 	}
648 
649 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
650 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
651 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
652 
653 	return 0;
654 }
655 
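/* Debug helper: dump the session parameters, shared descriptor,
 * source/destination mbufs, compound-frame SG entries and queue
 * counters of an operation (used on SEC errors when full dump is
 * enabled).
 */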
656 static void
657 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
658 {
659 	struct dpaa_sec_job *job = &ctx->job;
660 	struct rte_crypto_op *op = ctx->op;
661 	dpaa_sec_session *sess = NULL;
662 	struct sec_cdb c_cdb, *cdb;
663 	uint8_t bufsize;
664 	struct rte_crypto_sym_op *sym_op;
665 	struct qm_sg_entry sg[2];
666 
667 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
668 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
669 #ifdef RTE_LIB_SECURITY
670 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
671 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
672 #endif
673 	if (sess == NULL) {
674 		printf("session is NULL\n");
675 		goto mbuf_dump;
676 	}
677 
678 	cdb = &sess->cdb;
679 	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
680 #ifdef RTE_LIB_SECURITY
681 	printf("\nsession protocol type = %d\n", sess->proto_alg);
682 #endif
683 	printf("\n****************************************\n"
684 		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
685 		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
686 		"\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
687 		"\tCipher algmode:\t%d\n", sess->ctxt,
688 		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
689 		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
690 		(uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
691 		sess->cipher_key.algmode);
692 	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
693 			sess->cipher_key.length);
694 	rte_hexdump(stdout, "auth key", sess->auth_key.data,
695 			sess->auth_key.length);
696 	printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
697 		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
698 		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
699 		"\taead cipher text:\t%d\n",
700 		(uint64_t)sess->auth_key.length, sess->auth_key.alg,
701 		sess->auth_key.algmode,
702 		sess->iv.length, sess->iv.offset,
703 		sess->digest_length, sess->auth_only_len,
704 		sess->auth_cipher_text);
705 #ifdef RTE_LIB_SECURITY
706 	printf("PDCP session params:\n"
707 		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
708 		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
709 		"\t%d\n\thfn:\t\t%d\n"
710 		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
711 		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
712 		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
713 		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
714 		sess->pdcp.hfn_threshold);
715 #endif
716 	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
717 	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
718 	bufsize = c_cdb.sh_hdr.hi.field.idlen;
719 
720 	printf("cdb = %p\n\n", cdb);
721 	printf("Descriptor size = %d\n", bufsize);
722 	int m;
723 	for (m = 0; m < bufsize; m++)
724 		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
725 
726 	printf("\n");
727 mbuf_dump:
728 	sym_op = op->sym;
729 	if (sym_op->m_src) {
730 		printf("Source mbuf:\n");
731 		rte_pktmbuf_dump(stdout, sym_op->m_src,
732 				 sym_op->m_src->data_len);
733 	}
734 	if (sym_op->m_dst) {
735 		printf("Destination mbuf:\n");
736 		rte_pktmbuf_dump(stdout, sym_op->m_dst,
737 				 sym_op->m_dst->data_len);
738 	}
739 
740 	printf("Session address = %p\ncipher offset: %d, length: %d\n"
741 		"auth offset: %d, length: %d\naead offset: %d, length: %d\n",
742 		sym_op->session, sym_op->cipher.data.offset,
743 		sym_op->cipher.data.length,
744 		sym_op->auth.data.offset, sym_op->auth.data.length,
745 		sym_op->aead.data.offset, sym_op->aead.data.length);
746 	printf("\n");
747 
748 	printf("******************************************************\n");
749 	printf("ctx info:\n");
750 	printf("job->sg[0] output info:\n");
751 	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
752 	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
753 		"\n\tbpid = %d\n\toffset = %d\n",
754 		(uint64_t)sg[0].addr, sg[0].length, sg[0].final,
755 		sg[0].extension, sg[0].bpid, sg[0].offset);
756 	printf("\njob->sg[1] input info:\n");
757 	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
758 	hw_sg_to_cpu(&sg[1]);
759 	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
760 		"\n\tbpid = %d\n\toffset = %d\n",
761 		(uint64_t)sg[1].addr, sg[1].length, sg[1].final,
762 		sg[1].extension, sg[1].bpid, sg[1].offset);
763 
764 	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
765 	if (ctx->ctx_pool)
766 		printf("ctx pool available counts = %d\n",
767 			rte_mempool_avail_count(ctx->ctx_pool));
768 
769 	printf("\nop pool addr = %p\n", op->mempool);
770 	if (op->mempool)
771 		printf("op pool available counts = %d\n",
772 			rte_mempool_avail_count(op->mempool));
773 
774 	printf("********************************************************\n");
775 	printf("Queue data:\n");
776 	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
777 		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts = %d\n"
778 	       "\trx_errs = %d\n\ttx_errs = %d\n\n",
779 		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
780 		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
781 		qp->rx_errs, qp->tx_errs);
782 }
783 
784 /* qp is lockless, should be accessed by only one thread */
785 static int
786 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
787 {
788 	struct qman_fq *fq;
789 	unsigned int pkts = 0;
790 	int num_rx_bufs, ret;
791 	struct qm_dqrr_entry *dq;
792 	uint32_t vdqcr_flags = 0;
793 
794 	fq = &qp->outq;
795 	/*
796 	 * For requests of fewer than four buffers, we provide the exact number
797 	 * of buffers and set the QM_VDQCR_EXACT flag. Otherwise the flag is not
798 	 * set, which can return up to two more buffers than requested, so we
799 	 * request two fewer in that case.
800 	 */
801 	if (nb_ops < 4) {
802 		vdqcr_flags = QM_VDQCR_EXACT;
803 		num_rx_bufs = nb_ops;
804 	} else {
805 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
806 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
807 	}
808 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
809 	if (ret)
810 		return 0;
811 
812 	do {
813 		const struct qm_fd *fd;
814 		struct dpaa_sec_job *job;
815 		struct dpaa_sec_op_ctx *ctx;
816 		struct rte_crypto_op *op;
817 
818 		dq = qman_dequeue(fq);
819 		if (!dq)
820 			continue;
821 
822 		fd = &dq->fd;
823 		/* sg is embedded in an op ctx,
824 		 * sg[0] is for output
825 		 * sg[1] for input
826 		 */
827 		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
828 
829 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
830 		ctx->fd_status = fd->status;
831 		op = ctx->op;
832 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
833 			struct qm_sg_entry *sg_out;
834 			uint32_t len;
835 			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
836 						op->sym->m_src : op->sym->m_dst;
837 
838 			sg_out = &job->sg[0];
839 			hw_sg_to_cpu(sg_out);
840 			len = sg_out->length;
841 			mbuf->pkt_len = len;
842 			while (mbuf->next != NULL) {
843 				len -= mbuf->data_len;
844 				mbuf = mbuf->next;
845 			}
846 			mbuf->data_len = len;
847 		}
848 		if (!ctx->fd_status) {
849 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
850 		} else {
851 			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
852 				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
853 						  ctx->fd_status);
854 				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
855 					dpaa_sec_dump(ctx, qp);
856 			}
857 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
858 		}
859 		ops[pkts++] = op;
860 
861 		/* report op status to sym->op and then free the ctx memory */
862 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
863 
864 		qman_dqrr_consume(fq, dq);
865 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
866 
867 	return pkts;
868 }
869 
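/* Build a compound frame for an auth-only operation on a multi-segment
 * mbuf. For SNOW3G UIA2 and ZUC EIA3 the length/offset are given in bits
 * and converted to bytes here; in the verify (decode) direction the
 * expected digest is copied into the context and appended to the input
 * so the hardware can check it.
 */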
870 static inline struct dpaa_sec_job *
871 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
872 {
873 	struct rte_crypto_sym_op *sym = op->sym;
874 	struct rte_mbuf *mbuf = sym->m_src;
875 	struct dpaa_sec_job *cf;
876 	struct dpaa_sec_op_ctx *ctx;
877 	struct qm_sg_entry *sg, *out_sg, *in_sg;
878 	phys_addr_t start_addr;
879 	uint8_t *old_digest, extra_segs;
880 	int data_len, data_offset;
881 
882 	data_len = sym->auth.data.length;
883 	data_offset = sym->auth.data.offset;
884 
885 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
886 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
887 		if ((data_len & 7) || (data_offset & 7)) {
888 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
889 			return NULL;
890 		}
891 
892 		data_len = data_len >> 3;
893 		data_offset = data_offset >> 3;
894 	}
895 
896 	if (is_decode(ses))
897 		extra_segs = 3;
898 	else
899 		extra_segs = 2;
900 
901 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
902 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
903 				MAX_SG_ENTRIES);
904 		return NULL;
905 	}
906 	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
907 	if (!ctx)
908 		return NULL;
909 
910 	cf = &ctx->job;
911 	ctx->op = op;
912 	old_digest = ctx->digest;
913 
914 	/* output */
915 	out_sg = &cf->sg[0];
916 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
917 	out_sg->length = ses->digest_length;
918 	cpu_to_hw_sg(out_sg);
919 
920 	/* input */
921 	in_sg = &cf->sg[1];
922 	/* need to extend the input to a compound frame */
923 	in_sg->extension = 1;
924 	in_sg->final = 1;
925 	in_sg->length = data_len;
926 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
927 
928 	/* 1st seg */
929 	sg = in_sg + 1;
930 
931 	if (ses->iv.length) {
932 		uint8_t *iv_ptr;
933 
934 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
935 						   ses->iv.offset);
936 
937 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
938 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
939 			sg->length = 12;
940 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
941 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
942 			sg->length = 8;
943 		} else {
944 			sg->length = ses->iv.length;
945 		}
946 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
947 		in_sg->length += sg->length;
948 		cpu_to_hw_sg(sg);
949 		sg++;
950 	}
951 
952 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
953 	sg->offset = data_offset;
954 
955 	if (data_len <= (mbuf->data_len - data_offset)) {
956 		sg->length = data_len;
957 	} else {
958 		sg->length = mbuf->data_len - data_offset;
959 
960 		/* remaining i/p segs */
961 		while ((data_len = data_len - sg->length) &&
962 		       (mbuf = mbuf->next)) {
963 			cpu_to_hw_sg(sg);
964 			sg++;
965 			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
966 			if (data_len > mbuf->data_len)
967 				sg->length = mbuf->data_len;
968 			else
969 				sg->length = data_len;
970 		}
971 	}
972 
973 	if (is_decode(ses)) {
974 		/* Digest verification case */
975 		cpu_to_hw_sg(sg);
976 		sg++;
977 		rte_memcpy(old_digest, sym->auth.digest.data,
978 				ses->digest_length);
979 		start_addr = rte_dpaa_mem_vtop(old_digest);
980 		qm_sg_entry_set64(sg, start_addr);
981 		sg->length = ses->digest_length;
982 		in_sg->length += ses->digest_length;
983 	}
984 	sg->final = 1;
985 	cpu_to_hw_sg(sg);
986 	cpu_to_hw_sg(in_sg);
987 
988 	return cf;
989 }
990 
991 /**
992  * packet looks like:
993  *		|<----data_len------->|
994  *    |ip_header|ah_header|icv|payload|
995  *              ^
996  *		|
997  *	   mbuf->pkt.data
998  */
999 static inline struct dpaa_sec_job *
1000 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1001 {
1002 	struct rte_crypto_sym_op *sym = op->sym;
1003 	struct rte_mbuf *mbuf = sym->m_src;
1004 	struct dpaa_sec_job *cf;
1005 	struct dpaa_sec_op_ctx *ctx;
1006 	struct qm_sg_entry *sg, *in_sg;
1007 	rte_iova_t start_addr;
1008 	uint8_t *old_digest;
1009 	int data_len, data_offset;
1010 
1011 	data_len = sym->auth.data.length;
1012 	data_offset = sym->auth.data.offset;
1013 
1014 	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1015 	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1016 		if ((data_len & 7) || (data_offset & 7)) {
1017 			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
1018 			return NULL;
1019 		}
1020 
1021 		data_len = data_len >> 3;
1022 		data_offset = data_offset >> 3;
1023 	}
1024 
1025 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1026 	if (!ctx)
1027 		return NULL;
1028 
1029 	cf = &ctx->job;
1030 	ctx->op = op;
1031 	old_digest = ctx->digest;
1032 
1033 	start_addr = rte_pktmbuf_iova(mbuf);
1034 	/* output */
1035 	sg = &cf->sg[0];
1036 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1037 	sg->length = ses->digest_length;
1038 	cpu_to_hw_sg(sg);
1039 
1040 	/* input */
1041 	in_sg = &cf->sg[1];
1042 	/* need to extend the input to a compound frame */
1043 	in_sg->extension = 1;
1044 	in_sg->final = 1;
1045 	in_sg->length = data_len;
1046 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1047 	sg = &cf->sg[2];
1048 
1049 	if (ses->iv.length) {
1050 		uint8_t *iv_ptr;
1051 
1052 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1053 						   ses->iv.offset);
1054 
1055 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1056 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1057 			sg->length = 12;
1058 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1059 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1060 			sg->length = 8;
1061 		} else {
1062 			sg->length = ses->iv.length;
1063 		}
1064 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
1065 		in_sg->length += sg->length;
1066 		cpu_to_hw_sg(sg);
1067 		sg++;
1068 	}
1069 
1070 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1071 	sg->offset = data_offset;
1072 	sg->length = data_len;
1073 
1074 	if (is_decode(ses)) {
1075 		/* Digest verification case */
1076 		cpu_to_hw_sg(sg);
1077 		/* hash result or digest, save digest first */
1078 		rte_memcpy(old_digest, sym->auth.digest.data,
1079 				ses->digest_length);
1080 		/* let's check digest by hw */
1081 		start_addr = rte_dpaa_mem_vtop(old_digest);
1082 		sg++;
1083 		qm_sg_entry_set64(sg, start_addr);
1084 		sg->length = ses->digest_length;
1085 		in_sg->length += ses->digest_length;
1086 	}
1087 	sg->final = 1;
1088 	cpu_to_hw_sg(sg);
1089 	cpu_to_hw_sg(in_sg);
1090 
1091 	return cf;
1092 }
1093 
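/* Build a compound frame for a cipher-only operation on multi-segment
 * mbufs: sg[0] chains the output segments, sg[1] chains the IV followed
 * by the input segments.
 */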
1094 static inline struct dpaa_sec_job *
1095 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1096 {
1097 	struct rte_crypto_sym_op *sym = op->sym;
1098 	struct dpaa_sec_job *cf;
1099 	struct dpaa_sec_op_ctx *ctx;
1100 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1101 	struct rte_mbuf *mbuf;
1102 	uint8_t req_segs;
1103 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1104 			ses->iv.offset);
1105 	int data_len, data_offset;
1106 
1107 	data_len = sym->cipher.data.length;
1108 	data_offset = sym->cipher.data.offset;
1109 
1110 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1111 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1112 		if ((data_len & 7) || (data_offset & 7)) {
1113 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1114 			return NULL;
1115 		}
1116 
1117 		data_len = data_len >> 3;
1118 		data_offset = data_offset >> 3;
1119 	}
1120 
1121 	if (sym->m_dst) {
1122 		mbuf = sym->m_dst;
1123 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1124 	} else {
1125 		mbuf = sym->m_src;
1126 		req_segs = mbuf->nb_segs * 2 + 3;
1127 	}
1128 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1129 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1130 				MAX_SG_ENTRIES);
1131 		return NULL;
1132 	}
1133 
1134 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1135 	if (!ctx)
1136 		return NULL;
1137 
1138 	cf = &ctx->job;
1139 	ctx->op = op;
1140 
1141 	/* output */
1142 	out_sg = &cf->sg[0];
1143 	out_sg->extension = 1;
1144 	out_sg->length = data_len;
1145 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1146 	cpu_to_hw_sg(out_sg);
1147 
1148 	/* 1st seg */
1149 	sg = &cf->sg[2];
1150 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1151 	sg->length = mbuf->data_len - data_offset;
1152 	sg->offset = data_offset;
1153 
1154 	/* Successive segs */
1155 	mbuf = mbuf->next;
1156 	while (mbuf) {
1157 		cpu_to_hw_sg(sg);
1158 		sg++;
1159 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1160 		sg->length = mbuf->data_len;
1161 		mbuf = mbuf->next;
1162 	}
1163 	sg->final = 1;
1164 	cpu_to_hw_sg(sg);
1165 
1166 	/* input */
1167 	mbuf = sym->m_src;
1168 	in_sg = &cf->sg[1];
1169 	in_sg->extension = 1;
1170 	in_sg->final = 1;
1171 	in_sg->length = data_len + ses->iv.length;
1172 
1173 	sg++;
1174 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1175 	cpu_to_hw_sg(in_sg);
1176 
1177 	/* IV */
1178 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1179 	sg->length = ses->iv.length;
1180 	cpu_to_hw_sg(sg);
1181 
1182 	/* 1st seg */
1183 	sg++;
1184 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1185 	sg->length = mbuf->data_len - data_offset;
1186 	sg->offset = data_offset;
1187 
1188 	/* Successive segs */
1189 	mbuf = mbuf->next;
1190 	while (mbuf) {
1191 		cpu_to_hw_sg(sg);
1192 		sg++;
1193 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1194 		sg->length = mbuf->data_len;
1195 		mbuf = mbuf->next;
1196 	}
1197 	sg->final = 1;
1198 	cpu_to_hw_sg(sg);
1199 
1200 	return cf;
1201 }
1202 
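/* Build a compound frame for a cipher-only operation on contiguous
 * mbufs: a single output entry and an input chain of IV + data.
 */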
1203 static inline struct dpaa_sec_job *
1204 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1205 {
1206 	struct rte_crypto_sym_op *sym = op->sym;
1207 	struct dpaa_sec_job *cf;
1208 	struct dpaa_sec_op_ctx *ctx;
1209 	struct qm_sg_entry *sg;
1210 	rte_iova_t src_start_addr, dst_start_addr;
1211 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1212 			ses->iv.offset);
1213 	int data_len, data_offset;
1214 
1215 	data_len = sym->cipher.data.length;
1216 	data_offset = sym->cipher.data.offset;
1217 
1218 	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1219 		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1220 		if ((data_len & 7) || (data_offset & 7)) {
1221 			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1222 			return NULL;
1223 		}
1224 
1225 		data_len = data_len >> 3;
1226 		data_offset = data_offset >> 3;
1227 	}
1228 
1229 	ctx = dpaa_sec_alloc_ctx(ses, 4);
1230 	if (!ctx)
1231 		return NULL;
1232 
1233 	cf = &ctx->job;
1234 	ctx->op = op;
1235 
1236 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1237 
1238 	if (sym->m_dst)
1239 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1240 	else
1241 		dst_start_addr = src_start_addr;
1242 
1243 	/* output */
1244 	sg = &cf->sg[0];
1245 	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1246 	sg->length = data_len + ses->iv.length;
1247 	cpu_to_hw_sg(sg);
1248 
1249 	/* input */
1250 	sg = &cf->sg[1];
1251 
1252 	/* need to extend the input to a compound frame */
1253 	sg->extension = 1;
1254 	sg->final = 1;
1255 	sg->length = data_len + ses->iv.length;
1256 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1257 	cpu_to_hw_sg(sg);
1258 
1259 	sg = &cf->sg[2];
1260 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1261 	sg->length = ses->iv.length;
1262 	cpu_to_hw_sg(sg);
1263 
1264 	sg++;
1265 	qm_sg_entry_set64(sg, src_start_addr + data_offset);
1266 	sg->length = data_len;
1267 	sg->final = 1;
1268 	cpu_to_hw_sg(sg);
1269 
1270 	return cf;
1271 }
1272 
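/* Build a compound frame for an AEAD (GCM) operation on multi-segment
 * mbufs. The input chain carries IV, optional AAD and payload; on encode
 * the output chain gets an extra entry for the generated digest, on
 * decode the received digest is appended to the input for verification.
 */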
1273 static inline struct dpaa_sec_job *
1274 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1275 {
1276 	struct rte_crypto_sym_op *sym = op->sym;
1277 	struct dpaa_sec_job *cf;
1278 	struct dpaa_sec_op_ctx *ctx;
1279 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1280 	struct rte_mbuf *mbuf;
1281 	uint8_t req_segs;
1282 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1283 			ses->iv.offset);
1284 
1285 	if (sym->m_dst) {
1286 		mbuf = sym->m_dst;
1287 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1288 	} else {
1289 		mbuf = sym->m_src;
1290 		req_segs = mbuf->nb_segs * 2 + 4;
1291 	}
1292 
1293 	if (ses->auth_only_len)
1294 		req_segs++;
1295 
1296 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1297 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1298 				MAX_SG_ENTRIES);
1299 		return NULL;
1300 	}
1301 
1302 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1303 	if (!ctx)
1304 		return NULL;
1305 
1306 	cf = &ctx->job;
1307 	ctx->op = op;
1308 
1309 	rte_prefetch0(cf->sg);
1310 
1311 	/* output */
1312 	out_sg = &cf->sg[0];
1313 	out_sg->extension = 1;
1314 	if (is_encode(ses))
1315 		out_sg->length = sym->aead.data.length + ses->digest_length;
1316 	else
1317 		out_sg->length = sym->aead.data.length;
1318 
1319 	/* output sg entries */
1320 	sg = &cf->sg[2];
1321 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1322 	cpu_to_hw_sg(out_sg);
1323 
1324 	/* 1st seg */
1325 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1326 	sg->length = mbuf->data_len - sym->aead.data.offset;
1327 	sg->offset = sym->aead.data.offset;
1328 
1329 	/* Successive segs */
1330 	mbuf = mbuf->next;
1331 	while (mbuf) {
1332 		cpu_to_hw_sg(sg);
1333 		sg++;
1334 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1335 		sg->length = mbuf->data_len;
1336 		mbuf = mbuf->next;
1337 	}
1338 	sg->length -= ses->digest_length;
1339 
1340 	if (is_encode(ses)) {
1341 		cpu_to_hw_sg(sg);
1342 		/* set auth output */
1343 		sg++;
1344 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1345 		sg->length = ses->digest_length;
1346 	}
1347 	sg->final = 1;
1348 	cpu_to_hw_sg(sg);
1349 
1350 	/* input */
1351 	mbuf = sym->m_src;
1352 	in_sg = &cf->sg[1];
1353 	in_sg->extension = 1;
1354 	in_sg->final = 1;
1355 	if (is_encode(ses))
1356 		in_sg->length = ses->iv.length + sym->aead.data.length
1357 							+ ses->auth_only_len;
1358 	else
1359 		in_sg->length = ses->iv.length + sym->aead.data.length
1360 				+ ses->auth_only_len + ses->digest_length;
1361 
1362 	/* input sg entries */
1363 	sg++;
1364 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1365 	cpu_to_hw_sg(in_sg);
1366 
1367 	/* 1st seg IV */
1368 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1369 	sg->length = ses->iv.length;
1370 	cpu_to_hw_sg(sg);
1371 
1372 	/* 2nd seg auth only */
1373 	if (ses->auth_only_len) {
1374 		sg++;
1375 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1376 		sg->length = ses->auth_only_len;
1377 		cpu_to_hw_sg(sg);
1378 	}
1379 
1380 	/* 3rd seg */
1381 	sg++;
1382 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1383 	sg->length = mbuf->data_len - sym->aead.data.offset;
1384 	sg->offset = sym->aead.data.offset;
1385 
1386 	/* Successive segs */
1387 	mbuf = mbuf->next;
1388 	while (mbuf) {
1389 		cpu_to_hw_sg(sg);
1390 		sg++;
1391 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1392 		sg->length = mbuf->data_len;
1393 		mbuf = mbuf->next;
1394 	}
1395 
1396 	if (is_decode(ses)) {
1397 		cpu_to_hw_sg(sg);
1398 		sg++;
1399 		memcpy(ctx->digest, sym->aead.digest.data,
1400 			ses->digest_length);
1401 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1402 		sg->length = ses->digest_length;
1403 	}
1404 	sg->final = 1;
1405 	cpu_to_hw_sg(sg);
1406 
1407 	return cf;
1408 }
1409 
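/* Contiguous-mbuf variant of the AEAD (GCM) job: same layout as the SG
 * version, with the payload as a single entry on each side.
 */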
1410 static inline struct dpaa_sec_job *
1411 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1412 {
1413 	struct rte_crypto_sym_op *sym = op->sym;
1414 	struct dpaa_sec_job *cf;
1415 	struct dpaa_sec_op_ctx *ctx;
1416 	struct qm_sg_entry *sg;
1417 	uint32_t length = 0;
1418 	rte_iova_t src_start_addr, dst_start_addr;
1419 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1420 			ses->iv.offset);
1421 
1422 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1423 
1424 	if (sym->m_dst)
1425 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1426 	else
1427 		dst_start_addr = src_start_addr;
1428 
1429 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1430 	if (!ctx)
1431 		return NULL;
1432 
1433 	cf = &ctx->job;
1434 	ctx->op = op;
1435 
1436 	/* input */
1437 	rte_prefetch0(cf->sg);
1438 	sg = &cf->sg[2];
1439 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1440 	if (is_encode(ses)) {
1441 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1442 		sg->length = ses->iv.length;
1443 		length += sg->length;
1444 		cpu_to_hw_sg(sg);
1445 
1446 		sg++;
1447 		if (ses->auth_only_len) {
1448 			qm_sg_entry_set64(sg,
1449 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1450 			sg->length = ses->auth_only_len;
1451 			length += sg->length;
1452 			cpu_to_hw_sg(sg);
1453 			sg++;
1454 		}
1455 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1456 		sg->length = sym->aead.data.length;
1457 		length += sg->length;
1458 		sg->final = 1;
1459 		cpu_to_hw_sg(sg);
1460 	} else {
1461 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1462 		sg->length = ses->iv.length;
1463 		length += sg->length;
1464 		cpu_to_hw_sg(sg);
1465 
1466 		sg++;
1467 		if (ses->auth_only_len) {
1468 			qm_sg_entry_set64(sg,
1469 					  rte_dpaa_mem_vtop(sym->aead.aad.data));
1470 			sg->length = ses->auth_only_len;
1471 			length += sg->length;
1472 			cpu_to_hw_sg(sg);
1473 			sg++;
1474 		}
1475 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1476 		sg->length = sym->aead.data.length;
1477 		length += sg->length;
1478 		cpu_to_hw_sg(sg);
1479 
1480 		memcpy(ctx->digest, sym->aead.digest.data,
1481 		       ses->digest_length);
1482 		sg++;
1483 
1484 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1485 		sg->length = ses->digest_length;
1486 		length += sg->length;
1487 		sg->final = 1;
1488 		cpu_to_hw_sg(sg);
1489 	}
1490 	/* input compound frame */
1491 	cf->sg[1].length = length;
1492 	cf->sg[1].extension = 1;
1493 	cf->sg[1].final = 1;
1494 	cpu_to_hw_sg(&cf->sg[1]);
1495 
1496 	/* output */
1497 	sg++;
1498 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1499 	qm_sg_entry_set64(sg,
1500 		dst_start_addr + sym->aead.data.offset);
1501 	sg->length = sym->aead.data.length;
1502 	length = sg->length;
1503 	if (is_encode(ses)) {
1504 		cpu_to_hw_sg(sg);
1505 		/* set auth output */
1506 		sg++;
1507 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1508 		sg->length = ses->digest_length;
1509 		length += sg->length;
1510 	}
1511 	sg->final = 1;
1512 	cpu_to_hw_sg(sg);
1513 
1514 	/* output compound frame */
1515 	cf->sg[0].length = length;
1516 	cf->sg[0].extension = 1;
1517 	cpu_to_hw_sg(&cf->sg[0]);
1518 
1519 	return cf;
1520 }
1521 
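/* Build a compound frame for a chained cipher+auth operation on
 * multi-segment mbufs. On encode the digest is produced at the end of
 * the output chain; on decode the received digest is appended to the
 * input chain for verification.
 */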
1522 static inline struct dpaa_sec_job *
1523 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1524 {
1525 	struct rte_crypto_sym_op *sym = op->sym;
1526 	struct dpaa_sec_job *cf;
1527 	struct dpaa_sec_op_ctx *ctx;
1528 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1529 	struct rte_mbuf *mbuf;
1530 	uint8_t req_segs;
1531 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1532 			ses->iv.offset);
1533 
1534 	if (sym->m_dst) {
1535 		mbuf = sym->m_dst;
1536 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1537 	} else {
1538 		mbuf = sym->m_src;
1539 		req_segs = mbuf->nb_segs * 2 + 4;
1540 	}
1541 
1542 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1543 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1544 				MAX_SG_ENTRIES);
1545 		return NULL;
1546 	}
1547 
1548 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1549 	if (!ctx)
1550 		return NULL;
1551 
1552 	cf = &ctx->job;
1553 	ctx->op = op;
1554 
1555 	rte_prefetch0(cf->sg);
1556 
1557 	/* output */
1558 	out_sg = &cf->sg[0];
1559 	out_sg->extension = 1;
1560 	if (is_encode(ses))
1561 		out_sg->length = sym->auth.data.length + ses->digest_length;
1562 	else
1563 		out_sg->length = sym->auth.data.length;
1564 
1565 	/* output sg entries */
1566 	sg = &cf->sg[2];
1567 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1568 	cpu_to_hw_sg(out_sg);
1569 
1570 	/* 1st seg */
1571 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1572 	sg->length = mbuf->data_len - sym->auth.data.offset;
1573 	sg->offset = sym->auth.data.offset;
1574 
1575 	/* Successive segs */
1576 	mbuf = mbuf->next;
1577 	while (mbuf) {
1578 		cpu_to_hw_sg(sg);
1579 		sg++;
1580 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1581 		sg->length = mbuf->data_len;
1582 		mbuf = mbuf->next;
1583 	}
1584 	sg->length -= ses->digest_length;
1585 
1586 	if (is_encode(ses)) {
1587 		cpu_to_hw_sg(sg);
1588 		/* set auth output */
1589 		sg++;
1590 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1591 		sg->length = ses->digest_length;
1592 	}
1593 	sg->final = 1;
1594 	cpu_to_hw_sg(sg);
1595 
1596 	/* input */
1597 	mbuf = sym->m_src;
1598 	in_sg = &cf->sg[1];
1599 	in_sg->extension = 1;
1600 	in_sg->final = 1;
1601 	if (is_encode(ses))
1602 		in_sg->length = ses->iv.length + sym->auth.data.length;
1603 	else
1604 		in_sg->length = ses->iv.length + sym->auth.data.length
1605 						+ ses->digest_length;
1606 
1607 	/* input sg entries */
1608 	sg++;
1609 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1610 	cpu_to_hw_sg(in_sg);
1611 
1612 	/* 1st seg IV */
1613 	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1614 	sg->length = ses->iv.length;
1615 	cpu_to_hw_sg(sg);
1616 
1617 	/* 2nd seg */
1618 	sg++;
1619 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1620 	sg->length = mbuf->data_len - sym->auth.data.offset;
1621 	sg->offset = sym->auth.data.offset;
1622 
1623 	/* Successive segs */
1624 	mbuf = mbuf->next;
1625 	while (mbuf) {
1626 		cpu_to_hw_sg(sg);
1627 		sg++;
1628 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1629 		sg->length = mbuf->data_len;
1630 		mbuf = mbuf->next;
1631 	}
1632 
1633 	sg->length -= ses->digest_length;
1634 	if (is_decode(ses)) {
1635 		cpu_to_hw_sg(sg);
1636 		sg++;
1637 		memcpy(ctx->digest, sym->auth.digest.data,
1638 			ses->digest_length);
1639 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1640 		sg->length = ses->digest_length;
1641 	}
1642 	sg->final = 1;
1643 	cpu_to_hw_sg(sg);
1644 
1645 	return cf;
1646 }
1647 
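/* Contiguous-mbuf variant of the chained cipher+auth job. */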
1648 static inline struct dpaa_sec_job *
1649 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1650 {
1651 	struct rte_crypto_sym_op *sym = op->sym;
1652 	struct dpaa_sec_job *cf;
1653 	struct dpaa_sec_op_ctx *ctx;
1654 	struct qm_sg_entry *sg;
1655 	rte_iova_t src_start_addr, dst_start_addr;
1656 	uint32_t length = 0;
1657 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1658 			ses->iv.offset);
1659 
1660 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1661 	if (sym->m_dst)
1662 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1663 	else
1664 		dst_start_addr = src_start_addr;
1665 
1666 	ctx = dpaa_sec_alloc_ctx(ses, 7);
1667 	if (!ctx)
1668 		return NULL;
1669 
1670 	cf = &ctx->job;
1671 	ctx->op = op;
1672 
1673 	/* input */
1674 	rte_prefetch0(cf->sg);
1675 	sg = &cf->sg[2];
1676 	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1677 	if (is_encode(ses)) {
1678 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1679 		sg->length = ses->iv.length;
1680 		length += sg->length;
1681 		cpu_to_hw_sg(sg);
1682 
1683 		sg++;
1684 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1685 		sg->length = sym->auth.data.length;
1686 		length += sg->length;
1687 		sg->final = 1;
1688 		cpu_to_hw_sg(sg);
1689 	} else {
1690 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1691 		sg->length = ses->iv.length;
1692 		length += sg->length;
1693 		cpu_to_hw_sg(sg);
1694 
1695 		sg++;
1696 
1697 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1698 		sg->length = sym->auth.data.length;
1699 		length += sg->length;
1700 		cpu_to_hw_sg(sg);
1701 
1702 		memcpy(ctx->digest, sym->auth.digest.data,
1703 		       ses->digest_length);
1704 		sg++;
1705 
1706 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1707 		sg->length = ses->digest_length;
1708 		length += sg->length;
1709 		sg->final = 1;
1710 		cpu_to_hw_sg(sg);
1711 	}
1712 	/* input compound frame */
1713 	cf->sg[1].length = length;
1714 	cf->sg[1].extension = 1;
1715 	cf->sg[1].final = 1;
1716 	cpu_to_hw_sg(&cf->sg[1]);
1717 
1718 	/* output */
1719 	sg++;
1720 	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1721 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1722 	sg->length = sym->cipher.data.length;
1723 	length = sg->length;
1724 	if (is_encode(ses)) {
1725 		cpu_to_hw_sg(sg);
1726 		/* set auth output */
1727 		sg++;
1728 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1729 		sg->length = ses->digest_length;
1730 		length += sg->length;
1731 	}
1732 	sg->final = 1;
1733 	cpu_to_hw_sg(sg);
1734 
1735 	/* output compound frame */
1736 	cf->sg[0].length = length;
1737 	cf->sg[0].extension = 1;
1738 	cpu_to_hw_sg(&cf->sg[0]);
1739 
1740 	return cf;
1741 }
1742 
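/* Build a protocol offload (IPsec/PDCP) job for a contiguous mbuf: the
 * whole packet is passed to SEC and the output entry spans the full
 * buffer so the hardware can expand or shrink the frame.
 */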
1743 static inline struct dpaa_sec_job *
1744 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1745 {
1746 	struct rte_crypto_sym_op *sym = op->sym;
1747 	struct dpaa_sec_job *cf;
1748 	struct dpaa_sec_op_ctx *ctx;
1749 	struct qm_sg_entry *sg;
1750 	phys_addr_t src_start_addr, dst_start_addr;
1751 
1752 	ctx = dpaa_sec_alloc_ctx(ses, 2);
1753 	if (!ctx)
1754 		return NULL;
1755 	cf = &ctx->job;
1756 	ctx->op = op;
1757 
1758 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1759 
1760 	if (sym->m_dst)
1761 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1762 	else
1763 		dst_start_addr = src_start_addr;
1764 
1765 	/* input */
1766 	sg = &cf->sg[1];
1767 	qm_sg_entry_set64(sg, src_start_addr);
1768 	sg->length = sym->m_src->pkt_len;
1769 	sg->final = 1;
1770 	cpu_to_hw_sg(sg);
1771 
1772 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1773 	/* output */
1774 	sg = &cf->sg[0];
1775 	qm_sg_entry_set64(sg, dst_start_addr);
1776 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1777 	cpu_to_hw_sg(sg);
1778 
1779 	return cf;
1780 }
1781 
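/* Multi-segment (scatter-gather) variant of the protocol offload job. */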
1782 static inline struct dpaa_sec_job *
1783 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1784 {
1785 	struct rte_crypto_sym_op *sym = op->sym;
1786 	struct dpaa_sec_job *cf;
1787 	struct dpaa_sec_op_ctx *ctx;
1788 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1789 	struct rte_mbuf *mbuf;
1790 	uint8_t req_segs;
1791 	uint32_t in_len = 0, out_len = 0;
1792 
1793 	if (sym->m_dst)
1794 		mbuf = sym->m_dst;
1795 	else
1796 		mbuf = sym->m_src;
1797 
1798 	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1799 	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1800 		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1801 				MAX_SG_ENTRIES);
1802 		return NULL;
1803 	}
1804 
1805 	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1806 	if (!ctx)
1807 		return NULL;
1808 	cf = &ctx->job;
1809 	ctx->op = op;
1810 	/* output */
1811 	out_sg = &cf->sg[0];
1812 	out_sg->extension = 1;
1813 	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1814 
1815 	/* 1st seg */
1816 	sg = &cf->sg[2];
1817 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1818 	sg->offset = 0;
1819 
1820 	/* Successive segs */
1821 	while (mbuf->next) {
1822 		sg->length = mbuf->data_len;
1823 		out_len += sg->length;
1824 		mbuf = mbuf->next;
1825 		cpu_to_hw_sg(sg);
1826 		sg++;
1827 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1828 		sg->offset = 0;
1829 	}
1830 	sg->length = mbuf->buf_len - mbuf->data_off;
1831 	out_len += sg->length;
1832 	sg->final = 1;
1833 	cpu_to_hw_sg(sg);
1834 
1835 	out_sg->length = out_len;
1836 	cpu_to_hw_sg(out_sg);
1837 
1838 	/* input */
1839 	mbuf = sym->m_src;
1840 	in_sg = &cf->sg[1];
1841 	in_sg->extension = 1;
1842 	in_sg->final = 1;
1843 	in_len = mbuf->data_len;
1844 
1845 	sg++;
1846 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1847 
1848 	/* 1st seg */
1849 	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1850 	sg->length = mbuf->data_len;
1851 	sg->offset = 0;
1852 
1853 	/* Successive segs */
1854 	mbuf = mbuf->next;
1855 	while (mbuf) {
1856 		cpu_to_hw_sg(sg);
1857 		sg++;
1858 		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1859 		sg->length = mbuf->data_len;
1860 		sg->offset = 0;
1861 		in_len += sg->length;
1862 		mbuf = mbuf->next;
1863 	}
1864 	sg->final = 1;
1865 	cpu_to_hw_sg(sg);
1866 
1867 	in_sg->length = in_len;
1868 	cpu_to_hw_sg(in_sg);
1869 
1870 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1871 
1872 	return cf;
1873 }
1874 
1875 static uint16_t
1876 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1877 		       uint16_t nb_ops)
1878 {
1879 	/* Transmit the frames to the given device and queue pair */
1880 	uint32_t loop;
1881 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1882 	uint16_t num_tx = 0;
1883 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1884 	uint32_t frames_to_send;
1885 	struct rte_crypto_op *op;
1886 	struct dpaa_sec_job *cf;
1887 	dpaa_sec_session *ses;
1888 	uint16_t auth_hdr_len, auth_tail_len;
1889 	uint32_t index, flags[DPAA_SEC_BURST] = {0};
1890 	struct qman_fq *inq[DPAA_SEC_BURST];
1891 
1892 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1893 		if (rte_dpaa_portal_init((void *)0)) {
1894 			DPAA_SEC_ERR("Failure in affining portal");
1895 			return 0;
1896 		}
1897 	}
1898 
1899 	while (nb_ops) {
1900 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1901 				DPAA_SEC_BURST : nb_ops;
1902 		for (loop = 0; loop < frames_to_send; loop++) {
1903 			op = *(ops++);
1904 			if (*dpaa_seqn(op->sym->m_src) != 0) {
1905 				index = *dpaa_seqn(op->sym->m_src) - 1;
1906 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1907 					/* QM_EQCR_DCA_IDXMASK = 0x0f */
1908 					flags[loop] = ((index & 0x0f) << 8);
1909 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1910 					DPAA_PER_LCORE_DQRR_SIZE--;
1911 					DPAA_PER_LCORE_DQRR_HELD &=
1912 								~(1 << index);
1913 				}
1914 			}
1915 
1916 			switch (op->sess_type) {
1917 			case RTE_CRYPTO_OP_WITH_SESSION:
1918 				ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1919 				break;
1920 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1921 				ses = SECURITY_GET_SESS_PRIV(op->sym->session);
1922 				break;
1923 			default:
1924 				DPAA_SEC_DP_ERR(
1925 					"sessionless crypto op not supported");
1926 				frames_to_send = loop;
1927 				nb_ops = loop;
1928 				goto send_pkts;
1929 			}
1930 
1931 			if (!ses) {
1932 				DPAA_SEC_DP_ERR("session not available");
1933 				frames_to_send = loop;
1934 				nb_ops = loop;
1935 				goto send_pkts;
1936 			}
1937 
1938 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1939 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1940 					frames_to_send = loop;
1941 					nb_ops = loop;
1942 					goto send_pkts;
1943 				}
1944 			} else if (unlikely(ses->qp[rte_lcore_id() %
1945 						MAX_DPAA_CORES] != qp)) {
1946 				DPAA_SEC_DP_ERR("Old: sess->qp = %p"
1947 					" New qp = %p\n",
1948 					ses->qp[rte_lcore_id() %
1949 					MAX_DPAA_CORES], qp);
1950 				frames_to_send = loop;
1951 				nb_ops = loop;
1952 				goto send_pkts;
1953 			}
1954 
1955 			auth_hdr_len = op->sym->auth.data.length -
1956 						op->sym->cipher.data.length;
1957 			auth_tail_len = 0;
1958 
1959 			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1960 				  ((op->sym->m_dst == NULL) ||
1961 				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1962 				switch (ses->ctxt) {
1963 				case DPAA_SEC_PDCP:
1964 				case DPAA_SEC_IPSEC:
1965 					cf = build_proto(op, ses);
1966 					break;
1967 				case DPAA_SEC_AUTH:
1968 					cf = build_auth_only(op, ses);
1969 					break;
1970 				case DPAA_SEC_CIPHER:
1971 					cf = build_cipher_only(op, ses);
1972 					break;
1973 				case DPAA_SEC_AEAD:
1974 					cf = build_cipher_auth_gcm(op, ses);
1975 					auth_hdr_len = ses->auth_only_len;
1976 					break;
1977 				case DPAA_SEC_CIPHER_HASH:
1978 					auth_hdr_len =
1979 						op->sym->cipher.data.offset
1980 						- op->sym->auth.data.offset;
1981 					auth_tail_len =
1982 						op->sym->auth.data.length
1983 						- op->sym->cipher.data.length
1984 						- auth_hdr_len;
1985 					cf = build_cipher_auth(op, ses);
1986 					break;
1987 				default:
1988 					DPAA_SEC_DP_ERR("operation not supported");
1989 					frames_to_send = loop;
1990 					nb_ops = loop;
1991 					goto send_pkts;
1992 				}
1993 			} else {
1994 				switch (ses->ctxt) {
1995 				case DPAA_SEC_PDCP:
1996 				case DPAA_SEC_IPSEC:
1997 					cf = build_proto_sg(op, ses);
1998 					break;
1999 				case DPAA_SEC_AUTH:
2000 					cf = build_auth_only_sg(op, ses);
2001 					break;
2002 				case DPAA_SEC_CIPHER:
2003 					cf = build_cipher_only_sg(op, ses);
2004 					break;
2005 				case DPAA_SEC_AEAD:
2006 					cf = build_cipher_auth_gcm_sg(op, ses);
2007 					auth_hdr_len = ses->auth_only_len;
2008 					break;
2009 				case DPAA_SEC_CIPHER_HASH:
2010 					auth_hdr_len =
2011 						op->sym->cipher.data.offset
2012 						- op->sym->auth.data.offset;
2013 					auth_tail_len =
2014 						op->sym->auth.data.length
2015 						- op->sym->cipher.data.length
2016 						- auth_hdr_len;
2017 					cf = build_cipher_auth_sg(op, ses);
2018 					break;
2019 				default:
2020 					DPAA_SEC_DP_ERR("operation not supported");
2021 					frames_to_send = loop;
2022 					nb_ops = loop;
2023 					goto send_pkts;
2024 				}
2025 			}
2026 			if (unlikely(!cf)) {
2027 				frames_to_send = loop;
2028 				nb_ops = loop;
2029 				goto send_pkts;
2030 			}
2031 
2032 			fd = &fds[loop];
2033 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2034 			fd->opaque_addr = 0;
2035 			fd->cmd = 0;
2036 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2037 			fd->_format1 = qm_fd_compound;
2038 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
2039 
2040 			/* auth_only_len is set to 0 in the descriptor and is
2041 			 * overwritten here via fd.cmd, which updates DPOVRD:
2042 			 * bit 31 enables it; tail len at bit 16, hdr len in low 16 bits.
2043 			 */
2044 			if (auth_hdr_len || auth_tail_len) {
2045 				fd->cmd = 0x80000000;
2046 				fd->cmd |=
2047 					((auth_tail_len << 16) | auth_hdr_len);
2048 			}
2049 
2050 			/* In case of PDCP, the per-packet HFN is stored in
2051 			 * the mbuf private area after sym_op.
2052 			 */
2053 			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2054 				fd->cmd = 0x80000000 |
2055 					*((uint32_t *)((uint8_t *)op +
2056 					ses->pdcp.hfn_ovd_offset));
2057 				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2058 					*((uint32_t *)((uint8_t *)op +
2059 					ses->pdcp.hfn_ovd_offset)),
2060 					ses->pdcp.hfn_ovd);
2061 			}
2062 		}
2063 send_pkts:
2064 		loop = 0;
2065 		while (loop < frames_to_send) {
2066 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2067 					&flags[loop], frames_to_send - loop);
2068 		}
2069 		nb_ops -= frames_to_send;
2070 		num_tx += frames_to_send;
2071 	}
2072 
2073 	dpaa_qp->tx_pkts += num_tx;
2074 	dpaa_qp->tx_errs += nb_ops - num_tx;
2075 
2076 	return num_tx;
2077 }
2078 
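/*
 * Burst dequeue: affine a portal to the lcore if needed, then poll the queue
 * pair's outbound FQ via dpaa_sec_deq() and update the per-qp rx counters.
 */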
2079 static uint16_t
2080 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2081 		       uint16_t nb_ops)
2082 {
2083 	uint16_t num_rx;
2084 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2085 
2086 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2087 		if (rte_dpaa_portal_init((void *)0)) {
2088 			DPAA_SEC_ERR("Failure in affining portal");
2089 			return 0;
2090 		}
2091 	}
2092 
2093 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2094 
2095 	dpaa_qp->rx_pkts += num_rx;
2096 	dpaa_qp->rx_errs += nb_ops - num_rx;
2097 
2098 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2099 
2100 	return num_rx;
2101 }
2102 
2103 /** Release queue pair */
2104 static int
2105 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2106 			    uint16_t qp_id)
2107 {
2108 	struct dpaa_sec_dev_private *internals;
2109 	struct dpaa_sec_qp *qp = NULL;
2110 
2111 	PMD_INIT_FUNC_TRACE();
2112 
2113 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2114 
2115 	internals = dev->data->dev_private;
2116 	if (qp_id >= internals->max_nb_queue_pairs) {
2117 		DPAA_SEC_ERR("Max supported qpid %d",
2118 			     internals->max_nb_queue_pairs);
2119 		return -EINVAL;
2120 	}
2121 
2122 	qp = &internals->qps[qp_id];
2123 	rte_mempool_free(qp->ctx_pool);
	qp->ctx_pool = NULL; /* let a later queue_pair_setup re-create it */
2124 	qp->internals = NULL;
2125 	dev->data->queue_pairs[qp_id] = NULL;
2126 
2127 	return 0;
2128 }
2129 
2130 /** Setup a queue pair */
2131 static int
2132 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2133 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2134 		__rte_unused int socket_id)
2135 {
2136 	struct dpaa_sec_dev_private *internals;
2137 	struct dpaa_sec_qp *qp = NULL;
2138 	char str[20];
2139 
2140 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2141 
2142 	internals = dev->data->dev_private;
2143 	if (qp_id >= internals->max_nb_queue_pairs) {
2144 		DPAA_SEC_ERR("Max supported qpid %d",
2145 			     internals->max_nb_queue_pairs);
2146 		return -EINVAL;
2147 	}
2148 
2149 	qp = &internals->qps[qp_id];
2150 	qp->internals = internals;
2151 	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2152 			dev->data->dev_id, qp_id);
2153 	if (!qp->ctx_pool) {
2154 		qp->ctx_pool = rte_mempool_create((const char *)str,
2155 							CTX_POOL_NUM_BUFS,
2156 							CTX_POOL_BUF_SIZE,
2157 							CTX_POOL_CACHE_SIZE, 0,
2158 							NULL, NULL, NULL, NULL,
2159 							SOCKET_ID_ANY, 0);
2160 		if (!qp->ctx_pool) {
2161 			DPAA_SEC_ERR("%s create failed\n", str);
2162 			return -ENOMEM;
2163 		}
2164 	} else
2165 		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2166 				dev->data->dev_id, qp_id);
2167 	dev->data->queue_pairs[qp_id] = qp;
2168 
2169 	return 0;
2170 }
2171 
2172 /** Returns the size of session structure */
2173 static unsigned int
2174 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2175 {
2176 	PMD_INIT_FUNC_TRACE();
2177 
2178 	return sizeof(dpaa_sec_session);
2179 }
2180 
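/* Cipher-only session setup: copy the key and map the rte_crypto cipher
 * algorithm onto the CAAM OP_ALG_ALGSEL/OP_ALG_AAI selectors used when the
 * shared descriptor is built; the direction follows the cipher op.
 */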
2181 static int
2182 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2183 		     struct rte_crypto_sym_xform *xform,
2184 		     dpaa_sec_session *session)
2185 {
2186 	session->ctxt = DPAA_SEC_CIPHER;
2187 	session->cipher_alg = xform->cipher.algo;
2188 	session->iv.length = xform->cipher.iv.length;
2189 	session->iv.offset = xform->cipher.iv.offset;
2190 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2191 					       RTE_CACHE_LINE_SIZE);
2192 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2193 		DPAA_SEC_ERR("No Memory for cipher key");
2194 		return -ENOMEM;
2195 	}
2196 	session->cipher_key.length = xform->cipher.key.length;
2197 
2198 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2199 	       xform->cipher.key.length);
2200 	switch (xform->cipher.algo) {
2201 	case RTE_CRYPTO_CIPHER_AES_CBC:
2202 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2203 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2204 		break;
2205 	case RTE_CRYPTO_CIPHER_DES_CBC:
2206 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2207 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2208 		break;
2209 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2210 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2211 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2212 		break;
2213 	case RTE_CRYPTO_CIPHER_AES_CTR:
2214 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2215 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2216 		break;
2217 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2218 		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2219 		break;
2220 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2221 		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2222 		break;
2223 	default:
2224 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2225 			      xform->cipher.algo);
2226 		return -ENOTSUP;
2227 	}
2228 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2229 			DIR_ENC : DIR_DEC;
2230 
2231 	return 0;
2232 }
2233 
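/* Auth-only session setup: copy the key (if any), record the digest length
 * and map the auth algorithm onto the CAAM hash/HMAC selectors. When no
 * cipher is configured, the IV parameters are taken from the auth xform.
 */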
2234 static int
2235 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2236 		   struct rte_crypto_sym_xform *xform,
2237 		   dpaa_sec_session *session)
2238 {
2239 	session->ctxt = DPAA_SEC_AUTH;
2240 	session->auth_alg = xform->auth.algo;
2241 	session->auth_key.length = xform->auth.key.length;
2242 	if (xform->auth.key.length) {
2243 		session->auth_key.data =
2244 				rte_zmalloc(NULL, xform->auth.key.length,
2245 					     RTE_CACHE_LINE_SIZE);
2246 		if (session->auth_key.data == NULL) {
2247 			DPAA_SEC_ERR("No Memory for auth key");
2248 			return -ENOMEM;
2249 		}
2250 		memcpy(session->auth_key.data, xform->auth.key.data,
2251 				xform->auth.key.length);
2252 
2253 	}
2254 	session->digest_length = xform->auth.digest_length;
2255 	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2256 		session->iv.offset = xform->auth.iv.offset;
2257 		session->iv.length = xform->auth.iv.length;
2258 	}
2259 
2260 	switch (xform->auth.algo) {
2261 	case RTE_CRYPTO_AUTH_SHA1:
2262 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2263 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2264 		break;
2265 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2266 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2267 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2268 		break;
2269 	case RTE_CRYPTO_AUTH_MD5:
2270 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2271 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2272 		break;
2273 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2274 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2275 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2276 		break;
2277 	case RTE_CRYPTO_AUTH_SHA224:
2278 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2279 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2280 		break;
2281 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2282 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2283 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2284 		break;
2285 	case RTE_CRYPTO_AUTH_SHA256:
2286 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2287 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2288 		break;
2289 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2290 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2291 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2292 		break;
2293 	case RTE_CRYPTO_AUTH_SHA384:
2294 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2295 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2296 		break;
2297 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2298 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2299 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2300 		break;
2301 	case RTE_CRYPTO_AUTH_SHA512:
2302 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2303 		session->auth_key.algmode = OP_ALG_AAI_HASH;
2304 		break;
2305 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2306 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2307 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2308 		break;
2309 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2310 		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2311 		session->auth_key.algmode = OP_ALG_AAI_F9;
2312 		break;
2313 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2314 		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2315 		session->auth_key.algmode = OP_ALG_AAI_F9;
2316 		break;
2317 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2318 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2319 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2320 		break;
2321 	case RTE_CRYPTO_AUTH_AES_CMAC:
2322 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2323 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2324 		break;
2325 	default:
2326 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2327 			      xform->auth.algo);
2328 		return -ENOTSUP;
2329 	}
2330 
2331 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2332 			DIR_ENC : DIR_DEC;
2333 
2334 	return 0;
2335 }
2336 
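/* Chained cipher+auth session setup: the xform order is indicated by
 * auth_cipher_text (set by the caller); both keys are copied and mapped to
 * their CAAM selectors, and the direction follows the cipher op.
 */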
2337 static int
2338 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2339 		   struct rte_crypto_sym_xform *xform,
2340 		   dpaa_sec_session *session)
2341 {
2343 	struct rte_crypto_cipher_xform *cipher_xform;
2344 	struct rte_crypto_auth_xform *auth_xform;
2345 
2346 	session->ctxt = DPAA_SEC_CIPHER_HASH;
2347 	if (session->auth_cipher_text) {
2348 		cipher_xform = &xform->cipher;
2349 		auth_xform = &xform->next->auth;
2350 	} else {
2351 		cipher_xform = &xform->next->cipher;
2352 		auth_xform = &xform->auth;
2353 	}
2354 
2355 	/* Set IV parameters */
2356 	session->iv.offset = cipher_xform->iv.offset;
2357 	session->iv.length = cipher_xform->iv.length;
2358 
2359 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2360 					       RTE_CACHE_LINE_SIZE);
2361 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2362 		DPAA_SEC_ERR("No Memory for cipher key");
2363 		return -ENOMEM;
2364 	}
2365 	session->cipher_key.length = cipher_xform->key.length;
2366 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2367 					     RTE_CACHE_LINE_SIZE);
2368 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2369 		DPAA_SEC_ERR("No Memory for auth key");
2370 		return -ENOMEM;
2371 	}
2372 	session->auth_key.length = auth_xform->key.length;
2373 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2374 	       cipher_xform->key.length);
2375 	memcpy(session->auth_key.data, auth_xform->key.data,
2376 	       auth_xform->key.length);
2377 
2378 	session->digest_length = auth_xform->digest_length;
2379 	session->auth_alg = auth_xform->algo;
2380 
2381 	switch (auth_xform->algo) {
2382 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2383 		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2384 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2385 		break;
2386 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2387 		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2388 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2389 		break;
2390 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2391 		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2392 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2393 		break;
2394 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2395 		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2396 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2397 		break;
2398 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2399 		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2400 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2401 		break;
2402 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2403 		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2404 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2405 		break;
2406 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2407 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2408 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2409 		break;
2410 	case RTE_CRYPTO_AUTH_AES_CMAC:
2411 		session->auth_key.alg = OP_ALG_ALGSEL_AES;
2412 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2413 		break;
2414 	default:
2415 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2416 			      auth_xform->algo);
2417 		return -ENOTSUP;
2418 	}
2419 
2420 	session->cipher_alg = cipher_xform->algo;
2421 
2422 	switch (cipher_xform->algo) {
2423 	case RTE_CRYPTO_CIPHER_AES_CBC:
2424 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2425 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2426 		break;
2427 	case RTE_CRYPTO_CIPHER_DES_CBC:
2428 		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2429 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2430 		break;
2431 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2432 		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2433 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2434 		break;
2435 	case RTE_CRYPTO_CIPHER_AES_CTR:
2436 		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2437 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2438 		break;
2439 	default:
2440 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2441 			      cipher_xform->algo);
2442 		return -ENOTSUP;
2443 	}
2444 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2445 				DIR_ENC : DIR_DEC;
2446 	return 0;
2447 }
2448 
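/* AEAD (AES-GCM) session setup: the AAD length is kept in auth_only_len and
 * is later passed to hardware through the fd.cmd/DPOVRD override built in
 * dpaa_sec_enqueue_burst().
 */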
2449 static int
2450 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2451 		   struct rte_crypto_sym_xform *xform,
2452 		   dpaa_sec_session *session)
2453 {
2454 	session->aead_alg = xform->aead.algo;
2455 	session->ctxt = DPAA_SEC_AEAD;
2456 	session->iv.length = xform->aead.iv.length;
2457 	session->iv.offset = xform->aead.iv.offset;
2458 	session->auth_only_len = xform->aead.aad_length;
2459 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2460 					     RTE_CACHE_LINE_SIZE);
2461 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2462 		DPAA_SEC_ERR("No Memory for aead key\n");
2463 		return -ENOMEM;
2464 	}
2465 	session->aead_key.length = xform->aead.key.length;
2466 	session->digest_length = xform->aead.digest_length;
2467 
2468 	memcpy(session->aead_key.data, xform->aead.key.data,
2469 	       xform->aead.key.length);
2470 
2471 	switch (session->aead_alg) {
2472 	case RTE_CRYPTO_AEAD_AES_GCM:
2473 		session->aead_key.alg = OP_ALG_ALGSEL_AES;
2474 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2475 		break;
2476 	default:
2477 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2478 		return -ENOTSUP;
2479 	}
2480 
2481 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2482 			DIR_ENC : DIR_DEC;
2483 
2484 	return 0;
2485 }
2486 
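/* Hand out the first free SEC input FQ from the device-private pool; one FQ
 * is attached per session per lcore and returned via dpaa_sec_detach_rxq().
 */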
2487 static struct qman_fq *
2488 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2489 {
2490 	unsigned int i;
2491 
2492 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2493 		if (qi->inq_attach[i] == 0) {
2494 			qi->inq_attach[i] = 1;
2495 			return &qi->inq[i];
2496 		}
2497 	}
2498 	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2499 
2500 	return NULL;
2501 }
2502 
2503 static int
2504 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2505 {
2506 	unsigned int i;
2507 
2508 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2509 		if (&qi->inq[i] == fq) {
2510 			if (qman_retire_fq(fq, NULL) != 0)
2511 				DPAA_SEC_DEBUG("Queue is not retired\n");
2512 			qman_oos_fq(fq);
2513 			qi->inq_attach[i] = 0;
2514 			return 0;
2515 		}
2516 	}
2517 	return -1;
2518 }
2519 
2520 int
2521 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2522 {
2523 	int ret;
2524 
2525 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2526 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2527 		ret = rte_dpaa_portal_init((void *)0);
2528 		if (ret) {
2529 			DPAA_SEC_ERR("Failure in affining portal");
2530 			return ret;
2531 		}
2532 	}
2533 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2534 			       rte_dpaa_mem_vtop(&sess->cdb),
2535 			       qman_fq_fqid(&qp->outq));
2536 	if (ret)
2537 		DPAA_SEC_ERR("Unable to init sec queue");
2538 
2539 	return ret;
2540 }
2541 
2542 static inline void
2543 free_session_data(dpaa_sec_session *s)
2544 {
2545 	if (is_aead(s))
2546 		rte_free(s->aead_key.data);
2547 	else {
2548 		rte_free(s->auth_key.data);
2549 		rte_free(s->cipher_key.data);
2550 	}
2551 	memset(s, 0, sizeof(dpaa_sec_session));
2552 }
2553 
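/*
 * Parse the symmetric xform chain and dispatch to the matching init helper
 * (cipher-only, auth-only, chained cipher+auth or AEAD), then attach one SEC
 * input FQ per lcore under the device lock.
 */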
2554 static int
2555 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2556 			    struct rte_crypto_sym_xform *xform,	void *sess)
2557 {
2558 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2559 	dpaa_sec_session *session = sess;
2560 	uint32_t i;
2561 	int ret;
2562 
2563 	PMD_INIT_FUNC_TRACE();
2564 
2565 	if (unlikely(sess == NULL)) {
2566 		DPAA_SEC_ERR("invalid session struct");
2567 		return -EINVAL;
2568 	}
2569 	memset(session, 0, sizeof(dpaa_sec_session));
2570 
2571 	/* Default IV length = 0 */
2572 	session->iv.length = 0;
2573 
2574 	/* Cipher Only */
2575 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2576 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2577 		ret = dpaa_sec_cipher_init(dev, xform, session);
2578 
2579 	/* Authentication Only */
2580 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2581 		   xform->next == NULL) {
2582 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2583 		session->ctxt = DPAA_SEC_AUTH;
2584 		ret = dpaa_sec_auth_init(dev, xform, session);
2585 
2586 	/* Cipher then Authenticate */
2587 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2588 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2589 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2590 			session->auth_cipher_text = 1;
2591 			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2592 				ret = dpaa_sec_auth_init(dev, xform, session);
2593 			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2594 				ret = dpaa_sec_cipher_init(dev, xform, session);
2595 			else
2596 				ret = dpaa_sec_chain_init(dev, xform, session);
2597 		} else {
2598 			DPAA_SEC_ERR("Not supported: Cipher decrypt followed by Auth");
2599 			return -ENOTSUP;
2600 		}
2601 	/* Authenticate then Cipher */
2602 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2603 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2604 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2605 			session->auth_cipher_text = 0;
2606 			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2607 				ret = dpaa_sec_cipher_init(dev, xform, session);
2608 			else if (xform->next->cipher.algo
2609 					== RTE_CRYPTO_CIPHER_NULL)
2610 				ret = dpaa_sec_auth_init(dev, xform, session);
2611 			else
2612 				ret = dpaa_sec_chain_init(dev, xform, session);
2613 		} else {
2614 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2615 			return -ENOTSUP;
2616 		}
2617 
2618 	/* AEAD operation for AES-GCM kind of Algorithms */
2619 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2620 		   xform->next == NULL) {
2621 		ret = dpaa_sec_aead_init(dev, xform, session);
2622 
2623 	} else {
2624 		DPAA_SEC_ERR("Invalid crypto type");
2625 		return -EINVAL;
2626 	}
2627 	if (ret) {
2628 		DPAA_SEC_ERR("unable to init session");
2629 		goto err1;
2630 	}
2631 
2632 	rte_spinlock_lock(&internals->lock);
2633 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2634 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2635 		if (session->inq[i] == NULL) {
2636 			DPAA_SEC_ERR("unable to attach sec queue");
2637 			rte_spinlock_unlock(&internals->lock);
2638 			ret = -EBUSY;
2639 			goto err1;
2640 		}
2641 	}
2642 	rte_spinlock_unlock(&internals->lock);
2643 
2644 	return 0;
2645 
2646 err1:
2647 	free_session_data(session);
2648 	return ret;
2649 }
2650 
2651 static int
2652 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2653 		struct rte_crypto_sym_xform *xform,
2654 		struct rte_cryptodev_sym_session *sess)
2655 {
2656 	void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
2657 	int ret;
2658 
2659 	PMD_INIT_FUNC_TRACE();
2660 
2661 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2662 	if (ret != 0) {
2663 		DPAA_SEC_ERR("failed to configure session parameters");
2664 		return ret;
2665 	}
2666 
2667 	ret = dpaa_sec_prep_cdb(sess_private_data);
2668 	if (ret) {
2669 		DPAA_SEC_ERR("Unable to prepare sec cdb");
2670 		return ret;
2671 	}
2672 
2673 	return 0;
2674 }
2675 
2676 static inline void
2677 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2678 {
2679 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2680 	uint8_t i;
2681 
2682 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2683 		if (s->inq[i])
2684 			dpaa_sec_detach_rxq(qi, s->inq[i]);
2685 		s->inq[i] = NULL;
2686 		s->qp[i] = NULL;
2687 	}
2688 	free_session_data(s);
2689 }
2690 
2691 /** Clear the memory of session so it doesn't leave key material behind */
2692 static void
2693 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2694 		struct rte_cryptodev_sym_session *sess)
2695 {
2696 	PMD_INIT_FUNC_TRACE();
2697 	void *sess_priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
2698 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2699 
2700 	free_session_memory(dev, s);
2701 }
2702 
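/* IPsec AEAD (AES-GCM) setup: the protocol selector is chosen from the ICV
 * length (8, 12 or 16 bytes) and the 4-byte salt from the IPsec xform is
 * copied into the encap or decap PDB.
 */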
2703 static int
2704 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2705 			struct rte_security_ipsec_xform *ipsec_xform,
2706 			dpaa_sec_session *session)
2707 {
2708 	PMD_INIT_FUNC_TRACE();
2709 
2710 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2711 					       RTE_CACHE_LINE_SIZE);
2712 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2713 		DPAA_SEC_ERR("No Memory for aead key");
2714 		return -ENOMEM;
2715 	}
2716 	memcpy(session->aead_key.data, aead_xform->key.data,
2717 	       aead_xform->key.length);
2718 
2719 	session->digest_length = aead_xform->digest_length;
2720 	session->aead_key.length = aead_xform->key.length;
2721 
2722 	switch (aead_xform->algo) {
2723 	case RTE_CRYPTO_AEAD_AES_GCM:
2724 		switch (session->digest_length) {
2725 		case 8:
2726 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2727 			break;
2728 		case 12:
2729 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2730 			break;
2731 		case 16:
2732 			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2733 			break;
2734 		default:
2735 			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2736 				     session->digest_length);
2737 			return -EINVAL;
2738 		}
2739 		if (session->dir == DIR_ENC) {
2740 			memcpy(session->encap_pdb.gcm.salt,
2741 				(uint8_t *)&(ipsec_xform->salt), 4);
2742 		} else {
2743 			memcpy(session->decap_pdb.gcm.salt,
2744 				(uint8_t *)&(ipsec_xform->salt), 4);
2745 		}
2746 		session->aead_key.algmode = OP_ALG_AAI_GCM;
2747 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2748 		break;
2749 	default:
2750 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2751 			      aead_xform->algo);
2752 		return -ENOTSUP;
2753 	}
2754 	return 0;
2755 }
2756 
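/* IPsec setup for cipher/auth (non-AEAD) sessions: copy the keys and map the
 * algorithms onto the OP_PCL_IPSEC_* protocol selectors; NULL cipher/auth is
 * assumed when the corresponding xform is absent.
 */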
2757 static int
2758 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2759 	struct rte_crypto_auth_xform *auth_xform,
2760 	struct rte_security_ipsec_xform *ipsec_xform,
2761 	dpaa_sec_session *session)
2762 {
2763 	if (cipher_xform) {
2764 		session->cipher_key.data = rte_zmalloc(NULL,
2765 						       cipher_xform->key.length,
2766 						       RTE_CACHE_LINE_SIZE);
2767 		if (session->cipher_key.data == NULL &&
2768 				cipher_xform->key.length > 0) {
2769 			DPAA_SEC_ERR("No Memory for cipher key");
2770 			return -ENOMEM;
2771 		}
2772 
2773 		session->cipher_key.length = cipher_xform->key.length;
2774 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2775 				cipher_xform->key.length);
2776 		session->cipher_alg = cipher_xform->algo;
2777 	} else {
2778 		session->cipher_key.data = NULL;
2779 		session->cipher_key.length = 0;
2780 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2781 	}
2782 
2783 	if (auth_xform) {
2784 		session->auth_key.data = rte_zmalloc(NULL,
2785 						auth_xform->key.length,
2786 						RTE_CACHE_LINE_SIZE);
2787 		if (session->auth_key.data == NULL &&
2788 				auth_xform->key.length > 0) {
2789 			DPAA_SEC_ERR("No Memory for auth key");
2790 			return -ENOMEM;
2791 		}
2792 		session->auth_key.length = auth_xform->key.length;
2793 		memcpy(session->auth_key.data, auth_xform->key.data,
2794 				auth_xform->key.length);
2795 		session->auth_alg = auth_xform->algo;
2796 		session->digest_length = auth_xform->digest_length;
2797 	} else {
2798 		session->auth_key.data = NULL;
2799 		session->auth_key.length = 0;
2800 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2801 	}
2802 
2803 	switch (session->auth_alg) {
2804 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2805 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2806 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2807 		break;
2808 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2809 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2810 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2811 		break;
2812 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2813 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2814 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2815 		if (session->digest_length != 16)
2816 			DPAA_SEC_WARN(
2817 			"+++Using sha256-hmac with a truncated len is non-standard, "
2818 			"it will not work with lookaside proto");
2819 		break;
2820 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2821 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2822 		if (session->digest_length == 6)
2823 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_96;
2824 		else if (session->digest_length == 14)
2825 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_224;
2826 		else
2827 			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_112;
2828 		break;
2829 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2830 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2831 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2832 		break;
2833 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2834 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2835 		session->auth_key.algmode = OP_ALG_AAI_HMAC;
2836 		break;
2837 	case RTE_CRYPTO_AUTH_AES_CMAC:
2838 		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2839 		session->auth_key.algmode = OP_ALG_AAI_CMAC;
2840 		break;
2841 	case RTE_CRYPTO_AUTH_NULL:
2842 		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2843 		break;
2844 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2845 		session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2846 		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2847 		break;
2848 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2849 	case RTE_CRYPTO_AUTH_SHA1:
2850 	case RTE_CRYPTO_AUTH_SHA256:
2851 	case RTE_CRYPTO_AUTH_SHA512:
2852 	case RTE_CRYPTO_AUTH_SHA224:
2853 	case RTE_CRYPTO_AUTH_SHA384:
2854 	case RTE_CRYPTO_AUTH_MD5:
2855 	case RTE_CRYPTO_AUTH_AES_GMAC:
2856 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2857 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2858 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2859 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2860 			      session->auth_alg);
2861 		return -ENOTSUP;
2862 	default:
2863 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2864 			      session->auth_alg);
2865 		return -ENOTSUP;
2866 	}
2867 
2868 	switch (session->cipher_alg) {
2869 	case RTE_CRYPTO_CIPHER_AES_CBC:
2870 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2871 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2872 		break;
2873 	case RTE_CRYPTO_CIPHER_DES_CBC:
2874 		session->cipher_key.alg = OP_PCL_IPSEC_DES;
2875 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2876 		break;
2877 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2878 		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2879 		session->cipher_key.algmode = OP_ALG_AAI_CBC;
2880 		break;
2881 	case RTE_CRYPTO_CIPHER_AES_CTR:
2882 		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2883 		session->cipher_key.algmode = OP_ALG_AAI_CTR;
2884 		if (session->dir == DIR_ENC) {
2885 			session->encap_pdb.ctr.ctr_initial = 0x00000001;
2886 			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2887 		} else {
2888 			session->decap_pdb.ctr.ctr_initial = 0x00000001;
2889 			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2890 		}
2891 		break;
2892 	case RTE_CRYPTO_CIPHER_NULL:
2893 		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2894 		break;
2895 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2896 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2897 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2898 	case RTE_CRYPTO_CIPHER_AES_ECB:
2899 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2900 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2901 			      session->cipher_alg);
2902 		return -ENOTSUP;
2903 	default:
2904 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2905 			      session->cipher_alg);
2906 		return -ENOTSUP;
2907 	}
2908 
2909 	return 0;
2910 }
2911 
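/*
 * Build a lookaside-protocol IPsec session: process the crypto xform, then
 * for egress fill the encap PDB with a prebuilt outer IPv4/IPv6 header, or
 * for ingress fill the decap PDB including the anti-replay window size, and
 * finally attach one SEC input FQ per lcore.
 */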
2912 static int
2913 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2914 			   struct rte_security_session_conf *conf,
2915 			   void *sess)
2916 {
2917 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2918 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2919 	struct rte_crypto_auth_xform *auth_xform = NULL;
2920 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2921 	struct rte_crypto_aead_xform *aead_xform = NULL;
2922 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2923 	uint32_t i;
2924 	int ret;
2925 
2926 	PMD_INIT_FUNC_TRACE();
2927 
2928 	memset(session, 0, sizeof(dpaa_sec_session));
2929 	session->proto_alg = conf->protocol;
2930 	session->ctxt = DPAA_SEC_IPSEC;
2931 
2932 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
2933 	    ipsec_xform->life.bytes_soft_limit != 0 ||
2934 	    ipsec_xform->life.packets_hard_limit != 0 ||
2935 	    ipsec_xform->life.packets_soft_limit != 0)
2936 		return -ENOTSUP;
2937 
2938 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2939 		session->dir = DIR_ENC;
2940 	else
2941 		session->dir = DIR_DEC;
2942 
2943 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2944 		cipher_xform = &conf->crypto_xform->cipher;
2945 		if (conf->crypto_xform->next)
2946 			auth_xform = &conf->crypto_xform->next->auth;
2947 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2948 					ipsec_xform, session);
2949 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2950 		auth_xform = &conf->crypto_xform->auth;
2951 		if (conf->crypto_xform->next)
2952 			cipher_xform = &conf->crypto_xform->next->cipher;
2953 		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2954 					ipsec_xform, session);
2955 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2956 		aead_xform = &conf->crypto_xform->aead;
2957 		ret = dpaa_sec_ipsec_aead_init(aead_xform,
2958 					ipsec_xform, session);
2959 	} else {
2960 		DPAA_SEC_ERR("XFORM not specified");
2961 		ret = -EINVAL;
2962 		goto out;
2963 	}
2964 	if (ret) {
2965 		DPAA_SEC_ERR("Failed to process xform");
2966 		goto out;
2967 	}
2968 
2969 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2970 		if (ipsec_xform->tunnel.type ==
2971 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2972 			session->ip4_hdr.ip_v = IPVERSION;
2973 			session->ip4_hdr.ip_hl = 5;
2974 			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2975 						sizeof(session->ip4_hdr));
2976 			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2977 			session->ip4_hdr.ip_id = 0;
2978 			session->ip4_hdr.ip_off = 0;
2979 			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2980 			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2981 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2982 					IPPROTO_ESP : IPPROTO_AH;
2983 			session->ip4_hdr.ip_sum = 0;
2984 			session->ip4_hdr.ip_src =
2985 					ipsec_xform->tunnel.ipv4.src_ip;
2986 			session->ip4_hdr.ip_dst =
2987 					ipsec_xform->tunnel.ipv4.dst_ip;
2988 			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2989 						(void *)&session->ip4_hdr,
2990 						sizeof(struct ip));
2991 			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2992 		} else if (ipsec_xform->tunnel.type ==
2993 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2994 			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2995 				DPAA_IPv6_DEFAULT_VTC_FLOW |
2996 				((ipsec_xform->tunnel.ipv6.dscp <<
2997 					RTE_IPV6_HDR_TC_SHIFT) &
2998 					RTE_IPV6_HDR_TC_MASK) |
2999 				((ipsec_xform->tunnel.ipv6.flabel <<
3000 					RTE_IPV6_HDR_FL_SHIFT) &
3001 					RTE_IPV6_HDR_FL_MASK));
3002 			/* Payload length will be updated by HW */
3003 			session->ip6_hdr.payload_len = 0;
3004 			session->ip6_hdr.hop_limits =
3005 					ipsec_xform->tunnel.ipv6.hlimit;
3006 			session->ip6_hdr.proto = (ipsec_xform->proto ==
3007 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3008 					IPPROTO_ESP : IPPROTO_AH;
3009 			memcpy(&session->ip6_hdr.src_addr,
3010 					&ipsec_xform->tunnel.ipv6.src_addr, 16);
3011 			memcpy(&session->ip6_hdr.dst_addr,
3012 					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
3013 			session->encap_pdb.ip_hdr_len =
3014 						sizeof(struct rte_ipv6_hdr);
3015 		}
3016 
3017 		session->encap_pdb.options =
3018 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3019 			PDBOPTS_ESP_OIHI_PDB_INL |
3020 			PDBOPTS_ESP_IVSRC |
3021 			PDBHMO_ESP_SNR;
3022 		if (ipsec_xform->options.dec_ttl)
3023 			session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3024 		if (ipsec_xform->options.esn)
3025 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3026 		session->encap_pdb.spi = ipsec_xform->spi;
3027 
3028 	} else if (ipsec_xform->direction ==
3029 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3030 		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3031 			session->decap_pdb.options = sizeof(struct ip) << 16;
3032 		else
3033 			session->decap_pdb.options =
3034 					sizeof(struct rte_ipv6_hdr) << 16;
3035 		if (ipsec_xform->options.esn)
3036 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
3037 		if (ipsec_xform->replay_win_sz) {
3038 			uint32_t win_sz;
3039 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3040 
3041 			switch (win_sz) {
3042 			case 1:
3043 			case 2:
3044 			case 4:
3045 			case 8:
3046 			case 16:
3047 			case 32:
3048 				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3049 				break;
3050 			case 64:
3051 				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3052 				break;
3053 			default:
3054 				session->decap_pdb.options |=
3055 							PDBOPTS_ESP_ARS128;
3056 			}
3057 		}
3058 	} else
3059 		goto out;
3060 	rte_spinlock_lock(&internals->lock);
3061 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3062 		session->inq[i] = dpaa_sec_attach_rxq(internals);
3063 		if (session->inq[i] == NULL) {
3064 			DPAA_SEC_ERR("unable to attach sec queue");
3065 			rte_spinlock_unlock(&internals->lock);
3066 			goto out;
3067 		}
3068 	}
3069 	rte_spinlock_unlock(&internals->lock);
3070 
3071 	return 0;
3072 out:
3073 	free_session_data(session);
3074 	return -1;
3075 }
3076 
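/*
 * Build a PDCP session: map cipher/auth algorithms onto the PDCP_*_TYPE_*
 * selectors, validate the SN size for control-plane mode, record the bearer,
 * direction and HFN parameters and attach one SEC input FQ per lcore.
 */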
3077 static int
3078 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3079 			  struct rte_security_session_conf *conf,
3080 			  void *sess)
3081 {
3082 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3083 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3084 	struct rte_crypto_auth_xform *auth_xform = NULL;
3085 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3086 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
3087 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3088 	uint32_t i;
3089 	int ret;
3090 
3091 	PMD_INIT_FUNC_TRACE();
3092 
3093 	memset(session, 0, sizeof(dpaa_sec_session));
3094 
3095 	/* find xfrm types */
3096 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3097 		cipher_xform = &xform->cipher;
3098 		if (xform->next != NULL &&
3099 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3100 			auth_xform = &xform->next->auth;
3101 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3102 		auth_xform = &xform->auth;
3103 		if (xform->next != NULL &&
3104 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3105 			cipher_xform = &xform->next->cipher;
3106 	} else {
3107 		DPAA_SEC_ERR("Invalid crypto type");
3108 		return -EINVAL;
3109 	}
3110 
3111 	session->proto_alg = conf->protocol;
3112 	session->ctxt = DPAA_SEC_PDCP;
3113 
3114 	if (cipher_xform) {
3115 		switch (cipher_xform->algo) {
3116 		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3117 			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3118 			break;
3119 		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3120 			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3121 			break;
3122 		case RTE_CRYPTO_CIPHER_AES_CTR:
3123 			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3124 			break;
3125 		case RTE_CRYPTO_CIPHER_NULL:
3126 			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3127 			break;
3128 		default:
3129 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3130 				      cipher_xform->algo);
3131 			return -EINVAL;
3132 		}
3133 
3134 		session->cipher_key.data = rte_zmalloc(NULL,
3135 					       cipher_xform->key.length,
3136 					       RTE_CACHE_LINE_SIZE);
3137 		if (session->cipher_key.data == NULL &&
3138 				cipher_xform->key.length > 0) {
3139 			DPAA_SEC_ERR("No Memory for cipher key");
3140 			return -ENOMEM;
3141 		}
3142 		session->cipher_key.length = cipher_xform->key.length;
3143 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3144 			cipher_xform->key.length);
3145 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3146 					DIR_ENC : DIR_DEC;
3147 		session->cipher_alg = cipher_xform->algo;
3148 	} else {
3149 		session->cipher_key.data = NULL;
3150 		session->cipher_key.length = 0;
3151 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3152 		session->dir = DIR_ENC;
3153 	}
3154 
3155 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3156 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3157 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3158 			DPAA_SEC_ERR(
3159 				"PDCP Seq Num size should be 5/12 bits for cmode");
3160 			ret = -EINVAL;
3161 			goto out;
3162 		}
3163 	}
3164 
3165 	if (auth_xform) {
3166 		switch (auth_xform->algo) {
3167 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3168 			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3169 			break;
3170 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3171 			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3172 			break;
3173 		case RTE_CRYPTO_AUTH_AES_CMAC:
3174 			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3175 			break;
3176 		case RTE_CRYPTO_AUTH_NULL:
3177 			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3178 			break;
3179 		default:
3180 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3181 				      auth_xform->algo);
3182 			rte_free(session->cipher_key.data);
3183 			return -EINVAL;
3184 		}
3185 		session->auth_key.data = rte_zmalloc(NULL,
3186 						     auth_xform->key.length,
3187 						     RTE_CACHE_LINE_SIZE);
3188 		if (!session->auth_key.data &&
3189 		    auth_xform->key.length > 0) {
3190 			DPAA_SEC_ERR("No Memory for auth key");
3191 			rte_free(session->cipher_key.data);
3192 			return -ENOMEM;
3193 		}
3194 		session->auth_key.length = auth_xform->key.length;
3195 		memcpy(session->auth_key.data, auth_xform->key.data,
3196 		       auth_xform->key.length);
3197 		session->auth_alg = auth_xform->algo;
3198 	} else {
3199 		if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3200 			DPAA_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3201 			ret = -EINVAL;
3202 			goto out;
3203 		}
3204 		session->auth_key.data = NULL;
3205 		session->auth_key.length = 0;
3206 		session->auth_alg = 0;
3207 	}
3208 	session->pdcp.domain = pdcp_xform->domain;
3209 	session->pdcp.bearer = pdcp_xform->bearer;
3210 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3211 	session->pdcp.sn_size = pdcp_xform->sn_size;
3212 	session->pdcp.hfn = pdcp_xform->hfn;
3213 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3214 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3215 	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3216 	if (cipher_xform)
3217 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3218 
3219 	rte_spinlock_lock(&dev_priv->lock);
3220 	for (i = 0; i < MAX_DPAA_CORES; i++) {
3221 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3222 		if (session->inq[i] == NULL) {
3223 			DPAA_SEC_ERR("unable to attach sec queue");
3224 			rte_spinlock_unlock(&dev_priv->lock);
3225 			ret = -EBUSY;
3226 			goto out;
3227 		}
3228 	}
3229 	rte_spinlock_unlock(&dev_priv->lock);
3230 	return 0;
3231 out:
3232 	rte_free(session->auth_key.data);
3233 	rte_free(session->cipher_key.data);
3234 	memset(session, 0, sizeof(dpaa_sec_session));
3235 	return ret;
3236 }
3237 
3238 static int
3239 dpaa_sec_security_session_create(void *dev,
3240 				 struct rte_security_session_conf *conf,
3241 				 struct rte_security_session *sess)
3242 {
3243 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3244 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3245 	int ret;
3246 
3247 	switch (conf->protocol) {
3248 	case RTE_SECURITY_PROTOCOL_IPSEC:
3249 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
3250 				sess_private_data);
3251 		break;
3252 	case RTE_SECURITY_PROTOCOL_PDCP:
3253 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
3254 				sess_private_data);
3255 		break;
3256 	case RTE_SECURITY_PROTOCOL_MACSEC:
3257 		return -ENOTSUP;
3258 	default:
3259 		return -EINVAL;
3260 	}
3261 	if (ret != 0) {
3262 		DPAA_SEC_ERR("failed to configure session parameters");
3263 		return ret;
3264 	}
3265 
3266 	ret = dpaa_sec_prep_cdb(sess_private_data);
3267 	if (ret) {
3268 		DPAA_SEC_ERR("Unable to prepare sec cdb");
3269 		return ret;
3270 	}
3271 
3272 	return ret;
3273 }
3274 
3275 /** Clear the memory of session so it doesn't leave key material behind */
3276 static int
3277 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3278 		struct rte_security_session *sess)
3279 {
3280 	PMD_INIT_FUNC_TRACE();
3281 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
3282 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3283 
3284 	if (sess_priv) {
3285 		free_session_memory((struct rte_cryptodev *)dev, s);
3286 	}
3287 	return 0;
3288 }
3289 
3290 static unsigned int
3291 dpaa_sec_security_session_get_size(void *device __rte_unused)
3292 {
3293 	return sizeof(dpaa_sec_session);
3294 }
3295 
3296 static int
3297 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3298 		       struct rte_cryptodev_config *config __rte_unused)
3299 {
3300 	PMD_INIT_FUNC_TRACE();
3301 
3302 	return 0;
3303 }
3304 
3305 static int
3306 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3307 {
3308 	PMD_INIT_FUNC_TRACE();
3309 	return 0;
3310 }
3311 
3312 static void
3313 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3314 {
3315 	PMD_INIT_FUNC_TRACE();
3316 }
3317 
3318 static int
3319 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3320 {
3321 	PMD_INIT_FUNC_TRACE();
3322 
3323 	if (dev == NULL)
3324 		return -ENODEV;
3325 
3326 	return 0;
3327 }
3328 
3329 static void
3330 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3331 		       struct rte_cryptodev_info *info)
3332 {
3333 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3334 
3335 	PMD_INIT_FUNC_TRACE();
3336 	if (info != NULL) {
3337 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3338 		info->feature_flags = dev->feature_flags;
3339 		info->capabilities = dpaa_sec_capabilities;
3340 		info->sym.max_nb_sessions = internals->max_nb_sessions;
3341 		info->driver_id = dpaa_cryptodev_driver_id;
3342 	}
3343 }
3344 
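/*
 * DQRR callback used when a queue pair is bound to an event queue in parallel
 * mode: recover the op context from the FD, write back the output length for
 * protocol (security) sessions, translate the SEC status into the op status
 * and fill the rte_event from the fields stashed on the outbound FQ.
 */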
3345 static enum qman_cb_dqrr_result
3346 dpaa_sec_process_parallel_event(void *event,
3347 			struct qman_portal *qm __always_unused,
3348 			struct qman_fq *outq,
3349 			const struct qm_dqrr_entry *dqrr,
3350 			void **bufs)
3351 {
3352 	const struct qm_fd *fd;
3353 	struct dpaa_sec_job *job;
3354 	struct dpaa_sec_op_ctx *ctx;
3355 	struct rte_event *ev = (struct rte_event *)event;
3356 
3357 	fd = &dqrr->fd;
3358 
3359 	/* sg is embedded in an op ctx,
3360 	 * sg[0] is for output
3361 	 * sg[1] for input
3362 	 */
3363 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3364 
3365 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3366 	ctx->fd_status = fd->status;
3367 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3368 		struct qm_sg_entry *sg_out;
3369 		uint32_t len;
3370 
3371 		sg_out = &job->sg[0];
3372 		hw_sg_to_cpu(sg_out);
3373 		len = sg_out->length;
3374 		ctx->op->sym->m_src->pkt_len = len;
3375 		ctx->op->sym->m_src->data_len = len;
3376 	}
3377 	if (!ctx->fd_status) {
3378 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3379 	} else {
3380 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3381 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3382 	}
3383 	ev->event_ptr = (void *)ctx->op;
3384 
3385 	ev->flow_id = outq->ev.flow_id;
3386 	ev->sub_event_type = outq->ev.sub_event_type;
3387 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3388 	ev->op = RTE_EVENT_OP_NEW;
3389 	ev->sched_type = outq->ev.sched_type;
3390 	ev->queue_id = outq->ev.queue_id;
3391 	ev->priority = outq->ev.priority;
3392 	*bufs = (void *)ctx->op;
3393 
3394 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3395 
3396 	return qman_cb_dqrr_consume;
3397 }
3398 
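/*
 * Atomic-mode variant of the callback above: it additionally records the DQRR
 * index in dpaa_seqn() of the source mbuf so the held entry can later be
 * consumed through DCA on re-enqueue, and defers the DQRR consume.
 */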
3399 static enum qman_cb_dqrr_result
3400 dpaa_sec_process_atomic_event(void *event,
3401 			struct qman_portal *qm __rte_unused,
3402 			struct qman_fq *outq,
3403 			const struct qm_dqrr_entry *dqrr,
3404 			void **bufs)
3405 {
3406 	u8 index;
3407 	const struct qm_fd *fd;
3408 	struct dpaa_sec_job *job;
3409 	struct dpaa_sec_op_ctx *ctx;
3410 	struct rte_event *ev = (struct rte_event *)event;
3411 
3412 	fd = &dqrr->fd;
3413 
3414 	/* sg is embedded in an op ctx,
3415 	 * sg[0] is for output
3416 	 * sg[1] for input
3417 	 */
3418 	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3419 
3420 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3421 	ctx->fd_status = fd->status;
3422 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3423 		struct qm_sg_entry *sg_out;
3424 		uint32_t len;
3425 
3426 		sg_out = &job->sg[0];
3427 		hw_sg_to_cpu(sg_out);
3428 		len = sg_out->length;
3429 		ctx->op->sym->m_src->pkt_len = len;
3430 		ctx->op->sym->m_src->data_len = len;
3431 	}
3432 	if (!ctx->fd_status) {
3433 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3434 	} else {
3435 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3436 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3437 	}
3438 	ev->event_ptr = (void *)ctx->op;
3439 	ev->flow_id = outq->ev.flow_id;
3440 	ev->sub_event_type = outq->ev.sub_event_type;
3441 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3442 	ev->op = RTE_EVENT_OP_NEW;
3443 	ev->sched_type = outq->ev.sched_type;
3444 	ev->queue_id = outq->ev.queue_id;
3445 	ev->priority = outq->ev.priority;
3446 
3447 	/* Save active dqrr entries */
3448 	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3449 	DPAA_PER_LCORE_DQRR_SIZE++;
3450 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3451 	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3452 	ev->impl_opaque = index + 1;
3453 	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3454 	*bufs = (void *)ctx->op;
3455 
3456 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3457 
3458 	return qman_cb_dqrr_defer;
3459 }
3460 
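/*
 * Bind a queue pair's outbound FQ to an event device channel: atomic
 * scheduling uses HOLDACTIVE and the atomic callback, other types use the
 * parallel callback; ordered scheduling is not supported.
 */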
3461 int
3462 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3463 		int qp_id,
3464 		uint16_t ch_id,
3465 		const struct rte_event *event)
3466 {
3467 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3468 	struct qm_mcc_initfq opts = {0};
3470 	int ret;
3471 
3472 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3473 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3474 	opts.fqd.dest.channel = ch_id;
3475 
3476 	switch (event->sched_type) {
3477 	case RTE_SCHED_TYPE_ATOMIC:
3478 		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3479 		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is not needed
3480 		 * when HOLD_ACTIVE is set.
3481 		 */
3482 		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3483 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3484 		break;
3485 	case RTE_SCHED_TYPE_ORDERED:
3486 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3487 		return -ENOTSUP;
3488 	default:
3489 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3490 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3491 		break;
3492 	}
3493 
3494 	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3495 	if (unlikely(ret)) {
3496 		DPAA_SEC_ERR("unable to init caam source fq!");
3497 		return ret;
3498 	}
3499 
3500 	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3501 
3502 	return 0;
3503 }
3504 
3505 int
3506 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3507 			int qp_id)
3508 {
3509 	struct qm_mcc_initfq opts = {0};
3510 	int ret;
3511 	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3512 
3513 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3514 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3515 	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3516 	qp->outq.cb.ern  = ern_sec_fq_handler;
3517 	qman_retire_fq(&qp->outq, NULL);
3518 	qman_oos_fq(&qp->outq);
3519 	ret = qman_init_fq(&qp->outq, 0, &opts);
3520 	if (ret)
3521 		DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3522 	qp->outq.cb.dqrr = NULL;
3523 
3524 	return ret;
3525 }
3526 
3527 static struct rte_cryptodev_ops crypto_ops = {
3528 	.dev_configure	      = dpaa_sec_dev_configure,
3529 	.dev_start	      = dpaa_sec_dev_start,
3530 	.dev_stop	      = dpaa_sec_dev_stop,
3531 	.dev_close	      = dpaa_sec_dev_close,
3532 	.dev_infos_get        = dpaa_sec_dev_infos_get,
3533 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
3534 	.queue_pair_release   = dpaa_sec_queue_pair_release,
3535 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
3536 	.sym_session_configure    = dpaa_sec_sym_session_configure,
3537 	.sym_session_clear        = dpaa_sec_sym_session_clear,
3538 	/* Raw data-path API related operations */
3539 	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3540 	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3541 };
3542 
3543 static const struct rte_security_capability *
3544 dpaa_sec_capabilities_get(void *device __rte_unused)
3545 {
3546 	return dpaa_sec_security_cap;
3547 }
3548 
3549 static const struct rte_security_ops dpaa_sec_security_ops = {
3550 	.session_create = dpaa_sec_security_session_create,
3551 	.session_update = NULL,
3552 	.session_get_size = dpaa_sec_security_session_get_size,
3553 	.session_stats_get = NULL,
3554 	.session_destroy = dpaa_sec_security_session_destroy,
3555 	.set_pkt_metadata = NULL,
3556 	.capabilities_get = dpaa_sec_capabilities_get
3557 };
3558 
3559 static int
3560 dpaa_sec_uninit(struct rte_cryptodev *dev)
3561 {
3562 	struct dpaa_sec_dev_private *internals;
3563 
3564 	if (dev == NULL)
3565 		return -ENODEV;
3566 
3567 	internals = dev->data->dev_private;
3568 	rte_free(dev->security_ctx);
3569 
3570 	rte_free(internals);
3571 
3572 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3573 		      dev->data->name, rte_socket_id());
3574 
3575 	return 0;
3576 }
3577 
3578 static int
3579 check_devargs_handler(__rte_unused const char *key, const char *value,
3580 		      __rte_unused void *opaque)
3581 {
3582 	dpaa_sec_dp_dump = atoi(value);
3583 	if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3584 		DPAA_SEC_WARN("DPAA_SEC_DP_DUMP_LEVEL is not "
3585 			      "supported, changing to FULL error prints\n");
3586 		dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
3587 	}
3588 
3589 	return 0;
3590 }
3591 
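/* Parse the optional DRIVER_DUMP_MODE devargs key, which sets the data-path
 * dump level (dpaa_sec_dp_dump); out-of-range values are reduced to the full
 * dump level by check_devargs_handler() above.
 */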
3592 static void
3593 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3594 {
3595 	struct rte_kvargs *kvlist;
3596 
3597 	if (!devargs)
3598 		return;
3599 
3600 	kvlist = rte_kvargs_parse(devargs->args, NULL);
3601 	if (!kvlist)
3602 		return;
3603 
3604 	if (!rte_kvargs_count(kvlist, key)) {
3605 		rte_kvargs_free(kvlist);
3606 		return;
3607 	}
3608 
3609 	rte_kvargs_process(kvlist, key,
3610 				check_devargs_handler, NULL);
3611 	rte_kvargs_free(kvlist);
3612 }
3613 
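/*
 * Per-device init: enable the SEC QI interface through /dev/mem if it is not
 * already enabled, register the enqueue/dequeue handlers and feature flags,
 * and, in the primary process only, create the security context, initialise
 * the TX FQ of every queue pair and create the pool of RX FQs that is later
 * handed out to sessions.
 */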
3614 static int
3615 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3616 {
3617 	struct dpaa_sec_dev_private *internals;
3618 	struct rte_security_ctx *security_instance;
3619 	struct dpaa_sec_qp *qp;
3620 	uint32_t i, flags;
3621 	int ret;
3622 	void *cmd_map;
3623 	int map_fd = -1;
3624 
3625 	PMD_INIT_FUNC_TRACE();
3626 
3627 	internals = cryptodev->data->dev_private;
3628 	map_fd = open("/dev/mem", O_RDWR);
3629 	if (unlikely(map_fd < 0)) {
3630 		DPAA_SEC_ERR("Unable to open (/dev/mem)");
3631 		return map_fd;
3632 	}
3633 	internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
3634 			    MAP_SHARED, map_fd, SEC_BASE_ADDR);
3635 	if (internals->sec_hw == MAP_FAILED) {
3636 		DPAA_SEC_ERR("Memory map failed");
3637 		close(map_fd);
3638 		return -EINVAL;
3639 	}
3640 	cmd_map = (uint8_t *)internals->sec_hw +
3641 		  (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG;
3642 	if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN))
3643 		/* enable QI interface */
3644 		rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map);
3645 
3646 	ret = munmap(internals->sec_hw, MAP_SIZE);
3647 	if (ret)
3648 		DPAA_SEC_WARN("munmap failed");
3649 
3650 	close(map_fd);
3651 	cryptodev->driver_id = dpaa_cryptodev_driver_id;
3652 	cryptodev->dev_ops = &crypto_ops;
3653 
3654 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3655 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3656 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3657 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3658 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3659 			RTE_CRYPTODEV_FF_SECURITY |
3660 			RTE_CRYPTODEV_FF_SYM_RAW_DP |
3661 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3662 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3663 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3664 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3665 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
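	/*
	 * The feature flags advertise symmetric crypto with HW acceleration,
	 * operation chaining, rte_security offload, the raw data-path API and
	 * every in-place/out-of-place SGL combination. Applications can query
	 * them before use, e.g.:
	 *
	 *	struct rte_cryptodev_info info;
	 *
	 *	rte_cryptodev_info_get(dev_id, &info);
	 *	if (info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)
	 *		use_oop_sgl_path();
	 *
	 * (dev_id and use_oop_sgl_path() are placeholders, not part of this
	 * driver.)
	 */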
3666 
3667 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3668 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3669 
3670 	/*
3671 	 * For secondary processes, don't initialise any further: the primary
3672 	 * process has already done this work, and the per-process function
3673 	 * pointers and limits set above are all a secondary needs.
3674 	 */
3675 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3676 		DPAA_SEC_WARN("Device already initialised by primary process");
3677 		return 0;
3678 	}
3679 	/* Initialize security_ctx only for primary process */
3680 	security_instance = rte_malloc("rte_security_instances_ops",
3681 				sizeof(struct rte_security_ctx), 0);
3682 	if (security_instance == NULL)
3683 		return -ENOMEM;
3684 	security_instance->device = (void *)cryptodev;
3685 	security_instance->ops = &dpaa_sec_security_ops;
3686 	security_instance->sess_cnt = 0;
3687 	cryptodev->security_ctx = security_instance;
3688 	rte_spinlock_init(&internals->lock);
3689 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3690 		/* init qman fq for queue pair */
3691 		qp = &internals->qps[i];
3692 		ret = dpaa_sec_init_tx(&qp->outq);
3693 		if (ret) {
3694 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3695 			goto init_error;
3696 		}
3697 	}
3698 
3699 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3700 		QMAN_FQ_FLAG_TO_DCPORTAL;
3701 	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3702 		/* create rx qman fq for sessions */
3703 		ret = qman_create_fq(0, flags, &internals->inq[i]);
3704 		if (unlikely(ret != 0)) {
3705 			DPAA_SEC_ERR("sec qman_create_fq failed");
3706 			goto init_error;
3707 		}
3708 	}
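	/*
	 * The Rx frame queues created above use dynamically allocated FQIDs
	 * and are directed to the SEC DCP portal (QMAN_FQ_FLAG_TO_DCPORTAL);
	 * they are only initialised and bound to a context later, when a
	 * session is attached to a queue pair.
	 */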
3709 
3710 	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3711 
3712 	DPAA_SEC_INFO("%s cryptodev init", cryptodev->data->name);
3713 	return 0;
3714 
3715 init_error:
3716 	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3717 
3718 	rte_free(cryptodev->security_ctx);
3719 	return -EFAULT;
3720 }
3721 
3722 static int
3723 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3724 				struct rte_dpaa_device *dpaa_dev)
3725 {
3726 	struct rte_cryptodev *cryptodev;
3727 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3728 
3729 	int retval;
3730 
3731 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3732 		return 0;
3733 
3734 	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3735 
3736 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3737 	if (cryptodev == NULL)
3738 		return -ENOMEM;
3739 
3740 	cryptodev->data->dev_private = rte_zmalloc_socket(
3741 				"cryptodev private structure",
3742 				sizeof(struct dpaa_sec_dev_private),
3743 				RTE_CACHE_LINE_SIZE,
3744 				rte_socket_id());
3745 
3746 	if (cryptodev->data->dev_private == NULL)
3747 		rte_panic("Cannot allocate memory for private "
3748 				"device data");
3749 
3750 	dpaa_dev->crypto_dev = cryptodev;
3751 	cryptodev->device = &dpaa_dev->device;
3752 
3753 	/* init user callbacks */
3754 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3755 
3756 	/* if sec device version is not configured */
3757 	if (!rta_get_sec_era()) {
3758 		const struct device_node *caam_node;
3759 
3760 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3761 			const uint32_t *prop = of_get_property(caam_node,
3762 					"fsl,sec-era",
3763 					NULL);
3764 			if (prop) {
3765 				rta_set_sec_era(
3766 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3767 				break;
3768 			}
3769 		}
3770 	}
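	/*
	 * The "fsl,sec-era" device-tree property (looked up above on nodes
	 * compatible with "fsl,sec-v4.0") tells the RTA descriptor library
	 * which SEC hardware revision it is building descriptors for;
	 * otherwise the library's previously configured or default era is
	 * used.
	 */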
3771 
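	/*
	 * Frame-queue creation in dpaa_sec_dev_init() below requires an
	 * affine QMan software portal; initialise one for the probing thread
	 * if it does not already have one.
	 */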
3772 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3773 		retval = rte_dpaa_portal_init((void *)1);
3774 		if (retval) {
3775 			DPAA_SEC_ERR("Unable to initialize portal");
3776 			goto out;
3777 		}
3778 	}
3779 
3780 	/* Invoke PMD device initialization function */
3781 	retval = dpaa_sec_dev_init(cryptodev);
3782 	if (retval == 0) {
3783 		rte_cryptodev_pmd_probing_finish(cryptodev);
3784 		return 0;
3785 	}
3786 
3787 	retval = -ENXIO;
3788 out:
3789 	/* Error path: release the resources allocated for this device */
3790 	rte_free(cryptodev->data->dev_private);
3791 
3792 	rte_cryptodev_pmd_release_device(cryptodev);
3793 
3794 	return retval;
3795 }
3796 
3797 static int
3798 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3799 {
3800 	struct rte_cryptodev *cryptodev;
3801 	int ret;
3802 
3803 	cryptodev = dpaa_dev->crypto_dev;
3804 	if (cryptodev == NULL)
3805 		return -ENODEV;
3806 
3807 	ret = dpaa_sec_uninit(cryptodev);
3808 	if (ret)
3809 		return ret;
3810 
3811 	return rte_cryptodev_pmd_destroy(cryptodev);
3812 }
3813 
3814 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3815 	.drv_type = FSL_DPAA_CRYPTO,
3816 	.driver = {
3817 		.name = "DPAA SEC PMD"
3818 	},
3819 	.probe = cryptodev_dpaa_sec_probe,
3820 	.remove = cryptodev_dpaa_sec_remove,
3821 };
3822 
3823 static struct cryptodev_driver dpaa_sec_crypto_drv;
3824 
3825 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3826 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3827 		dpaa_cryptodev_driver_id);
3828 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3829 		DRIVER_DUMP_MODE "=<int>");
3830 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3831