xref: /dpdk/drivers/crypto/caam_jr/caam_jr.c (revision 2a7bb4fdf61e9edfb7adbaecb50e728b82da9e23)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017-2018 NXP
3  */
4 
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9 
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19 
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26 
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
30 #include <of.h>
31 
32 #define CAAM_JR_DBG	0
33 #define CRYPTODEV_NAME_CAAM_JR_PMD	crypto_caam_jr
34 static uint8_t cryptodev_driver_id;
35 int caam_jr_logtype;
36 
37 enum rta_sec_era rta_sec_era;
38 
39 /* Lists the states possible for the SEC user space driver. */
40 enum sec_driver_state_e {
41 	SEC_DRIVER_STATE_IDLE,		/* Driver not initialized */
42 	SEC_DRIVER_STATE_STARTED,	/* Driver initialized and can be used */
43 	SEC_DRIVER_STATE_RELEASE,	/* Driver release is in progress */
44 };
45 
46 /* Job rings used for communication with SEC HW */
47 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
48 
49 /* The current state of SEC user space driver */
50 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
51 
52 /* The number of job rings used by SEC user space driver */
53 static int g_job_rings_no;
54 static int g_job_rings_max;
55 
56 struct sec_outring_entry {
57 	phys_addr_t desc;	/* Pointer to completed descriptor */
58 	uint32_t status;	/* Status for completed descriptor */
59 } __rte_packed;
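/*
 * Each completed job is reported by the SEC block as one of these
 * entries in the output ring. __rte_packed keeps the layout identical
 * to the hardware view (no compiler padding between the two fields);
 * see hw_poll_job_ring() below for how status and desc are consumed.
 */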
60 
61 /* virtual address conversion when mempool support is available for ctx */
62 static inline phys_addr_t
63 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
64 {
65 	PMD_INIT_FUNC_TRACE();
66 	return (size_t)vaddr - ctx->vtop_offset;
67 }
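/*
 * Worked example with hypothetical addresses: if a ctx object sits at
 * virtual address 0x7f0000001000 and rte_mempool_virt2iova() reports
 * 0x81000, caam_jr_alloc_ctx() records
 * vtop_offset = 0x7f0000001000 - 0x81000. Converting e.g.
 * vaddr = &ctx->sg[0] at 0x7f0000001040 then yields 0x81040: a
 * constant-offset translation, valid for any address inside the same
 * mempool object, with no per-packet address lookup.
 */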
68 
69 static inline void
70 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
71 {
72 	PMD_INIT_FUNC_TRACE();
73 	/* report op status to sym->op and then free the ctx memory */
74 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
75 }
76 
77 static inline struct caam_jr_op_ctx *
78 caam_jr_alloc_ctx(struct caam_jr_session *ses)
79 {
80 	struct caam_jr_op_ctx *ctx;
81 	int ret;
82 
83 	PMD_INIT_FUNC_TRACE();
84 	ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
85 	if (ret || !ctx) {
86 		CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
87 		return NULL;
88 	}
89 	/*
90 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
91 	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
92 	 * to clear all the SG entries. Since caam_jr_alloc_ctx() runs for
93 	 * each packet, this is cheaper than the costlier memset().
94 	 */
95 	dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96 	dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97 	dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98 	dcbz_64(&ctx->sg[SG_CACHELINE_3]);
99 
100 	ctx->ctx_pool = ses->ctx_pool;
101 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
102 
103 	return ctx;
104 }
105 
106 static
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108 			struct rte_cryptodev_stats *stats)
109 {
110 	struct caam_jr_qp **qp = (struct caam_jr_qp **)
111 					dev->data->queue_pairs;
112 	int i;
113 
114 	PMD_INIT_FUNC_TRACE();
115 	if (stats == NULL) {
116 		CAAM_JR_ERR("Invalid stats ptr NULL");
117 		return;
118 	}
119 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
120 		if (qp[i] == NULL) {
121 			CAAM_JR_WARN("Uninitialised queue pair");
122 			continue;
123 		}
124 
125 		stats->enqueued_count += qp[i]->tx_pkts;
126 		stats->dequeued_count += qp[i]->rx_pkts;
127 		stats->enqueue_err_count += qp[i]->tx_errs;
128 		stats->dequeue_err_count += qp[i]->rx_errs;
129 		CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130 			     "\n\tTX Ring Full = %" PRIu64,
131 			     qp[i]->rx_poll_err,
132 			     qp[i]->tx_ring_full);
133 	}
134 }
135 
136 static
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
138 {
139 	int i;
140 	struct caam_jr_qp **qp = (struct caam_jr_qp **)
141 				   (dev->data->queue_pairs);
142 
143 	PMD_INIT_FUNC_TRACE();
144 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
145 		if (qp[i] == NULL) {
146 			CAAM_JR_WARN("Uninitialised queue pair");
147 			continue;
148 		}
149 		qp[i]->rx_pkts = 0;
150 		qp[i]->rx_errs = 0;
151 		qp[i]->rx_poll_err = 0;
152 		qp[i]->tx_pkts = 0;
153 		qp[i]->tx_errs = 0;
154 		qp[i]->tx_ring_full = 0;
155 	}
156 }
157 
158 static inline int
159 is_cipher_only(struct caam_jr_session *ses)
160 {
161 	PMD_INIT_FUNC_TRACE();
162 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
163 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
164 }
165 
166 static inline int
167 is_auth_only(struct caam_jr_session *ses)
168 {
169 	PMD_INIT_FUNC_TRACE();
170 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
171 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
172 }
173 
174 static inline int
175 is_aead(struct caam_jr_session *ses)
176 {
177 	PMD_INIT_FUNC_TRACE();
178 	return ((ses->cipher_alg == 0) &&
179 		(ses->auth_alg == 0) &&
180 		(ses->aead_alg != 0));
181 }
182 
183 static inline int
184 is_auth_cipher(struct caam_jr_session *ses)
185 {
186 	PMD_INIT_FUNC_TRACE();
187 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
188 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
189 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
190 }
191 
192 static inline int
193 is_proto_ipsec(struct caam_jr_session *ses)
194 {
195 	PMD_INIT_FUNC_TRACE();
196 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
197 }
198 
199 static inline int
200 is_encode(struct caam_jr_session *ses)
201 {
202 	PMD_INIT_FUNC_TRACE();
203 	return ses->dir == DIR_ENC;
204 }
205 
206 static inline int
207 is_decode(struct caam_jr_session *ses)
208 {
209 	PMD_INIT_FUNC_TRACE();
210 	return ses->dir == DIR_DEC;
211 }
212 
213 static inline void
214 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
215 {
216 	PMD_INIT_FUNC_TRACE();
217 	switch (ses->auth_alg) {
218 	case RTE_CRYPTO_AUTH_NULL:
219 		ses->digest_length = 0;
220 		break;
221 	case RTE_CRYPTO_AUTH_MD5_HMAC:
222 		alginfo_a->algtype =
223 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
224 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
225 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
226 		break;
227 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
228 		alginfo_a->algtype =
229 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
230 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
231 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
232 		break;
233 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
234 		alginfo_a->algtype =
235 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
236 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
237 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
238 		break;
239 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
240 		alginfo_a->algtype =
241 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
242 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
243 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
244 		break;
245 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
246 		alginfo_a->algtype =
247 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
248 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
249 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
250 		break;
251 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
252 		alginfo_a->algtype =
253 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
254 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
255 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
256 		break;
257 	default:
258 		CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
259 	}
260 }
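/*
 * For IPsec sessions the algtype is a protocol-level constant (e.g.
 * OP_PCL_IPSEC_HMAC_SHA1_96, i.e. the truncated 96-bit ICV of
 * RFC 2404), while plain crypto sessions get the raw CAAM ALGSEL
 * value and take the ICV length from ses->digest_length instead.
 */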
261 
262 static inline void
263 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
264 {
265 	PMD_INIT_FUNC_TRACE();
266 	switch (ses->cipher_alg) {
267 	case RTE_CRYPTO_CIPHER_NULL:
268 		break;
269 	case RTE_CRYPTO_CIPHER_AES_CBC:
270 		alginfo_c->algtype =
271 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
272 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
273 		alginfo_c->algmode = OP_ALG_AAI_CBC;
274 		break;
275 	case RTE_CRYPTO_CIPHER_3DES_CBC:
276 		alginfo_c->algtype =
277 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
278 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
279 		alginfo_c->algmode = OP_ALG_AAI_CBC;
280 		break;
281 	case RTE_CRYPTO_CIPHER_AES_CTR:
282 		alginfo_c->algtype =
283 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
284 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
285 		alginfo_c->algmode = OP_ALG_AAI_CTR;
286 		break;
287 	default:
288 		CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
289 	}
290 }
291 
292 static inline void
293 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
294 {
295 	PMD_INIT_FUNC_TRACE();
296 	switch (ses->aead_alg) {
297 	case RTE_CRYPTO_AEAD_AES_GCM:
298 		alginfo->algtype = OP_ALG_ALGSEL_AES;
299 		alginfo->algmode = OP_ALG_AAI_GCM;
300 		break;
301 	default:
302 		CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
303 	}
304 }
305 
306 /* prepare command block of the session */
307 static int
308 caam_jr_prep_cdb(struct caam_jr_session *ses)
309 {
310 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
311 	int32_t shared_desc_len = 0;
312 	struct sec_cdb *cdb;
313 	int err;
314 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
315 	int swap = false;
316 #else
317 	int swap = true;
318 #endif
319 
320 	PMD_INIT_FUNC_TRACE();
321 	if (ses->cdb)
322 		caam_jr_dma_free(ses->cdb);
323 
324 	cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
325 	if (!cdb) {
326 		CAAM_JR_ERR("failed to allocate memory for cdb\n");
327 		return -1;
328 	}
329 
330 	ses->cdb = cdb;
331 
332 	memset(cdb, 0, sizeof(struct sec_cdb));
333 
334 	if (is_cipher_only(ses)) {
335 		caam_cipher_alg(ses, &alginfo_c);
336 		if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
337 			CAAM_JR_ERR("not supported cipher alg");
338 			rte_free(cdb);
339 			return -ENOTSUP;
340 		}
341 
342 		alginfo_c.key = (size_t)ses->cipher_key.data;
343 		alginfo_c.keylen = ses->cipher_key.length;
344 		alginfo_c.key_enc_flags = 0;
345 		alginfo_c.key_type = RTA_DATA_IMM;
346 
347 		shared_desc_len = cnstr_shdsc_blkcipher(
348 						cdb->sh_desc, true,
349 						swap, &alginfo_c,
350 						NULL,
351 						ses->iv.length,
352 						ses->dir);
353 	} else if (is_auth_only(ses)) {
354 		caam_auth_alg(ses, &alginfo_a);
355 		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
356 			CAAM_JR_ERR("not supported auth alg");
357 			rte_free(cdb);
358 			return -ENOTSUP;
359 		}
360 
361 		alginfo_a.key = (size_t)ses->auth_key.data;
362 		alginfo_a.keylen = ses->auth_key.length;
363 		alginfo_a.key_enc_flags = 0;
364 		alginfo_a.key_type = RTA_DATA_IMM;
365 
366 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
367 						   swap, &alginfo_a,
368 						   !ses->dir,
369 						   ses->digest_length);
370 	} else if (is_aead(ses)) {
371 		caam_aead_alg(ses, &alginfo);
372 		if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
373 			CAAM_JR_ERR("not supported aead alg");
374 			rte_free(cdb);
375 			return -ENOTSUP;
376 		}
377 		alginfo.key = (size_t)ses->aead_key.data;
378 		alginfo.keylen = ses->aead_key.length;
379 		alginfo.key_enc_flags = 0;
380 		alginfo.key_type = RTA_DATA_IMM;
381 
382 		if (ses->dir == DIR_ENC)
383 			shared_desc_len = cnstr_shdsc_gcm_encap(
384 					cdb->sh_desc, true, swap,
385 					&alginfo,
386 					ses->iv.length,
387 					ses->digest_length);
388 		else
389 			shared_desc_len = cnstr_shdsc_gcm_decap(
390 					cdb->sh_desc, true, swap,
391 					&alginfo,
392 					ses->iv.length,
393 					ses->digest_length);
394 	} else {
395 		caam_cipher_alg(ses, &alginfo_c);
396 		if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
397 			CAAM_JR_ERR("not supported cipher alg");
398 			rte_free(cdb);
399 			return -ENOTSUP;
400 		}
401 
402 		alginfo_c.key = (size_t)ses->cipher_key.data;
403 		alginfo_c.keylen = ses->cipher_key.length;
404 		alginfo_c.key_enc_flags = 0;
405 		alginfo_c.key_type = RTA_DATA_IMM;
406 
407 		caam_auth_alg(ses, &alginfo_a);
408 		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
409 			CAAM_JR_ERR("not supported auth alg");
410 			rte_free(cdb);
411 			return -ENOTSUP;
412 		}
413 
414 		alginfo_a.key = (size_t)ses->auth_key.data;
415 		alginfo_a.keylen = ses->auth_key.length;
416 		alginfo_a.key_enc_flags = 0;
417 		alginfo_a.key_type = RTA_DATA_IMM;
418 
419 		cdb->sh_desc[0] = alginfo_c.keylen;
420 		cdb->sh_desc[1] = alginfo_a.keylen;
421 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
422 				       MIN_JOB_DESC_SIZE,
423 				       (unsigned int *)cdb->sh_desc,
424 				       &cdb->sh_desc[2], 2);
425 
426 		if (err < 0) {
427 			CAAM_JR_ERR("Crypto: Incorrect key lengths");
428 			rte_free(cdb);
429 			return err;
430 		}
431 		if (cdb->sh_desc[2] & 1)
432 			alginfo_c.key_type = RTA_DATA_IMM;
433 		else {
434 			alginfo_c.key = (size_t)caam_jr_mem_vtop(
435 						(void *)(size_t)alginfo_c.key);
436 			alginfo_c.key_type = RTA_DATA_PTR;
437 		}
438 		if (cdb->sh_desc[2] & (1<<1))
439 			alginfo_a.key_type = RTA_DATA_IMM;
440 		else {
441 			alginfo_a.key = (size_t)caam_jr_mem_vtop(
442 						(void *)(size_t)alginfo_a.key);
443 			alginfo_a.key_type = RTA_DATA_PTR;
444 		}
445 		cdb->sh_desc[0] = 0;
446 		cdb->sh_desc[1] = 0;
447 		cdb->sh_desc[2] = 0;
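		/*
		 * Note on the dance above: sh_desc[0]/[1] were only scratch
		 * inputs to rta_inline_query(), carrying the two key lengths,
		 * and sh_desc[2] received a bitmask (bit 0 = cipher key,
		 * bit 1 = auth key) saying which keys still fit immediately
		 * (inline) in the shared descriptor; the others are passed by
		 * pointer (RTA_DATA_PTR). The scratch words are zeroed before
		 * the real descriptor is constructed below.
		 */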
448 		if (is_proto_ipsec(ses)) {
449 			if (ses->dir == DIR_ENC) {
450 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
451 						cdb->sh_desc,
452 						true, swap, SHR_SERIAL,
453 						&ses->encap_pdb,
454 						(uint8_t *)&ses->ip4_hdr,
455 						&alginfo_c, &alginfo_a);
456 			} else if (ses->dir == DIR_DEC) {
457 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
458 						cdb->sh_desc,
459 						true, swap, SHR_SERIAL,
460 						&ses->decap_pdb,
461 						&alginfo_c, &alginfo_a);
462 			}
463 		} else {
464 			/* Auth_only_len is set as 0 here and it will be
465 			 * overwritten per packet in the job descriptor (DPOVRD).
466 			 */
467 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
468 					true, swap, &alginfo_c, &alginfo_a,
469 					ses->iv.length, 0,
470 					ses->digest_length, ses->dir);
471 		}
472 	}
473 
474 	if (shared_desc_len < 0) {
475 		CAAM_JR_ERR("error in preparing command block");
476 		return shared_desc_len;
477 	}
478 
479 #if CAAM_JR_DBG
480 	SEC_DUMP_DESC(cdb->sh_desc);
481 #endif
482 
483 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
484 
485 	return 0;
486 }
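/*
 * Note that caam_jr_prep_cdb() is not invoked at session-configure
 * time; caam_jr_enqueue_op() calls it lazily the first time a session
 * is used on a queue pair (and again if the session moves to another
 * qp), see the ses->qp check there.
 */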
487 
488 /* @brief Poll the HW for already processed jobs in the JR
489  * and silently discard the available jobs or notify them to UA
490  * with indicated error code.
491  *
492  * @param [in,out]  job_ring        The job ring to poll.
493  * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
494  *				    descriptors are to be discarded
495  *                                  or notified to UA with given error_code.
496  * @param [out] notified_descs    Number of notified descriptors. Can be NULL
497  *					if do_notify is #FALSE
498  */
499 static void
500 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
501 		  uint32_t do_notify,
502 		  uint32_t *notified_descs)
503 {
504 	int32_t jobs_no_to_discard = 0;
505 	int32_t discarded_descs_no = 0;
506 
507 	PMD_INIT_FUNC_TRACE();
508 	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
509 		job_ring, job_ring->pidx, job_ring->cidx, do_notify);
510 
511 	jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
512 
513 	/* Discard all jobs */
514 	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
515 		  job_ring, job_ring->pidx, job_ring->cidx,
516 		  jobs_no_to_discard);
517 
518 	while (jobs_no_to_discard > discarded_descs_no) {
519 		discarded_descs_no++;
520 		/* Increment the consumer index for the current job ring,
521 		 * AFTER saving the job in a temporary location
522 		 * (mirrors hw_poll_job_ring() below).
523 		 */
524 		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
525 					 SEC_JOB_RING_SIZE);
526 
527 		hw_remove_entries(job_ring, 1);
528 	}
529 
530 	if (do_notify == true) {
531 		ASSERT(notified_descs != NULL);
532 		*notified_descs = discarded_descs_no;
533 	}
534 }
535 
536 /* @brief Poll the HW for already processed jobs in the JR
537  * and notify the available jobs to UA.
538  *
539  * @param [in]  job_ring	The job ring to poll.
540  * @param [in]  limit           The maximum number of jobs to notify.
541  *                              If set to negative value, all available jobs are
542  *				notified.
543  *
544  * @retval >=0 Number of jobs notified to UA.
545  * @retval -1  on error
546  */
547 static int
548 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
549 		 struct rte_crypto_op **ops, int32_t limit,
550 		 struct caam_jr_qp *jr_qp)
551 {
552 	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify */
553 	int32_t number_of_jobs_available = 0;
554 	int32_t notified_descs_no = 0;
555 	uint32_t sec_error_code = 0;
556 	struct job_descriptor *current_desc;
557 	phys_addr_t current_desc_addr;
558 	phys_addr_t *temp_addr;
559 	struct caam_jr_op_ctx *ctx;
560 
561 	PMD_INIT_FUNC_TRACE();
562 	/* TODO: check that the ops array has enough room */
563 	/* check here if any JR error that cannot be written
564 	 * in the output status word has occurred
565 	 */
566 	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
567 		CAAM_JR_INFO("err received");
568 		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
569 					GET_JR_REG(JRINT, job_ring));
570 		if (unlikely(sec_error_code)) {
571 			hw_job_ring_error_print(job_ring, sec_error_code);
572 			return -1;
573 		}
574 	}
575 	/* compute the number of jobs available in the job ring based on the
576 	 * producer and consumer index values.
577 	 */
578 	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
579 	/* Compute the number of notifications that need to be raised to UA
580 	 * If limit > total number of done jobs -> notify all done jobs
581 	 * If limit = 0 -> no jobs are notified
582 	 * If limit < total number of done jobs -> notify a number
583 	 * of done jobs equal with limit
584 	 */
585 	jobs_no_to_notify = (limit > number_of_jobs_available) ?
586 				number_of_jobs_available : limit;
587 	CAAM_JR_DP_DEBUG(
588 		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
589 		job_ring, job_ring->pidx, job_ring->cidx,
590 		limit, number_of_jobs_available, jobs_no_to_notify);
591 
592 	rte_smp_rmb();
593 
594 	while (jobs_no_to_notify > notified_descs_no) {
595 		static uint64_t false_alarm;
596 		static uint64_t real_poll;
597 
598 		/* Get job status here */
599 		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
600 		/* Get completed descriptor */
601 		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
602 		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
603 
604 		real_poll++;
605 		/* TODO: check whether this is a false alarm (no desc present) */
606 		if (!current_desc_addr) {
607 			false_alarm++;
608 			printf("false alarm %" PRIu64 " real %" PRIu64
609 				" sec_err = 0x%x cidx = %d\n",
610 				false_alarm, real_poll,
611 				sec_error_code, job_ring->cidx);
612 			rte_panic("CAAM JR descriptor NULL");
613 			return notified_descs_no;
614 		}
615 		current_desc = (struct job_descriptor *)
616 				caam_jr_dma_ptov(current_desc_addr);
617 		/* now increment the consumer index for the current job ring,
618 		 * AFTER saving job in temporary location!
619 		 */
620 		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
621 				 SEC_JOB_RING_SIZE);
622 		/* Signal that the job has been processed and the slot is free */
623 		hw_remove_entries(job_ring, 1);
624 		/* TODO: handle multiple ops/packets */
625 		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
626 		if (unlikely(sec_error_code)) {
627 			CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
628 				job_ring->cidx, sec_error_code);
629 			hw_handle_job_ring_error(job_ring, sec_error_code);
630 			/* TODO: map SEC error codes to precise op statuses */
631 			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
632 			jr_qp->rx_errs++;
633 		} else {
634 			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
635 #if CAAM_JR_DBG
636 			if (ctx->op->sym->m_dst) {
637 				rte_hexdump(stdout, "PROCESSED",
638 				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
639 				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
640 			} else {
641 				rte_hexdump(stdout, "PROCESSED",
642 				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
643 				rte_pktmbuf_data_len(ctx->op->sym->m_src));
644 			}
645 #endif
646 		}
647 		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
648 			struct ip *ip4_hdr;
649 
650 			if (ctx->op->sym->m_dst) {
651 				/*TODO check for ip header or other*/
652 				ip4_hdr = (struct ip *)
653 				rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
654 				ctx->op->sym->m_dst->pkt_len =
655 					rte_be_to_cpu_16(ip4_hdr->ip_len);
656 				ctx->op->sym->m_dst->data_len =
657 					rte_be_to_cpu_16(ip4_hdr->ip_len);
658 			} else {
659 				ip4_hdr = (struct ip *)
660 				rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
661 				ctx->op->sym->m_src->pkt_len =
662 					rte_be_to_cpu_16(ip4_hdr->ip_len);
663 				ctx->op->sym->m_src->data_len =
664 					rte_be_to_cpu_16(ip4_hdr->ip_len);
665 			}
666 		}
667 		*ops = ctx->op;
668 		caam_jr_op_ending(ctx);
669 		ops++;
670 		notified_descs_no++;
671 	}
672 	return notified_descs_no;
673 }
674 
675 static uint16_t
676 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
677 		       uint16_t nb_ops)
678 {
679 	struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
680 	struct sec_job_ring_t *ring = jr_qp->ring;
681 	int num_rx;
682 	int ret;
683 
684 	PMD_INIT_FUNC_TRACE();
685 	CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
686 
687 	/* Poll the job ring and notify at most nb_ops completed jobs.
688 	 * (nb_ops is unsigned here, so the negative "poll everything"
689 	 * mode of hw_poll_job_ring() is never requested.)
690 	 */
691 
692 	/* Run hw poll job ring */
693 	num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
694 	if (num_rx < 0) {
695 		CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
696 		return 0;
697 	}
698 
699 	CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
700 
701 	if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
702 		if (num_rx < nb_ops) {
703 			ret = caam_jr_enable_irqs(ring->irq_fd);
704 			SEC_ASSERT(ret == 0, ret,
705 			"Failed to enable irqs for job ring %p", ring);
706 		}
707 	} else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
708 
709 		/* Always enable IRQ generation when in pure IRQ mode */
710 		ret = caam_jr_enable_irqs(ring->irq_fd);
711 		SEC_ASSERT(ret == 0, ret,
712 			"Failed to enable irqs for job ring %p", ring);
713 	}
714 
715 	jr_qp->rx_pkts += num_rx;
716 
717 	return num_rx;
718 }
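/*
 * Application-side sketch of driving this burst API through the
 * generic cryptodev wrappers (dev_id, qp_id and the ops[] array are
 * assumed to have been set up by the caller):
 *
 *	uint16_t sent, done = 0;
 *
 *	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    &ops[done],
 *						    sent - done);
 *
 * Each dequeued op carries RTE_CRYPTO_OP_STATUS_SUCCESS or _ERROR as
 * set by hw_poll_job_ring() above.
 */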
719 
720 /**
721  * packet looks like:
722  *		|<----data_len------->|
723  *    |ip_header|ah_header|icv|payload|
724  *              ^
725  *		|
726  *	   mbuf->pkt.data
727  */
728 static inline struct caam_jr_op_ctx *
729 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
730 {
731 	struct rte_crypto_sym_op *sym = op->sym;
732 	struct rte_mbuf *mbuf = sym->m_src;
733 	struct caam_jr_op_ctx *ctx;
734 	struct sec4_sg_entry *sg;
735 	int	length;
736 	struct sec_cdb *cdb;
737 	uint64_t sdesc_offset;
738 	struct sec_job_descriptor_t *jobdescr;
739 	uint8_t extra_segs;
740 
741 	PMD_INIT_FUNC_TRACE();
742 	if (is_decode(ses))
743 		extra_segs = 2;
744 	else
745 		extra_segs = 1;
746 
747 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
748 		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
749 				MAX_SG_ENTRIES);
750 		return NULL;
751 	}
752 
753 	ctx = caam_jr_alloc_ctx(ses);
754 	if (!ctx)
755 		return NULL;
756 
757 	ctx->op = op;
758 
759 	cdb = ses->cdb;
760 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
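	/* sdesc_offset is the byte offset of sh_desc[] within the cdb; it is
	 * added to the cdb's bus address below so the job descriptor points
	 * straight at the shared descriptor.
	 */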
761 
762 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
763 
764 	SEC_JD_INIT(jobdescr);
765 	SEC_JD_SET_SD(jobdescr,
766 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
767 		cdb->sh_hdr.hi.field.idlen);
768 
769 	/* output */
770 	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
771 			0, ses->digest_length);
772 
773 	/*input */
774 	sg = &ctx->sg[0];
775 	length = sym->auth.data.length;
776 	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
777 	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
778 
779 	/* Successive segs */
780 	mbuf = mbuf->next;
781 	while (mbuf) {
782 		sg++;
783 		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
784 		sg->len = cpu_to_caam32(mbuf->data_len);
785 		mbuf = mbuf->next;
786 	}
787 
788 	if (is_decode(ses)) {
789 		/* digest verification case */
790 		sg++;
791 		/* hash result or digest, save digest first */
792 		rte_memcpy(ctx->digest, sym->auth.digest.data,
793 			   ses->digest_length);
794 #if CAAM_JR_DBG
795 		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
796 #endif
797 		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
798 		sg->len = cpu_to_caam32(ses->digest_length);
799 		length += ses->digest_length;
800 	} else {
801 		length -= ses->digest_length;
802 	}
803 
804 	/* last element*/
805 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
806 
807 	SEC_JD_SET_IN_PTR(jobdescr,
808 		(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
809 	/* enabling sg list */
810 	(jobdescr)->seq_in.command.word  |= 0x01000000;
811 
812 	return ctx;
813 }
814 
815 static inline struct caam_jr_op_ctx *
816 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
817 {
818 	struct rte_crypto_sym_op *sym = op->sym;
819 	struct caam_jr_op_ctx *ctx;
820 	struct sec4_sg_entry *sg;
821 	rte_iova_t start_addr;
822 	struct sec_cdb *cdb;
823 	uint64_t sdesc_offset;
824 	struct sec_job_descriptor_t *jobdescr;
825 
826 	PMD_INIT_FUNC_TRACE();
827 	ctx = caam_jr_alloc_ctx(ses);
828 	if (!ctx)
829 		return NULL;
830 
831 	ctx->op = op;
832 
833 	cdb = ses->cdb;
834 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
835 
836 	start_addr = rte_pktmbuf_iova(sym->m_src);
837 
838 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
839 
840 	SEC_JD_INIT(jobdescr);
841 	SEC_JD_SET_SD(jobdescr,
842 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
843 		cdb->sh_hdr.hi.field.idlen);
844 
845 	/* output */
846 	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
847 			0, ses->digest_length);
848 
849 	/*input */
850 	if (is_decode(ses)) {
851 		sg = &ctx->sg[0];
852 		SEC_JD_SET_IN_PTR(jobdescr,
853 			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
854 			(sym->auth.data.length + ses->digest_length));
855 		/* enabling sg list */
856 		(jobdescr)->seq_in.command.word  |= 0x01000000;
857 
858 		/* hash result or digest, save digest first */
859 		rte_memcpy(ctx->digest, sym->auth.digest.data,
860 			   ses->digest_length);
861 		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
862 		sg->len = cpu_to_caam32(sym->auth.data.length);
863 
864 #if CAAM_JR_DBG
865 		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
866 #endif
867 		/* let's check digest by hw */
868 		sg++;
869 		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
870 		sg->len = cpu_to_caam32(ses->digest_length);
871 		/* last element*/
872 		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
873 	} else {
874 		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
875 			sym->auth.data.offset, sym->auth.data.length);
876 	}
877 	return ctx;
878 }
879 
880 static inline struct caam_jr_op_ctx *
881 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
882 {
883 	struct rte_crypto_sym_op *sym = op->sym;
884 	struct rte_mbuf *mbuf = sym->m_src;
885 	struct caam_jr_op_ctx *ctx;
886 	struct sec4_sg_entry *sg, *in_sg;
887 	int length;
888 	struct sec_cdb *cdb;
889 	uint64_t sdesc_offset;
890 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
891 			ses->iv.offset);
892 	struct sec_job_descriptor_t *jobdescr;
893 	uint8_t reg_segs;
894 
895 	PMD_INIT_FUNC_TRACE();
896 	if (sym->m_dst) {
897 		mbuf = sym->m_dst;
898 		reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
899 	} else {
900 		mbuf = sym->m_src;
901 		reg_segs = mbuf->nb_segs * 2 + 2;
902 	}
903 
904 	if (reg_segs > MAX_SG_ENTRIES) {
905 		CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
906 				MAX_SG_ENTRIES);
907 		return NULL;
908 	}
909 
910 	ctx = caam_jr_alloc_ctx(ses);
911 	if (!ctx)
912 		return NULL;
913 
914 	ctx->op = op;
915 	cdb = ses->cdb;
916 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
917 
918 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
919 
920 	SEC_JD_INIT(jobdescr);
921 	SEC_JD_SET_SD(jobdescr,
922 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
923 		cdb->sh_hdr.hi.field.idlen);
924 
925 #if CAAM_JR_DBG
926 	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
927 			sym->m_src->data_off, sym->cipher.data.offset,
928 			sym->cipher.data.length, ses->iv.length);
929 #endif
930 	/* output */
931 	if (sym->m_dst)
932 		mbuf = sym->m_dst;
933 	else
934 		mbuf = sym->m_src;
935 
936 	sg = &ctx->sg[0];
937 	length = sym->cipher.data.length;
938 
939 	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
940 		+ sym->cipher.data.offset);
941 	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
942 
943 	/* Successive segs */
944 	mbuf = mbuf->next;
945 	while (mbuf) {
946 		sg++;
947 		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
948 		sg->len = cpu_to_caam32(mbuf->data_len);
949 		mbuf = mbuf->next;
950 	}
951 	/* last element*/
952 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
953 
954 	SEC_JD_SET_OUT_PTR(jobdescr,
955 			(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
956 			length);
957 	/*enabling sg bit */
958 	(jobdescr)->seq_out.command.word  |= 0x01000000;
959 
960 	/*input */
961 	sg++;
962 	mbuf = sym->m_src;
963 	in_sg = sg;
964 
965 	length = sym->cipher.data.length + ses->iv.length;
966 
967 	/* IV */
968 	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
969 	sg->len = cpu_to_caam32(ses->iv.length);
970 
971 	/* 1st seg */
972 	sg++;
973 	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
974 				+ sym->cipher.data.offset);
975 	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
976 
977 	/* Successive segs */
978 	mbuf = mbuf->next;
979 	while (mbuf) {
980 		sg++;
981 		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
982 		sg->len = cpu_to_caam32(mbuf->data_len);
983 		mbuf = mbuf->next;
984 	}
985 	/* last element*/
986 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
987 
988 
989 	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
990 				length);
991 	/*enabling sg bit */
992 	(jobdescr)->seq_in.command.word  |= 0x01000000;
993 
994 	return ctx;
995 }
996 
997 static inline struct caam_jr_op_ctx *
998 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
999 {
1000 	struct rte_crypto_sym_op *sym = op->sym;
1001 	struct caam_jr_op_ctx *ctx;
1002 	struct sec4_sg_entry *sg;
1003 	rte_iova_t src_start_addr, dst_start_addr;
1004 	struct sec_cdb *cdb;
1005 	uint64_t sdesc_offset;
1006 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1007 			ses->iv.offset);
1008 	struct sec_job_descriptor_t *jobdescr;
1009 
1010 	PMD_INIT_FUNC_TRACE();
1011 	ctx = caam_jr_alloc_ctx(ses);
1012 	if (!ctx)
1013 		return NULL;
1014 
1015 	ctx->op = op;
1016 	cdb = ses->cdb;
1017 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1018 
1019 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1020 	if (sym->m_dst)
1021 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1022 	else
1023 		dst_start_addr = src_start_addr;
1024 
1025 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1026 
1027 	SEC_JD_INIT(jobdescr);
1028 	SEC_JD_SET_SD(jobdescr,
1029 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1030 		cdb->sh_hdr.hi.field.idlen);
1031 
1032 #if CAAM_JR_DBG
1033 	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1034 			sym->m_src->data_off, sym->cipher.data.offset,
1035 			sym->cipher.data.length, ses->iv.length);
1036 #endif
1037 	/* output */
1038 	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1039 			sym->cipher.data.offset,
1040 			sym->cipher.data.length + ses->iv.length);
1041 
1042 	/*input */
1043 	sg = &ctx->sg[0];
1044 	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1045 				sym->cipher.data.length + ses->iv.length);
1046 	/*enabling sg bit */
1047 	(jobdescr)->seq_in.command.word  |= 0x01000000;
1048 
1049 	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1050 	sg->len = cpu_to_caam32(ses->iv.length);
1051 
1052 	sg = &ctx->sg[1];
1053 	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1054 	sg->len = cpu_to_caam32(sym->cipher.data.length);
1055 	/* last element*/
1056 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1057 
1058 	return ctx;
1059 }
1060 
1061 /* For decapsulation:
1062  *     Input:
1063  * +----+----------------+--------------------------------+-----+
1064  * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1065  * +----+----------------+--------------------------------+-----+
1066  *     Output:
1067  * +--------------------------------+
1068  * | Decrypted & authenticated data |
1069  * +--------------------------------+
1070  */
1071 
1072 static inline struct caam_jr_op_ctx *
1073 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1074 {
1075 	struct rte_crypto_sym_op *sym = op->sym;
1076 	struct caam_jr_op_ctx *ctx;
1077 	struct sec4_sg_entry *sg, *out_sg, *in_sg;
1078 	struct rte_mbuf *mbuf;
1079 	uint32_t length = 0;
1080 	struct sec_cdb *cdb;
1081 	uint64_t sdesc_offset;
1082 	uint8_t req_segs;
1083 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1084 			ses->iv.offset);
1085 	struct sec_job_descriptor_t *jobdescr;
1086 	uint32_t auth_only_len;
1087 
1088 	PMD_INIT_FUNC_TRACE();
1089 	auth_only_len = op->sym->auth.data.length -
1090 				op->sym->cipher.data.length;
1091 
1092 	if (sym->m_dst) {
1093 		mbuf = sym->m_dst;
1094 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1095 	} else {
1096 		mbuf = sym->m_src;
1097 		req_segs = mbuf->nb_segs * 2 + 3;
1098 	}
1099 
1100 	if (req_segs > MAX_SG_ENTRIES) {
1101 		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1102 				MAX_SG_ENTRIES);
1103 		return NULL;
1104 	}
1105 
1106 	ctx = caam_jr_alloc_ctx(ses);
1107 	if (!ctx)
1108 		return NULL;
1109 
1110 	ctx->op = op;
1111 	cdb = ses->cdb;
1112 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1113 
1114 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1115 
1116 	SEC_JD_INIT(jobdescr);
1117 	SEC_JD_SET_SD(jobdescr,
1118 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1119 		cdb->sh_hdr.hi.field.idlen);
1120 
1121 	/* output */
1122 	if (sym->m_dst)
1123 		mbuf = sym->m_dst;
1124 	else
1125 		mbuf = sym->m_src;
1126 
1127 	out_sg = &ctx->sg[0];
1128 	if (is_encode(ses))
1129 		length = sym->auth.data.length + ses->digest_length;
1130 	else
1131 		length = sym->auth.data.length;
1132 
1133 	sg = &ctx->sg[0];
1134 
1135 	/* 1st seg */
1136 	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1137 		+ sym->auth.data.offset);
1138 	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1139 
1140 	/* Successive segs */
1141 	mbuf = mbuf->next;
1142 	while (mbuf) {
1143 		sg++;
1144 		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1145 		sg->len = cpu_to_caam32(mbuf->data_len);
1146 		mbuf = mbuf->next;
1147 	}
1148 
1149 	if (is_encode(ses)) {
1150 		/* set auth output */
1151 		sg++;
1152 		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1153 		sg->len = cpu_to_caam32(ses->digest_length);
1154 	}
1155 	/* last element*/
1156 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1157 
1158 	SEC_JD_SET_OUT_PTR(jobdescr,
1159 			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1160 	/* set sg bit */
1161 	(jobdescr)->seq_out.command.word  |= 0x01000000;
1162 
1163 	/* input */
1164 	sg++;
1165 	mbuf = sym->m_src;
1166 	in_sg = sg;
1167 	if (is_encode(ses))
1168 		length = ses->iv.length + sym->auth.data.length;
1169 	else
1170 		length = ses->iv.length + sym->auth.data.length
1171 						+ ses->digest_length;
1172 
1173 	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1174 	sg->len = cpu_to_caam32(ses->iv.length);
1175 
1176 	sg++;
1177 	/* 1st seg */
1178 	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1179 		+ sym->auth.data.offset);
1180 	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1181 
1182 	/* Successive segs */
1183 	mbuf = mbuf->next;
1184 	while (mbuf) {
1185 		sg++;
1186 		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1187 		sg->len = cpu_to_caam32(mbuf->data_len);
1188 		mbuf = mbuf->next;
1189 	}
1190 
1191 	if (is_decode(ses)) {
1192 		sg++;
1193 		rte_memcpy(ctx->digest, sym->auth.digest.data,
1194 		       ses->digest_length);
1195 		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1196 		sg->len = cpu_to_caam32(ses->digest_length);
1197 	}
1198 	/* last element*/
1199 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1200 
1201 	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1202 				length);
1203 	/* set sg bit */
1204 	(jobdescr)->seq_in.command.word  |= 0x01000000;
1205 	/* Auth_only_len is set as 0 in descriptor and it is
1206 	 * overwritten here in the jd which will update
1207 	 * the DPOVRD reg.
1208 	 */
1209 	if (auth_only_len)
1210 		/* set DPOVRD override */
1211 		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;
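	/*
	 * Worked example: auth.data.length = 28 and cipher.data.length = 16
	 * give auth_only_len = 12, so dpovrd = 0x8000000c -- the MSB marks
	 * the override as valid, the low bits carry the auth-only length.
	 */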
1212 
1213 	return ctx;
1214 }
1215 
1216 static inline struct caam_jr_op_ctx *
1217 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1218 {
1219 	struct rte_crypto_sym_op *sym = op->sym;
1220 	struct caam_jr_op_ctx *ctx;
1221 	struct sec4_sg_entry *sg;
1222 	rte_iova_t src_start_addr, dst_start_addr;
1223 	uint32_t length = 0;
1224 	struct sec_cdb *cdb;
1225 	uint64_t sdesc_offset;
1226 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1227 			ses->iv.offset);
1228 	struct sec_job_descriptor_t *jobdescr;
1229 	uint32_t auth_only_len;
1230 
1231 	PMD_INIT_FUNC_TRACE();
1232 	auth_only_len = op->sym->auth.data.length -
1233 				op->sym->cipher.data.length;
1234 
1235 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1236 	if (sym->m_dst)
1237 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1238 	else
1239 		dst_start_addr = src_start_addr;
1240 
1241 	ctx = caam_jr_alloc_ctx(ses);
1242 	if (!ctx)
1243 		return NULL;
1244 
1245 	ctx->op = op;
1246 	cdb = ses->cdb;
1247 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1248 
1249 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1250 
1251 	SEC_JD_INIT(jobdescr);
1252 	SEC_JD_SET_SD(jobdescr,
1253 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1254 		cdb->sh_hdr.hi.field.idlen);
1255 
1256 	/* input */
1257 	sg = &ctx->sg[0];
1258 	if (is_encode(ses)) {
1259 		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1260 		sg->len = cpu_to_caam32(ses->iv.length);
1261 		length += ses->iv.length;
1262 
1263 		sg++;
1264 		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1265 		sg->len = cpu_to_caam32(sym->auth.data.length);
1266 		length += sym->auth.data.length;
1267 		/* last element*/
1268 		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1269 	} else {
1270 		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1271 		sg->len = cpu_to_caam32(ses->iv.length);
1272 		length += ses->iv.length;
1273 
1274 		sg++;
1275 		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1276 		sg->len = cpu_to_caam32(sym->auth.data.length);
1277 		length += sym->auth.data.length;
1278 
1279 		rte_memcpy(ctx->digest, sym->auth.digest.data,
1280 		       ses->digest_length);
1281 		sg++;
1282 		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1283 		sg->len = cpu_to_caam32(ses->digest_length);
1284 		length += ses->digest_length;
1285 		/* last element*/
1286 		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1287 	}
1288 
1289 	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1290 				length);
1291 	/* set sg bit */
1292 	(jobdescr)->seq_in.command.word  |= 0x01000000;
1293 
1294 	/* output */
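	/* The output list starts at the fixed index sg[6]; the input list
	 * built above needs at most three entries (IV, payload, digest),
	 * which keeps the two lists disjoint within the 16-entry sg array.
	 */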
1295 	sg = &ctx->sg[6];
1296 
1297 	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1298 	sg->len = cpu_to_caam32(sym->cipher.data.length);
1299 	length = sym->cipher.data.length;
1300 
1301 	if (is_encode(ses)) {
1302 		/* set auth output */
1303 		sg++;
1304 		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1305 		sg->len = cpu_to_caam32(ses->digest_length);
1306 		length += ses->digest_length;
1307 	}
1308 	/* last element*/
1309 	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1310 
1311 	SEC_JD_SET_OUT_PTR(jobdescr,
1312 			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1313 	/* set sg bit */
1314 	(jobdescr)->seq_out.command.word  |= 0x01000000;
1315 
1316 	/* Auth_only_len is set as 0 in descriptor and it is
1317 	 * overwritten here in the jd which will update
1318 	 * the DPOVRD reg.
1319 	 */
1320 	if (auth_only_len)
1321 		/* set DPOVRD override */
1322 		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1323 
1324 	return ctx;
1325 }
1326 
1327 static inline struct caam_jr_op_ctx *
1328 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1329 {
1330 	struct rte_crypto_sym_op *sym = op->sym;
1331 	struct caam_jr_op_ctx *ctx = NULL;
1332 	phys_addr_t src_start_addr, dst_start_addr;
1333 	struct sec_cdb *cdb;
1334 	uint64_t sdesc_offset;
1335 	struct sec_job_descriptor_t *jobdescr;
1336 
1337 	PMD_INIT_FUNC_TRACE();
1338 	ctx = caam_jr_alloc_ctx(ses);
1339 	if (!ctx)
1340 		return NULL;
1341 	ctx->op = op;
1342 
1343 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1344 	if (sym->m_dst)
1345 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1346 	else
1347 		dst_start_addr = src_start_addr;
1348 
1349 	cdb = ses->cdb;
1350 	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1351 
1352 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1353 
1354 	SEC_JD_INIT(jobdescr);
1355 	SEC_JD_SET_SD(jobdescr,
1356 		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1357 			cdb->sh_hdr.hi.field.idlen);
1358 
1359 	/* output */
1360 	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1361 			sym->m_src->buf_len - sym->m_src->data_off);
1362 	/* input */
1363 	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1364 			sym->m_src->pkt_len);
1365 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1366 
1367 	return ctx;
1368 }
1369 
1370 static int
1371 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1372 {
1373 	struct sec_job_ring_t *ring = qp->ring;
1374 	struct caam_jr_session *ses;
1375 	struct caam_jr_op_ctx *ctx = NULL;
1376 	struct sec_job_descriptor_t *jobdescr __rte_unused;
1377 
1378 	PMD_INIT_FUNC_TRACE();
1379 	switch (op->sess_type) {
1380 	case RTE_CRYPTO_OP_WITH_SESSION:
1381 		ses = (struct caam_jr_session *)
1382 		get_sym_session_private_data(op->sym->session,
1383 					cryptodev_driver_id);
1384 		break;
1385 	case RTE_CRYPTO_OP_SECURITY_SESSION:
1386 		ses = (struct caam_jr_session *)
1387 			get_sec_session_private_data(
1388 					op->sym->sec_session);
1389 		break;
1390 	default:
1391 		CAAM_JR_DP_ERR("sessionless crypto op not supported");
1392 		qp->tx_errs++;
1393 		return -1;
1394 	}
1395 
1396 	if (unlikely(!ses->qp || ses->qp != qp)) {
1397 		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1398 		ses->qp = qp;
1399 		caam_jr_prep_cdb(ses);
1400 	}
1401 
1402 	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1403 		if (is_auth_cipher(ses))
1404 			ctx = build_cipher_auth(op, ses);
1405 		else if (is_aead(ses))
1406 			goto err1;
1407 		else if (is_auth_only(ses))
1408 			ctx = build_auth_only(op, ses);
1409 		else if (is_cipher_only(ses))
1410 			ctx = build_cipher_only(op, ses);
1411 		else if (is_proto_ipsec(ses))
1412 			ctx = build_proto(op, ses);
1413 	} else {
1414 		if (is_auth_cipher(ses))
1415 			ctx = build_cipher_auth_sg(op, ses);
1416 		else if (is_aead(ses))
1417 			goto err1;
1418 		else if (is_auth_only(ses))
1419 			ctx = build_auth_only_sg(op, ses);
1420 		else if (is_cipher_only(ses))
1421 			ctx = build_cipher_only_sg(op, ses);
1422 	}
1423 err1:
1424 	if (unlikely(!ctx)) {
1425 		qp->tx_errs++;
1426 		CAAM_JR_ERR("not supported sec op");
1427 		return -1;
1428 	}
1429 #if CAAM_JR_DBG
1430 	if (is_decode(ses))
1431 		rte_hexdump(stdout, "DECODE",
1432 			rte_pktmbuf_mtod(op->sym->m_src, void *),
1433 			rte_pktmbuf_data_len(op->sym->m_src));
1434 	else
1435 		rte_hexdump(stdout, "ENCODE",
1436 			rte_pktmbuf_mtod(op->sym->m_src, void *),
1437 			rte_pktmbuf_data_len(op->sym->m_src));
1438 
1439 	printf("\n JD before conversion\n");
1440 	for (int i = 0; i < 12; i++)
1441 		printf("\n 0x%08x", ctx->jobdes.desc[i]);
1442 #endif
1443 
1444 	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1445 		      ring, ring->pidx, ring->cidx);
1446 
1447 	/* todo - do we want to retry */
1448 	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1449 			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1450 		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1451 			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1452 		caam_jr_op_ending(ctx);
1453 		qp->tx_ring_full++;
1454 		return -EBUSY;
1455 	}
1456 
1457 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1458 	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1459 
1460 	jobdescr->deschdr.command.word =
1461 		cpu_to_caam32(jobdescr->deschdr.command.word);
1462 	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1463 	jobdescr->seq_out.command.word =
1464 		cpu_to_caam32(jobdescr->seq_out.command.word);
1465 	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1466 	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1467 	jobdescr->seq_in.command.word =
1468 		cpu_to_caam32(jobdescr->seq_in.command.word);
1469 	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1470 	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1471 	jobdescr->load_dpovrd.command.word =
1472 		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1473 	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1474 #endif
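	/*
	 * When CORE_BYTE_ORDER matches CAAM_BYTE_ORDER the block above is
	 * compiled out entirely and the cpu_to_caam32()/cpu_to_caam64()
	 * conversions used elsewhere are expected to reduce to no-ops, so
	 * matching-endian systems pay nothing for the swap.
	 */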
1475 
1476 	/* Set ptr in input ring to current descriptor	*/
1477 	sec_write_addr(&ring->input_ring[ring->pidx],
1478 			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1479 	rte_smp_wmb();
1480 
1481 	/* Notify HW that a new job is enqueued */
1482 	hw_enqueue_desc_on_job_ring(ring);
1483 
1484 	/* increment the producer index for the current job ring */
1485 	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1486 
1487 	return 0;
1488 }
1489 
1490 static uint16_t
1491 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1492 		       uint16_t nb_ops)
1493 {
1494 	/* Transmit the ops to the given device and queue pair */
1495 	uint32_t loop;
1496 	int32_t ret;
1497 	struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1498 	uint16_t num_tx = 0;
1499 
1500 	PMD_INIT_FUNC_TRACE();
1501 	/* Prepare each packet which is to be sent */
1502 	for (loop = 0; loop < nb_ops; loop++) {
1503 		ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1504 		if (!ret)
1505 			num_tx++;
1506 	}
1507 
1508 	jr_qp->tx_pkts += num_tx;
1509 
1510 	return num_tx;
1511 }
1512 
1513 /* Release queue pair */
1514 static int
1515 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1516 			   uint16_t qp_id)
1517 {
1518 	struct sec_job_ring_t *internals;
1519 	struct caam_jr_qp *qp = NULL;
1520 
1521 	PMD_INIT_FUNC_TRACE();
1522 	CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1523 
1524 	internals = dev->data->dev_private;
1525 	if (qp_id >= internals->max_nb_queue_pairs) {
1526 		CAAM_JR_ERR("Max supported qpid %d",
1527 			     internals->max_nb_queue_pairs);
1528 		return -EINVAL;
1529 	}
1530 
1531 	qp = &internals->qps[qp_id];
1532 	qp->ring = NULL;
1533 	dev->data->queue_pairs[qp_id] = NULL;
1534 
1535 	return 0;
1536 }
1537 
1538 /* Setup a queue pair */
1539 static int
1540 caam_jr_queue_pair_setup(
1541 		struct rte_cryptodev *dev, uint16_t qp_id,
1542 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1543 		__rte_unused int socket_id)
1544 {
1545 	struct sec_job_ring_t *internals;
1546 	struct caam_jr_qp *qp = NULL;
1547 
1548 	PMD_INIT_FUNC_TRACE();
1549 	CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1550 
1551 	internals = dev->data->dev_private;
1552 	if (qp_id >= internals->max_nb_queue_pairs) {
1553 		CAAM_JR_ERR("Max supported qpid %d",
1554 			     internals->max_nb_queue_pairs);
1555 		return -EINVAL;
1556 	}
1557 
1558 	qp = &internals->qps[qp_id];
1559 	qp->ring = internals;
1560 	dev->data->queue_pairs[qp_id] = qp;
1561 
1562 	return 0;
1563 }
1564 
1565 /* Return the number of allocated queue pairs */
1566 static uint32_t
1567 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1568 {
1569 	PMD_INIT_FUNC_TRACE();
1570 
1571 	return dev->data->nb_queue_pairs;
1572 }
1573 
1574 /* Returns the size of the caam_jr session structure */
1575 static unsigned int
1576 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1577 {
1578 	PMD_INIT_FUNC_TRACE();
1579 
1580 	return sizeof(struct caam_jr_session);
1581 }
1582 
1583 static int
1584 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1585 		    struct rte_crypto_sym_xform *xform,
1586 		    struct caam_jr_session *session)
1587 {
1588 	PMD_INIT_FUNC_TRACE();
1589 	session->cipher_alg = xform->cipher.algo;
1590 	session->iv.length = xform->cipher.iv.length;
1591 	session->iv.offset = xform->cipher.iv.offset;
1592 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1593 					       RTE_CACHE_LINE_SIZE);
1594 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1595 		CAAM_JR_ERR("No Memory for cipher key\n");
1596 		return -ENOMEM;
1597 	}
1598 	session->cipher_key.length = xform->cipher.key.length;
1599 
1600 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1601 	       xform->cipher.key.length);
1602 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1603 			DIR_ENC : DIR_DEC;
1604 
1605 	return 0;
1606 }
1607 
1608 static int
1609 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1610 		  struct rte_crypto_sym_xform *xform,
1611 		  struct caam_jr_session *session)
1612 {
1613 	PMD_INIT_FUNC_TRACE();
1614 	session->auth_alg = xform->auth.algo;
1615 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1616 					     RTE_CACHE_LINE_SIZE);
1617 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1618 		CAAM_JR_ERR("No Memory for auth key\n");
1619 		return -ENOMEM;
1620 	}
1621 	session->auth_key.length = xform->auth.key.length;
1622 	session->digest_length = xform->auth.digest_length;
1623 
1624 	memcpy(session->auth_key.data, xform->auth.key.data,
1625 	       xform->auth.key.length);
1626 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1627 			DIR_ENC : DIR_DEC;
1628 
1629 	return 0;
1630 }
1631 
1632 static int
1633 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1634 		  struct rte_crypto_sym_xform *xform,
1635 		  struct caam_jr_session *session)
1636 {
1637 	PMD_INIT_FUNC_TRACE();
1638 	session->aead_alg = xform->aead.algo;
1639 	session->iv.length = xform->aead.iv.length;
1640 	session->iv.offset = xform->aead.iv.offset;
1641 	session->auth_only_len = xform->aead.aad_length;
1642 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1643 					     RTE_CACHE_LINE_SIZE);
1644 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1645 		CAAM_JR_ERR("No Memory for aead key\n");
1646 		return -ENOMEM;
1647 	}
1648 	session->aead_key.length = xform->aead.key.length;
1649 	session->digest_length = xform->aead.digest_length;
1650 
1651 	memcpy(session->aead_key.data, xform->aead.key.data,
1652 	       xform->aead.key.length);
1653 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1654 			DIR_ENC : DIR_DEC;
1655 
1656 	return 0;
1657 }
1658 
1659 static int
1660 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1661 			       struct rte_crypto_sym_xform *xform, void *sess)
1662 {
1663 	struct sec_job_ring_t *internals = dev->data->dev_private;
1664 	struct caam_jr_session *session = sess;
1665 
1666 	PMD_INIT_FUNC_TRACE();
1667 
1668 	if (unlikely(sess == NULL)) {
1669 		CAAM_JR_ERR("invalid session struct");
1670 		return -EINVAL;
1671 	}
1672 
1673 	/* Default IV length = 0 */
1674 	session->iv.length = 0;
1675 
1676 	/* Cipher Only */
1677 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1678 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1679 		caam_jr_cipher_init(dev, xform, session);
1680 
1681 	/* Authentication Only */
1682 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1683 		   xform->next == NULL) {
1684 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1685 		caam_jr_auth_init(dev, xform, session);
1686 
1687 	/* Cipher then Authenticate */
1688 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1689 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1690 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1691 			caam_jr_cipher_init(dev, xform, session);
1692 			caam_jr_auth_init(dev, xform->next, session);
1693 		} else {
1694 			CAAM_JR_ERR("Not supported: Cipher (decrypt) then Auth");
1695 			goto err1;
1696 		}
1697 
1698 	/* Authenticate then Cipher */
1699 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1700 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1701 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1702 			caam_jr_auth_init(dev, xform, session);
1703 			caam_jr_cipher_init(dev, xform->next, session);
1704 		} else {
1705 			CAAM_JR_ERR("Not supported: Auth then Cipher");
1706 			goto err1;
1707 		}
1708 
1709 	/* AEAD operation for AES-GCM kind of Algorithms */
1710 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1711 		   xform->next == NULL) {
1712 		caam_jr_aead_init(dev, xform, session);
1713 
1714 	} else {
1715 		CAAM_JR_ERR("Invalid crypto type");
1716 		return -EINVAL;
1717 	}
1718 	session->ctx_pool = internals->ctx_pool;
1719 
1720 	return 0;
1721 
1722 err1:
1723 	rte_free(session->cipher_key.data);
1724 	rte_free(session->auth_key.data);
1725 	memset(session, 0, sizeof(struct caam_jr_session));
1726 
1727 	return -EINVAL;
1728 }
1729 
1730 static int
1731 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1732 			      struct rte_crypto_sym_xform *xform,
1733 			      struct rte_cryptodev_sym_session *sess,
1734 			      struct rte_mempool *mempool)
1735 {
1736 	void *sess_private_data;
1737 	int ret;
1738 
1739 	PMD_INIT_FUNC_TRACE();
1740 
1741 	if (rte_mempool_get(mempool, &sess_private_data)) {
1742 		CAAM_JR_ERR("Couldn't get object from session mempool");
1743 		return -ENOMEM;
1744 	}
1745 
1746 	memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1747 	ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1748 	if (ret != 0) {
1749 		CAAM_JR_ERR("failed to configure session parameters");
1750 		/* Return session to mempool */
1751 		rte_mempool_put(mempool, sess_private_data);
1752 		return ret;
1753 	}
1754 
1755 	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1756 
1757 	return 0;
1758 }
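/*
 * Application-side sketch (dev_id, xform and the session mempool mp
 * are assumed to exist; this mirrors the two-step cryptodev session
 * API of this DPDK generation):
 *
 *	struct rte_cryptodev_sym_session *sess;
 *	int ret;
 *
 *	sess = rte_cryptodev_sym_session_create(mp);
 *	ret = rte_cryptodev_sym_session_init(dev_id, sess, xform, mp);
 *
 * Both results must be checked; _init() reaches
 * caam_jr_sym_session_configure() above through dev->dev_ops.
 */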
1759 
1760 /* Clear the memory of session so it doesn't leave key material behind */
1761 static void
1762 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1763 		struct rte_cryptodev_sym_session *sess)
1764 {
1765 	uint8_t index = dev->driver_id;
1766 	void *sess_priv = get_sym_session_private_data(sess, index);
1767 	struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1768 
1769 	PMD_INIT_FUNC_TRACE();
1770 
1771 	if (sess_priv) {
1772 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1773 
1774 		rte_free(s->cipher_key.data);
1775 		rte_free(s->auth_key.data);
1776 		memset(s, 0, sizeof(struct caam_jr_session));
1777 		set_sym_session_private_data(sess, index, NULL);
1778 		rte_mempool_put(sess_mp, sess_priv);
1779 	}
1780 }
1781 
1782 static int
1783 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1784 			  struct rte_security_session_conf *conf,
1785 			  void *sess)
1786 {
1787 	struct sec_job_ring_t *internals = dev->data->dev_private;
1788 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1789 	struct rte_crypto_auth_xform *auth_xform;
1790 	struct rte_crypto_cipher_xform *cipher_xform;
1791 	struct caam_jr_session *session = (struct caam_jr_session *)sess;
1792 
1793 	PMD_INIT_FUNC_TRACE();
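	/* For an egress SA the application supplies the crypto xform chain as
	 * cipher followed by auth; for ingress it is auth followed by cipher.
	 * Note that conf->crypto_xform and its ->next link are dereferenced
	 * without NULL checks.
	 */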
1794 
1795 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1796 		cipher_xform = &conf->crypto_xform->cipher;
1797 		auth_xform = &conf->crypto_xform->next->auth;
1798 	} else {
1799 		auth_xform = &conf->crypto_xform->auth;
1800 		cipher_xform = &conf->crypto_xform->next->cipher;
1801 	}
1802 	session->proto_alg = conf->protocol;
1803 	session->cipher_key.data = rte_zmalloc(NULL,
1804 					       cipher_xform->key.length,
1805 					       RTE_CACHE_LINE_SIZE);
1806 	if (session->cipher_key.data == NULL &&
1807 			cipher_xform->key.length > 0) {
1808 		CAAM_JR_ERR("No Memory for cipher key\n");
1809 		return -ENOMEM;
1810 	}
1811 
1812 	session->cipher_key.length = cipher_xform->key.length;
1813 	session->auth_key.data = rte_zmalloc(NULL,
1814 					auth_xform->key.length,
1815 					RTE_CACHE_LINE_SIZE);
1816 	if (session->auth_key.data == NULL &&
1817 			auth_xform->key.length > 0) {
1818 		CAAM_JR_ERR("No Memory for auth key\n");
1819 		rte_free(session->cipher_key.data);
1820 		return -ENOMEM;
1821 	}
1822 	session->auth_key.length = auth_xform->key.length;
1823 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1824 			cipher_xform->key.length);
1825 	memcpy(session->auth_key.data, auth_xform->key.data,
1826 			auth_xform->key.length);
1827 
1828 	switch (auth_xform->algo) {
1829 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1830 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1831 		break;
1832 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1833 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1834 		break;
1835 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1836 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1837 		break;
1838 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1839 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1840 		break;
1841 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1842 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1843 		break;
1844 	case RTE_CRYPTO_AUTH_AES_CMAC:
1845 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1846 		break;
1847 	case RTE_CRYPTO_AUTH_NULL:
1848 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1849 		break;
1850 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1851 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1852 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1853 	case RTE_CRYPTO_AUTH_SHA1:
1854 	case RTE_CRYPTO_AUTH_SHA256:
1855 	case RTE_CRYPTO_AUTH_SHA512:
1856 	case RTE_CRYPTO_AUTH_SHA224:
1857 	case RTE_CRYPTO_AUTH_SHA384:
1858 	case RTE_CRYPTO_AUTH_MD5:
1859 	case RTE_CRYPTO_AUTH_AES_GMAC:
1860 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1861 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1862 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1863 		CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1864 			auth_xform->algo);
1865 		goto out;
1866 	default:
1867 		CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1868 			auth_xform->algo);
1869 		goto out;
1870 	}
1871 
1872 	switch (cipher_xform->algo) {
1873 	case RTE_CRYPTO_CIPHER_AES_CBC:
1874 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1875 		break;
1876 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1877 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1878 		break;
1879 	case RTE_CRYPTO_CIPHER_AES_CTR:
1880 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1881 		break;
1882 	case RTE_CRYPTO_CIPHER_NULL:
1883 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1884 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1885 	case RTE_CRYPTO_CIPHER_AES_ECB:
1886 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1887 		CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1888 			cipher_xform->algo);
1889 		goto out;
1890 	default:
1891 		CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1892 			cipher_xform->algo);
1893 		goto out;
1894 	}
1895 
1896 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1897 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1898 				sizeof(session->ip4_hdr));
1899 		session->ip4_hdr.ip_v = IPVERSION;
1900 		session->ip4_hdr.ip_hl = 5;
1901 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1902 						sizeof(session->ip4_hdr));
1903 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1904 		session->ip4_hdr.ip_id = 0;
1905 		session->ip4_hdr.ip_off = 0;
1906 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1907 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1908 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1909 				: IPPROTO_AH;
1910 		session->ip4_hdr.ip_sum = 0;
1911 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1912 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1913 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1914 						(void *)&session->ip4_hdr,
1915 						sizeof(struct ip));
1916 
1917 		session->encap_pdb.options =
1918 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1919 			PDBOPTS_ESP_OIHI_PDB_INL |
1920 			PDBOPTS_ESP_IVSRC |
1921 			PDBHMO_ESP_ENCAP_DTTL;
1922 		session->encap_pdb.spi = ipsec_xform->spi;
1923 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
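
		/* Hedged note: as we read the SEC PDB documentation,
		 * OIHI_PDB_INL inlines the outer IP header built above into
		 * the PDB, IVSRC makes SEC source the IV internally and
		 * ENCAP_DTTL requests TTL decrement; verify against the CAAM
		 * reference manual for the target SoC.
		 */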
1924 
1925 		session->dir = DIR_ENC;
1926 	} else if (ipsec_xform->direction ==
1927 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1928 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1929 		session->decap_pdb.options = sizeof(struct ip) << 16;
1930 		session->dir = DIR_DEC;
	} else {
		goto out;
	}
1933 	session->ctx_pool = internals->ctx_pool;
1934 
1935 	return 0;
1936 out:
1937 	rte_free(session->auth_key.data);
1938 	rte_free(session->cipher_key.data);
1939 	memset(session, 0, sizeof(struct caam_jr_session));
1940 	return -1;
1941 }
1942 
1943 static int
1944 caam_jr_security_session_create(void *dev,
1945 				struct rte_security_session_conf *conf,
1946 				struct rte_security_session *sess,
1947 				struct rte_mempool *mempool)
1948 {
1949 	void *sess_private_data;
1950 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1951 	int ret;
1952 
1953 	PMD_INIT_FUNC_TRACE();
1954 	if (rte_mempool_get(mempool, &sess_private_data)) {
1955 		CAAM_JR_ERR("Couldn't get object from session mempool");
1956 		return -ENOMEM;
1957 	}
1958 
1959 	switch (conf->protocol) {
1960 	case RTE_SECURITY_PROTOCOL_IPSEC:
1961 		ret = caam_jr_set_ipsec_session(cdev, conf,
1962 				sess_private_data);
1963 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Return session to mempool; MACsec is not supported */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		/* Return session to mempool on unknown protocol */
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
1968 	}
1969 	if (ret != 0) {
1970 		CAAM_JR_ERR("failed to configure session parameters");
1971 		/* Return session to mempool */
1972 		rte_mempool_put(mempool, sess_private_data);
1973 		return ret;
1974 	}
1975 
1976 	set_sec_session_private_data(sess, sess_private_data);
1977 
1978 	return ret;
1979 }
1980 
1981 /* Clear the memory of session so it doesn't leave key material behind */
1982 static int
1983 caam_jr_security_session_destroy(void *dev __rte_unused,
1984 				 struct rte_security_session *sess)
1985 {
	void *sess_priv = get_sec_session_private_data(sess);
	struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
1990 
1991 	if (sess_priv) {
1992 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1993 
1994 		rte_free(s->cipher_key.data);
1995 		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(struct caam_jr_session));
1997 		set_sec_session_private_data(sess, NULL);
1998 		rte_mempool_put(sess_mp, sess_priv);
1999 	}
2000 	return 0;
2001 }
2002 
2003 
2004 static int
2005 caam_jr_dev_configure(struct rte_cryptodev *dev,
2006 		       struct rte_cryptodev_config *config __rte_unused)
2007 {
2008 	char str[20];
2009 	struct sec_job_ring_t *internals;
2010 
2011 	PMD_INIT_FUNC_TRACE();
2012 
2013 	internals = dev->data->dev_private;
2014 	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2015 	if (!internals->ctx_pool) {
2016 		internals->ctx_pool = rte_mempool_create((const char *)str,
2017 						CTX_POOL_NUM_BUFS,
2018 						sizeof(struct caam_jr_op_ctx),
2019 						CTX_POOL_CACHE_SIZE, 0,
2020 						NULL, NULL, NULL, NULL,
2021 						SOCKET_ID_ANY, 0);
2022 		if (!internals->ctx_pool) {
2023 			CAAM_JR_ERR("%s create failed\n", str);
2024 			return -ENOMEM;
2025 		}
	} else {
		CAAM_JR_INFO("mempool already created for dev_id: %d",
				dev->data->dev_id);
	}
2029 
2030 	return 0;
2031 }
2032 
2033 static int
2034 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2035 {
2036 	PMD_INIT_FUNC_TRACE();
2037 	return 0;
2038 }
2039 
2040 static void
2041 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2042 {
2043 	PMD_INIT_FUNC_TRACE();
2044 }
2045 
2046 static int
2047 caam_jr_dev_close(struct rte_cryptodev *dev)
2048 {
2049 	struct sec_job_ring_t *internals;
2050 
2051 	PMD_INIT_FUNC_TRACE();
2052 
2053 	if (dev == NULL)
		return -ENODEV;
2055 
2056 	internals = dev->data->dev_private;
2057 	rte_mempool_free(internals->ctx_pool);
2058 	internals->ctx_pool = NULL;
2059 
2060 	return 0;
2061 }
2062 
2063 static void
2064 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2065 		       struct rte_cryptodev_info *info)
2066 {
2067 	struct sec_job_ring_t *internals = dev->data->dev_private;
2068 
2069 	PMD_INIT_FUNC_TRACE();
2070 	if (info != NULL) {
2071 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2072 		info->feature_flags = dev->feature_flags;
2073 		info->capabilities = caam_jr_get_cryptodev_capabilities();
2074 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2075 		info->driver_id = cryptodev_driver_id;
2076 	}
2077 }
2078 
2079 static struct rte_cryptodev_ops caam_jr_ops = {
2080 	.dev_configure	      = caam_jr_dev_configure,
2081 	.dev_start	      = caam_jr_dev_start,
2082 	.dev_stop	      = caam_jr_dev_stop,
2083 	.dev_close	      = caam_jr_dev_close,
2084 	.dev_infos_get        = caam_jr_dev_infos_get,
2085 	.stats_get	      = caam_jr_stats_get,
2086 	.stats_reset	      = caam_jr_stats_reset,
2087 	.queue_pair_setup     = caam_jr_queue_pair_setup,
2088 	.queue_pair_release   = caam_jr_queue_pair_release,
2089 	.queue_pair_count     = caam_jr_queue_pair_count,
2090 	.sym_session_get_size = caam_jr_sym_session_get_size,
2091 	.sym_session_configure = caam_jr_sym_session_configure,
2092 	.sym_session_clear    = caam_jr_sym_session_clear
2093 };
2094 
2095 static struct rte_security_ops caam_jr_security_ops = {
2096 	.session_create = caam_jr_security_session_create,
2097 	.session_update = NULL,
2098 	.session_stats_get = NULL,
2099 	.session_destroy = caam_jr_security_session_destroy,
2100 	.set_pkt_metadata = NULL,
2101 	.capabilities_get = caam_jr_get_security_capabilities
2102 };
2103 
/** @brief Flush job rings of any processed descriptors.
 * The processed descriptors are silently dropped,
 * without notifying the User Application (UA).
 */
2108 static void
2109 close_job_ring(struct sec_job_ring_t *job_ring)
2110 {
2111 	PMD_INIT_FUNC_TRACE();
2112 	if (job_ring->irq_fd) {
		/* Producer index is frozen. If the consumer index is not
		 * equal to the producer index, there are descriptors left
		 * to flush.
		 */
2116 		while (job_ring->pidx != job_ring->cidx)
2117 			hw_flush_job_ring(job_ring, false, NULL);
2118 
2119 		/* free the uio job ring */
2120 		free_job_ring(job_ring->irq_fd);
2121 		job_ring->irq_fd = 0;
2122 		caam_jr_dma_free(job_ring->input_ring);
2123 		caam_jr_dma_free(job_ring->output_ring);
2124 		g_job_rings_no--;
2125 	}
2126 }
2127 
2128 /** @brief Release the software and hardware resources tied to a job ring.
2129  * @param [in] job_ring The job ring
2130  *
2131  * @retval  0 for success
2132  * @retval  -1 for error
2133  */
2134 static int
2135 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2136 {
2137 	int ret = 0;
2138 
2139 	PMD_INIT_FUNC_TRACE();
2140 	ASSERT(job_ring != NULL);
2141 	ret = hw_shutdown_job_ring(job_ring);
2142 	SEC_ASSERT(ret == 0, ret,
2143 		"Failed to shutdown hardware job ring %p",
2144 		job_ring);
2145 
2146 	if (job_ring->coalescing_en)
2147 		hw_job_ring_disable_coalescing(job_ring);
2148 
2149 	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2150 		ret = caam_jr_disable_irqs(job_ring->irq_fd);
2151 		SEC_ASSERT(ret == 0, ret,
2152 		"Failed to disable irqs for job ring %p",
2153 		job_ring);
2154 	}
2155 
2156 	return ret;
2157 }
2158 
2159 /*
2160  * @brief Release the resources used by the SEC user space driver.
2161  *
2162  * Reset and release SEC's job rings indicated by the User Application at
2163  * init_job_ring() and free any memory allocated internally.
2164  * Call once during application tear down.
2165  *
2166  * @note In case there are any descriptors in-flight (descriptors received by
2167  * SEC driver for processing and for which no response was yet provided to UA),
2168  * the descriptors are discarded without any notifications to User Application.
2169  *
2170  * @retval ::0			is returned for a successful execution
2171  * @retval ::-1		is returned if SEC driver release is in progress
2172  */
2173 static int
2174 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2175 {
2176 	struct sec_job_ring_t *internals;
2177 
2178 	PMD_INIT_FUNC_TRACE();
2179 	if (dev == NULL)
2180 		return -ENODEV;
2181 
2182 	internals = dev->data->dev_private;
2183 	rte_free(dev->security_ctx);
2184 
	/* If any descriptors are in flight, poll and wait until all
	 * descriptors are received and silently discarded.
	 */
2188 	if (internals) {
2189 		shutdown_job_ring(internals);
2190 		close_job_ring(internals);
2191 		rte_mempool_free(internals->ctx_pool);
2192 	}
2193 
2194 	CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2195 
	/* last caam jr instance */
2197 	if (g_job_rings_no == 0)
2198 		g_driver_state = SEC_DRIVER_STATE_IDLE;
2199 
2200 	return SEC_SUCCESS;
2201 }
2202 
/** @brief Initialize the software and hardware resources tied to a job ring.
 *
 * The ring is currently configured for polling mode
 * (#SEC_NOTIFICATION_TYPE_POLL) with interrupt coalescing disabled; the
 * notification mode, NAPI mode and coalescing parameters are local
 * constants below rather than arguments.
 *
 * @param [in] reg_base_addr	The job ring base address register
 * @param [in] irq_id		The job ring interrupt identification number
 * @retval  job ring handle for successful job ring configuration
 * @retval  NULL on error
 */
2224 static void *
2225 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2226 {
2227 	struct sec_job_ring_t *job_ring = NULL;
2228 	int i, ret = 0;
2229 	int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2230 	int napi_mode = 0;
2231 	int irq_coalescing_timer = 0;
2232 	int irq_coalescing_count = 0;
2233 
2234 	for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2235 		if (g_job_rings[i].irq_fd == 0) {
2236 			job_ring = &g_job_rings[i];
2237 			g_job_rings_no++;
2238 			break;
2239 		}
2240 	}
2241 	if (job_ring == NULL) {
2242 		CAAM_JR_ERR("No free job ring\n");
2243 		return NULL;
2244 	}
2245 
2246 	job_ring->register_base_addr = reg_base_addr;
2247 	job_ring->jr_mode = jr_mode;
2248 	job_ring->napi_mode = 0;
2249 	job_ring->irq_fd = irq_id;
2250 
	/* Allocate DMA-able memory for the input ring */
	job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
				SEC_DMA_MEM_INPUT_RING_SIZE);
	if (job_ring->input_ring == NULL) {
		CAAM_JR_ERR("Failed to allocate input ring");
		return NULL;
	}
	memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);

	/* Allocate DMA-able memory for the output ring */
	job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
				SEC_DMA_MEM_OUTPUT_RING_SIZE);
	if (job_ring->output_ring == NULL) {
		CAAM_JR_ERR("Failed to allocate output ring");
		caam_jr_dma_free(job_ring->input_ring);
		return NULL;
	}
	memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2262 
2263 	/* Reset job ring in SEC hw and configure job ring registers */
2264 	ret = hw_reset_job_ring(job_ring);
2265 	if (ret != 0) {
2266 		CAAM_JR_ERR("Failed to reset hardware job ring");
2267 		goto cleanup;
2268 	}
2269 
	if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
		/* When the SEC US driver works in NAPI mode, the UA can
		 * select whether the driver starts with IRQs on or off.
		 */
		if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
			CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
				job_ring);
2277 			ret = caam_jr_enable_irqs(job_ring->irq_fd);
2278 			if (ret != 0) {
2279 				CAAM_JR_ERR("Failed to enable irqs for job ring");
2280 				goto cleanup;
2281 			}
2282 		}
	} else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
		/* When the SEC US driver works in pure interrupt mode,
		 * IRQs are always enabled.
		 */
2287 		CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2288 			 job_ring);
2289 		ret = caam_jr_enable_irqs(job_ring->irq_fd);
2290 		if (ret != 0) {
2291 			CAAM_JR_ERR("Failed to enable irqs for job ring");
2292 			goto cleanup;
2293 		}
2294 	}
2295 	if (irq_coalescing_timer || irq_coalescing_count) {
2296 		hw_job_ring_set_coalescing_param(job_ring,
2297 			 irq_coalescing_timer,
2298 			 irq_coalescing_count);
2299 
2300 		hw_job_ring_enable_coalescing(job_ring);
2301 		job_ring->coalescing_en = 1;
2302 	}
2303 
2304 	job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2305 	job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2306 	job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2307 
2308 	return job_ring;
2309 cleanup:
2310 	caam_jr_dma_free(job_ring->output_ring);
2311 	caam_jr_dma_free(job_ring->input_ring);
2312 	return NULL;
2313 }
2314 
2315 
2316 static int
2317 caam_jr_dev_init(const char *name,
2318 		 struct rte_vdev_device *vdev,
2319 		 struct rte_cryptodev_pmd_init_params *init_params)
2320 {
2321 	struct rte_cryptodev *dev;
2322 	struct rte_security_ctx *security_instance;
2323 	struct uio_job_ring *job_ring;
2324 	char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2325 
2326 	PMD_INIT_FUNC_TRACE();
2327 
2328 	/* Validate driver state */
2329 	if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2330 		g_job_rings_max = sec_configure();
2331 		if (!g_job_rings_max) {
2332 			CAAM_JR_ERR("No job ring detected on UIO !!!!");
2333 			return -1;
2334 		}
2335 		/* Update driver state */
2336 		g_driver_state = SEC_DRIVER_STATE_STARTED;
2337 	}
2338 
2339 	if (g_job_rings_no >= g_job_rings_max) {
2340 		CAAM_JR_ERR("No more job rings available max=%d!!!!",
2341 				g_job_rings_max);
2342 		return -1;
2343 	}
2344 
2345 	job_ring = config_job_ring();
2346 	if (job_ring == NULL) {
2347 		CAAM_JR_ERR("failed to create job ring");
2348 		goto init_error;
2349 	}
2350 
2351 	snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2352 
2353 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2354 	if (dev == NULL) {
2355 		CAAM_JR_ERR("failed to create cryptodev vdev");
2356 		goto cleanup;
2357 	}
	/* TODO: free this during teardown */
2359 	dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2360 						job_ring->uio_fd);
2361 
2362 	if (!dev->data->dev_private) {
2363 		CAAM_JR_ERR("Ring memory allocation failed\n");
2364 		goto cleanup2;
2365 	}
2366 
2367 	dev->driver_id = cryptodev_driver_id;
2368 	dev->dev_ops = &caam_jr_ops;
2369 
2370 	/* register rx/tx burst functions for data path */
2371 	dev->dequeue_burst = caam_jr_dequeue_burst;
2372 	dev->enqueue_burst = caam_jr_enqueue_burst;
2373 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2374 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2375 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2376 			RTE_CRYPTODEV_FF_SECURITY |
2377 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2378 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2379 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2380 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2381 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2382 
2383 	/* For secondary processes, we don't initialise any further as primary
2384 	 * has already done this work. Only check we don't need a different
2385 	 * RX function
2386 	 */
2387 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2388 		CAAM_JR_WARN("Device already init by primary process");
2389 		return 0;
2390 	}
2391 
	/* TODO: free this during teardown */
2393 	security_instance = rte_malloc("caam_jr",
2394 				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL) {
		CAAM_JR_ERR("memory allocation failed\n");
		/* cleanup2 releases the cryptodev and the job ring */
		goto cleanup2;
2399 	}
2400 
2401 	security_instance->device = (void *)dev;
2402 	security_instance->ops = &caam_jr_security_ops;
2403 	security_instance->sess_cnt = 0;
2404 	dev->security_ctx = security_instance;
2405 
2406 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2407 
2408 	return 0;
2409 
2410 cleanup2:
2411 	caam_jr_dev_uninit(dev);
2412 	rte_cryptodev_pmd_release_device(dev);
2413 cleanup:
2414 	free_job_ring(job_ring->uio_fd);
2415 init_error:
2416 	CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2417 			init_params->name);
2418 
2419 	return -ENXIO;
2420 }
2421 
2422 /** Initialise CAAM JR crypto device */
2423 static int
2424 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2425 {
2426 	struct rte_cryptodev_pmd_init_params init_params = {
2427 		"",
2428 		sizeof(struct sec_job_ring_t),
2429 		rte_socket_id(),
2430 		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2431 	};
2432 	const char *name;
2433 	const char *input_args;
2434 
2435 	name = rte_vdev_device_name(vdev);
2436 	if (name == NULL)
2437 		return -EINVAL;
2438 
2439 	input_args = rte_vdev_device_args(vdev);
2440 	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2441 
2442 	/* if sec device version is not configured */
2443 	if (!rta_get_sec_era()) {
2444 		const struct device_node *caam_node;
2445 
2446 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2447 			const uint32_t *prop = of_get_property(caam_node,
2448 					"fsl,sec-era",
2449 					NULL);
2450 			if (prop) {
2451 				rta_set_sec_era(
2452 					INTL_SEC_ERA(cpu_to_caam32(*prop)));
2453 				break;
2454 			}
2455 		}
2456 	}
2457 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2458 	if (rta_get_sec_era() > RTA_SEC_ERA_8) {
		RTE_LOG(ERR, PMD,
		"CAAM compiled in BE mode is not supported on SEC era > 8 devices\n");
2461 		return -EINVAL;
2462 	}
2463 #endif
2464 
2465 	return caam_jr_dev_init(name, vdev, &init_params);
2466 }
2467 
2468 /** Uninitialise CAAM JR crypto device */
2469 static int
2470 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2471 {
2472 	struct rte_cryptodev *cryptodev;
2473 	const char *name;
2474 
2475 	name = rte_vdev_device_name(vdev);
2476 	if (name == NULL)
2477 		return -EINVAL;
2478 
2479 	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2480 	if (cryptodev == NULL)
2481 		return -ENODEV;
2482 
2483 	caam_jr_dev_uninit(cryptodev);
2484 
2485 	return rte_cryptodev_pmd_destroy(cryptodev);
2486 }
2487 
2488 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2489 	.probe = cryptodev_caam_jr_probe,
2490 	.remove = cryptodev_caam_jr_remove
2491 };
2492 
2493 static struct cryptodev_driver caam_jr_crypto_drv;
2494 
2495 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2496 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2497 	"max_nb_queue_pairs=<int>"
2498 	"socket_id=<int>");
2499 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2500 		cryptodev_driver_id);
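
/*
 * Illustrative sketch (not part of the driver): the PMD is instantiated
 * from the EAL command line; the parameter values below are hypothetical
 * examples.
 *
 *	./app --vdev="crypto_caam_jr,max_nb_queue_pairs=2,socket_id=0"
 */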
2501 
2502 RTE_INIT(caam_jr_init_log)
2503 {
2504 	caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2505 	if (caam_jr_logtype >= 0)
2506 		rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
2507 }
2508