/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017-2019 NXP
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <bus_vdev_driver.h>
#include <rte_malloc.h>
#include <rte_security_driver.h>
#include <rte_hexdump.h>

#include <caam_jr_capabilities.h>
#include <caam_jr_config.h>
#include <caam_jr_hw_specific.h>
#include <caam_jr_pvt.h>
#include <caam_jr_desc.h>
#include <caam_jr_log.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <dpaa_of.h>
#ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
#define CAAM_JR_DBG    1
#else
#define CAAM_JR_DBG    0
#endif
#define CRYPTODEV_NAME_CAAM_JR_PMD	crypto_caam_jr
static uint8_t cryptodev_driver_id;

/* Lists the states possible for the SEC user space driver. */
enum sec_driver_state_e {
	SEC_DRIVER_STATE_IDLE,		/* Driver not initialized */
	SEC_DRIVER_STATE_STARTED,	/* Driver initialized and can be used */
	SEC_DRIVER_STATE_RELEASE,	/* Driver release is in progress */
};

/* Job rings used for communication with SEC HW */
static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];

/* The current state of SEC user space driver */
static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;

/* The number of job rings used by SEC user space driver */
static int g_job_rings_no;
static int g_job_rings_max;

struct __rte_packed_begin sec_outring_entry {
	phys_addr_t desc;	/* Pointer to completed descriptor */
	uint32_t status;	/* Status for completed descriptor */
} __rte_packed_end;

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
{
	return (size_t)vaddr - ctx->vtop_offset;
}

static inline void
caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
{
	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct caam_jr_op_ctx *
caam_jr_alloc_ctx(struct caam_jr_session *ses)
{
	struct caam_jr_op_ctx *ctx;
	int ret;

	ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || ret) {
		CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
	 * one call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. caam_jr_alloc_ctx() is called for
	 * each packet, and memset would be costlier than dcbz_64().
	 */
	dcbz_64(&ctx->sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
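
/*
 * Illustrative sketch (not part of the driver): how the vtop_offset trick
 * above is typically used. The virtual-to-IO delta is recorded once per
 * ctx, so any field inside the ctx (SG table, job descriptor) can be
 * translated to a bus address with a single subtraction instead of an
 * address-translation lookup per field.
 */
static __rte_unused void
caam_jr_vtop_example(struct caam_jr_session *ses)
{
	struct caam_jr_op_ctx *ctx;
	phys_addr_t sg_iova;

	ctx = caam_jr_alloc_ctx(ses);
	if (ctx == NULL)
		return;
	/* IO address of the first SG entry inside this ctx */
	sg_iova = caam_jr_vtop_ctx(ctx, &ctx->sg[0]);
	RTE_SET_USED(sg_iova);
	caam_jr_op_ending(ctx);	/* return the ctx to its pool */
}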

static
void caam_jr_stats_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_stats *stats)
{
	struct caam_jr_qp **qp = (struct caam_jr_qp **)
					dev->data->queue_pairs;
	int i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		CAAM_JR_ERR("Invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			CAAM_JR_WARN("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_pkts;
		stats->dequeued_count += qp[i]->rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_errs;
		stats->dequeue_err_count += qp[i]->rx_errs;
		CAAM_JR_INFO("extra stats:");
		CAAM_JR_INFO("\tRX Poll ERR = %" PRIu64,
			     qp[i]->rx_poll_err);
		CAAM_JR_INFO("\tTX Ring Full = %" PRIu64,
			     qp[i]->tx_ring_full);
	}
}

static
void caam_jr_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct caam_jr_qp **qp = (struct caam_jr_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			CAAM_JR_WARN("Uninitialised queue pair");
			continue;
		}
		qp[i]->rx_pkts = 0;
		qp[i]->rx_errs = 0;
		qp[i]->rx_poll_err = 0;
		qp[i]->tx_pkts = 0;
		qp[i]->tx_errs = 0;
		qp[i]->tx_ring_full = 0;
	}
}
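
/*
 * Illustrative, application-side sketch (assumption: the generic cryptodev
 * stats API, not driver-internal code): the two callbacks above are
 * reached through rte_cryptodev_stats_get()/rte_cryptodev_stats_reset().
 */
static __rte_unused void
caam_jr_stats_usage_example(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enqueued=%" PRIu64 " dequeued=%" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count);
	rte_cryptodev_stats_reset(dev_id);
}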

static inline int
is_cipher_only(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int
is_auth_only(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int
is_aead(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int
is_auth_cipher(struct caam_jr_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int
is_proto_ipsec(struct caam_jr_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int
is_encode(struct caam_jr_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int
is_decode(struct caam_jr_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare command block of the session */
static int
caam_jr_prep_cdb(struct caam_jr_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb;
	int err;
#if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
	int swap = false;
#else
	int swap = true;
#endif

	if (ses->cdb)
		caam_jr_dma_free(ses->cdb);

	cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
	if (!cdb) {
		CAAM_JR_ERR("failed to allocate memory for cdb");
		return -1;
	}

	ses->cdb = cdb;

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
			CAAM_JR_ERR("not supported cipher alg");
			rte_free(cdb);
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_c,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
			CAAM_JR_ERR("not supported auth alg");
			rte_free(cdb);
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, SHR_NEVER, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
			CAAM_JR_ERR("not supported aead alg");
			rte_free(cdb);
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					SHR_NEVER, &alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					SHR_NEVER, &alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
			CAAM_JR_ERR("not supported cipher alg");
			rte_free(cdb);
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
			CAAM_JR_ERR("not supported auth alg");
			rte_free(cdb);
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			CAAM_JR_ERR("Crypto: Incorrect key lengths");
			rte_free(cdb);
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)caam_jr_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)caam_jr_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, SHR_SERIAL,
						&ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, SHR_SERIAL,
						&ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is overwritten in fd for each job */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, SHR_SERIAL,
					&alginfo_c, &alginfo_a,
					ses->iv.length,
					ses->digest_length, ses->dir);
		}
	}

	if (shared_desc_len < 0) {
		CAAM_JR_ERR("error in preparing command block");
		return shared_desc_len;
	}

#if CAAM_JR_DBG
	SEC_DUMP_DESC(cdb->sh_desc);
#endif

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;

	return 0;
}

/* @brief Poll the HW for already processed jobs in the JR
 * and silently discard the available jobs or notify them to UA
 * with indicated error code.
 *
 * @param [in,out]  job_ring        The job ring to poll.
 * @param [in]  do_notify           Can be #TRUE or #FALSE. Indicates if
 *				    descriptors are to be discarded
 *                                  or notified to UA with given error_code.
 * @param [out] notified_descs    Number of notified descriptors. Can be NULL
 *					if do_notify is #FALSE
 */
static void
hw_flush_job_ring(struct sec_job_ring_t *job_ring,
		  uint32_t do_notify,
		  uint32_t *notified_descs)
{
	int32_t jobs_no_to_discard = 0;
	int32_t discarded_descs_no = 0;

	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
		job_ring, job_ring->pidx, job_ring->cidx, do_notify);

	jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);

	/* Discard all jobs */
	CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
		  job_ring, job_ring->pidx, job_ring->cidx,
		  jobs_no_to_discard);

	while (jobs_no_to_discard > discarded_descs_no) {
		discarded_descs_no++;
		/* Increment the consumer index for the current job ring,
		 * AFTER saving the job in a temporary location.
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
					 SEC_JOB_RING_SIZE);

		hw_remove_entries(job_ring, 1);
	}

	if (do_notify == true) {
		ASSERT(notified_descs != NULL);
		*notified_descs = discarded_descs_no;
	}
}
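
/*
 * Illustrative sketch (assumption: a helper written for this comment, not
 * called from any driver path): the two ways hw_flush_job_ring() above can
 * be used. At shutdown completed jobs are silently discarded; during error
 * recovery the caller asks how many descriptors were flushed.
 */
static __rte_unused void
hw_flush_job_ring_example(struct sec_job_ring_t *job_ring)
{
	uint32_t flushed = 0;

	/* Discard silently, no notification to the user application */
	hw_flush_job_ring(job_ring, false, NULL);

	/* Flush and report how many descriptors were dropped */
	hw_flush_job_ring(job_ring, true, &flushed);
	CAAM_JR_DEBUG("flushed %u descs", flushed);
}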

/* @brief Poll the HW for already processed jobs in the JR
 * and notify the available jobs to UA.
 *
 * @param [in]  job_ring	The job ring to poll.
 * @param [in]  limit           The maximum number of jobs to notify.
 *                              If set to negative value, all available jobs are
 *				notified.
 *
 * @retval >=0 Number of jobs notified to UA.
 * @retval -1 for error
 */
static int
hw_poll_job_ring(struct sec_job_ring_t *job_ring,
		 struct rte_crypto_op **ops, int32_t limit,
		 struct caam_jr_qp *jr_qp)
{
	int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify */
	int32_t number_of_jobs_available = 0;
	int32_t notified_descs_no = 0;
	uint32_t sec_error_code = 0;
	struct job_descriptor *current_desc;
	phys_addr_t current_desc_addr;
	phys_addr_t *temp_addr;
	struct caam_jr_op_ctx *ctx;

	/* TODO: check that ops have memory */
	/* check here if any JR error that cannot be written
	 * in the output status word has occurred
	 */
	if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
		CAAM_JR_INFO("err received");
		sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
					GET_JR_REG(JRINT, job_ring));
		if (unlikely(sec_error_code)) {
			hw_job_ring_error_print(job_ring, sec_error_code);
			return -1;
		}
	}
	/* compute the number of jobs available in the job ring based on the
	 * producer and consumer index values.
	 */
	number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
	/* Compute the number of notifications that need to be raised to UA
	 * If limit > total number of done jobs -> notify all done jobs
	 * If limit = 0 -> error
	 * If limit < total number of done jobs -> notify a number
	 * of done jobs equal with limit
	 */
	jobs_no_to_notify = (limit > number_of_jobs_available) ?
				number_of_jobs_available : limit;
	CAAM_JR_DP_DEBUG(
		"Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
		job_ring, job_ring->pidx, job_ring->cidx,
		limit, number_of_jobs_available, jobs_no_to_notify);

	rte_smp_rmb();

	while (jobs_no_to_notify > notified_descs_no) {
		static uint64_t false_alarm;
		static uint64_t real_poll;

		/* Get job status here */
		sec_error_code = job_ring->output_ring[job_ring->cidx].status;
		/* Get completed descriptor */
		temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
		current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);

		real_poll++;
		/* TODO: check if this is a false alarm (no desc present) */
		if (!current_desc_addr) {
			false_alarm++;
			CAAM_JR_ERR("false alarm %" PRIu64 " real %" PRIu64
				" sec_err =0x%x cidx Index = %d",
				false_alarm, real_poll,
				sec_error_code, job_ring->cidx);
			rte_panic("CAAM JR descriptor NULL");
			return notified_descs_no;
		}
		current_desc = (struct job_descriptor *)
				caam_jr_dma_ptov(current_desc_addr);
		/* now increment the consumer index for the current job ring,
		 * AFTER saving job in temporary location!
		 */
		job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
				 SEC_JOB_RING_SIZE);
		/* Signal that the job has been processed and the slot is free */
		hw_remove_entries(job_ring, 1);
		/* TODO: handle multiple ops, packets */
		ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
		if (unlikely(sec_error_code)) {
			CAAM_JR_ERR("desc at cidx %d generated error 0x%x",
				job_ring->cidx, sec_error_code);
			hw_handle_job_ring_error(job_ring, sec_error_code);
			/* TODO: improve with exact errors */
			ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			jr_qp->rx_errs++;
		} else {
			ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if CAAM_JR_DBG
			if (ctx->op->sym->m_dst) {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_dst));
			} else {
				rte_hexdump(stdout, "PROCESSED",
				rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
				rte_pktmbuf_data_len(ctx->op->sym->m_src));
			}
#endif
		}
		if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct ip *ip4_hdr;

			if (ctx->op->sym->m_dst) {
				/* TODO: check for IP header or other */
				ip4_hdr = rte_pktmbuf_mtod(ctx->op->sym->m_dst,
							   struct ip *);
				ctx->op->sym->m_dst->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_dst->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			} else {
				ip4_hdr = rte_pktmbuf_mtod(ctx->op->sym->m_src,
							   struct ip *);
				ctx->op->sym->m_src->pkt_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
				ctx->op->sym->m_src->data_len =
					rte_be_to_cpu_16(ip4_hdr->ip_len);
			}
		}
		*ops = ctx->op;
		caam_jr_op_ending(ctx);
		ops++;
		notified_descs_no++;
	}
	return notified_descs_no;
}

static uint16_t
caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
	struct sec_job_ring_t *ring = jr_qp->ring;
	int num_rx;
	int ret;

	CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);

	/* Poll job ring
	 * If nb_ops < 0 -> poll JR until no more notifications are available.
	 * If nb_ops > 0 -> poll JR until limit is reached.
	 */

	/* Run hw poll job ring */
	num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
	if (num_rx < 0) {
		CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
		return 0;
	}

	CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);

	if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
		if (num_rx < nb_ops) {
			ret = caam_jr_enable_irqs(ring->irq_fd);
			SEC_ASSERT(ret == 0, ret,
			"Failed to enable irqs for job ring %p", ring);
		}
	} else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {

		/* Always enable IRQ generation when in pure IRQ mode */
		ret = caam_jr_enable_irqs(ring->irq_fd);
		SEC_ASSERT(ret == 0, ret,
			"Failed to enable irqs for job ring %p", ring);
	}

	jr_qp->rx_pkts += num_rx;

	return num_rx;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct caam_jr_op_ctx *
build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	int	length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;
	uint8_t extra_segs;

	if (is_decode(ses))
		extra_segs = 2;
	else
		extra_segs = 1;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/* input */
	sg = &ctx->sg[0];
	length = sym->auth.data.length;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* digest verification case */
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	} else {
		sg->len -= ses->digest_length;
	}

	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr,
		(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
	/* enabling sg list */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}

static inline struct caam_jr_op_ctx *
build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;

	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	start_addr = rte_pktmbuf_iova(sym->m_src);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
			0, ses->digest_length);

	/* input */
	if (is_decode(ses)) {
		sg = &ctx->sg[0];
		SEC_JD_SET_IN_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
			(sym->auth.data.length + ses->digest_length));
		/* enabling sg list */
		(jobdescr)->seq_in.command.word  |= 0x01000000;

		/* hash result or digest, save digest first */
		rte_memcpy(ctx->digest, sym->auth.digest.data,
			   ses->digest_length);
		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);

#if CAAM_JR_DBG
		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
#endif
		/* let's check digest by hw */
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		/* last element */
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
			sym->auth.data.offset, sym->auth.data.length);
	}
	return ctx;
}

static inline struct caam_jr_op_ctx *
build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *in_sg;
	int length;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint8_t reg_segs;

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	} else {
		mbuf = sym->m_src;
		reg_segs = mbuf->nb_segs * 2 + 2;
	}

	if (reg_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	sg = &ctx->sg[0];
	length = sym->cipher.data.length;

	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			(uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
			length);
	/* enabling sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;

	length = sym->cipher.data.length + ses->iv.length;

	/* IV */
	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	/* 1st seg */
	sg++;
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
				+ sym->cipher.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);


	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
				length);
	/* enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	return ctx;
}

static inline struct caam_jr_op_ctx *
build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

#if CAAM_JR_DBG
	CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
			sym->m_src->data_off, sym->cipher.data.offset,
			sym->cipher.data.length, ses->iv.length);
#endif
	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
			sym->cipher.data.offset,
			sym->cipher.data.length + ses->iv.length);

	/* input */
	sg = &ctx->sg[0];
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
				sym->cipher.data.length + ses->iv.length);
	/* enabling sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg = &ctx->sg[1];
	sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	return ctx;
}

/* For decapsulation:
 *     Input:
 * +----+----------------+--------------------------------+-----+
 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
 * +----+----------------+--------------------------------+-----+
 *     Output:
 * +--------------------------------+
 * | Decrypted & authenticated data |
 * +--------------------------------+
 */

static inline struct caam_jr_op_ctx *
build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint16_t auth_hdr_len = sym->cipher.data.offset -
			sym->auth.data.offset;
	uint16_t auth_tail_len = sym->auth.data.length -
			sym->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* output */
	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	out_sg = &ctx->sg[0];
	if (is_encode(ses))
		length = sym->auth.data.length + ses->digest_length;
	else
		length = sym->auth.data.length;

	sg = &ctx->sg[0];

	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* input */
	sg++;
	mbuf = sym->m_src;
	in_sg = sg;
	if (is_encode(ses))
		length = ses->iv.length + sym->auth.data.length;
	else
		length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
	sg->len = cpu_to_caam32(ses->iv.length);

	sg++;
	/* 1st seg */
	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
		+ sym->auth.data.offset);
	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sg++;
		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
		sg->len = cpu_to_caam32(mbuf->data_len);
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		sg++;
		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
	}
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;
	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* override the auth-only length via the DPOVRD register */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}

static inline struct caam_jr_op_ctx *
build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx;
	struct sec4_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	struct sec_job_descriptor_t *jobdescr;
	uint16_t auth_hdr_len = sym->cipher.data.offset -
			sym->auth.data.offset;
	uint16_t auth_tail_len = sym->auth.data.length -
			sym->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	ctx->op = op;
	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
		cdb->sh_hdr.hi.field.idlen);

	/* input */
	sg = &ctx->sg[0];
	if (is_encode(ses)) {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;
		/* last element */
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	} else {
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
		sg->len = cpu_to_caam32(ses->iv.length);
		length += ses->iv.length;

		sg++;
		sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
		sg->len = cpu_to_caam32(sym->auth.data.length);
		length += sym->auth.data.length;

		rte_memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
		/* last element */
		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
	}

	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
				length);
	/* set sg bit */
	(jobdescr)->seq_in.command.word  |= 0x01000000;

	/* output */
	sg = &ctx->sg[6];

	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
	sg->len = cpu_to_caam32(sym->cipher.data.length);
	length = sym->cipher.data.length;

	if (is_encode(ses)) {
		/* set auth output */
		sg++;
		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
		sg->len = cpu_to_caam32(ses->digest_length);
		length += ses->digest_length;
	}
	/* last element */
	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);

	SEC_JD_SET_OUT_PTR(jobdescr,
			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
	/* set sg bit */
	(jobdescr)->seq_out.command.word  |= 0x01000000;

	/* Auth_only_len is set as 0 in descriptor and it is
	 * overwritten here in the jd which will update
	 * the DPOVRD reg.
	 */
	if (auth_only_len)
		/* override the auth-only length via the DPOVRD register */
		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;

	return ctx;
}
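
/*
 * Illustrative sketch of the DPOVRD encoding used above (assumption: a
 * helper written for this comment, not a driver API). For a chained
 * cipher+auth job, the bytes that are authenticated but not encrypted sit
 * before (header) and after (tail) the encrypted region; both 16-bit
 * lengths are packed into one 32-bit word, and bit 31 of DPOVRD marks the
 * override as valid.
 */
static __rte_unused uint32_t
caam_jr_auth_only_len_example(const struct rte_crypto_sym_op *sym)
{
	uint16_t auth_hdr_len = sym->cipher.data.offset -
			sym->auth.data.offset;
	uint16_t auth_tail_len = sym->auth.data.length -
			sym->cipher.data.length - auth_hdr_len;

	/* the same packing as auth_only_len in the two builders above */
	return ((uint32_t)auth_tail_len << 16) | auth_hdr_len;
}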

static inline struct caam_jr_op_ctx *
build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct caam_jr_op_ctx *ctx = NULL;
	phys_addr_t src_start_addr, dst_start_addr;
	struct sec_cdb *cdb;
	uint64_t sdesc_offset;
	struct sec_job_descriptor_t *jobdescr;

	ctx = caam_jr_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	cdb = ses->cdb;
	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);

	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	SEC_JD_INIT(jobdescr);
	SEC_JD_SET_SD(jobdescr,
		(phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
			cdb->sh_hdr.hi.field.idlen);

	/* output */
	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
			sym->m_src->buf_len - sym->m_src->data_off);
	/* input */
	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
			sym->m_src->pkt_len);
	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return ctx;
}

static int
caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
{
	struct sec_job_ring_t *ring = qp->ring;
	struct caam_jr_session *ses;
	struct caam_jr_op_ctx *ctx = NULL;
	struct sec_job_descriptor_t *jobdescr __rte_unused;
#if CAAM_JR_DBG
	int i;
#endif

	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		break;
	case RTE_CRYPTO_OP_SECURITY_SESSION:
		ses = SECURITY_GET_SESS_PRIV(op->sym->session);
		break;
	default:
		CAAM_JR_DP_ERR("sessionless crypto op not supported");
		qp->tx_errs++;
		return -1;
	}

	if (unlikely(!ses->qp || ses->qp != qp)) {
		CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p", ses->qp, qp);
		ses->qp = qp;
		caam_jr_prep_cdb(ses);
	}

	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only(op, ses);
		else if (is_proto_ipsec(ses))
			ctx = build_proto(op, ses);
	} else {
		if (is_auth_cipher(ses))
			ctx = build_cipher_auth_sg(op, ses);
		else if (is_aead(ses))
			goto err1;
		else if (is_auth_only(ses))
			ctx = build_auth_only_sg(op, ses);
		else if (is_cipher_only(ses))
			ctx = build_cipher_only_sg(op, ses);
	}
err1:
	if (unlikely(!ctx)) {
		qp->tx_errs++;
		CAAM_JR_ERR("not supported sec op");
		return -1;
	}
#if CAAM_JR_DBG
	if (is_decode(ses))
		rte_hexdump(stdout, "DECODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));
	else
		rte_hexdump(stdout, "ENCODE",
			rte_pktmbuf_mtod(op->sym->m_src, void *),
			rte_pktmbuf_data_len(op->sym->m_src));

	fprintf(stdout, "\n JD before conversion\n");
	for (i = 0; i < 12; i++)
		fprintf(stdout, "\n 0x%08x", ctx->jobdes.desc[i]);
#endif

	CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
		      ring, ring->pidx, ring->cidx);

	/* TODO: do we want to retry? */
	if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
			 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
		CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
			      ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
		caam_jr_op_ending(ctx);
		qp->tx_ring_full++;
		return -EBUSY;
	}

#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;

	jobdescr->deschdr.command.word =
		cpu_to_caam32(jobdescr->deschdr.command.word);
	jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
	jobdescr->seq_out.command.word =
		cpu_to_caam32(jobdescr->seq_out.command.word);
	jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
	jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
	jobdescr->seq_in.command.word =
		cpu_to_caam32(jobdescr->seq_in.command.word);
	jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
	jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
	jobdescr->load_dpovrd.command.word =
		cpu_to_caam32(jobdescr->load_dpovrd.command.word);
	jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
#endif

	/* Set ptr in input ring to current descriptor */
	sec_write_addr(&ring->input_ring[ring->pidx],
			(phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
	rte_smp_wmb();

	/* Notify HW that a new job is enqueued */
	hw_enqueue_desc_on_job_ring(ring);

	/* increment the producer index for the current job ring */
	ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);

	return 0;
}

static uint16_t
caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queue pair */
	uint32_t loop;
	int32_t ret;
	struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
	uint16_t num_tx = 0;
	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		ret = caam_jr_enqueue_op(ops[loop], jr_qp);
		if (!ret)
			num_tx++;
	}

	jr_qp->tx_pkts += num_tx;

	return num_tx;
}
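
/*
 * Illustrative, application-side sketch (assumption: the generic cryptodev
 * burst API): caam_jr_enqueue_burst()/caam_jr_dequeue_burst() above are
 * reached through the rte_cryptodev burst wrappers.
 */
static __rte_unused void
caam_jr_burst_usage_example(uint8_t dev_id, uint16_t qp_id,
			    struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq, deq;

	enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	/* Poll for completions; a real application would retry until all
	 * enqueued ops have been returned.
	 */
	deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, enq);
	RTE_SET_USED(deq);
}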

/* Release queue pair */
static int
caam_jr_queue_pair_release(struct rte_cryptodev *dev,
			   uint16_t qp_id)
{
	struct sec_job_ring_t *internals;
	struct caam_jr_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();
	CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		CAAM_JR_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->ring = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/* Setup a queue pair */
static int
caam_jr_queue_pair_setup(
		struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct sec_job_ring_t *internals;
	struct caam_jr_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();
	CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		CAAM_JR_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->ring = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
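
/*
 * Illustrative, application-side sketch (assumption: standard cryptodev
 * configuration flow): the qp setup/release callbacks above are reached
 * through rte_cryptodev_queue_pair_setup(). This PMD ignores qp_conf,
 * but the API still requires one.
 */
static __rte_unused int
caam_jr_qp_setup_example(uint8_t dev_id, uint16_t qp_id, int socket_id)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 512,	/* example depth only */
	};

	return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
					      socket_id);
}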

/* Returns the size of the CAAM JR session structure */
static unsigned int
caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(struct caam_jr_session);
}

static int
caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    struct caam_jr_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		CAAM_JR_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
		  struct rte_crypto_sym_xform *xform,
		  struct caam_jr_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		CAAM_JR_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
		  struct rte_crypto_sym_xform *xform,
		  struct caam_jr_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		CAAM_JR_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
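
/*
 * Illustrative, application-side sketch (assumption: standard symmetric
 * xform layout, with the IV placed right after the crypto op as DPDK test
 * applications conventionally do): a single AEAD xform such as AES-GCM,
 * as consumed by caam_jr_aead_init() above.
 */
static __rte_unused void
caam_jr_gcm_xform_example(struct rte_crypto_sym_xform *xform,
			  uint8_t *key, uint16_t key_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = key_len;
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;	/* GCM nonce */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 0;
}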

static int
caam_jr_set_session_parameters(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform, void *sess)
{
	struct sec_job_ring_t *internals = dev->data->dev_private;
	struct caam_jr_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		CAAM_JR_ERR("invalid session struct");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		caam_jr_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		caam_jr_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			caam_jr_cipher_init(dev, xform, session);
			caam_jr_auth_init(dev, xform->next, session);
		} else {
			CAAM_JR_ERR("Not supported: Auth then Cipher");
			goto err1;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			caam_jr_auth_init(dev, xform, session);
			caam_jr_cipher_init(dev, xform->next, session);
		} else {
			CAAM_JR_ERR("Not supported: Auth then Cipher");
			goto err1;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		caam_jr_aead_init(dev, xform, session);

	} else {
		CAAM_JR_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(struct caam_jr_session));

	return -EINVAL;
}

static int
caam_jr_sym_session_configure(struct rte_cryptodev *dev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();
	sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	memset(sess_private_data, 0, sizeof(struct caam_jr_session));
	ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		CAAM_JR_ERR("failed to configure session parameters");
		/* Return session to mempool */
		return ret;
	}

	return 0;
}

/* Clear the memory of session so it doesn't leave key material behind */
static void
caam_jr_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess)
{
	struct caam_jr_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);

	PMD_INIT_FUNC_TRACE();

	if (s) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
	}
}

static int
caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1729 			  struct rte_security_session_conf *conf,
1730 			  void *sess)
1731 {
1732 	struct sec_job_ring_t *internals = dev->data->dev_private;
1733 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1734 	struct rte_crypto_auth_xform *auth_xform;
1735 	struct rte_crypto_cipher_xform *cipher_xform;
1736 	struct caam_jr_session *session = (struct caam_jr_session *)sess;
1737 
1738 	PMD_INIT_FUNC_TRACE();
1739 
1740 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
1741 	    ipsec_xform->life.bytes_soft_limit != 0 ||
1742 	    ipsec_xform->life.packets_hard_limit != 0 ||
1743 	    ipsec_xform->life.packets_soft_limit != 0)
1744 		return -ENOTSUP;
1745 
1746 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1747 		cipher_xform = &conf->crypto_xform->cipher;
1748 		auth_xform = &conf->crypto_xform->next->auth;
1749 	} else {
1750 		auth_xform = &conf->crypto_xform->auth;
1751 		cipher_xform = &conf->crypto_xform->next->cipher;
1752 	}
1753 	session->proto_alg = conf->protocol;
1754 	session->cipher_key.data = rte_zmalloc(NULL,
1755 					       cipher_xform->key.length,
1756 					       RTE_CACHE_LINE_SIZE);
1757 	if (session->cipher_key.data == NULL &&
1758 			cipher_xform->key.length > 0) {
1759 		CAAM_JR_ERR("No Memory for cipher key");
1760 		return -ENOMEM;
1761 	}
1762 
1763 	session->cipher_key.length = cipher_xform->key.length;
1764 	session->auth_key.data = rte_zmalloc(NULL,
1765 					auth_xform->key.length,
1766 					RTE_CACHE_LINE_SIZE);
1767 	if (session->auth_key.data == NULL &&
1768 			auth_xform->key.length > 0) {
1769 		CAAM_JR_ERR("No Memory for auth key");
1770 		rte_free(session->cipher_key.data);
1771 		return -ENOMEM;
1772 	}
1773 	session->auth_key.length = auth_xform->key.length;
1774 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1775 			cipher_xform->key.length);
1776 	memcpy(session->auth_key.data, auth_xform->key.data,
1777 			auth_xform->key.length);
1778 
1779 	switch (auth_xform->algo) {
1780 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1781 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1782 		break;
1783 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1784 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1785 		break;
1786 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1787 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1788 		break;
1789 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1790 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1791 		break;
1792 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1793 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1794 		break;
1795 	case RTE_CRYPTO_AUTH_AES_CMAC:
1796 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1797 		break;
1798 	case RTE_CRYPTO_AUTH_NULL:
1799 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1800 		break;
1801 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1802 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1803 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1804 	case RTE_CRYPTO_AUTH_SHA1:
1805 	case RTE_CRYPTO_AUTH_SHA256:
1806 	case RTE_CRYPTO_AUTH_SHA512:
1807 	case RTE_CRYPTO_AUTH_SHA224:
1808 	case RTE_CRYPTO_AUTH_SHA384:
1809 	case RTE_CRYPTO_AUTH_MD5:
1810 	case RTE_CRYPTO_AUTH_AES_GMAC:
1811 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1812 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1813 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1814 		CAAM_JR_ERR("Crypto: Unsupported auth alg %u",
1815 			auth_xform->algo);
1816 		goto out;
1817 	default:
1818 		CAAM_JR_ERR("Crypto: Undefined Auth specified %u",
1819 			auth_xform->algo);
1820 		goto out;
1821 	}
1822 
1823 	switch (cipher_xform->algo) {
1824 	case RTE_CRYPTO_CIPHER_AES_CBC:
1825 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1826 		break;
1827 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1828 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1829 		break;
1830 	case RTE_CRYPTO_CIPHER_AES_CTR:
1831 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1832 		break;
1833 	case RTE_CRYPTO_CIPHER_NULL:
1834 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1835 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1836 	case RTE_CRYPTO_CIPHER_AES_ECB:
1837 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1838 		CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u",
1839 			cipher_xform->algo);
1840 		goto out;
1841 	default:
1842 		CAAM_JR_ERR("Crypto: Undefined Cipher specified %u",
1843 			cipher_xform->algo);
1844 		goto out;
1845 	}
1846 
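	/* For egress, prefill the outer IPv4 tunnel header: it is carried
	 * inline right after the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL), which
	 * is why the memset below spans both the PDB and ip4_hdr (assumed
	 * contiguous in struct caam_jr_session).
	 */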
1847 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1848 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1849 				sizeof(session->ip4_hdr));
1850 		session->ip4_hdr.ip_v = IPVERSION;
1851 		session->ip4_hdr.ip_hl = 5;
1852 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1853 						sizeof(session->ip4_hdr));
1854 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1855 		session->ip4_hdr.ip_id = 0;
1856 		session->ip4_hdr.ip_off = 0;
1857 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1858 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1859 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1860 				: IPPROTO_AH;
1861 		session->ip4_hdr.ip_sum = 0;
1862 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1863 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1864 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1865 						(void *)&session->ip4_hdr,
1866 						sizeof(struct ip));
1867 
1868 		session->encap_pdb.options =
1869 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1870 			PDBOPTS_ESP_OIHI_PDB_INL |
1871 			PDBOPTS_ESP_IVSRC;
1872 		if (ipsec_xform->options.dec_ttl)
1873 			session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
1874 		if (ipsec_xform->options.esn)
1875 			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1876 		session->encap_pdb.spi = ipsec_xform->spi;
1877 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1878 
1879 		session->dir = DIR_ENC;
1880 	} else if (ipsec_xform->direction ==
1881 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1882 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1883 		session->decap_pdb.options = sizeof(struct ip) << 16;
1884 		if (ipsec_xform->options.esn)
1885 			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1886 		session->dir = DIR_DEC;
1887 	} else {
1888 		goto out;
	}
1889 	session->ctx_pool = internals->ctx_pool;
1890 
1891 	return 0;
1892 out:
1893 	rte_free(session->auth_key.data);
1894 	rte_free(session->cipher_key.data);
1895 	memset(session, 0, sizeof(struct caam_jr_session));
1896 	return -EINVAL;
1897 }
1898 
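/* rte_security session create hook. Only the IPsec protocol is implemented;
 * MACsec is explicitly reported as unsupported.
 */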
1899 static int
1900 caam_jr_security_session_create(void *dev,
1901 				struct rte_security_session_conf *conf,
1902 				struct rte_security_session *sess)
1903 {
1904 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
1905 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1906 	int ret;
1907 
1908 	switch (conf->protocol) {
1909 	case RTE_SECURITY_PROTOCOL_IPSEC:
1910 		ret = caam_jr_set_ipsec_session(cdev, conf,
1911 				sess_private_data);
1912 		break;
1913 	case RTE_SECURITY_PROTOCOL_MACSEC:
1914 		return -ENOTSUP;
1915 	default:
1916 		return -EINVAL;
1917 	}
1918 	if (ret != 0)
1919 		CAAM_JR_ERR("failed to configure session parameters");
1921 
1922 	return ret;
1923 }
1924 
1925 /* Clear the memory of session so it doesn't leave key material behind */
1926 static int
1927 caam_jr_security_session_destroy(void *dev __rte_unused,
1928 				 struct rte_security_session *sess)
1929 {
1930 	struct caam_jr_session *s = SECURITY_GET_SESS_PRIV(sess);

1931 	PMD_INIT_FUNC_TRACE();
1932 
1933 	if (s) {
1934 		rte_free(s->cipher_key.data);
1935 		rte_free(s->auth_key.data);
1936 		memset(s, 0, sizeof(struct caam_jr_session));
1937 	}
1938 	return 0;
1939 }
1940 
1941 static unsigned int
1942 caam_jr_security_session_get_size(void *device __rte_unused)
1943 {
1944 	return sizeof(struct caam_jr_session);
1945 }
1946 
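/* Device configure: lazily create the per-device context pool from which a
 * struct caam_jr_op_ctx is taken for every enqueued operation (see
 * caam_jr_alloc_ctx()). The pool is kept across reconfigurations and freed
 * only in dev_close()/uninit.
 */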
1947 static int
1948 caam_jr_dev_configure(struct rte_cryptodev *dev,
1949 		       struct rte_cryptodev_config *config __rte_unused)
1950 {
1951 	char str[20];
1952 	struct sec_job_ring_t *internals;
1953 
1954 	PMD_INIT_FUNC_TRACE();
1955 
1956 	internals = dev->data->dev_private;
1957 	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1958 	if (!internals->ctx_pool) {
1959 		internals->ctx_pool = rte_mempool_create((const char *)str,
1960 						CTX_POOL_NUM_BUFS,
1961 						sizeof(struct caam_jr_op_ctx),
1962 						CTX_POOL_CACHE_SIZE, 0,
1963 						NULL, NULL, NULL, NULL,
1964 						SOCKET_ID_ANY, 0);
1965 		if (!internals->ctx_pool) {
1966 			CAAM_JR_ERR("%s create failed", str);
1967 			return -ENOMEM;
1968 		}
1969 	} else
1970 		CAAM_JR_INFO("mempool already created for dev_id : %d",
1971 				dev->data->dev_id);
1972 
1973 	return 0;
1974 }
1975 
1976 static int
1977 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
1978 {
1979 	PMD_INIT_FUNC_TRACE();
1980 	return 0;
1981 }
1982 
1983 static void
1984 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
1985 {
1986 	PMD_INIT_FUNC_TRACE();
1987 }
1988 
1989 static int
1990 caam_jr_dev_close(struct rte_cryptodev *dev)
1991 {
1992 	struct sec_job_ring_t *internals;
1993 
1994 	PMD_INIT_FUNC_TRACE();
1995 
1996 	if (dev == NULL)
1997 		return -EINVAL;
1998 
1999 	internals = dev->data->dev_private;
2000 	rte_mempool_free(internals->ctx_pool);
2001 	internals->ctx_pool = NULL;
2002 
2003 	return 0;
2004 }
2005 
2006 static void
2007 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2008 		       struct rte_cryptodev_info *info)
2009 {
2010 	struct sec_job_ring_t *internals = dev->data->dev_private;
2011 
2012 	PMD_INIT_FUNC_TRACE();
2013 	if (info != NULL) {
2014 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2015 		info->feature_flags = dev->feature_flags;
2016 		info->capabilities = caam_jr_get_cryptodev_capabilities();
2017 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2018 		info->driver_id = cryptodev_driver_id;
2019 	}
2020 }
2021 
2022 static struct rte_cryptodev_ops caam_jr_ops = {
2023 	.dev_configure	      = caam_jr_dev_configure,
2024 	.dev_start	      = caam_jr_dev_start,
2025 	.dev_stop	      = caam_jr_dev_stop,
2026 	.dev_close	      = caam_jr_dev_close,
2027 	.dev_infos_get        = caam_jr_dev_infos_get,
2028 	.stats_get	      = caam_jr_stats_get,
2029 	.stats_reset	      = caam_jr_stats_reset,
2030 	.queue_pair_setup     = caam_jr_queue_pair_setup,
2031 	.queue_pair_release   = caam_jr_queue_pair_release,
2032 	.sym_session_get_size = caam_jr_sym_session_get_size,
2033 	.sym_session_configure = caam_jr_sym_session_configure,
2034 	.sym_session_clear    = caam_jr_sym_session_clear
2035 };
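
/* These ops are not called directly by applications; they are reached
 * through the generic cryptodev API. A minimal control-path sketch
 * (illustrative only, dev_id and qp_conf are hypothetical caller values):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = 0,
 *		.nb_queue_pairs = 1,
 *	};
 *	rte_cryptodev_configure(dev_id, &conf);      -> caam_jr_dev_configure
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, 0);
 *						     -> caam_jr_queue_pair_setup
 *	rte_cryptodev_start(dev_id);                 -> caam_jr_dev_start
 */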
2036 
2037 static struct rte_security_ops caam_jr_security_ops = {
2038 	.session_create = caam_jr_security_session_create,
2039 	.session_update = NULL,
2040 	.session_get_size = caam_jr_security_session_get_size,
2041 	.session_stats_get = NULL,
2042 	.session_destroy = caam_jr_security_session_destroy,
2043 	.set_pkt_metadata = NULL,
2044 	.capabilities_get = caam_jr_get_security_capabilities
2045 };
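
/* Entry points for the rte_security API; e.g. a call like
 * rte_security_session_create(ctx, &conf, ...) (exact signature varies by
 * DPDK release) ends up in caam_jr_security_session_create() above.
 */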
2046 
2047 /** @brief Flush the job ring of any processed descriptors.
2048  * The processed descriptors are silently dropped,
2049  * without notifying the User Application (UA).
2050  */
2051 static void
2052 close_job_ring(struct sec_job_ring_t *job_ring)
2053 {
2054 	if (job_ring->irq_fd != -1) {
2055 		/* The producer index is frozen. If the consumer index is not
2056 		 * equal to it, there are still descriptors to flush.
2057 		 */
2058 		while (job_ring->pidx != job_ring->cidx)
2059 			hw_flush_job_ring(job_ring, false, NULL);
2060 
2061 		/* free the uio job ring */
2062 		free_job_ring(job_ring->irq_fd);
2063 		job_ring->irq_fd = -1;
2064 		caam_jr_dma_free(job_ring->input_ring);
2065 		caam_jr_dma_free(job_ring->output_ring);
2066 		g_job_rings_no--;
2067 	}
2068 }
2069 
2070 /** @brief Release the software and hardware resources tied to a job ring.
2071  * @param [in] job_ring The job ring
2072  *
2073  * @retval  0 for success
2074  * @retval  -1 for error
2075  */
2076 static int
2077 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2078 {
2079 	int ret = 0;
2080 
2081 	PMD_INIT_FUNC_TRACE();
2082 	ASSERT(job_ring != NULL);
2083 	ret = hw_shutdown_job_ring(job_ring);
2084 	SEC_ASSERT(ret == 0, ret,
2085 		"Failed to shutdown hardware job ring %p",
2086 		job_ring);
2087 
2088 	if (job_ring->coalescing_en)
2089 		hw_job_ring_disable_coalescing(job_ring);
2090 
2091 	if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2092 		ret = caam_jr_disable_irqs(job_ring->irq_fd);
2093 		SEC_ASSERT(ret == 0, ret,
2094 		"Failed to disable irqs for job ring %p",
2095 		job_ring);
2096 	}
2097 
2098 	return ret;
2099 }
2100 
2101 /*
2102  * @brief Release the resources used by the SEC user space driver.
2103  *
2104  * Reset and release SEC's job rings indicated by the User Application at
2105  * init_job_ring() and free any memory allocated internally.
2106  * Call once during application tear down.
2107  *
2108  * @note In case there are any descriptors in-flight (descriptors received by
2109  * SEC driver for processing and for which no response was yet provided to UA),
2110  * the descriptors are discarded without any notifications to User Application.
2111  *
2112  * @retval 0 (SEC_SUCCESS) is returned for a successful execution
2113  * @retval -ENODEV is returned if the crypto device is NULL
2114  */
2115 static int
2116 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2117 {
2118 	struct sec_job_ring_t *internals;
2119 
2120 	PMD_INIT_FUNC_TRACE();
2121 	if (dev == NULL)
2122 		return -ENODEV;
2123 
2124 	internals = dev->data->dev_private;
2125 	rte_free(dev->security_ctx);
	dev->security_ctx = NULL;
2126 
2127 	/* If any descriptors are in flight, poll and wait
2128 	 * until all descriptors are received and silently discarded.
2129 	 */
2130 	if (internals) {
2131 		shutdown_job_ring(internals);
2132 		close_job_ring(internals);
2133 		rte_mempool_free(internals->ctx_pool);
2134 	}
2135 
2136 	CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2137 
2138 	/* last caam jr instance */
2139 	if (g_job_rings_no == 0)
2140 		g_driver_state = SEC_DRIVER_STATE_IDLE;
2141 
2142 	return SEC_SUCCESS;
2143 }
2144 
2145 /* @brief Initialize the software and hardware resources tied to a job ring.
2146  * @param [in] jr_mode		Mode used by the SEC driver to receive
2147  *				notifications from SEC. Can be one
2148  *				of the three: #SEC_NOTIFICATION_TYPE_NAPI,
2149  *				#SEC_NOTIFICATION_TYPE_IRQ or
2150  *				#SEC_NOTIFICATION_TYPE_POLL
2151  * @param [in] NAPI_mode	The NAPI work mode to configure a job ring at
2152  *				startup. Used only when jr_mode is
2153  *				set to #SEC_NOTIFICATION_TYPE_NAPI.
2154  * @param [in] irq_coalescing_timer This value determines the maximum
2155  *					amount of time after processing a
2156  *					descriptor before raising an interrupt.
2157  * @param [in] irq_coalescing_count This value determines how many
2158  *					descriptors are completed before
2159  *					raising an interrupt.
2160  * @param [in] reg_base_addr	The job ring base address register
2161  * @param [in] irq_id		The job ring interrupt identification number.
2162  * @retval  job_ring_handle for successful job ring configuration
2163  * @retval  NULL on error
2164  *
 * Note: only reg_base_addr and irq_id are actual parameters of this
 * function; the other values documented above are hard-coded locals below.
2165  */
2166 static void *
2167 init_job_ring(void *reg_base_addr, int irq_id)
2168 {
2169 	struct sec_job_ring_t *job_ring = NULL;
2170 	int i, ret = 0;
2171 	int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2172 	int napi_mode = 0;
2173 	int irq_coalescing_timer = 0;
2174 	int irq_coalescing_count = 0;
2175 
2176 	for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2177 		if (g_job_rings[i].irq_fd == -1) {
2178 			job_ring = &g_job_rings[i];
2179 			g_job_rings_no++;
2180 			break;
2181 		}
2182 	}
2183 	if (job_ring == NULL) {
2184 		CAAM_JR_ERR("No free job ring");
2185 		return NULL;
2186 	}
2187 
2188 	job_ring->register_base_addr = reg_base_addr;
2189 	job_ring->jr_mode = jr_mode;
2190 	job_ring->napi_mode = 0;
2191 	job_ring->irq_fd = irq_id;
2192 
2195 	/* Allocate memory for input ring */
2196 	job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2197 				SEC_DMA_MEM_INPUT_RING_SIZE);
	if (job_ring->input_ring == NULL) {
		CAAM_JR_ERR("Failed to allocate input ring memory");
		return NULL;
	}
2198 	memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2199 
2200 	/* Allocate memory for output ring */
2201 	job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2202 				SEC_DMA_MEM_OUTPUT_RING_SIZE);
	if (job_ring->output_ring == NULL) {
		CAAM_JR_ERR("Failed to allocate output ring memory");
		caam_jr_dma_free(job_ring->input_ring);
		return NULL;
	}
2203 	memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2204 
2205 	/* Reset job ring in SEC hw and configure job ring registers */
2206 	ret = hw_reset_job_ring(job_ring);
2207 	if (ret != 0) {
2208 		CAAM_JR_ERR("Failed to reset hardware job ring");
2209 		goto cleanup;
2210 	}
2211 
2212 	if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2213 		/* When the SEC US driver works in NAPI mode, the UA can select
2214 		 * whether the driver starts with IRQs on or off.
2215 		 */
2216 		if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2217 			CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2218 				job_ring);
2219 			ret = caam_jr_enable_irqs(job_ring->irq_fd);
2220 			if (ret != 0) {
2221 				CAAM_JR_ERR("Failed to enable irqs for job ring");
2222 				goto cleanup;
2223 			}
2224 		}
2225 	} else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2226 		/* When the SEC US driver works in pure interrupt mode,
2227 		 * IRQs are always enabled.
2228 		 */
2229 		CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2230 			 job_ring);
2231 		ret = caam_jr_enable_irqs(job_ring->irq_fd);
2232 		if (ret != 0) {
2233 			CAAM_JR_ERR("Failed to enable irqs for job ring");
2234 			goto cleanup;
2235 		}
2236 	}
2237 	if (irq_coalescing_timer || irq_coalescing_count) {
2238 		hw_job_ring_set_coalescing_param(job_ring,
2239 			 irq_coalescing_timer,
2240 			 irq_coalescing_count);
2241 
2242 		hw_job_ring_enable_coalescing(job_ring);
2243 		job_ring->coalescing_en = 1;
2244 	}
2245 
2246 	job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2247 	job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2248 	job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2249 
2250 	return job_ring;
2251 cleanup:
2252 	caam_jr_dma_free(job_ring->output_ring);
2253 	caam_jr_dma_free(job_ring->input_ring);
2254 	return NULL;
2255 }
2256 
2257 
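/* Create one cryptodev per CAAM job ring: detect the UIO-exposed rings
 * (sec_configure()), claim one (config_job_ring()), create the vdev-backed
 * cryptodev, bind the ring's registers and IRQ fd (init_job_ring()) and
 * attach the rte_security context.
 */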
2258 static int
2259 caam_jr_dev_init(const char *name,
2260 		 struct rte_vdev_device *vdev,
2261 		 struct rte_cryptodev_pmd_init_params *init_params)
2262 {
2263 	struct rte_cryptodev *dev;
2264 	struct rte_security_ctx *security_instance;
2265 	struct uio_job_ring *job_ring;
2266 	char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2267 
2268 	PMD_INIT_FUNC_TRACE();
2269 
2270 	/* Validate driver state */
2271 	if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2272 		g_job_rings_max = sec_configure();
2273 		if (!g_job_rings_max) {
2274 			CAAM_JR_ERR("No job ring detected on UIO");
2275 			return -1;
2276 		}
2277 		/* Update driver state */
2278 		g_driver_state = SEC_DRIVER_STATE_STARTED;
2279 	}
2280 
2281 	if (g_job_rings_no >= g_job_rings_max) {
2282 		CAAM_JR_ERR("No more job rings available, max=%d",
2283 				g_job_rings_max);
2284 		return -1;
2285 	}
2286 
2287 	job_ring = config_job_ring();
2288 	if (job_ring == NULL) {
2289 		CAAM_JR_ERR("failed to create job ring");
2290 		goto init_error;
2291 	}
2292 
2293 	snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2294 
2295 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2296 	if (dev == NULL) {
2297 		CAAM_JR_ERR("failed to create cryptodev vdev");
2298 		goto cleanup;
2299 	}
2300 	/* TODO: free during teardown */
2301 	dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2302 						job_ring->uio_fd);
2303 
2304 	if (!dev->data->dev_private) {
2305 		CAAM_JR_ERR("Ring memory allocation failed");
2306 		goto cleanup2;
2307 	}
2308 
2309 	dev->driver_id = cryptodev_driver_id;
2310 	dev->dev_ops = &caam_jr_ops;
2311 
2312 	/* register rx/tx burst functions for data path */
2313 	dev->dequeue_burst = caam_jr_dequeue_burst;
2314 	dev->enqueue_burst = caam_jr_enqueue_burst;
2315 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2316 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2317 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2318 			RTE_CRYPTODEV_FF_SECURITY |
2319 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2320 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2321 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2322 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2323 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2324 
2325 	/* For secondary processes, we don't initialise any further as primary
2326 	 * has already done this work. Only check we don't need a different
2327 	 * RX function
2328 	 */
2329 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2330 		CAAM_JR_WARN("Device already initialised by primary process");
2331 		return 0;
2332 	}
2333 
2334 	/* TODO: free during teardown */
2335 	security_instance = rte_malloc("caam_jr",
2336 				sizeof(struct rte_security_ctx), 0);
2337 	if (security_instance == NULL) {
2338 		CAAM_JR_ERR("memory allocation failed");
2340 		goto cleanup2;
2341 	}
2342 
2343 	security_instance->device = (void *)dev;
2344 	security_instance->ops = &caam_jr_security_ops;
2345 	security_instance->sess_cnt = 0;
2346 	dev->security_ctx = security_instance;
2347 
2348 	rte_cryptodev_pmd_probing_finish(dev);
2349 
2350 	CAAM_JR_INFO("%s cryptodev init", dev->data->name);
2351 
2352 	return 0;
2353 
2354 cleanup2:
2355 	caam_jr_dev_uninit(dev);
2356 	rte_cryptodev_pmd_release_device(dev);
2357 cleanup:
2358 	free_job_ring(job_ring->uio_fd);
2359 init_error:
2360 	CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2361 			init_params->name);
2362 
2363 	return -ENXIO;
2364 }
2365 
2366 /** Initialise CAAM JR crypto device */
2367 static int
2368 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2369 {
2370 	int ret;
2371 
2372 	struct rte_cryptodev_pmd_init_params init_params = {
2373 		"",
2374 		sizeof(struct sec_job_ring_t),
2375 		rte_socket_id(),
2376 		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2377 	};
2378 	const char *name;
2379 	const char *input_args;
2380 
2381 	name = rte_vdev_device_name(vdev);
2382 	if (name == NULL)
2383 		return -EINVAL;
2384 
2385 	input_args = rte_vdev_device_args(vdev);
2386 	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2387 
2388 	ret = of_init();
2389 	if (ret) {
2390 		CAAM_JR_ERR("of_init failed");
2391 		return -EINVAL;
2392 	}
2393 	/* If the SEC era is not yet configured, fetch it from the device tree */
2394 	if (!rta_get_sec_era()) {
2395 		const struct device_node *caam_node;
2396 
2397 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2398 			const uint32_t *prop = of_get_property(caam_node,
2399 					"fsl,sec-era",
2400 					NULL);
2401 			if (prop) {
2402 				rta_set_sec_era(
2403 					INTL_SEC_ERA(rte_be_to_cpu_32(*prop)));
2404 				break;
2405 			}
2406 		}
2407 	}
2408 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2409 	if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2410 		CAAM_JR_ERR("CAAM is compiled in BE mode for device with sec era > 8");
2411 		return -EINVAL;
2412 	}
2413 #endif
2414 
2415 	return caam_jr_dev_init(name, vdev, &init_params);
2416 }
2417 
2418 /** Uninitialise CAAM JR crypto device */
2419 static int
2420 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2421 {
2422 	struct rte_cryptodev *cryptodev;
2423 	const char *name;
2424 
2425 	name = rte_vdev_device_name(vdev);
2426 	if (name == NULL)
2427 		return -EINVAL;
2428 
2429 	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2430 	if (cryptodev == NULL)
2431 		return -ENODEV;
2432 
2433 	caam_jr_dev_uninit(cryptodev);
2434 
2435 	return rte_cryptodev_pmd_destroy(cryptodev);
2436 }
2437 
2438 static void
2439 sec_job_rings_init(void)
2440 {
2441 	int i;
2442 
2443 	for (i = 0; i < MAX_SEC_JOB_RINGS; i++)
2444 		g_job_rings[i].irq_fd = -1;
2445 }
2446 
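/* Virtual-device hooks. The PMD is instantiated from the EAL command line,
 * e.g. (illustrative values):
 *
 *	--vdev=crypto_caam_jr,max_nb_queue_pairs=2,socket_id=0
 */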
2447 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2448 	.probe = cryptodev_caam_jr_probe,
2449 	.remove = cryptodev_caam_jr_remove
2450 };
2451 
2452 static struct cryptodev_driver caam_jr_crypto_drv;
2453 
2454 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2455 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2456 	"max_nb_queue_pairs=<int>"
2457 	"socket_id=<int>");
2458 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2459 		cryptodev_driver_id);
2460 
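/* Constructor: runs at load time, before any probe. sec_job_rings_init()
 * marks every driver job ring slot free (irq_fd == -1);
 * sec_uio_job_rings_init() is assumed to do the same for the UIO bookkeeping.
 */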
2461 RTE_INIT(caam_jr_init)
2462 {
2463 	sec_uio_job_rings_init();
2464 	sec_job_rings_init();
2465 }
2466 
2467 RTE_LOG_REGISTER(caam_jr_logtype, pmd.crypto.caam, NOTICE);
2468