1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 /* MAX_OPS_IN_BUFFER is the size of a batch of dequeued events */
29 #define MAX_OPS_IN_BUFFER BATCH_SIZE
30 
31 /* CRYPTO_ADAPTER_OPS_BUFFER_SZ accommodates MAX_OPS_IN_BUFFER ops plus
32  * additional space for one more batch
33  */
34 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (MAX_OPS_IN_BUFFER + BATCH_SIZE)
35 
36 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
37 
38 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
39  * iterations of eca_crypto_adapter_enq_run(). Must be a power of two.
40  */
41 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
42 
43 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
44 
45 struct crypto_ops_circular_buffer {
46 	/* index of head element in circular buffer */
47 	uint16_t head;
48 	/* index of tail element in circular buffer */
49 	uint16_t tail;
50 	/* number of elements in buffer */
51 	uint16_t count;
52 	/* size of circular buffer */
53 	uint16_t size;
54 	/* Pointer to hold rte_crypto_ops for batching */
55 	struct rte_crypto_op **op_buffer;
56 } __rte_cache_aligned;
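
/* The buffer above follows the usual head/tail convention: ops are written
 * at tail and drained from head, with count tracking occupancy. For example,
 * with size = 8, adding three ops via eca_circular_buffer_add() leaves
 * head = 0, tail = 3, count = 3; flushing two of them advances head to 2 and
 * drops count to 1. head == tail therefore means either empty (count == 0)
 * or full (count == size), which is why the flush helpers check count to
 * tell the two cases apart.
 */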
57 
58 struct event_crypto_adapter {
59 	/* Event device identifier */
60 	uint8_t eventdev_id;
61 	/* Event port identifier */
62 	uint8_t event_port_id;
63 	/* Store event port's implicit release capability */
64 	uint8_t implicit_release_disabled;
65 	/* Flag to indicate backpressure at the cryptodev;
66 	 * stop further dequeuing of events from the eventdev
67 	 */
68 	bool stop_enq_to_cryptodev;
69 	/* Max crypto ops processed in any service function invocation */
70 	uint32_t max_nb;
71 	/* Lock to serialize config updates with service function */
72 	rte_spinlock_t lock;
73 	/* Next crypto device to be processed */
74 	uint16_t next_cdev_id;
75 	/* Per crypto device structure */
76 	struct crypto_device_info *cdevs;
77 	/* Loop counter to flush crypto ops */
78 	uint16_t transmit_loop_count;
79 	/* Circular buffer for batching crypto ops to eventdev */
80 	struct crypto_ops_circular_buffer ebuf;
81 	/* Per instance stats structure */
82 	struct rte_event_crypto_adapter_stats crypto_stats;
83 	/* Configuration callback for rte_service configuration */
84 	rte_event_crypto_adapter_conf_cb conf_cb;
85 	/* Configuration callback argument */
86 	void *conf_arg;
87 	/* Set if the default config callback is being used */
88 	int default_cb_arg;
89 	/* Service initialization state */
90 	uint8_t service_inited;
91 	/* Memory allocation name */
92 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
93 	/* Socket identifier cached from eventdev */
94 	int socket_id;
95 	/* Per adapter EAL service */
96 	uint32_t service_id;
97 	/* No. of queue pairs configured */
98 	uint16_t nb_qps;
99 	/* Adapter mode */
100 	enum rte_event_crypto_adapter_mode mode;
101 } __rte_cache_aligned;
102 
103 /* Per crypto device information */
104 struct crypto_device_info {
105 	/* Pointer to cryptodev */
106 	struct rte_cryptodev *dev;
107 	/* Pointer to queue pair info */
108 	struct crypto_queue_pair_info *qpairs;
109 	/* Next queue pair to be processed */
110 	uint16_t next_queue_pair_id;
111 	/* Set to indicate cryptodev->eventdev packet
112 	 * transfer uses a hardware mechanism
113 	 */
114 	uint8_t internal_event_port;
115 	/* Set to indicate processing has been started */
116 	uint8_t dev_started;
117 	/* Number of queue pairs added to the adapter for this device.
118 	 * If > 0, the start callback will be invoked if not already invoked
119 	 */
120 	uint16_t num_qpairs;
121 } __rte_cache_aligned;
122 
123 /* Per queue pair information */
124 struct crypto_queue_pair_info {
125 	/* Set to indicate queue pair is enabled */
126 	bool qp_enabled;
127 	/* Circular buffer for batching crypto ops to cdev */
128 	struct crypto_ops_circular_buffer cbuf;
129 } __rte_cache_aligned;
130 
131 static struct event_crypto_adapter **event_crypto_adapter;
132 
133 /* Macros to check for valid adapter */
134 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
135 	if (!eca_valid_id(id)) { \
136 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
137 		return retval; \
138 	} \
139 } while (0)
140 
141 static inline int
142 eca_valid_id(uint8_t id)
143 {
144 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
145 }
146 
147 static int
148 eca_init(void)
149 {
150 	const struct rte_memzone *mz;
151 	unsigned int sz;
152 
153 	sz = sizeof(*event_crypto_adapter) *
154 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
155 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
156 
157 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
158 	if (mz == NULL) {
159 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
160 						 rte_socket_id(), 0,
161 						 RTE_CACHE_LINE_SIZE);
162 		if (mz == NULL) {
163 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
164 					PRId32, rte_errno);
165 			return -rte_errno;
166 		}
167 	}
168 
169 	event_crypto_adapter = mz->addr;
170 	return 0;
171 }
172 
173 static int
174 eca_memzone_lookup(void)
175 {
176 	const struct rte_memzone *mz;
177 
178 	if (event_crypto_adapter == NULL) {
179 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
180 		if (mz == NULL)
181 			return -ENOMEM;
182 
183 		event_crypto_adapter = mz->addr;
184 	}
185 
186 	return 0;
187 }
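
/* eca_init() reserves the shared adapter array in the process that creates
 * the first adapter; eca_memzone_lookup() lets a process that did not create
 * it (e.g. a secondary process calling the stats or runtime-params APIs)
 * attach to the same memzone before dereferencing the array.
 */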
188 
189 static inline bool
190 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
191 {
192 	return bufp->count >= BATCH_SIZE;
193 }
194 
195 static inline bool
196 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
197 {
198 	/* true if the circular buffer has space for at least MAX_OPS_IN_BUFFER more ops */
199 	return (bufp->size - bufp->count) >= MAX_OPS_IN_BUFFER;
200 }
201 
202 static inline void
203 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
204 {
205 	rte_free(bufp->op_buffer);
206 }
207 
208 static inline int
209 eca_circular_buffer_init(const char *name,
210 			 struct crypto_ops_circular_buffer *bufp,
211 			 uint16_t sz)
212 {
213 	bufp->op_buffer = rte_zmalloc(name,
214 				      sizeof(struct rte_crypto_op *) * sz,
215 				      0);
216 	if (bufp->op_buffer == NULL)
217 		return -ENOMEM;
218 
219 	bufp->size = sz;
220 	return 0;
221 }
222 
223 static inline int
224 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
225 			struct rte_crypto_op *op)
226 {
227 	uint16_t *tailp = &bufp->tail;
228 
229 	bufp->op_buffer[*tailp] = op;
230 	/* circular buffer, go round */
231 	*tailp = (*tailp + 1) % bufp->size;
232 	bufp->count++;
233 
234 	return 0;
235 }
236 
237 static inline int
238 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
239 				  uint8_t cdev_id, uint16_t qp_id,
240 				  uint16_t *nb_ops_flushed)
241 {
242 	uint16_t n = 0;
243 	uint16_t *headp = &bufp->head;
244 	uint16_t *tailp = &bufp->tail;
245 	struct rte_crypto_op **ops = bufp->op_buffer;
246 
247 	if (*tailp > *headp)
248 		n = *tailp - *headp;
249 	else if (*tailp < *headp)
250 		n = bufp->size - *headp;
251 	else { /* head == tail case */
252 		/* when head == tail,
253 		 * circ buff is either full (tail pointer rolled over) or empty
254 		 */
255 		if (bufp->count != 0) {
256 			/* circ buffer is full */
257 			n = bufp->count;
258 		} else {
259 			/* circ buffer is empty */
260 			*nb_ops_flushed = 0;
261 			return 0;  /* buffer empty */
262 		}
263 	}
264 
265 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
266 						      &ops[*headp], n);
267 	bufp->count -= *nb_ops_flushed;
268 	if (!bufp->count) {
269 		*headp = 0;
270 		*tailp = 0;
271 	} else
272 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
273 
274 	return *nb_ops_flushed == n ? 0 : -1;
275 }
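
/* Note: when the valid region wraps past the end of op_buffer (tail < head),
 * the helper above enqueues only the ops up to the end of the array in one
 * call; the wrapped remainder at the start of the array is sent by a
 * subsequent flush.
 */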
276 
277 static inline struct event_crypto_adapter *
278 eca_id_to_adapter(uint8_t id)
279 {
280 	return event_crypto_adapter ?
281 		event_crypto_adapter[id] : NULL;
282 }
283 
284 static int
285 eca_default_config_cb(uint8_t id, uint8_t dev_id,
286 			struct rte_event_crypto_adapter_conf *conf, void *arg)
287 {
288 	struct rte_event_dev_config dev_conf;
289 	struct rte_eventdev *dev;
290 	uint8_t port_id;
291 	int started;
292 	int ret;
293 	struct rte_event_port_conf *port_conf = arg;
294 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
295 
296 	if (adapter == NULL)
297 		return -EINVAL;
298 
299 	dev = &rte_eventdevs[adapter->eventdev_id];
300 	dev_conf = dev->data->dev_conf;
301 
302 	started = dev->data->dev_started;
303 	if (started)
304 		rte_event_dev_stop(dev_id);
305 	port_id = dev_conf.nb_event_ports;
306 	dev_conf.nb_event_ports += 1;
307 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
308 		dev_conf.nb_single_link_event_port_queues += 1;
309 
310 	ret = rte_event_dev_configure(dev_id, &dev_conf);
311 	if (ret) {
312 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
313 		if (started) {
314 			if (rte_event_dev_start(dev_id))
315 				return -EIO;
316 		}
317 		return ret;
318 	}
319 
320 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
321 	if (ret) {
322 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
323 		return ret;
324 	}
325 
326 	conf->event_port_id = port_id;
327 	conf->max_nb = DEFAULT_MAX_NB;
328 	if (started)
329 		ret = rte_event_dev_start(dev_id);
330 
331 	adapter->default_cb_arg = 1;
332 	return ret;
333 }
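
/* An application may pass its own configuration callback to
 * rte_event_crypto_adapter_create_ext() instead of the default one above,
 * e.g. to hand the adapter a pre-created event port. A minimal sketch,
 * where app_port_id is an illustrative argument passed via conf_arg:
 *
 *	static int
 *	app_conf_cb(uint8_t id, uint8_t dev_id,
 *		    struct rte_event_crypto_adapter_conf *conf, void *arg)
 *	{
 *		uint8_t *app_port_id = arg;
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(dev_id);
 *		conf->event_port_id = *app_port_id;
 *		conf->max_nb = 128;
 *		return 0;
 *	}
 */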
334 
335 int
336 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
337 				rte_event_crypto_adapter_conf_cb conf_cb,
338 				enum rte_event_crypto_adapter_mode mode,
339 				void *conf_arg)
340 {
341 	struct event_crypto_adapter *adapter;
342 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
343 	int socket_id;
344 	uint8_t i;
345 	int ret;
346 
347 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
348 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
349 	if (conf_cb == NULL)
350 		return -EINVAL;
351 
352 	if (event_crypto_adapter == NULL) {
353 		ret = eca_init();
354 		if (ret)
355 			return ret;
356 	}
357 
358 	adapter = eca_id_to_adapter(id);
359 	if (adapter != NULL) {
360 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
361 		return -EEXIST;
362 	}
363 
364 	socket_id = rte_event_dev_socket_id(dev_id);
365 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
366 		 "rte_event_crypto_adapter_%d", id);
367 
368 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
369 			RTE_CACHE_LINE_SIZE, socket_id);
370 	if (adapter == NULL) {
371 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
372 		return -ENOMEM;
373 	}
374 
375 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
376 				     &adapter->ebuf,
377 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
378 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
379 		rte_free(adapter);
380 		return -ENOMEM;
381 	}
382 
383 	adapter->eventdev_id = dev_id;
384 	adapter->socket_id = socket_id;
385 	adapter->conf_cb = conf_cb;
386 	adapter->conf_arg = conf_arg;
387 	adapter->mode = mode;
388 	strcpy(adapter->mem_name, mem_name);
389 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
390 					rte_cryptodev_count() *
391 					sizeof(struct crypto_device_info), 0,
392 					socket_id);
393 	if (adapter->cdevs == NULL) {
394 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
395 		eca_circular_buffer_free(&adapter->ebuf);
396 		rte_free(adapter);
397 		return -ENOMEM;
398 	}
399 
400 	rte_spinlock_init(&adapter->lock);
401 	for (i = 0; i < rte_cryptodev_count(); i++)
402 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
403 
404 	event_crypto_adapter[id] = adapter;
405 
406 	return 0;
407 }
408 
409 
410 int
411 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
412 				struct rte_event_port_conf *port_config,
413 				enum rte_event_crypto_adapter_mode mode)
414 {
415 	struct rte_event_port_conf *pc;
416 	int ret;
417 
418 	if (port_config == NULL)
419 		return -EINVAL;
420 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
421 
422 	pc = rte_malloc(NULL, sizeof(*pc), 0);
423 	if (pc == NULL)
424 		return -ENOMEM;
425 	*pc = *port_config;
426 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
427 						  eca_default_config_cb,
428 						  mode,
429 						  pc);
430 	if (ret)
431 		rte_free(pc);
432 
433 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, port_config, mode, ret);
434 
435 	return ret;
436 }
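
/* A minimal usage sketch for the create path above (port configuration
 * values and variable names are illustrative):
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *					      RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 */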
437 
438 int
439 rte_event_crypto_adapter_free(uint8_t id)
440 {
441 	struct event_crypto_adapter *adapter;
442 
443 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
444 
445 	adapter = eca_id_to_adapter(id);
446 	if (adapter == NULL)
447 		return -EINVAL;
448 
449 	if (adapter->nb_qps) {
450 		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
451 				adapter->nb_qps);
452 		return -EBUSY;
453 	}
454 
455 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
456 	if (adapter->default_cb_arg)
457 		rte_free(adapter->conf_arg);
458 	rte_free(adapter->cdevs);
459 	rte_free(adapter);
460 	event_crypto_adapter[id] = NULL;
461 
462 	return 0;
463 }
464 
465 static inline unsigned int
466 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
467 		     unsigned int cnt)
468 {
469 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
470 	union rte_event_crypto_metadata *m_data = NULL;
471 	struct crypto_queue_pair_info *qp_info = NULL;
472 	struct rte_crypto_op *crypto_op;
473 	unsigned int i, n;
474 	uint16_t qp_id, nb_enqueued = 0;
475 	uint8_t cdev_id;
476 	int ret;
477 
478 	ret = 0;
479 	n = 0;
480 	stats->event_deq_count += cnt;
481 
482 	for (i = 0; i < cnt; i++) {
483 		crypto_op = ev[i].event_ptr;
484 		if (crypto_op == NULL)
485 			continue;
486 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
487 		if (m_data == NULL) {
488 			rte_pktmbuf_free(crypto_op->sym->m_src);
489 			rte_crypto_op_free(crypto_op);
490 			continue;
491 		}
492 
493 		cdev_id = m_data->request_info.cdev_id;
494 		qp_id = m_data->request_info.queue_pair_id;
495 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
496 		if (!qp_info->qp_enabled) {
497 			rte_pktmbuf_free(crypto_op->sym->m_src);
498 			rte_crypto_op_free(crypto_op);
499 			continue;
500 		}
501 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
502 
503 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
504 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
505 								cdev_id,
506 								qp_id,
507 								&nb_enqueued);
508 			stats->crypto_enq_count += nb_enqueued;
509 			n += nb_enqueued;
510 
511 			/**
512 			 * If some crypto ops failed to flush to cdev and
513 			 * space for another batch is not available, stop
514 			 * dequeue from eventdev momentarily
515 			 */
516 			if (unlikely(ret < 0 &&
517 				!eca_circular_buffer_space_for_batch(
518 							&qp_info->cbuf)))
519 				adapter->stop_enq_to_cryptodev = true;
520 		}
521 	}
522 
523 	return n;
524 }
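
/* The metadata read above (and in eca_ops_enqueue_burst() below) is filled
 * in by the application, typically attached to the crypto session and read
 * back here through rte_cryptodev_session_event_mdata_get(). A sketch of
 * the fields this file relies on (app_ev_queue_id is illustrative):
 *
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	m_data.request_info.cdev_id = cdev_id;
 *	m_data.request_info.queue_pair_id = qp_id;
 *	m_data.response_info.queue_id = app_ev_queue_id;
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 */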
525 
526 static unsigned int
527 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
528 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
529 {
530 	struct crypto_device_info *curr_dev;
531 	struct crypto_queue_pair_info *curr_queue;
532 	struct rte_cryptodev *dev;
533 	uint16_t nb = 0, nb_enqueued = 0;
534 	uint16_t qp;
535 
536 	curr_dev = &adapter->cdevs[cdev_id];
537 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
538 
539 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
540 
541 		curr_queue = &curr_dev->qpairs[qp];
542 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
543 			continue;
544 
545 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
546 						  cdev_id,
547 						  qp,
548 						  &nb_enqueued);
549 		*nb_ops_flushed += curr_queue->cbuf.count;
550 		nb += nb_enqueued;
551 	}
552 
553 	return nb;
554 }
555 
556 static unsigned int
557 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
558 {
559 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
560 	uint8_t cdev_id;
561 	uint16_t nb_enqueued = 0;
562 	uint16_t nb_ops_flushed = 0;
563 	uint16_t num_cdev = rte_cryptodev_count();
564 
565 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
566 		nb_enqueued += eca_crypto_cdev_flush(adapter,
567 						    cdev_id,
568 						    &nb_ops_flushed);
569 	/* Enable dequeue from the eventdev if all ops in the circular
570 	 * buffers have been flushed to the cdevs
571 	 */
573 	if (!nb_ops_flushed)
574 		adapter->stop_enq_to_cryptodev = false;
575 
576 	stats->crypto_enq_count += nb_enqueued;
577 
578 	return nb_enqueued;
579 }
580 
581 static int
582 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
583 			   unsigned int max_enq)
584 {
585 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
586 	struct rte_event ev[BATCH_SIZE];
587 	unsigned int nb_enq, nb_enqueued;
588 	uint16_t n;
589 	uint8_t event_dev_id = adapter->eventdev_id;
590 	uint8_t event_port_id = adapter->event_port_id;
591 
592 	nb_enqueued = 0;
593 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
594 		return 0;
595 
596 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
597 
598 		if (unlikely(adapter->stop_enq_to_cryptodev)) {
599 			nb_enqueued += eca_crypto_enq_flush(adapter);
600 
601 			if (unlikely(adapter->stop_enq_to_cryptodev))
602 				break;
603 		}
604 
605 		stats->event_poll_count++;
606 		n = rte_event_dequeue_burst(event_dev_id,
607 					    event_port_id, ev, BATCH_SIZE, 0);
608 
609 		if (!n)
610 			break;
611 
612 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
613 	}
614 
615 	if ((++adapter->transmit_loop_count &
616 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
617 		nb_enqueued += eca_crypto_enq_flush(adapter);
618 	}
619 
620 	return nb_enqueued;
621 }
622 
623 static inline uint16_t
624 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
625 		  struct rte_crypto_op **ops, uint16_t num)
626 {
627 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
628 	union rte_event_crypto_metadata *m_data = NULL;
629 	uint8_t event_dev_id = adapter->eventdev_id;
630 	uint8_t event_port_id = adapter->event_port_id;
631 	struct rte_event events[BATCH_SIZE];
632 	uint16_t nb_enqueued, nb_ev;
633 	uint8_t retry;
634 	uint8_t i;
635 
636 	nb_ev = 0;
637 	retry = 0;
638 	nb_enqueued = 0;
639 	num = RTE_MIN(num, BATCH_SIZE);
640 	for (i = 0; i < num; i++) {
641 		struct rte_event *ev = &events[nb_ev++];
642 
643 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
644 		if (unlikely(m_data == NULL)) {
645 			rte_pktmbuf_free(ops[i]->sym->m_src);
646 			rte_crypto_op_free(ops[i]);
647 			continue;
648 		}
649 
650 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
651 		ev->event_ptr = ops[i];
652 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
653 		if (adapter->implicit_release_disabled)
654 			ev->op = RTE_EVENT_OP_FORWARD;
655 		else
656 			ev->op = RTE_EVENT_OP_NEW;
657 	}
658 
659 	do {
660 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
661 						  event_port_id,
662 						  &events[nb_enqueued],
663 						  nb_ev - nb_enqueued);
664 
665 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
666 		 nb_enqueued < nb_ev);
667 
668 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
669 	stats->event_enq_count += nb_enqueued;
670 	stats->event_enq_retry_count += retry - 1;
671 
672 	return nb_enqueued;
673 }
674 
675 static int
676 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
677 				   struct crypto_ops_circular_buffer *bufp)
678 {
679 	uint16_t n = 0, nb_ops_flushed;
680 	uint16_t *headp = &bufp->head;
681 	uint16_t *tailp = &bufp->tail;
682 	struct rte_crypto_op **ops = bufp->op_buffer;
683 
684 	if (*tailp > *headp)
685 		n = *tailp - *headp;
686 	else if (*tailp < *headp)
687 		n = bufp->size - *headp;
688 	else
689 		return 0;  /* buffer empty */
690 
691 	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
692 	bufp->count -= nb_ops_flushed;
693 	if (!bufp->count) {
694 		*headp = 0;
695 		*tailp = 0;
696 		return 0;  /* buffer empty */
697 	}
698 
699 	*headp = (*headp + nb_ops_flushed) % bufp->size;
700 	return 1;
701 }
702 
703 
704 static void
705 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
706 {
707 	if (likely(adapter->ebuf.count == 0))
708 		return;
709 
710 	while (eca_circular_buffer_flush_to_evdev(adapter,
711 						  &adapter->ebuf))
712 		;
713 }

714 static inline unsigned int
715 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
716 			   unsigned int max_deq)
717 {
718 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
719 	struct crypto_device_info *curr_dev;
720 	struct crypto_queue_pair_info *curr_queue;
721 	struct rte_crypto_op *ops[BATCH_SIZE];
722 	uint16_t n, nb_deq, nb_enqueued, i;
723 	struct rte_cryptodev *dev;
724 	uint8_t cdev_id;
725 	uint16_t qp, dev_qps;
726 	bool done;
727 	uint16_t num_cdev = rte_cryptodev_count();
728 
729 	nb_deq = 0;
730 	eca_ops_buffer_flush(adapter);
731 
732 	do {
733 		done = true;
734 
735 		for (cdev_id = adapter->next_cdev_id;
736 			cdev_id < num_cdev; cdev_id++) {
737 			uint16_t queues = 0;
738 
739 			curr_dev = &adapter->cdevs[cdev_id];
740 			dev = curr_dev->dev;
741 			if (unlikely(dev == NULL))
742 				continue;
743 
744 			dev_qps = dev->data->nb_queue_pairs;
745 
746 			for (qp = curr_dev->next_queue_pair_id;
747 				queues < dev_qps; qp = (qp + 1) % dev_qps,
748 				queues++) {
749 
750 				curr_queue = &curr_dev->qpairs[qp];
751 				if (unlikely(curr_queue == NULL ||
752 				    !curr_queue->qp_enabled))
753 					continue;
754 
755 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
756 					ops, BATCH_SIZE);
757 				if (!n)
758 					continue;
759 
760 				done = false;
761 				nb_enqueued = 0;
762 
763 				stats->crypto_deq_count += n;
764 
765 				if (unlikely(!adapter->ebuf.count))
766 					nb_enqueued = eca_ops_enqueue_burst(
767 							adapter, ops, n);
768 
769 				if (likely(nb_enqueued == n))
770 					goto check;
771 
772 				/* Failed to enqueue events case */
773 				for (i = nb_enqueued; i < n; i++)
774 					eca_circular_buffer_add(
775 						&adapter->ebuf,
776 						ops[i]);
777 
778 check:
779 				nb_deq += n;
780 
781 				if (nb_deq >= max_deq) {
782 					if ((qp + 1) == dev_qps) {
783 						adapter->next_cdev_id =
784 							(cdev_id + 1)
785 							% num_cdev;
786 					}
787 					curr_dev->next_queue_pair_id = (qp + 1)
788 						% dev->data->nb_queue_pairs;
789 
790 					return nb_deq;
791 				}
792 			}
793 		}
794 		adapter->next_cdev_id = 0;
795 	} while (done == false);
796 	return nb_deq;
797 }
798 
799 static int
800 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
801 		       unsigned int max_ops)
802 {
803 	unsigned int ops_left = max_ops;
804 
805 	while (ops_left > 0) {
806 		unsigned int e_cnt, d_cnt;
807 
808 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
809 		ops_left -= RTE_MIN(ops_left, e_cnt);
810 
811 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
812 		ops_left -= RTE_MIN(ops_left, d_cnt);
813 
814 		if (e_cnt == 0 && d_cnt == 0)
815 			break;
816 
817 	}
818 
819 	if (ops_left == max_ops) {
820 		rte_event_maintain(adapter->eventdev_id,
821 				   adapter->event_port_id, 0);
822 		return -EAGAIN;
823 	} else
824 		return 0;
825 }
826 
827 static int
828 eca_service_func(void *args)
829 {
830 	struct event_crypto_adapter *adapter = args;
831 	int ret;
832 
833 	if (rte_spinlock_trylock(&adapter->lock) == 0)
834 		return 0;
835 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
836 	rte_spinlock_unlock(&adapter->lock);
837 
838 	return ret;
839 }
840 
841 static int
842 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
843 {
844 	struct rte_event_crypto_adapter_conf adapter_conf;
845 	struct rte_service_spec service;
846 	int ret;
847 	uint32_t impl_rel;
848 
849 	if (adapter->service_inited)
850 		return 0;
851 
852 	memset(&service, 0, sizeof(service));
853 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
854 		"rte_event_crypto_adapter_%d", id);
855 	service.socket_id = adapter->socket_id;
856 	service.callback = eca_service_func;
857 	service.callback_userdata = adapter;
858 	/* Service function handles locking for queue add/del updates */
859 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
860 	ret = rte_service_component_register(&service, &adapter->service_id);
861 	if (ret) {
862 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
863 			service.name, ret);
864 		return ret;
865 	}
866 
867 	ret = adapter->conf_cb(id, adapter->eventdev_id,
868 		&adapter_conf, adapter->conf_arg);
869 	if (ret) {
870 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
871 			ret);
872 		return ret;
873 	}
874 
875 	adapter->max_nb = adapter_conf.max_nb;
876 	adapter->event_port_id = adapter_conf.event_port_id;
877 
878 	if (rte_event_port_attr_get(adapter->eventdev_id,
879 				adapter->event_port_id,
880 				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
881 				&impl_rel)) {
882 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
883 				 adapter->eventdev_id);
884 		eca_circular_buffer_free(&adapter->ebuf);
885 		rte_free(adapter);
886 		return -EINVAL;
887 	}
888 
889 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
890 	adapter->service_inited = 1;
891 
892 	return ret;
893 }
894 
895 static void
896 eca_update_qp_info(struct event_crypto_adapter *adapter,
897 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
898 		   uint8_t add)
899 {
900 	struct crypto_queue_pair_info *qp_info;
901 	int enabled;
902 	uint16_t i;
903 
904 	if (dev_info->qpairs == NULL)
905 		return;
906 
907 	if (queue_pair_id == -1) {
908 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
909 			eca_update_qp_info(adapter, dev_info, i, add);
910 	} else {
911 		qp_info = &dev_info->qpairs[queue_pair_id];
912 		enabled = qp_info->qp_enabled;
913 		if (add) {
914 			adapter->nb_qps += !enabled;
915 			dev_info->num_qpairs += !enabled;
916 		} else {
917 			adapter->nb_qps -= enabled;
918 			dev_info->num_qpairs -= enabled;
919 		}
920 		qp_info->qp_enabled = !!add;
921 	}
922 }
923 
924 static int
925 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
926 		   int queue_pair_id)
927 {
928 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
929 	struct crypto_queue_pair_info *qpairs;
930 	uint32_t i;
931 
932 	if (dev_info->qpairs == NULL) {
933 		dev_info->qpairs =
934 		    rte_zmalloc_socket(adapter->mem_name,
935 					dev_info->dev->data->nb_queue_pairs *
936 					sizeof(struct crypto_queue_pair_info),
937 					0, adapter->socket_id);
938 		if (dev_info->qpairs == NULL)
939 			return -ENOMEM;
940 
941 		qpairs = dev_info->qpairs;
942 
943 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
944 					     &qpairs->cbuf,
945 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
946 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
947 					 "buffer");
948 			rte_free(qpairs);
949 			return -ENOMEM;
950 		}
951 	}
952 
953 	if (queue_pair_id == -1) {
954 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
955 			eca_update_qp_info(adapter, dev_info, i, 1);
956 	} else
957 		eca_update_qp_info(adapter, dev_info,
958 					(uint16_t)queue_pair_id, 1);
959 
960 	return 0;
961 }
962 
963 int
964 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
965 			uint8_t cdev_id,
966 			int32_t queue_pair_id,
967 			const struct rte_event_crypto_adapter_queue_conf *conf)
968 {
969 	struct rte_event_crypto_adapter_vector_limits limits;
970 	struct event_crypto_adapter *adapter;
971 	struct crypto_device_info *dev_info;
972 	struct rte_eventdev *dev;
973 	uint32_t cap;
974 	int ret;
975 
976 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
977 
978 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
979 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
980 		return -EINVAL;
981 	}
982 
983 	adapter = eca_id_to_adapter(id);
984 	if (adapter == NULL)
985 		return -EINVAL;
986 
987 	dev = &rte_eventdevs[adapter->eventdev_id];
988 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
989 						cdev_id,
990 						&cap);
991 	if (ret) {
992 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
993 			" cdev %" PRIu8, id, cdev_id);
994 		return ret;
995 	}
996 
997 	if (conf == NULL) {
998 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
999 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
1000 					 cdev_id);
1001 			return -EINVAL;
1002 		}
1003 	} else {
1004 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
1005 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
1006 				RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
1007 						 "dev %" PRIu8 " cdev %" PRIu8, id,
1008 						 cdev_id);
1009 				return -ENOTSUP;
1010 			}
1011 
1012 			ret = rte_event_crypto_adapter_vector_limits_get(
1013 				adapter->eventdev_id, cdev_id, &limits);
1014 			if (ret < 0) {
1015 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
1016 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
1017 						 id, cdev_id);
1018 				return -EINVAL;
1019 			}
1020 
1021 			if (conf->vector_sz < limits.min_sz ||
1022 			    conf->vector_sz > limits.max_sz ||
1023 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1024 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1025 			    conf->vector_mp == NULL) {
1026 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1027 						" dev %" PRIu8 " cdev %" PRIu8,
1028 						id, cdev_id);
1029 				return -EINVAL;
1030 			}
1031 
1032 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1033 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1034 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1035 						" dev %" PRIu8 " cdev %" PRIu8,
1036 						id, cdev_id);
1037 				return -EINVAL;
1038 			}
1039 		}
1040 	}
1041 
1042 	dev_info = &adapter->cdevs[cdev_id];
1043 
1044 	if (queue_pair_id != -1 &&
1045 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1046 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1047 				 (uint16_t)queue_pair_id);
1048 		return -EINVAL;
1049 	}
1050 
1051 	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1052 	 * no service core is needed as the HW supports event forwarding.
1053 	 */
1054 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1055 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1056 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1057 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1058 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1059 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1060 			return -ENOTSUP;
1061 		if (dev_info->qpairs == NULL) {
1062 			dev_info->qpairs =
1063 			    rte_zmalloc_socket(adapter->mem_name,
1064 					dev_info->dev->data->nb_queue_pairs *
1065 					sizeof(struct crypto_queue_pair_info),
1066 					0, adapter->socket_id);
1067 			if (dev_info->qpairs == NULL)
1068 				return -ENOMEM;
1069 		}
1070 
1071 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1072 				dev_info->dev,
1073 				queue_pair_id,
1074 				conf);
1075 		if (ret)
1076 			return ret;
1077 
1078 		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1079 				   queue_pair_id, 1);
1081 	}
1082 
1083 	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1084 	 * or a SW adapter is used, initiate the service so the application can
1085 	 * choose whichever way it wants to use the adapter.
1086 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1087 	 *         Application may want to use one of the below two modes
1088 	 *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
1089 	 *          b. OP_NEW mode -> HW dequeue
1090 	 * Case 2: No HW caps, use SW adapter
1091 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1092 	 *          b. OP_NEW mode -> SW dequeue
1093 	 */
1094 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1095 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1096 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1097 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1098 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1099 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1100 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1101 		rte_spinlock_lock(&adapter->lock);
1102 		ret = eca_init_service(adapter, id);
1103 		if (ret == 0)
1104 			ret = eca_add_queue_pair(adapter, cdev_id,
1105 						 queue_pair_id);
1106 		rte_spinlock_unlock(&adapter->lock);
1107 
1108 		if (ret)
1109 			return ret;
1110 
1111 		rte_service_component_runstate_set(adapter->service_id, 1);
1112 	}
1113 
1114 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1115 		queue_pair_id, conf);
1116 	return 0;
1117 }
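
/* A minimal bind sketch for the API above; conf may be NULL when the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability is not
 * set, and a queue_pair_id of -1 adds every queue pair of the cryptodev
 * (adapter_id, evdev_id and cdev_id are illustrative):
 *
 *	uint32_t caps;
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
 *	if (ret == 0 &&
 *	    !(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND))
 *		ret = rte_event_crypto_adapter_queue_pair_add(adapter_id,
 *							      cdev_id, -1, NULL);
 */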
1118 
1119 int
1120 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1121 					int32_t queue_pair_id)
1122 {
1123 	struct event_crypto_adapter *adapter;
1124 	struct crypto_device_info *dev_info;
1125 	struct rte_eventdev *dev;
1126 	int ret;
1127 	uint32_t cap;
1128 	uint16_t i;
1129 
1130 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1131 
1132 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1133 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1134 		return -EINVAL;
1135 	}
1136 
1137 	adapter = eca_id_to_adapter(id);
1138 	if (adapter == NULL)
1139 		return -EINVAL;
1140 
1141 	dev = &rte_eventdevs[adapter->eventdev_id];
1142 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1143 						cdev_id,
1144 						&cap);
1145 	if (ret)
1146 		return ret;
1147 
1148 	dev_info = &adapter->cdevs[cdev_id];
1149 
1150 	if (queue_pair_id != -1 &&
1151 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1152 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1153 				 (uint16_t)queue_pair_id);
1154 		return -EINVAL;
1155 	}
1156 
1157 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1158 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1159 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1160 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1161 			return -ENOTSUP;
1162 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1163 						dev_info->dev,
1164 						queue_pair_id);
1165 		if (ret == 0) {
1166 			eca_update_qp_info(adapter,
1167 					&adapter->cdevs[cdev_id],
1168 					queue_pair_id,
1169 					0);
1170 			if (dev_info->num_qpairs == 0) {
1171 				rte_free(dev_info->qpairs);
1172 				dev_info->qpairs = NULL;
1173 			}
1174 		}
1175 	} else {
1176 		if (adapter->nb_qps == 0)
1177 			return 0;
1178 
1179 		rte_spinlock_lock(&adapter->lock);
1180 		if (queue_pair_id == -1) {
1181 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1182 				i++)
1183 				eca_update_qp_info(adapter, dev_info, i, 0);
1185 		} else {
1186 			eca_update_qp_info(adapter, dev_info,
1187 						(uint16_t)queue_pair_id, 0);
1188 		}
1189 
1190 		if (dev_info->num_qpairs == 0) {
1191 			rte_free(dev_info->qpairs);
1192 			dev_info->qpairs = NULL;
1193 		}
1194 
1195 		rte_spinlock_unlock(&adapter->lock);
1196 		rte_service_component_runstate_set(adapter->service_id,
1197 				adapter->nb_qps);
1198 	}
1199 
1200 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1201 		queue_pair_id, ret);
1202 	return ret;
1203 }
1204 
1205 static int
1206 eca_adapter_ctrl(uint8_t id, int start)
1207 {
1208 	struct event_crypto_adapter *adapter;
1209 	struct crypto_device_info *dev_info;
1210 	struct rte_eventdev *dev;
1211 	uint32_t i;
1212 	int use_service;
1213 	int stop = !start;
1214 
1215 	use_service = 0;
1216 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1217 	adapter = eca_id_to_adapter(id);
1218 	if (adapter == NULL)
1219 		return -EINVAL;
1220 
1221 	dev = &rte_eventdevs[adapter->eventdev_id];
1222 
1223 	for (i = 0; i < rte_cryptodev_count(); i++) {
1224 		dev_info = &adapter->cdevs[i];
1225 		/* if starting, check for the number of queue pairs */
1226 		if (start && !dev_info->num_qpairs)
1227 			continue;
1228 		/* if stopping, check if the dev has been started */
1229 		if (stop && !dev_info->dev_started)
1230 			continue;
1231 		use_service |= !dev_info->internal_event_port;
1232 		dev_info->dev_started = start;
1233 		if (dev_info->internal_event_port == 0)
1234 			continue;
1235 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1236 						dev_info->dev) :
1237 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1238 						dev_info->dev);
1239 	}
1240 
1241 	if (use_service)
1242 		rte_service_runstate_set(adapter->service_id, start);
1243 
1244 	return 0;
1245 }
1246 
1247 int
1248 rte_event_crypto_adapter_start(uint8_t id)
1249 {
1250 	struct event_crypto_adapter *adapter;
1251 
1252 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1253 	adapter = eca_id_to_adapter(id);
1254 	if (adapter == NULL)
1255 		return -EINVAL;
1256 
1257 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1258 	return eca_adapter_ctrl(id, 1);
1259 }
1260 
1261 int
1262 rte_event_crypto_adapter_stop(uint8_t id)
1263 {
1264 	rte_eventdev_trace_crypto_adapter_stop(id);
1265 	return eca_adapter_ctrl(id, 0);
1266 }
1267 
1268 int
1269 rte_event_crypto_adapter_stats_get(uint8_t id,
1270 				struct rte_event_crypto_adapter_stats *stats)
1271 {
1272 	struct event_crypto_adapter *adapter;
1273 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1274 	struct rte_event_crypto_adapter_stats dev_stats;
1275 	struct rte_eventdev *dev;
1276 	struct crypto_device_info *dev_info;
1277 	uint32_t i;
1278 	int ret;
1279 
1280 	if (eca_memzone_lookup())
1281 		return -ENOMEM;
1282 
1283 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1284 
1285 	adapter = eca_id_to_adapter(id);
1286 	if (adapter == NULL || stats == NULL)
1287 		return -EINVAL;
1288 
1289 	dev = &rte_eventdevs[adapter->eventdev_id];
1290 	memset(stats, 0, sizeof(*stats));
1291 	for (i = 0; i < rte_cryptodev_count(); i++) {
1292 		dev_info = &adapter->cdevs[i];
1293 		if (dev_info->internal_event_port == 0 ||
1294 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1295 			continue;
1296 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1297 						dev_info->dev,
1298 						&dev_stats);
1299 		if (ret)
1300 			continue;
1301 
1302 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1303 		dev_stats_sum.event_enq_count +=
1304 			dev_stats.event_enq_count;
1305 	}
1306 
1307 	if (adapter->service_inited)
1308 		*stats = adapter->crypto_stats;
1309 
1310 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1311 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1312 
1313 	rte_eventdev_trace_crypto_adapter_stats_get(id, stats,
1314 		stats->event_poll_count, stats->event_deq_count,
1315 		stats->crypto_enq_count, stats->crypto_enq_fail,
1316 		stats->crypto_deq_count, stats->event_enq_count,
1317 		stats->event_enq_retry_count, stats->event_enq_fail_count);
1318 
1319 	return 0;
1320 }
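
/* A sketch of reading the counters aggregated above (adapter_id is an
 * illustrative application value):
 *
 *	struct rte_event_crypto_adapter_stats stats;
 *
 *	if (rte_event_crypto_adapter_stats_get(adapter_id, &stats) == 0)
 *		printf("ev deq %" PRIu64 " cdev enq %" PRIu64
 *		       " cdev deq %" PRIu64 " ev enq %" PRIu64 "\n",
 *		       stats.event_deq_count, stats.crypto_enq_count,
 *		       stats.crypto_deq_count, stats.event_enq_count);
 */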
1321 
1322 int
1323 rte_event_crypto_adapter_stats_reset(uint8_t id)
1324 {
1325 	struct event_crypto_adapter *adapter;
1326 	struct crypto_device_info *dev_info;
1327 	struct rte_eventdev *dev;
1328 	uint32_t i;
1329 
1330 	rte_eventdev_trace_crypto_adapter_stats_reset(id);
1331 
1332 	if (eca_memzone_lookup())
1333 		return -ENOMEM;
1334 
1335 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1336 
1337 	adapter = eca_id_to_adapter(id);
1338 	if (adapter == NULL)
1339 		return -EINVAL;
1340 
1341 	dev = &rte_eventdevs[adapter->eventdev_id];
1342 	for (i = 0; i < rte_cryptodev_count(); i++) {
1343 		dev_info = &adapter->cdevs[i];
1344 		if (dev_info->internal_event_port == 0 ||
1345 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1346 			continue;
1347 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1348 						dev_info->dev);
1349 	}
1350 
1351 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1352 	return 0;
1353 }
1354 
1355 int
1356 rte_event_crypto_adapter_runtime_params_init(
1357 		struct rte_event_crypto_adapter_runtime_params *params)
1358 {
1359 	if (params == NULL)
1360 		return -EINVAL;
1361 
1362 	memset(params, 0, sizeof(*params));
1363 	params->max_nb = DEFAULT_MAX_NB;
1364 
1365 	return 0;
1366 }
1367 
1368 static int
1369 crypto_adapter_cap_check(struct event_crypto_adapter *adapter)
1370 {
1371 	int ret;
1372 	uint32_t caps;
1373 
1374 	if (!adapter->nb_qps)
1375 		return -EINVAL;
1376 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1377 						adapter->next_cdev_id,
1378 						&caps);
1379 	if (ret) {
1380 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1381 			" cdev %" PRIu8, adapter->eventdev_id,
1382 			adapter->next_cdev_id);
1383 		return ret;
1384 	}
1385 
1386 	if ((caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1387 	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1388 		return -ENOTSUP;
1389 
1390 	return 0;
1391 }
1392 
1393 int
1394 rte_event_crypto_adapter_runtime_params_set(uint8_t id,
1395 		struct rte_event_crypto_adapter_runtime_params *params)
1396 {
1397 	struct event_crypto_adapter *adapter;
1398 	int ret;
1399 
1400 	if (eca_memzone_lookup())
1401 		return -ENOMEM;
1402 
1403 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1404 
1405 	if (params == NULL) {
1406 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1407 		return -EINVAL;
1408 	}
1409 
1410 	adapter = eca_id_to_adapter(id);
1411 	if (adapter == NULL)
1412 		return -EINVAL;
1413 
1414 	ret = crypto_adapter_cap_check(adapter);
1415 	if (ret)
1416 		return ret;
1417 
1418 	rte_spinlock_lock(&adapter->lock);
1419 	adapter->max_nb = params->max_nb;
1420 	rte_spinlock_unlock(&adapter->lock);
1421 
1422 	return 0;
1423 }
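
/* A sketch of adjusting the per-invocation op limit at runtime through the
 * API above (only supported for the software adapter, as checked by
 * crypto_adapter_cap_check(); adapter_id is illustrative):
 *
 *	struct rte_event_crypto_adapter_runtime_params params;
 *	int ret;
 *
 *	rte_event_crypto_adapter_runtime_params_init(&params);
 *	params.max_nb = 256;
 *	ret = rte_event_crypto_adapter_runtime_params_set(adapter_id, &params);
 */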
1424 
1425 int
1426 rte_event_crypto_adapter_runtime_params_get(uint8_t id,
1427 		struct rte_event_crypto_adapter_runtime_params *params)
1428 {
1429 	struct event_crypto_adapter *adapter;
1430 	int ret;
1431 
1432 	if (eca_memzone_lookup())
1433 		return -ENOMEM;
1434 
1436 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1437 
1438 	if (params == NULL) {
1439 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1440 		return -EINVAL;
1441 	}
1442 
1443 	adapter = eca_id_to_adapter(id);
1444 	if (adapter == NULL)
1445 		return -EINVAL;
1446 
1447 	ret = crypto_adapter_cap_check(adapter);
1448 	if (ret)
1449 		return ret;
1450 
1451 	params->max_nb = adapter->max_nb;
1452 
1453 	return 0;
1454 }
1455 
1456 int
1457 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1458 {
1459 	struct event_crypto_adapter *adapter;
1460 
1461 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1462 
1463 	adapter = eca_id_to_adapter(id);
1464 	if (adapter == NULL || service_id == NULL)
1465 		return -EINVAL;
1466 
1467 	if (adapter->service_inited)
1468 		*service_id = adapter->service_id;
1469 
1470 	rte_eventdev_trace_crypto_adapter_service_id_get(id, *service_id);
1471 
1472 	return adapter->service_inited ? 0 : -ESRCH;
1473 }
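
/* For the software adapter, the service id retrieved above has to be mapped
 * to a service lcore before the adapter makes progress. A sketch, where
 * adapter_id and service_lcore_id are illustrative application values:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(adapter_id,
 *						    &service_id) == 0) {
 *		rte_service_lcore_add(service_lcore_id);
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_lcore_start(service_lcore_id);
 *	}
 */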
1474 
1475 int
1476 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1477 {
1478 	struct event_crypto_adapter *adapter;
1479 
1480 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1481 
1482 	adapter = eca_id_to_adapter(id);
1483 	if (adapter == NULL || event_port_id == NULL)
1484 		return -EINVAL;
1485 
1486 	*event_port_id = adapter->event_port_id;
1487 
1488 	rte_eventdev_trace_crypto_adapter_event_port_get(id, *event_port_id);
1489 
1490 	return 0;
1491 }
1492 
1493 int
1494 rte_event_crypto_adapter_vector_limits_get(
1495 	uint8_t dev_id, uint16_t cdev_id,
1496 	struct rte_event_crypto_adapter_vector_limits *limits)
1497 {
1498 	struct rte_cryptodev *cdev;
1499 	struct rte_eventdev *dev;
1500 	uint32_t cap;
1501 	int ret;
1502 
1503 	rte_eventdev_trace_crypto_adapter_vector_limits_get(dev_id, cdev_id, limits);
1504 
1505 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1506 
1507 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1508 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1509 		return -EINVAL;
1510 	}
1511 
1512 	if (limits == NULL) {
1513 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1514 		return -EINVAL;
1515 	}
1516 
1517 	dev = &rte_eventdevs[dev_id];
1518 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1519 
1520 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1521 	if (ret) {
1522 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1523 				 " cdev %" PRIu16, dev_id, cdev_id);
1524 		return ret;
1525 	}
1526 
1527 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1528 		RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
1529 				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
1530 		return -ENOTSUP;
1531 	}
1532 
1533 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1534 		return -ENOTSUP;
1535 
1536 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1537 		dev, cdev, limits);
1538 }
1539