xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision 3a80d7fb2ecdd6e8e48e56e3726b26980fa2a089)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
29 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
30 
31 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
32  * iterations of eca_crypto_adapter_enq_run()
33  */
34 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
35 
36 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
37 
38 struct crypto_ops_circular_buffer {
39 	/* index of head element in circular buffer */
40 	uint16_t head;
41 	/* index of tail element in circular buffer */
42 	uint16_t tail;
43 	/* number of elements in buffer */
44 	uint16_t count;
45 	/* size of circular buffer */
46 	uint16_t size;
47 	/* Array of rte_crypto_op pointers used for batching */
48 	struct rte_crypto_op **op_buffer;
49 } __rte_cache_aligned;
50 
51 struct event_crypto_adapter {
52 	/* Event device identifier */
53 	uint8_t eventdev_id;
54 	/* Event port identifier */
55 	uint8_t event_port_id;
56 	/* Set if the event port's implicit release is disabled */
57 	uint8_t implicit_release_disabled;
58 	/* Flag to indicate backpressure at cryptodev
59 	 * Stop further dequeuing events from eventdev
60 	 */
61 	bool stop_enq_to_cryptodev;
62 	/* Max crypto ops processed in any service function invocation */
63 	uint32_t max_nb;
64 	/* Lock to serialize config updates with service function */
65 	rte_spinlock_t lock;
66 	/* Next crypto device to be processed */
67 	uint16_t next_cdev_id;
68 	/* Per crypto device structure */
69 	struct crypto_device_info *cdevs;
70 	/* Loop counter to flush crypto ops */
71 	uint16_t transmit_loop_count;
72 	/* Circular buffer for batching crypto ops to eventdev */
73 	struct crypto_ops_circular_buffer ebuf;
74 	/* Per instance stats structure */
75 	struct rte_event_crypto_adapter_stats crypto_stats;
76 	/* Configuration callback for rte_service configuration */
77 	rte_event_crypto_adapter_conf_cb conf_cb;
78 	/* Configuration callback argument */
79 	void *conf_arg;
80 	/* Set if default_cb is being used */
81 	int default_cb_arg;
82 	/* Service initialization state */
83 	uint8_t service_inited;
84 	/* Memory allocation name */
85 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
86 	/* Socket identifier cached from eventdev */
87 	int socket_id;
88 	/* Per adapter EAL service */
89 	uint32_t service_id;
90 	/* No. of queue pairs configured */
91 	uint16_t nb_qps;
92 	/* Adapter mode */
93 	enum rte_event_crypto_adapter_mode mode;
94 } __rte_cache_aligned;
95 
96 /* Per crypto device information */
97 struct crypto_device_info {
98 	/* Pointer to cryptodev */
99 	struct rte_cryptodev *dev;
100 	/* Pointer to queue pair info */
101 	struct crypto_queue_pair_info *qpairs;
102 	/* Next queue pair to be processed */
103 	uint16_t next_queue_pair_id;
104 	/* Set to indicate cryptodev->eventdev packet
105 	 * transfer uses a hardware mechanism
106 	 */
107 	uint8_t internal_event_port;
108 	/* Set to indicate processing has been started */
109 	uint8_t dev_started;
110 	/* Number of queue pairs added; the start callback is
111 	 * invoked only if num_qpairs > 0 and not already invoked
112 	 */
113 	uint16_t num_qpairs;
114 } __rte_cache_aligned;
115 
116 /* Per queue pair information */
117 struct crypto_queue_pair_info {
118 	/* Set to indicate queue pair is enabled */
119 	bool qp_enabled;
120 	/* Circular buffer for batching crypto ops to cdev */
121 	struct crypto_ops_circular_buffer cbuf;
122 } __rte_cache_aligned;
123 
124 static struct event_crypto_adapter **event_crypto_adapter;
125 
126 /* Macro to check for a valid adapter id */
127 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
128 	if (!eca_valid_id(id)) { \
129 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
130 		return retval; \
131 	} \
132 } while (0)
133 
134 static inline int
135 eca_valid_id(uint8_t id)
136 {
137 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
138 }
139 
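/* Reserve (or attach to) the memzone that holds the global array of
 * adapter pointers so the array is shared across processes.
 */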
140 static int
141 eca_init(void)
142 {
143 	const struct rte_memzone *mz;
144 	unsigned int sz;
145 
146 	sz = sizeof(*event_crypto_adapter) *
147 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
148 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
149 
150 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
151 	if (mz == NULL) {
152 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
153 						 rte_socket_id(), 0,
154 						 RTE_CACHE_LINE_SIZE);
155 		if (mz == NULL) {
156 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
157 					PRId32, rte_errno);
158 			return -rte_errno;
159 		}
160 	}
161 
162 	event_crypto_adapter = mz->addr;
163 	return 0;
164 }
165 
166 static int
167 eca_memzone_lookup(void)
168 {
169 	const struct rte_memzone *mz;
170 
171 	if (event_crypto_adapter == NULL) {
172 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
173 		if (mz == NULL)
174 			return -ENOMEM;
175 
176 		event_crypto_adapter = mz->addr;
177 	}
178 
179 	return 0;
180 }
181 
182 static inline bool
183 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
184 {
185 	return bufp->count >= BATCH_SIZE;
186 }
187 
188 static inline bool
189 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
190 {
191 	return (bufp->size - bufp->count) >= BATCH_SIZE;
192 }
193 
194 static inline void
195 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
196 {
197 	rte_free(bufp->op_buffer);
198 }
199 
200 static inline int
201 eca_circular_buffer_init(const char *name,
202 			 struct crypto_ops_circular_buffer *bufp,
203 			 uint16_t sz)
204 {
205 	bufp->op_buffer = rte_zmalloc(name,
206 				      sizeof(struct rte_crypto_op *) * sz,
207 				      0);
208 	if (bufp->op_buffer == NULL)
209 		return -ENOMEM;
210 
211 	bufp->size = sz;
212 	return 0;
213 }
214 
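/* Append a single op at the tail of the circular buffer. The caller is
 * expected to ensure free space is available; no overflow check is done
 * here.
 */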
215 static inline int
216 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
217 			struct rte_crypto_op *op)
218 {
219 	uint16_t *tailp = &bufp->tail;
220 
221 	bufp->op_buffer[*tailp] = op;
222 	/* wrap around at the end of the circular buffer */
223 	*tailp = (*tailp + 1) % bufp->size;
224 	bufp->count++;
225 
226 	return 0;
227 }
228 
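/* Enqueue the contiguous run of buffered ops starting at head (up to the
 * wrap point) to the given cryptodev queue pair; head == tail is treated
 * as an empty buffer. Returns 0 if the whole run was accepted by the
 * cryptodev, -1 otherwise; *nb_ops_flushed reports how many ops were
 * actually enqueued.
 */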
229 static inline int
230 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
231 				  uint8_t cdev_id, uint16_t qp_id,
232 				  uint16_t *nb_ops_flushed)
233 {
234 	uint16_t n = 0;
235 	uint16_t *headp = &bufp->head;
236 	uint16_t *tailp = &bufp->tail;
237 	struct rte_crypto_op **ops = bufp->op_buffer;
238 
239 	if (*tailp > *headp)
240 		n = *tailp - *headp;
241 	else if (*tailp < *headp)
242 		n = bufp->size - *headp;
243 	else {
244 		*nb_ops_flushed = 0;
245 		return 0;  /* buffer empty */
246 	}
247 
248 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
249 						      &ops[*headp], n);
250 	bufp->count -= *nb_ops_flushed;
251 	if (!bufp->count) {
252 		*headp = 0;
253 		*tailp = 0;
254 	} else
255 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
256 
257 	return *nb_ops_flushed == n ? 0 : -1;
258 }
259 
260 static inline struct event_crypto_adapter *
261 eca_id_to_adapter(uint8_t id)
262 {
263 	return event_crypto_adapter ?
264 		event_crypto_adapter[id] : NULL;
265 }
266 
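/* Default configuration callback: stop the eventdev if it is running,
 * reconfigure it with one additional event port, set that port up with
 * the caller-supplied port config, hand the port to the adapter and
 * restart the eventdev if needed.
 */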
267 static int
268 eca_default_config_cb(uint8_t id, uint8_t dev_id,
269 			struct rte_event_crypto_adapter_conf *conf, void *arg)
270 {
271 	struct rte_event_dev_config dev_conf;
272 	struct rte_eventdev *dev;
273 	uint8_t port_id;
274 	int started;
275 	int ret;
276 	struct rte_event_port_conf *port_conf = arg;
277 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
278 
279 	if (adapter == NULL)
280 		return -EINVAL;
281 
282 	dev = &rte_eventdevs[adapter->eventdev_id];
283 	dev_conf = dev->data->dev_conf;
284 
285 	started = dev->data->dev_started;
286 	if (started)
287 		rte_event_dev_stop(dev_id);
288 	port_id = dev_conf.nb_event_ports;
289 	dev_conf.nb_event_ports += 1;
290 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
291 		dev_conf.nb_single_link_event_port_queues += 1;
292 
293 	ret = rte_event_dev_configure(dev_id, &dev_conf);
294 	if (ret) {
295 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
296 		if (started) {
297 			if (rte_event_dev_start(dev_id))
298 				return -EIO;
299 		}
300 		return ret;
301 	}
302 
303 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
304 	if (ret) {
305 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
306 		return ret;
307 	}
308 
309 	conf->event_port_id = port_id;
310 	conf->max_nb = DEFAULT_MAX_NB;
311 	if (started)
312 		ret = rte_event_dev_start(dev_id);
313 
314 	adapter->default_cb_arg = 1;
315 	return ret;
316 }
317 
318 int
319 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
320 				rte_event_crypto_adapter_conf_cb conf_cb,
321 				enum rte_event_crypto_adapter_mode mode,
322 				void *conf_arg)
323 {
324 	struct event_crypto_adapter *adapter;
325 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
326 	int socket_id;
327 	uint8_t i;
328 	int ret;
329 
330 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
331 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
332 	if (conf_cb == NULL)
333 		return -EINVAL;
334 
335 	if (event_crypto_adapter == NULL) {
336 		ret = eca_init();
337 		if (ret)
338 			return ret;
339 	}
340 
341 	adapter = eca_id_to_adapter(id);
342 	if (adapter != NULL) {
343 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
344 		return -EEXIST;
345 	}
346 
347 	socket_id = rte_event_dev_socket_id(dev_id);
348 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
349 		 "rte_event_crypto_adapter_%d", id);
350 
351 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
352 			RTE_CACHE_LINE_SIZE, socket_id);
353 	if (adapter == NULL) {
354 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
355 		return -ENOMEM;
356 	}
357 
358 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
359 				     &adapter->ebuf,
360 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
361 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
362 		rte_free(adapter);
363 		return -ENOMEM;
364 	}
365 
366 	adapter->eventdev_id = dev_id;
367 	adapter->socket_id = socket_id;
368 	adapter->conf_cb = conf_cb;
369 	adapter->conf_arg = conf_arg;
370 	adapter->mode = mode;
371 	strcpy(adapter->mem_name, mem_name);
372 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
373 					rte_cryptodev_count() *
374 					sizeof(struct crypto_device_info), 0,
375 					socket_id);
376 	if (adapter->cdevs == NULL) {
377 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
378 		eca_circular_buffer_free(&adapter->ebuf);
379 		rte_free(adapter);
380 		return -ENOMEM;
381 	}
382 
383 	rte_spinlock_init(&adapter->lock);
384 	for (i = 0; i < rte_cryptodev_count(); i++)
385 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
386 
387 	event_crypto_adapter[id] = adapter;
388 
389 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
390 		mode);
391 	return 0;
392 }
393 
394 
395 int
396 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
397 				struct rte_event_port_conf *port_config,
398 				enum rte_event_crypto_adapter_mode mode)
399 {
400 	struct rte_event_port_conf *pc;
401 	int ret;
402 
403 	if (port_config == NULL)
404 		return -EINVAL;
405 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
406 
407 	pc = rte_malloc(NULL, sizeof(*pc), 0);
408 	if (pc == NULL)
409 		return -ENOMEM;
410 	*pc = *port_config;
411 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
412 						  eca_default_config_cb,
413 						  mode,
414 						  pc);
415 	if (ret)
416 		rte_free(pc);
417 
418 	return ret;
419 }
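
/* A minimal usage sketch of the adapter setup sequence (illustrative
 * only): it assumes eventdev 0 and cryptodev 0 have already been
 * configured by the application, and that a service lcore (service_lcore
 * below) has been set up to run the adapter service on the SW path.
 *
 *	uint32_t service_id;
 *	struct rte_event_port_conf port_conf;
 *
 *	rte_event_port_default_conf_get(0, 0, &port_conf);
 *	rte_event_crypto_adapter_create(0, 0, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(0, 0, -1, NULL);
 *	if (rte_event_crypto_adapter_service_id_get(0, &service_id) == 0)
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *	rte_event_crypto_adapter_start(0);
 */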
420 
421 int
422 rte_event_crypto_adapter_free(uint8_t id)
423 {
424 	struct event_crypto_adapter *adapter;
425 
426 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
427 
428 	adapter = eca_id_to_adapter(id);
429 	if (adapter == NULL)
430 		return -EINVAL;
431 
432 	if (adapter->nb_qps) {
433 		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
434 				adapter->nb_qps);
435 		return -EBUSY;
436 	}
437 
438 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
439 	if (adapter->default_cb_arg)
440 		rte_free(adapter->conf_arg);
441 	rte_free(adapter->cdevs);
442 	rte_free(adapter);
443 	event_crypto_adapter[id] = NULL;
444 
445 	return 0;
446 }
447 
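/* OP_FORWARD path: extract the crypto ops carried by the dequeued events,
 * look up the destination cdev/queue pair from each op's request metadata
 * and buffer the op, flushing a batch to the cryptodev whenever one is
 * ready. Ops without metadata or targeting a disabled queue pair are
 * freed. Returns the number of ops enqueued to the cryptodevs.
 */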
448 static inline unsigned int
449 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
450 		     unsigned int cnt)
451 {
452 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
453 	union rte_event_crypto_metadata *m_data = NULL;
454 	struct crypto_queue_pair_info *qp_info = NULL;
455 	struct rte_crypto_op *crypto_op;
456 	unsigned int i, n;
457 	uint16_t qp_id, nb_enqueued = 0;
458 	uint8_t cdev_id;
459 	int ret;
460 
461 	ret = 0;
462 	n = 0;
463 	stats->event_deq_count += cnt;
464 
465 	for (i = 0; i < cnt; i++) {
466 		crypto_op = ev[i].event_ptr;
467 		if (crypto_op == NULL)
468 			continue;
469 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
470 		if (m_data == NULL) {
471 			rte_pktmbuf_free(crypto_op->sym->m_src);
472 			rte_crypto_op_free(crypto_op);
473 			continue;
474 		}
475 
476 		cdev_id = m_data->request_info.cdev_id;
477 		qp_id = m_data->request_info.queue_pair_id;
478 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
479 		if (!qp_info->qp_enabled) {
480 			rte_pktmbuf_free(crypto_op->sym->m_src);
481 			rte_crypto_op_free(crypto_op);
482 			continue;
483 		}
484 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
485 
486 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
487 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
488 								cdev_id,
489 								qp_id,
490 								&nb_enqueued);
491 			stats->crypto_enq_count += nb_enqueued;
492 			n += nb_enqueued;
493 
494 			/*
495 			 * If some crypto ops failed to flush to the cdev and
496 			 * there is no space for another batch, stop dequeuing
497 			 * from the eventdev momentarily
498 			 */
499 			if (unlikely(ret < 0 &&
500 				!eca_circular_buffer_space_for_batch(
501 							&qp_info->cbuf)))
502 				adapter->stop_enq_to_cryptodev = true;
503 		}
504 	}
505 
506 	return n;
507 }
508 
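/* Flush the buffered ops of every enabled queue pair of one cryptodev.
 * Returns the number of ops enqueued; *nb_ops_flushed accumulates the
 * number of ops still left buffered after the flush.
 */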
509 static unsigned int
510 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
511 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
512 {
513 	struct crypto_device_info *curr_dev;
514 	struct crypto_queue_pair_info *curr_queue;
515 	struct rte_cryptodev *dev;
516 	uint16_t nb = 0, nb_enqueued = 0;
517 	uint16_t qp;
518 
519 	curr_dev = &adapter->cdevs[cdev_id];
520 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
521 
522 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
523 
524 		curr_queue = &curr_dev->qpairs[qp];
525 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
526 			continue;
527 
528 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
529 						  cdev_id,
530 						  qp,
531 						  &nb_enqueued);
532 		*nb_ops_flushed += curr_queue->cbuf.count;
533 		nb += nb_enqueued;
534 	}
535 
536 	return nb;
537 }
538 
539 static unsigned int
540 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
541 {
542 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
543 	uint8_t cdev_id;
544 	uint16_t nb_enqueued = 0;
545 	uint16_t nb_ops_flushed = 0;
546 	uint16_t num_cdev = rte_cryptodev_count();
547 
548 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
549 		nb_enqueued += eca_crypto_cdev_flush(adapter,
550 						    cdev_id,
551 						    &nb_ops_flushed);
552 	/*
553 	 * Enable dequeuing from the eventdev once all buffered ops
554 	 * have been flushed to the cdevs
555 	 */
556 	if (!nb_ops_flushed)
557 		adapter->stop_enq_to_cryptodev = false;
558 
559 	stats->crypto_enq_count += nb_enqueued;
560 
561 	return nb_enqueued;
562 }
563 
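/* Service enqueue path (OP_FORWARD mode only): dequeue events from the
 * adapter's event port and push the embedded crypto ops towards the
 * cryptodevs, flushing periodically and backing off while the cryptodevs
 * are backpressured.
 */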
564 static int
565 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
566 			   unsigned int max_enq)
567 {
568 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
569 	struct rte_event ev[BATCH_SIZE];
570 	unsigned int nb_enq, nb_enqueued;
571 	uint16_t n;
572 	uint8_t event_dev_id = adapter->eventdev_id;
573 	uint8_t event_port_id = adapter->event_port_id;
574 
575 	nb_enqueued = 0;
576 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
577 		return 0;
578 
579 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
580 
581 		if (unlikely(adapter->stop_enq_to_cryptodev)) {
582 			nb_enqueued += eca_crypto_enq_flush(adapter);
583 
584 			if (unlikely(adapter->stop_enq_to_cryptodev))
585 				break;
586 		}
587 
588 		stats->event_poll_count++;
589 		n = rte_event_dequeue_burst(event_dev_id,
590 					    event_port_id, ev, BATCH_SIZE, 0);
591 
592 		if (!n)
593 			break;
594 
595 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
596 	}
597 
598 	if ((++adapter->transmit_loop_count &
599 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
600 		nb_enqueued += eca_crypto_enq_flush(adapter);
601 	}
602 
603 	return nb_enqueued;
604 }
605 
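/* Build events from the ops' response metadata and enqueue them to the
 * adapter's event port, retrying a bounded number of times. Ops without
 * metadata are freed. Returns the number of events enqueued.
 */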
606 static inline uint16_t
607 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
608 		  struct rte_crypto_op **ops, uint16_t num)
609 {
610 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
611 	union rte_event_crypto_metadata *m_data = NULL;
612 	uint8_t event_dev_id = adapter->eventdev_id;
613 	uint8_t event_port_id = adapter->event_port_id;
614 	struct rte_event events[BATCH_SIZE];
615 	uint16_t nb_enqueued, nb_ev;
616 	uint8_t retry;
617 	uint8_t i;
618 
619 	nb_ev = 0;
620 	retry = 0;
621 	nb_enqueued = 0;
622 	num = RTE_MIN(num, BATCH_SIZE);
623 	for (i = 0; i < num; i++) {
624 		struct rte_event *ev = &events[nb_ev++];
625 
626 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
627 		if (unlikely(m_data == NULL)) {
628 			rte_pktmbuf_free(ops[i]->sym->m_src);
629 			rte_crypto_op_free(ops[i]);
630 			continue;
631 		}
632 
633 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
634 		ev->event_ptr = ops[i];
635 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
636 		if (adapter->implicit_release_disabled)
637 			ev->op = RTE_EVENT_OP_FORWARD;
638 		else
639 			ev->op = RTE_EVENT_OP_NEW;
640 	}
641 
642 	do {
643 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
644 						  event_port_id,
645 						  &events[nb_enqueued],
646 						  nb_ev - nb_enqueued);
647 
648 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
649 		 nb_enqueued < nb_ev);
650 
651 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
652 	stats->event_enq_count += nb_enqueued;
653 	stats->event_enq_retry_count += retry - 1;
654 
655 	return nb_enqueued;
656 }
657 
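/* Flush the contiguous run of buffered ops starting at head to the
 * eventdev. Returns 0 once the buffer is drained, 1 if ops remain.
 */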
658 static int
659 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
660 				   struct crypto_ops_circular_buffer *bufp)
661 {
662 	uint16_t n = 0, nb_ops_flushed;
663 	uint16_t *headp = &bufp->head;
664 	uint16_t *tailp = &bufp->tail;
665 	struct rte_crypto_op **ops = bufp->op_buffer;
666 
667 	if (*tailp > *headp)
668 		n = *tailp - *headp;
669 	else if (*tailp < *headp)
670 		n = bufp->size - *headp;
671 	else
672 		return 0;  /* buffer empty */
673 
674 	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
675 	bufp->count -= nb_ops_flushed;
676 	if (!bufp->count) {
677 		*headp = 0;
678 		*tailp = 0;
679 		return 0;  /* buffer empty */
680 	}
681 
682 	*headp = (*headp + nb_ops_flushed) % bufp->size;
683 	return 1;
684 }
685 
686 
687 static void
688 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
689 {
690 	if (likely(adapter->ebuf.count == 0))
691 		return;
692 
693 	while (eca_circular_buffer_flush_to_evdev(adapter,
694 						  &adapter->ebuf))
695 		;
696 }
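
/* Service dequeue path: dequeue completed ops from the cryptodev queue
 * pairs in round-robin order and enqueue them to the eventdev as events.
 * Ops that cannot be enqueued are parked in the adapter's eventdev-side
 * circular buffer and flushed on the next invocation.
 */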
697 static inline unsigned int
698 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
699 			   unsigned int max_deq)
700 {
701 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
702 	struct crypto_device_info *curr_dev;
703 	struct crypto_queue_pair_info *curr_queue;
704 	struct rte_crypto_op *ops[BATCH_SIZE];
705 	uint16_t n, nb_deq, nb_enqueued, i;
706 	struct rte_cryptodev *dev;
707 	uint8_t cdev_id;
708 	uint16_t qp, dev_qps;
709 	bool done;
710 	uint16_t num_cdev = rte_cryptodev_count();
711 
712 	nb_deq = 0;
713 	eca_ops_buffer_flush(adapter);
714 
715 	do {
716 		done = true;
717 
718 		for (cdev_id = adapter->next_cdev_id;
719 			cdev_id < num_cdev; cdev_id++) {
720 			uint16_t queues = 0;
721 
722 			curr_dev = &adapter->cdevs[cdev_id];
723 			dev = curr_dev->dev;
724 			if (unlikely(dev == NULL))
725 				continue;
726 
727 			dev_qps = dev->data->nb_queue_pairs;
728 
729 			for (qp = curr_dev->next_queue_pair_id;
730 				queues < dev_qps; qp = (qp + 1) % dev_qps,
731 				queues++) {
732 
733 				curr_queue = &curr_dev->qpairs[qp];
734 				if (unlikely(curr_queue == NULL ||
735 				    !curr_queue->qp_enabled))
736 					continue;
737 
738 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
739 					ops, BATCH_SIZE);
740 				if (!n)
741 					continue;
742 
743 				done = false;
744 				nb_enqueued = 0;
745 
746 				stats->crypto_deq_count += n;
747 
748 				if (unlikely(!adapter->ebuf.count))
749 					nb_enqueued = eca_ops_enqueue_burst(
750 							adapter, ops, n);
751 
752 				if (likely(nb_enqueued == n))
753 					goto check;
754 
755 				/* Failed to enqueue events case */
756 				for (i = nb_enqueued; i < n; i++)
757 					eca_circular_buffer_add(
758 						&adapter->ebuf,
759 						ops[i]);
760 
761 check:
762 				nb_deq += n;
763 
764 				if (nb_deq >= max_deq) {
765 					if ((qp + 1) == dev_qps) {
766 						adapter->next_cdev_id =
767 							(cdev_id + 1)
768 							% num_cdev;
769 					}
770 					curr_dev->next_queue_pair_id = (qp + 1)
771 						% dev->data->nb_queue_pairs;
772 
773 					return nb_deq;
774 				}
775 			}
776 		}
777 		adapter->next_cdev_id = 0;
778 	} while (done == false);
779 	return nb_deq;
780 }
781 
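/* One service iteration: alternate dequeue and enqueue passes until
 * max_ops ops have been processed or no further progress is made. If
 * nothing was processed, call rte_event_maintain() and return -EAGAIN.
 */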
782 static int
783 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
784 		       unsigned int max_ops)
785 {
786 	unsigned int ops_left = max_ops;
787 
788 	while (ops_left > 0) {
789 		unsigned int e_cnt, d_cnt;
790 
791 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
792 		ops_left -= RTE_MIN(ops_left, e_cnt);
793 
794 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
795 		ops_left -= RTE_MIN(ops_left, d_cnt);
796 
797 		if (e_cnt == 0 && d_cnt == 0)
798 			break;
799 
800 	}
801 
802 	if (ops_left == max_ops) {
803 		rte_event_maintain(adapter->eventdev_id,
804 				   adapter->event_port_id, 0);
805 		return -EAGAIN;
806 	} else
807 		return 0;
808 }
809 
810 static int
811 eca_service_func(void *args)
812 {
813 	struct event_crypto_adapter *adapter = args;
814 	int ret;
815 
816 	if (rte_spinlock_trylock(&adapter->lock) == 0)
817 		return 0;
818 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
819 	rte_spinlock_unlock(&adapter->lock);
820 
821 	return ret;
822 }
823 
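/* One-time service setup: register the adapter service, invoke the
 * configuration callback to obtain the event port and max_nb, and cache
 * the event port's implicit release setting.
 */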
824 static int
825 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
826 {
827 	struct rte_event_crypto_adapter_conf adapter_conf;
828 	struct rte_service_spec service;
829 	int ret;
830 	uint32_t impl_rel;
831 
832 	if (adapter->service_inited)
833 		return 0;
834 
835 	memset(&service, 0, sizeof(service));
836 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
837 		"rte_event_crypto_adapter_%d", id);
838 	service.socket_id = adapter->socket_id;
839 	service.callback = eca_service_func;
840 	service.callback_userdata = adapter;
841 	/* Service function handles locking for queue add/del updates */
842 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
843 	ret = rte_service_component_register(&service, &adapter->service_id);
844 	if (ret) {
845 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
846 			service.name, ret);
847 		return ret;
848 	}
849 
850 	ret = adapter->conf_cb(id, adapter->eventdev_id,
851 		&adapter_conf, adapter->conf_arg);
852 	if (ret) {
853 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
854 			ret);
855 		return ret;
856 	}
857 
858 	adapter->max_nb = adapter_conf.max_nb;
859 	adapter->event_port_id = adapter_conf.event_port_id;
860 
861 	if (rte_event_port_attr_get(adapter->eventdev_id,
862 				adapter->event_port_id,
863 				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
864 				&impl_rel)) {
865 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
866 				 adapter->eventdev_id);
867 		eca_circular_buffer_free(&adapter->ebuf);
868 		rte_free(adapter);
869 		return -EINVAL;
870 	}
871 
872 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
873 	adapter->service_inited = 1;
874 
875 	return ret;
876 }
877 
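/* Enable or disable a single queue pair (or all of them when
 * queue_pair_id is -1) and keep adapter->nb_qps and dev_info->num_qpairs
 * in sync.
 */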
878 static void
879 eca_update_qp_info(struct event_crypto_adapter *adapter,
880 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
881 		   uint8_t add)
882 {
883 	struct crypto_queue_pair_info *qp_info;
884 	int enabled;
885 	uint16_t i;
886 
887 	if (dev_info->qpairs == NULL)
888 		return;
889 
890 	if (queue_pair_id == -1) {
891 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
892 			eca_update_qp_info(adapter, dev_info, i, add);
893 	} else {
894 		qp_info = &dev_info->qpairs[queue_pair_id];
895 		enabled = qp_info->qp_enabled;
896 		if (add) {
897 			adapter->nb_qps += !enabled;
898 			dev_info->num_qpairs += !enabled;
899 		} else {
900 			adapter->nb_qps -= enabled;
901 			dev_info->num_qpairs -= enabled;
902 		}
903 		qp_info->qp_enabled = !!add;
904 	}
905 }
906 
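/* SW adapter path: allocate the per queue pair info array and the
 * cdev-side circular buffer on first use, then mark the requested queue
 * pair(s) as enabled.
 */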
907 static int
908 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
909 		   int queue_pair_id)
910 {
911 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
912 	struct crypto_queue_pair_info *qpairs;
913 	uint32_t i;
914 
915 	if (dev_info->qpairs == NULL) {
916 		dev_info->qpairs =
917 		    rte_zmalloc_socket(adapter->mem_name,
918 					dev_info->dev->data->nb_queue_pairs *
919 					sizeof(struct crypto_queue_pair_info),
920 					0, adapter->socket_id);
921 		if (dev_info->qpairs == NULL)
922 			return -ENOMEM;
923 
924 		qpairs = dev_info->qpairs;
925 
926 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
927 					     &qpairs->cbuf,
928 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
929 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
930 					 "buffer");
931 			rte_free(qpairs);
932 			return -ENOMEM;
933 		}
934 	}
935 
936 	if (queue_pair_id == -1) {
937 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
938 			eca_update_qp_info(adapter, dev_info, i, 1);
939 	} else
940 		eca_update_qp_info(adapter, dev_info,
941 					(uint16_t)queue_pair_id, 1);
942 
943 	return 0;
944 }
945 
946 int
947 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
948 			uint8_t cdev_id,
949 			int32_t queue_pair_id,
950 			const struct rte_event_crypto_adapter_queue_conf *conf)
951 {
952 	struct rte_event_crypto_adapter_vector_limits limits;
953 	struct event_crypto_adapter *adapter;
954 	struct crypto_device_info *dev_info;
955 	struct rte_eventdev *dev;
956 	uint32_t cap;
957 	int ret;
958 
959 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
960 
961 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
962 		RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu8, cdev_id);
963 		return -EINVAL;
964 	}
965 
966 	adapter = eca_id_to_adapter(id);
967 	if (adapter == NULL)
968 		return -EINVAL;
969 
970 	dev = &rte_eventdevs[adapter->eventdev_id];
971 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
972 						cdev_id,
973 						&cap);
974 	if (ret) {
975 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
976 			" cdev %" PRIu8, id, cdev_id);
977 		return ret;
978 	}
979 
980 	if (conf == NULL) {
981 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
982 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
983 					 cdev_id);
984 			return -EINVAL;
985 		}
986 	} else {
987 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
988 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
989 				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
990 						 " dev %" PRIu8 " cdev %" PRIu8, id,
991 						 cdev_id);
992 				return -ENOTSUP;
993 			}
994 
995 			ret = rte_event_crypto_adapter_vector_limits_get(
996 				adapter->eventdev_id, cdev_id, &limits);
997 			if (ret < 0) {
998 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
999 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
1000 						 id, cdev_id);
1001 				return -EINVAL;
1002 			}
1003 
1004 			if (conf->vector_sz < limits.min_sz ||
1005 			    conf->vector_sz > limits.max_sz ||
1006 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1007 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1008 			    conf->vector_mp == NULL) {
1009 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1010 						" dev %" PRIu8 " cdev %" PRIu8,
1011 						id, cdev_id);
1012 				return -EINVAL;
1013 			}
1014 
1015 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1016 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1017 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1018 						" dev %" PRIu8 " cdev %" PRIu8,
1019 						id, cdev_id);
1020 				return -EINVAL;
1021 			}
1022 		}
1023 	}
1024 
1025 	dev_info = &adapter->cdevs[cdev_id];
1026 
1027 	if (queue_pair_id != -1 &&
1028 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1029 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1030 				 (uint16_t)queue_pair_id);
1031 		return -EINVAL;
1032 	}
1033 
1034 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1035 	 * no service core is needed as HW supports event forwarding.
1036 	 */
1037 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1038 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1039 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1040 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1041 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1042 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1043 			return -ENOTSUP;
1044 		if (dev_info->qpairs == NULL) {
1045 			dev_info->qpairs =
1046 			    rte_zmalloc_socket(adapter->mem_name,
1047 					dev_info->dev->data->nb_queue_pairs *
1048 					sizeof(struct crypto_queue_pair_info),
1049 					0, adapter->socket_id);
1050 			if (dev_info->qpairs == NULL)
1051 				return -ENOMEM;
1052 		}
1053 
1054 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1055 				dev_info->dev,
1056 				queue_pair_id,
1057 				conf);
1058 		if (ret)
1059 			return ret;
1060 
1061 		else
1062 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1063 					   queue_pair_id, 1);
1064 	}
1065 
1066 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1067 	 * or SW adapter, initiate services so the application can choose
1068 	 * whichever way it wants to use the adapter.
1069 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1070 	 *         The application may want to use one of the two modes below:
1071 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1072 	 *          b. OP_NEW mode -> HW Dequeue
1073 	 * Case 2: No HW caps, use SW adapter
1074 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1075 	 *          b. OP_NEW mode -> SW Dequeue
1076 	 */
1077 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1078 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1079 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1080 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1081 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1082 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1083 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1084 		rte_spinlock_lock(&adapter->lock);
1085 		ret = eca_init_service(adapter, id);
1086 		if (ret == 0)
1087 			ret = eca_add_queue_pair(adapter, cdev_id,
1088 						 queue_pair_id);
1089 		rte_spinlock_unlock(&adapter->lock);
1090 
1091 		if (ret)
1092 			return ret;
1093 
1094 		rte_service_component_runstate_set(adapter->service_id, 1);
1095 	}
1096 
1097 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1098 		queue_pair_id, conf);
1099 	return 0;
1100 }
1101 
1102 int
1103 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1104 					int32_t queue_pair_id)
1105 {
1106 	struct event_crypto_adapter *adapter;
1107 	struct crypto_device_info *dev_info;
1108 	struct rte_eventdev *dev;
1109 	int ret;
1110 	uint32_t cap;
1111 	uint16_t i;
1112 
1113 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1114 
1115 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1116 		RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu8, cdev_id);
1117 		return -EINVAL;
1118 	}
1119 
1120 	adapter = eca_id_to_adapter(id);
1121 	if (adapter == NULL)
1122 		return -EINVAL;
1123 
1124 	dev = &rte_eventdevs[adapter->eventdev_id];
1125 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1126 						cdev_id,
1127 						&cap);
1128 	if (ret)
1129 		return ret;
1130 
1131 	dev_info = &adapter->cdevs[cdev_id];
1132 
1133 	if (queue_pair_id != -1 &&
1134 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1135 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1136 				 (uint16_t)queue_pair_id);
1137 		return -EINVAL;
1138 	}
1139 
1140 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1141 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1142 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1143 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1144 			return -ENOTSUP;
1145 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1146 						dev_info->dev,
1147 						queue_pair_id);
1148 		if (ret == 0) {
1149 			eca_update_qp_info(adapter,
1150 					&adapter->cdevs[cdev_id],
1151 					queue_pair_id,
1152 					0);
1153 			if (dev_info->num_qpairs == 0) {
1154 				rte_free(dev_info->qpairs);
1155 				dev_info->qpairs = NULL;
1156 			}
1157 		}
1158 	} else {
1159 		if (adapter->nb_qps == 0)
1160 			return 0;
1161 
1162 		rte_spinlock_lock(&adapter->lock);
1163 		if (queue_pair_id == -1) {
1164 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1165 				i++)
1166 				eca_update_qp_info(adapter, dev_info,
1167 							i, 0);
1168 		} else {
1169 			eca_update_qp_info(adapter, dev_info,
1170 						(uint16_t)queue_pair_id, 0);
1171 		}
1172 
1173 		if (dev_info->num_qpairs == 0) {
1174 			rte_free(dev_info->qpairs);
1175 			dev_info->qpairs = NULL;
1176 		}
1177 
1178 		rte_spinlock_unlock(&adapter->lock);
1179 		rte_service_component_runstate_set(adapter->service_id,
1180 				adapter->nb_qps);
1181 	}
1182 
1183 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1184 		queue_pair_id, ret);
1185 	return ret;
1186 }
1187 
1188 static int
1189 eca_adapter_ctrl(uint8_t id, int start)
1190 {
1191 	struct event_crypto_adapter *adapter;
1192 	struct crypto_device_info *dev_info;
1193 	struct rte_eventdev *dev;
1194 	uint32_t i;
1195 	int use_service;
1196 	int stop = !start;
1197 
1198 	use_service = 0;
1199 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1200 	adapter = eca_id_to_adapter(id);
1201 	if (adapter == NULL)
1202 		return -EINVAL;
1203 
1204 	dev = &rte_eventdevs[adapter->eventdev_id];
1205 
1206 	for (i = 0; i < rte_cryptodev_count(); i++) {
1207 		dev_info = &adapter->cdevs[i];
1208 		/* if starting, check the number of queue pairs */
1209 		if (start && !dev_info->num_qpairs)
1210 			continue;
1211 		/* if stopping, check whether the dev has been started */
1212 		if (stop && !dev_info->dev_started)
1213 			continue;
1214 		use_service |= !dev_info->internal_event_port;
1215 		dev_info->dev_started = start;
1216 		if (dev_info->internal_event_port == 0)
1217 			continue;
1218 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1219 						dev_info->dev) :
1220 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1221 						dev_info->dev);
1222 	}
1223 
1224 	if (use_service)
1225 		rte_service_runstate_set(adapter->service_id, start);
1226 
1227 	return 0;
1228 }
1229 
1230 int
1231 rte_event_crypto_adapter_start(uint8_t id)
1232 {
1233 	struct event_crypto_adapter *adapter;
1234 
1235 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1236 	adapter = eca_id_to_adapter(id);
1237 	if (adapter == NULL)
1238 		return -EINVAL;
1239 
1240 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1241 	return eca_adapter_ctrl(id, 1);
1242 }
1243 
1244 int
1245 rte_event_crypto_adapter_stop(uint8_t id)
1246 {
1247 	rte_eventdev_trace_crypto_adapter_stop(id);
1248 	return eca_adapter_ctrl(id, 0);
1249 }
1250 
1251 int
1252 rte_event_crypto_adapter_stats_get(uint8_t id,
1253 				struct rte_event_crypto_adapter_stats *stats)
1254 {
1255 	struct event_crypto_adapter *adapter;
1256 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1257 	struct rte_event_crypto_adapter_stats dev_stats;
1258 	struct rte_eventdev *dev;
1259 	struct crypto_device_info *dev_info;
1260 	uint32_t i;
1261 	int ret;
1262 
1263 	if (eca_memzone_lookup())
1264 		return -ENOMEM;
1265 
1266 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1267 
1268 	adapter = eca_id_to_adapter(id);
1269 	if (adapter == NULL || stats == NULL)
1270 		return -EINVAL;
1271 
1272 	dev = &rte_eventdevs[adapter->eventdev_id];
1273 	memset(stats, 0, sizeof(*stats));
1274 	for (i = 0; i < rte_cryptodev_count(); i++) {
1275 		dev_info = &adapter->cdevs[i];
1276 		if (dev_info->internal_event_port == 0 ||
1277 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1278 			continue;
1279 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1280 						dev_info->dev,
1281 						&dev_stats);
1282 		if (ret)
1283 			continue;
1284 
1285 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1286 		dev_stats_sum.event_enq_count +=
1287 			dev_stats.event_enq_count;
1288 	}
1289 
1290 	if (adapter->service_inited)
1291 		*stats = adapter->crypto_stats;
1292 
1293 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1294 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1295 
1296 	return 0;
1297 }
1298 
1299 int
1300 rte_event_crypto_adapter_stats_reset(uint8_t id)
1301 {
1302 	struct event_crypto_adapter *adapter;
1303 	struct crypto_device_info *dev_info;
1304 	struct rte_eventdev *dev;
1305 	uint32_t i;
1306 
1307 	if (eca_memzone_lookup())
1308 		return -ENOMEM;
1309 
1310 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1311 
1312 	adapter = eca_id_to_adapter(id);
1313 	if (adapter == NULL)
1314 		return -EINVAL;
1315 
1316 	dev = &rte_eventdevs[adapter->eventdev_id];
1317 	for (i = 0; i < rte_cryptodev_count(); i++) {
1318 		dev_info = &adapter->cdevs[i];
1319 		if (dev_info->internal_event_port == 0 ||
1320 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1321 			continue;
1322 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1323 						dev_info->dev);
1324 	}
1325 
1326 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1327 	return 0;
1328 }
1329 
1330 int
1331 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1332 {
1333 	struct event_crypto_adapter *adapter;
1334 
1335 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1336 
1337 	adapter = eca_id_to_adapter(id);
1338 	if (adapter == NULL || service_id == NULL)
1339 		return -EINVAL;
1340 
1341 	if (adapter->service_inited)
1342 		*service_id = adapter->service_id;
1343 
1344 	return adapter->service_inited ? 0 : -ESRCH;
1345 }
1346 
1347 int
1348 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1349 {
1350 	struct event_crypto_adapter *adapter;
1351 
1352 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1353 
1354 	adapter = eca_id_to_adapter(id);
1355 	if (adapter == NULL || event_port_id == NULL)
1356 		return -EINVAL;
1357 
1358 	*event_port_id = adapter->event_port_id;
1359 
1360 	return 0;
1361 }
1362 
1363 int
1364 rte_event_crypto_adapter_vector_limits_get(
1365 	uint8_t dev_id, uint16_t cdev_id,
1366 	struct rte_event_crypto_adapter_vector_limits *limits)
1367 {
1368 	struct rte_cryptodev *cdev;
1369 	struct rte_eventdev *dev;
1370 	uint32_t cap;
1371 	int ret;
1372 
1373 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1374 
1375 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1376 		RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu16, cdev_id);
1377 		return -EINVAL;
1378 	}
1379 
1380 	if (limits == NULL) {
1381 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1382 		return -EINVAL;
1383 	}
1384 
1385 	dev = &rte_eventdevs[dev_id];
1386 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1387 
1388 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1389 	if (ret) {
1390 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1391 				 " cdev %" PRIu16, dev_id, cdev_id);
1392 		return ret;
1393 	}
1394 
1395 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1396 		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1397 				 " dev %" PRIu8 " cdev %" PRIu16, dev_id, cdev_id);
1398 		return -ENOTSUP;
1399 	}
1400 
1401 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1402 		return -ENOTSUP;
1403 
1404 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1405 		dev, cdev, limits);
1406 }
1407