1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
29 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
30 
31 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
32  * iterations of eca_crypto_adapter_enq_run()
33  */
34 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
35 
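/* Crypto ops are staged in circular buffers: each queue pair has a small
 * buffer (CRYPTO_ADAPTER_OPS_BUFFER_SZ) that is flushed to the cryptodev in
 * BATCH_SIZE bursts, and each adapter instance has a larger buffer
 * (CRYPTO_ADAPTER_BUFFER_SZ) that parks completed ops which could not yet be
 * enqueued to the eventdev.
 */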
36 struct crypto_ops_circular_buffer {
37 	/* index of head element in circular buffer */
38 	uint16_t head;
39 	/* index of tail element in circular buffer */
40 	uint16_t tail;
41 	/* number of elements in buffer */
42 	uint16_t count;
43 	/* size of circular buffer */
44 	uint16_t size;
45 	/* Pointer to hold rte_crypto_ops for batching */
46 	struct rte_crypto_op **op_buffer;
47 } __rte_cache_aligned;
48 
49 struct event_crypto_adapter {
50 	/* Event device identifier */
51 	uint8_t eventdev_id;
52 	/* Event port identifier */
53 	uint8_t event_port_id;
54 	/* Store event device's implicit release capability */
55 	uint8_t implicit_release_disabled;
56 	/* Flag to indicate backpressure at cryptodev
57 	 * Stop further dequeuing events from eventdev
58 	 */
59 	bool stop_enq_to_cryptodev;
60 	/* Max crypto ops processed in any service function invocation */
61 	uint32_t max_nb;
62 	/* Lock to serialize config updates with service function */
63 	rte_spinlock_t lock;
64 	/* Next crypto device to be processed */
65 	uint16_t next_cdev_id;
66 	/* Per crypto device structure */
67 	struct crypto_device_info *cdevs;
68 	/* Loop counter to flush crypto ops */
69 	uint16_t transmit_loop_count;
70 	/* Circular buffer for batching crypto ops to eventdev */
71 	struct crypto_ops_circular_buffer ebuf;
72 	/* Per instance stats structure */
73 	struct rte_event_crypto_adapter_stats crypto_stats;
74 	/* Configuration callback for rte_service configuration */
75 	rte_event_crypto_adapter_conf_cb conf_cb;
76 	/* Configuration callback argument */
77 	void *conf_arg;
78 	/* Set if default_cb is being used */
79 	int default_cb_arg;
80 	/* Service initialization state */
81 	uint8_t service_inited;
82 	/* Memory allocation name */
83 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
84 	/* Socket identifier cached from eventdev */
85 	int socket_id;
86 	/* Per adapter EAL service */
87 	uint32_t service_id;
88 	/* No. of queue pairs configured */
89 	uint16_t nb_qps;
90 	/* Adapter mode */
91 	enum rte_event_crypto_adapter_mode mode;
92 } __rte_cache_aligned;
93 
94 /* Per crypto device information */
95 struct crypto_device_info {
96 	/* Pointer to cryptodev */
97 	struct rte_cryptodev *dev;
98 	/* Pointer to queue pair info */
99 	struct crypto_queue_pair_info *qpairs;
100 	/* Next queue pair to be processed */
101 	uint16_t next_queue_pair_id;
102 	/* Set to indicate cryptodev->eventdev packet
103 	 * transfer uses a hardware mechanism
104 	 */
105 	uint8_t internal_event_port;
106 	/* Set to indicate processing has been started */
107 	uint8_t dev_started;
108 	/* If num_qpairs > 0, the start callback will
109 	 * be invoked if not already invoked
110 	 */
111 	uint16_t num_qpairs;
112 } __rte_cache_aligned;
113 
114 /* Per queue pair information */
115 struct crypto_queue_pair_info {
116 	/* Set to indicate queue pair is enabled */
117 	bool qp_enabled;
118 	/* Circular buffer for batching crypto ops to cdev */
119 	struct crypto_ops_circular_buffer cbuf;
120 } __rte_cache_aligned;
121 
122 static struct event_crypto_adapter **event_crypto_adapter;
123 
124 /* Macros to check for valid adapter */
125 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
126 	if (!eca_valid_id(id)) { \
127 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
128 		return retval; \
129 	} \
130 } while (0)
131 
132 static inline int
133 eca_valid_id(uint8_t id)
134 {
135 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
136 }
137 
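/* Look up, or reserve on first use, the named memzone that holds the global
 * array of adapter pointers; a named memzone also makes the array visible to
 * secondary processes.
 */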
138 static int
139 eca_init(void)
140 {
141 	const char *name = "crypto_adapter_array";
142 	const struct rte_memzone *mz;
143 	unsigned int sz;
144 
145 	sz = sizeof(*event_crypto_adapter) *
146 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
147 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
148 
149 	mz = rte_memzone_lookup(name);
150 	if (mz == NULL) {
151 		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
152 						 RTE_CACHE_LINE_SIZE);
153 		if (mz == NULL) {
154 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
155 					PRId32, rte_errno);
156 			return -rte_errno;
157 		}
158 	}
159 
160 	event_crypto_adapter = mz->addr;
161 	return 0;
162 }
163 
164 static inline bool
165 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
166 {
167 	return bufp->count >= BATCH_SIZE;
168 }
169 
170 static inline bool
171 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
172 {
173 	return (bufp->size - bufp->count) >= BATCH_SIZE;
174 }
175 
176 static inline void
177 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
178 {
179 	rte_free(bufp->op_buffer);
180 }
181 
182 static inline int
183 eca_circular_buffer_init(const char *name,
184 			 struct crypto_ops_circular_buffer *bufp,
185 			 uint16_t sz)
186 {
187 	bufp->op_buffer = rte_zmalloc(name,
188 				      sizeof(struct rte_crypto_op *) * sz,
189 				      0);
190 	if (bufp->op_buffer == NULL)
191 		return -ENOMEM;
192 
193 	bufp->size = sz;
194 	return 0;
195 }
196 
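/* Append one op at the tail. No free-space check is done here; callers bound
 * how much they add and rely on eca_circular_buffer_space_for_batch() and the
 * adapter's stop_enq_to_cryptodev flag to avoid overrunning the buffer.
 */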
197 static inline int
198 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
199 			struct rte_crypto_op *op)
200 {
201 	uint16_t *tailp = &bufp->tail;
202 
203 	bufp->op_buffer[*tailp] = op;
204 	/* circular buffer, go round */
205 	*tailp = (*tailp + 1) % bufp->size;
206 	bufp->count++;
207 
208 	return 0;
209 }
210 
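/* Enqueue buffered ops to the given cryptodev queue pair, starting at head.
 * Only the contiguous region up to the end of the array is submitted per
 * call, so a wrapped buffer is drained over successive calls. Returns 0 when
 * the whole contiguous chunk was accepted by the cryptodev, -1 otherwise.
 */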
211 static inline int
212 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
213 				  uint8_t cdev_id, uint16_t qp_id,
214 				  uint16_t *nb_ops_flushed)
215 {
216 	uint16_t n = 0;
217 	uint16_t *headp = &bufp->head;
218 	uint16_t *tailp = &bufp->tail;
219 	struct rte_crypto_op **ops = bufp->op_buffer;
220 
221 	if (*tailp > *headp)
222 		n = *tailp - *headp;
223 	else if (*tailp < *headp)
224 		n = bufp->size - *headp;
225 	else {
226 		*nb_ops_flushed = 0;
227 		return 0;  /* buffer empty */
228 	}
229 
230 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
231 						      &ops[*headp], n);
232 	bufp->count -= *nb_ops_flushed;
233 	if (!bufp->count) {
234 		*headp = 0;
235 		*tailp = 0;
236 	} else
237 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
238 
239 	return *nb_ops_flushed == n ? 0 : -1;
240 }
241 
242 static inline struct event_crypto_adapter *
243 eca_id_to_adapter(uint8_t id)
244 {
245 	return event_crypto_adapter ?
246 		event_crypto_adapter[id] : NULL;
247 }
248 
249 static int
250 eca_default_config_cb(uint8_t id, uint8_t dev_id,
251 			struct rte_event_crypto_adapter_conf *conf, void *arg)
252 {
253 	struct rte_event_dev_config dev_conf;
254 	struct rte_eventdev *dev;
255 	uint8_t port_id;
256 	int started;
257 	int ret;
258 	struct rte_event_port_conf *port_conf = arg;
259 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
260 
261 	if (adapter == NULL)
262 		return -EINVAL;
263 
264 	dev = &rte_eventdevs[adapter->eventdev_id];
265 	dev_conf = dev->data->dev_conf;
266 
267 	started = dev->data->dev_started;
268 	if (started)
269 		rte_event_dev_stop(dev_id);
270 	port_id = dev_conf.nb_event_ports;
271 	dev_conf.nb_event_ports += 1;
272 	ret = rte_event_dev_configure(dev_id, &dev_conf);
273 	if (ret) {
274 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
275 		if (started) {
276 			if (rte_event_dev_start(dev_id))
277 				return -EIO;
278 		}
279 		return ret;
280 	}
281 
282 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
283 	if (ret) {
284 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
285 		return ret;
286 	}
287 
288 	conf->event_port_id = port_id;
289 	conf->max_nb = DEFAULT_MAX_NB;
290 	if (started)
291 		ret = rte_event_dev_start(dev_id);
292 
293 	adapter->default_cb_arg = 1;
294 	return ret;
295 }
296 
297 int
298 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
299 				rte_event_crypto_adapter_conf_cb conf_cb,
300 				enum rte_event_crypto_adapter_mode mode,
301 				void *conf_arg)
302 {
303 	struct event_crypto_adapter *adapter;
304 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
305 	struct rte_event_dev_info dev_info;
306 	int socket_id;
307 	uint8_t i;
308 	int ret;
309 
310 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
311 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
312 	if (conf_cb == NULL)
313 		return -EINVAL;
314 
315 	if (event_crypto_adapter == NULL) {
316 		ret = eca_init();
317 		if (ret)
318 			return ret;
319 	}
320 
321 	adapter = eca_id_to_adapter(id);
322 	if (adapter != NULL) {
323 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
324 		return -EEXIST;
325 	}
326 
327 	socket_id = rte_event_dev_socket_id(dev_id);
328 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
329 		 "rte_event_crypto_adapter_%d", id);
330 
331 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
332 			RTE_CACHE_LINE_SIZE, socket_id);
333 	if (adapter == NULL) {
334 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
335 		return -ENOMEM;
336 	}
337 
338 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
339 				     &adapter->ebuf,
340 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
341 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
342 		rte_free(adapter);
343 		return -ENOMEM;
344 	}
345 
346 	ret = rte_event_dev_info_get(dev_id, &dev_info);
347 	if (ret < 0) {
348 		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
349 				 dev_id, dev_info.driver_name);
350 		eca_circular_buffer_free(&adapter->ebuf);
351 		rte_free(adapter);
352 		return ret;
353 	}
354 
355 	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
356 			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
357 	adapter->eventdev_id = dev_id;
358 	adapter->socket_id = socket_id;
359 	adapter->conf_cb = conf_cb;
360 	adapter->conf_arg = conf_arg;
361 	adapter->mode = mode;
362 	strcpy(adapter->mem_name, mem_name);
363 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
364 					rte_cryptodev_count() *
365 					sizeof(struct crypto_device_info), 0,
366 					socket_id);
367 	if (adapter->cdevs == NULL) {
368 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
369 		eca_circular_buffer_free(&adapter->ebuf);
370 		rte_free(adapter);
371 		return -ENOMEM;
372 	}
373 
374 	rte_spinlock_init(&adapter->lock);
375 	for (i = 0; i < rte_cryptodev_count(); i++)
376 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
377 
378 	event_crypto_adapter[id] = adapter;
379 
380 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
381 		mode);
382 	return 0;
383 }
384 
385 
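/* Illustrative create/add/start sequence for this API; "evdev_id", "cdev_id"
 * and the port_conf values below are placeholders, not taken from this file:
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 *	rte_event_crypto_adapter_queue_pair_add(0, cdev_id, -1, NULL);
 *	rte_event_crypto_adapter_start(0);
 *
 * A non-NULL queue conf is required when the PMD reports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability.
 */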
386 int
387 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
388 				struct rte_event_port_conf *port_config,
389 				enum rte_event_crypto_adapter_mode mode)
390 {
391 	struct rte_event_port_conf *pc;
392 	int ret;
393 
394 	if (port_config == NULL)
395 		return -EINVAL;
396 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
397 
398 	pc = rte_malloc(NULL, sizeof(*pc), 0);
399 	if (pc == NULL)
400 		return -ENOMEM;
401 	*pc = *port_config;
402 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
403 						  eca_default_config_cb,
404 						  mode,
405 						  pc);
406 	if (ret)
407 		rte_free(pc);
408 
409 	return ret;
410 }
411 
412 int
413 rte_event_crypto_adapter_free(uint8_t id)
414 {
415 	struct event_crypto_adapter *adapter;
416 
417 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
418 
419 	adapter = eca_id_to_adapter(id);
420 	if (adapter == NULL)
421 		return -EINVAL;
422 
423 	if (adapter->nb_qps) {
424 		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
425 				adapter->nb_qps);
426 		return -EBUSY;
427 	}
428 
429 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
430 	if (adapter->default_cb_arg)
431 		rte_free(adapter->conf_arg);
432 	rte_free(adapter->cdevs);
433 	rte_free(adapter);
434 	event_crypto_adapter[id] = NULL;
435 
436 	return 0;
437 }
438 
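/* OP_FORWARD path: events dequeued from the adapter's event port carry
 * rte_crypto_op pointers. Each op is routed to its target cryptodev/queue
 * pair using the request_info from the session/op event metadata and
 * buffered; a burst is flushed to the cryptodev once BATCH_SIZE ops have
 * accumulated for that queue pair.
 */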
439 static inline unsigned int
440 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
441 		     unsigned int cnt)
442 {
443 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
444 	union rte_event_crypto_metadata *m_data = NULL;
445 	struct crypto_queue_pair_info *qp_info = NULL;
446 	struct rte_crypto_op *crypto_op;
447 	unsigned int i, n;
448 	uint16_t qp_id, nb_enqueued = 0;
449 	uint8_t cdev_id;
450 	int ret;
451 
452 	ret = 0;
453 	n = 0;
454 	stats->event_deq_count += cnt;
455 
456 	for (i = 0; i < cnt; i++) {
457 		crypto_op = ev[i].event_ptr;
458 		if (crypto_op == NULL)
459 			continue;
460 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
461 		if (m_data == NULL) {
462 			rte_pktmbuf_free(crypto_op->sym->m_src);
463 			rte_crypto_op_free(crypto_op);
464 			continue;
465 		}
466 
467 		cdev_id = m_data->request_info.cdev_id;
468 		qp_id = m_data->request_info.queue_pair_id;
469 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
470 		if (!qp_info->qp_enabled) {
471 			rte_pktmbuf_free(crypto_op->sym->m_src);
472 			rte_crypto_op_free(crypto_op);
473 			continue;
474 		}
475 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
476 
477 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
478 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
479 								cdev_id,
480 								qp_id,
481 								&nb_enqueued);
482 			/**
483 			 * If some crypto ops failed to flush to cdev and
484 			 * space for another batch is not available, stop
485 			 * dequeue from eventdev momentarily
486 			 */
487 			if (unlikely(ret < 0 &&
488 				!eca_circular_buffer_space_for_batch(
489 							&qp_info->cbuf)))
490 				adapter->stop_enq_to_cryptodev = true;
491 
492 			stats->crypto_enq_count += nb_enqueued;
493 			n += nb_enqueued;
494 		}
495 	}
496 
497 	return n;
498 }
499 
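/* Flush ops still buffered for every enabled queue pair of one cryptodev.
 * Returns the number of ops enqueued to the cryptodev; *nb_ops_flushed
 * accumulates how many ops remain buffered after the attempt.
 */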
500 static unsigned int
501 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
502 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
503 {
504 	struct crypto_device_info *curr_dev;
505 	struct crypto_queue_pair_info *curr_queue;
506 	struct rte_cryptodev *dev;
507 	uint16_t nb = 0, nb_enqueued = 0;
508 	uint16_t qp;
509 
510 	curr_dev = &adapter->cdevs[cdev_id];
511 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
512 
513 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
514 
515 		curr_queue = &curr_dev->qpairs[qp];
516 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
517 			continue;
518 
519 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
520 						  cdev_id,
521 						  qp,
522 						  &nb_enqueued);
523 		*nb_ops_flushed += curr_queue->cbuf.count;
524 		nb += nb_enqueued;
525 	}
526 
527 	return nb;
528 }
529 
530 static unsigned int
531 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
532 {
533 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
534 	uint8_t cdev_id;
535 	uint16_t nb_enqueued = 0;
536 	uint16_t nb_ops_flushed = 0;
537 	uint16_t num_cdev = rte_cryptodev_count();
538 
539 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
540 		nb_enqueued += eca_crypto_cdev_flush(adapter,
541 						    cdev_id,
542 						    &nb_ops_flushed);
543 	/**
544 	 * Enable dequeue from eventdev if all ops from circular
545 	 * buffer flushed to cdev
546 	 */
547 	if (!nb_ops_flushed)
548 		adapter->stop_enq_to_cryptodev = false;
549 
550 	stats->crypto_enq_count += nb_enqueued;
551 
552 	return nb_enqueued;
553 }
554 
555 static int
556 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
557 			   unsigned int max_enq)
558 {
559 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
560 	struct rte_event ev[BATCH_SIZE];
561 	unsigned int nb_enq, nb_enqueued;
562 	uint16_t n;
563 	uint8_t event_dev_id = adapter->eventdev_id;
564 	uint8_t event_port_id = adapter->event_port_id;
565 
566 	nb_enqueued = 0;
567 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
568 		return 0;
569 
570 	if (unlikely(adapter->stop_enq_to_cryptodev)) {
571 		nb_enqueued += eca_crypto_enq_flush(adapter);
572 
573 		if (unlikely(adapter->stop_enq_to_cryptodev))
574 			goto skip_event_dequeue_burst;
575 	}
576 
577 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
578 		stats->event_poll_count++;
579 		n = rte_event_dequeue_burst(event_dev_id,
580 					    event_port_id, ev, BATCH_SIZE, 0);
581 
582 		if (!n)
583 			break;
584 
585 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
586 	}
587 
588 skip_event_dequeue_burst:
589 
590 	if ((++adapter->transmit_loop_count &
591 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
592 		nb_enqueued += eca_crypto_enq_flush(adapter);
593 	}
594 
595 	return nb_enqueued;
596 }
597 
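/* Convert up to BATCH_SIZE completed crypto ops into events using the
 * response_info from the session/op event metadata and enqueue them to the
 * adapter's event port, retrying up to CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES
 * times if the port backpressures. Returns the number of events enqueued.
 */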
598 static inline uint16_t
599 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
600 		  struct rte_crypto_op **ops, uint16_t num)
601 {
602 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
603 	union rte_event_crypto_metadata *m_data = NULL;
604 	uint8_t event_dev_id = adapter->eventdev_id;
605 	uint8_t event_port_id = adapter->event_port_id;
606 	struct rte_event events[BATCH_SIZE];
607 	uint16_t nb_enqueued, nb_ev;
608 	uint8_t retry;
609 	uint8_t i;
610 
611 	nb_ev = 0;
612 	retry = 0;
613 	nb_enqueued = 0;
614 	num = RTE_MIN(num, BATCH_SIZE);
615 	for (i = 0; i < num; i++) {
616 		struct rte_event *ev = &events[nb_ev++];
617 
618 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
619 		if (unlikely(m_data == NULL)) {
620 			rte_pktmbuf_free(ops[i]->sym->m_src);
621 			rte_crypto_op_free(ops[i]);
622 			continue;
623 		}
624 
625 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
626 		ev->event_ptr = ops[i];
627 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
628 		if (adapter->implicit_release_disabled)
629 			ev->op = RTE_EVENT_OP_FORWARD;
630 		else
631 			ev->op = RTE_EVENT_OP_NEW;
632 	}
633 
634 	do {
635 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
636 						  event_port_id,
637 						  &events[nb_enqueued],
638 						  nb_ev - nb_enqueued);
639 
640 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
641 		 nb_enqueued < nb_ev);
642 
643 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
644 	stats->event_enq_count += nb_enqueued;
645 	stats->event_enq_retry_count += retry - 1;
646 
647 	return nb_enqueued;
648 }
649 
650 static int
651 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
652 				   struct crypto_ops_circular_buffer *bufp)
653 {
654 	uint16_t n = 0, nb_ops_flushed;
655 	uint16_t *headp = &bufp->head;
656 	uint16_t *tailp = &bufp->tail;
657 	struct rte_crypto_op **ops = bufp->op_buffer;
658 
659 	if (*tailp > *headp)
660 		n = *tailp - *headp;
661 	else if (*tailp < *headp)
662 		n = bufp->size - *headp;
663 	else
664 		return 0;  /* buffer empty */
665 
666 	nb_ops_flushed = eca_ops_enqueue_burst(adapter, ops, n);
667 	bufp->count -= nb_ops_flushed;
668 	if (!bufp->count) {
669 		*headp = 0;
670 		*tailp = 0;
671 		return 0;  /* buffer empty */
672 	}
673 
674 	*headp = (*headp + nb_ops_flushed) % bufp->size;
675 	return 1;
676 }
677 
678 
679 static void
680 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
681 {
682 	if (likely(adapter->ebuf.count == 0))
683 		return;
684 
685 	while (eca_circular_buffer_flush_to_evdev(adapter,
686 						  &adapter->ebuf))
687 		;
688 }
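
/* Poll completed ops from every enabled cryptodev queue pair in round-robin
 * order and forward them to the eventdev as events; ops that cannot be
 * enqueued are parked in adapter->ebuf and retried at the start of the next
 * run. Stops once max_deq ops have been dequeued or no queue pair had work.
 */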
689 static inline unsigned int
690 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
691 			   unsigned int max_deq)
692 {
693 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
694 	struct crypto_device_info *curr_dev;
695 	struct crypto_queue_pair_info *curr_queue;
696 	struct rte_crypto_op *ops[BATCH_SIZE];
697 	uint16_t n, nb_deq, nb_enqueued, i;
698 	struct rte_cryptodev *dev;
699 	uint8_t cdev_id;
700 	uint16_t qp, dev_qps;
701 	bool done;
702 	uint16_t num_cdev = rte_cryptodev_count();
703 
704 	nb_deq = 0;
705 	eca_ops_buffer_flush(adapter);
706 
707 	do {
708 		done = true;
709 
710 		for (cdev_id = adapter->next_cdev_id;
711 			cdev_id < num_cdev; cdev_id++) {
712 			uint16_t queues = 0;
713 
714 			curr_dev = &adapter->cdevs[cdev_id];
715 			dev = curr_dev->dev;
716 			if (unlikely(dev == NULL))
717 				continue;
718 
719 			dev_qps = dev->data->nb_queue_pairs;
720 
721 			for (qp = curr_dev->next_queue_pair_id;
722 				queues < dev_qps; qp = (qp + 1) % dev_qps,
723 				queues++) {
724 
725 				curr_queue = &curr_dev->qpairs[qp];
726 				if (unlikely(curr_queue == NULL ||
727 				    !curr_queue->qp_enabled))
728 					continue;
729 
730 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
731 					ops, BATCH_SIZE);
732 				if (!n)
733 					continue;
734 
735 				done = false;
736 				nb_enqueued = 0;
737 
738 				stats->crypto_deq_count += n;
739 
740 				if (unlikely(!adapter->ebuf.count))
741 					nb_enqueued = eca_ops_enqueue_burst(
742 							adapter, ops, n);
743 
744 				if (likely(nb_enqueued == n))
745 					goto check;
746 
747 				/* Failed to enqueue events case */
748 				for (i = nb_enqueued; i < n; i++)
749 					eca_circular_buffer_add(
750 						&adapter->ebuf,
751 						ops[i]);
752 
753 check:
754 				nb_deq += n;
755 
756 				if (nb_deq >= max_deq) {
757 					if ((qp + 1) == dev_qps) {
758 						adapter->next_cdev_id =
759 							(cdev_id + 1)
760 							% num_cdev;
761 					}
762 					curr_dev->next_queue_pair_id = (qp + 1)
763 						% dev->data->nb_queue_pairs;
764 
765 					return nb_deq;
766 				}
767 			}
768 		}
769 		adapter->next_cdev_id = 0;
770 	} while (done == false);
771 	return nb_deq;
772 }
773 
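/* One service iteration: alternate between draining completed ops from the
 * cryptodevs and moving freshly dequeued events into the cryptodevs until
 * max_ops is consumed or neither direction makes progress. Returns -EAGAIN
 * (after calling rte_event_maintain()) when there was nothing to do.
 */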
774 static int
775 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
776 		       unsigned int max_ops)
777 {
778 	unsigned int ops_left = max_ops;
779 
780 	while (ops_left > 0) {
781 		unsigned int e_cnt, d_cnt;
782 
783 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
784 		ops_left -= RTE_MIN(ops_left, e_cnt);
785 
786 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
787 		ops_left -= RTE_MIN(ops_left, d_cnt);
788 
789 		if (e_cnt == 0 && d_cnt == 0)
790 			break;
791 
792 	}
793 
794 	if (ops_left == max_ops) {
795 		rte_event_maintain(adapter->eventdev_id,
796 				   adapter->event_port_id, 0);
797 		return -EAGAIN;
798 	} else
799 		return 0;
800 }
801 
802 static int
803 eca_service_func(void *args)
804 {
805 	struct event_crypto_adapter *adapter = args;
806 	int ret;
807 
808 	if (rte_spinlock_trylock(&adapter->lock) == 0)
809 		return 0;
810 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
811 	rte_spinlock_unlock(&adapter->lock);
812 
813 	return ret;
814 }
815 
816 static int
817 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
818 {
819 	struct rte_event_crypto_adapter_conf adapter_conf;
820 	struct rte_service_spec service;
821 	int ret;
822 
823 	if (adapter->service_inited)
824 		return 0;
825 
826 	memset(&service, 0, sizeof(service));
827 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
828 		"rte_event_crypto_adapter_%d", id);
829 	service.socket_id = adapter->socket_id;
830 	service.callback = eca_service_func;
831 	service.callback_userdata = adapter;
832 	/* Service function handles locking for queue add/del updates */
833 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
834 	ret = rte_service_component_register(&service, &adapter->service_id);
835 	if (ret) {
836 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
837 			service.name, ret);
838 		return ret;
839 	}
840 
841 	ret = adapter->conf_cb(id, adapter->eventdev_id,
842 		&adapter_conf, adapter->conf_arg);
843 	if (ret) {
844 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
845 			ret);
846 		return ret;
847 	}
848 
849 	adapter->max_nb = adapter_conf.max_nb;
850 	adapter->event_port_id = adapter_conf.event_port_id;
851 	adapter->service_inited = 1;
852 
853 	return ret;
854 }
855 
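/* Enable or disable one queue pair, or all pairs when queue_pair_id is -1,
 * keeping the per-device and per-adapter queue pair counts consistent.
 */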
856 static void
857 eca_update_qp_info(struct event_crypto_adapter *adapter,
858 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
859 		   uint8_t add)
860 {
861 	struct crypto_queue_pair_info *qp_info;
862 	int enabled;
863 	uint16_t i;
864 
865 	if (dev_info->qpairs == NULL)
866 		return;
867 
868 	if (queue_pair_id == -1) {
869 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
870 			eca_update_qp_info(adapter, dev_info, i, add);
871 	} else {
872 		qp_info = &dev_info->qpairs[queue_pair_id];
873 		enabled = qp_info->qp_enabled;
874 		if (add) {
875 			adapter->nb_qps += !enabled;
876 			dev_info->num_qpairs += !enabled;
877 		} else {
878 			adapter->nb_qps -= enabled;
879 			dev_info->num_qpairs -= enabled;
880 		}
881 		qp_info->qp_enabled = !!add;
882 	}
883 }
884 
885 static int
886 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
887 		   int queue_pair_id)
888 {
889 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
890 	struct crypto_queue_pair_info *qpairs;
891 	uint32_t i;
892 
893 	if (dev_info->qpairs == NULL) {
894 		dev_info->qpairs =
895 		    rte_zmalloc_socket(adapter->mem_name,
896 					dev_info->dev->data->nb_queue_pairs *
897 					sizeof(struct crypto_queue_pair_info),
898 					0, adapter->socket_id);
899 		if (dev_info->qpairs == NULL)
900 			return -ENOMEM;
901 
902 		qpairs = dev_info->qpairs;
903 
904 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
905 					     &qpairs->cbuf,
906 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
907 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
908 					 "buffer");
909 			rte_free(qpairs);
910 			return -ENOMEM;
911 		}
912 	}
913 
914 	if (queue_pair_id == -1) {
915 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
916 			eca_update_qp_info(adapter, dev_info, i, 1);
917 	} else
918 		eca_update_qp_info(adapter, dev_info,
919 					(uint16_t)queue_pair_id, 1);
920 
921 	return 0;
922 }
923 
924 int
925 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
926 			uint8_t cdev_id,
927 			int32_t queue_pair_id,
928 			const struct rte_event_crypto_adapter_queue_conf *conf)
929 {
930 	struct rte_event_crypto_adapter_vector_limits limits;
931 	struct event_crypto_adapter *adapter;
932 	struct crypto_device_info *dev_info;
933 	struct rte_eventdev *dev;
934 	uint32_t cap;
935 	int ret;
936 
937 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
938 
939 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
940 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
941 		return -EINVAL;
942 	}
943 
944 	adapter = eca_id_to_adapter(id);
945 	if (adapter == NULL)
946 		return -EINVAL;
947 
948 	dev = &rte_eventdevs[adapter->eventdev_id];
949 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
950 						cdev_id,
951 						&cap);
952 	if (ret) {
953 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
954 			" cdev %" PRIu8, id, cdev_id);
955 		return ret;
956 	}
957 
958 	if (conf == NULL) {
959 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
960 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
961 					 cdev_id);
962 			return -EINVAL;
963 		}
964 	} else {
965 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
966 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
967 				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
968 						 "dev %" PRIu8 " cdev %" PRIu8, id,
969 						 cdev_id);
970 				return -ENOTSUP;
971 			}
972 
973 			ret = rte_event_crypto_adapter_vector_limits_get(
974 				adapter->eventdev_id, cdev_id, &limits);
975 			if (ret < 0) {
976 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
977 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
978 						 id, cdev_id);
979 				return -EINVAL;
980 			}
981 
982 			if (conf->vector_sz < limits.min_sz ||
983 			    conf->vector_sz > limits.max_sz ||
984 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
985 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
986 			    conf->vector_mp == NULL) {
987 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
988 						" dev %" PRIu8 " cdev %" PRIu8,
989 						id, cdev_id);
990 				return -EINVAL;
991 			}
992 
993 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
994 			    (sizeof(uintptr_t) * conf->vector_sz))) {
995 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
996 						" dev %" PRIu8 " cdev %" PRIu8,
997 						id, cdev_id);
998 				return -EINVAL;
999 			}
1000 		}
1001 	}
1002 
1003 	dev_info = &adapter->cdevs[cdev_id];
1004 
1005 	if (queue_pair_id != -1 &&
1006 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1007 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1008 				 (uint16_t)queue_pair_id);
1009 		return -EINVAL;
1010 	}
1011 
1012 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1013 	 * no service core is needed as HW supports the event forward capability.
1014 	 */
1015 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1016 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1017 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1018 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1019 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1020 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1021 			return -ENOTSUP;
1022 		if (dev_info->qpairs == NULL) {
1023 			dev_info->qpairs =
1024 			    rte_zmalloc_socket(adapter->mem_name,
1025 					dev_info->dev->data->nb_queue_pairs *
1026 					sizeof(struct crypto_queue_pair_info),
1027 					0, adapter->socket_id);
1028 			if (dev_info->qpairs == NULL)
1029 				return -ENOMEM;
1030 		}
1031 
1032 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1033 				dev_info->dev,
1034 				queue_pair_id,
1035 				conf);
1036 		if (ret)
1037 			return ret;
1038 
1039 		else
1040 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1041 					   queue_pair_id, 1);
1042 	}
1043 
1044 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1045 	 * or SW adapter, initiate services so the application can choose
1046 	 * whichever way it wants to use the adapter.
1047 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1048 	 *         Application may want to use one of the two modes below:
1049 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1050 	 *          b. OP_NEW mode -> HW Dequeue
1051 	 * Case 2: No HW caps, use SW adapter
1052 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1053 	 *          b. OP_NEW mode -> SW Dequeue
1054 	 */
1055 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1056 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1057 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1058 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1059 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1060 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1061 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1062 		rte_spinlock_lock(&adapter->lock);
1063 		ret = eca_init_service(adapter, id);
1064 		if (ret == 0)
1065 			ret = eca_add_queue_pair(adapter, cdev_id,
1066 						 queue_pair_id);
1067 		rte_spinlock_unlock(&adapter->lock);
1068 
1069 		if (ret)
1070 			return ret;
1071 
1072 		rte_service_component_runstate_set(adapter->service_id, 1);
1073 	}
1074 
1075 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1076 		queue_pair_id, conf);
1077 	return 0;
1078 }
1079 
1080 int
1081 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1082 					int32_t queue_pair_id)
1083 {
1084 	struct event_crypto_adapter *adapter;
1085 	struct crypto_device_info *dev_info;
1086 	struct rte_eventdev *dev;
1087 	int ret;
1088 	uint32_t cap;
1089 	uint16_t i;
1090 
1091 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1092 
1093 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1094 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1095 		return -EINVAL;
1096 	}
1097 
1098 	adapter = eca_id_to_adapter(id);
1099 	if (adapter == NULL)
1100 		return -EINVAL;
1101 
1102 	dev = &rte_eventdevs[adapter->eventdev_id];
1103 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1104 						cdev_id,
1105 						&cap);
1106 	if (ret)
1107 		return ret;
1108 
1109 	dev_info = &adapter->cdevs[cdev_id];
1110 
1111 	if (queue_pair_id != -1 &&
1112 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1113 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1114 				 (uint16_t)queue_pair_id);
1115 		return -EINVAL;
1116 	}
1117 
1118 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1119 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1120 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1121 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1122 			return -ENOTSUP;
1123 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1124 						dev_info->dev,
1125 						queue_pair_id);
1126 		if (ret == 0) {
1127 			eca_update_qp_info(adapter,
1128 					&adapter->cdevs[cdev_id],
1129 					queue_pair_id,
1130 					0);
1131 			if (dev_info->num_qpairs == 0) {
1132 				rte_free(dev_info->qpairs);
1133 				dev_info->qpairs = NULL;
1134 			}
1135 		}
1136 	} else {
1137 		if (adapter->nb_qps == 0)
1138 			return 0;
1139 
1140 		rte_spinlock_lock(&adapter->lock);
1141 		if (queue_pair_id == -1) {
1142 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1143 				i++)
1144 				eca_update_qp_info(adapter, dev_info,
1145 							i, 0);
1146 		} else {
1147 			eca_update_qp_info(adapter, dev_info,
1148 						(uint16_t)queue_pair_id, 0);
1149 		}
1150 
1151 		if (dev_info->num_qpairs == 0) {
1152 			rte_free(dev_info->qpairs);
1153 			dev_info->qpairs = NULL;
1154 		}
1155 
1156 		rte_spinlock_unlock(&adapter->lock);
1157 		rte_service_component_runstate_set(adapter->service_id,
1158 				adapter->nb_qps);
1159 	}
1160 
1161 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1162 		queue_pair_id, ret);
1163 	return ret;
1164 }
1165 
1166 static int
1167 eca_adapter_ctrl(uint8_t id, int start)
1168 {
1169 	struct event_crypto_adapter *adapter;
1170 	struct crypto_device_info *dev_info;
1171 	struct rte_eventdev *dev;
1172 	uint32_t i;
1173 	int use_service;
1174 	int stop = !start;
1175 
1176 	use_service = 0;
1177 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1178 	adapter = eca_id_to_adapter(id);
1179 	if (adapter == NULL)
1180 		return -EINVAL;
1181 
1182 	dev = &rte_eventdevs[adapter->eventdev_id];
1183 
1184 	for (i = 0; i < rte_cryptodev_count(); i++) {
1185 		dev_info = &adapter->cdevs[i];
1186 		/* if start, check for num queue pairs */
1187 		if (start && !dev_info->num_qpairs)
1188 			continue;
1189 		/* if stop, check if dev has been started */
1190 		if (stop && !dev_info->dev_started)
1191 			continue;
1192 		use_service |= !dev_info->internal_event_port;
1193 		dev_info->dev_started = start;
1194 		if (dev_info->internal_event_port == 0)
1195 			continue;
1196 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1197 						dev_info->dev) :
1198 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1199 						dev_info->dev);
1200 	}
1201 
1202 	if (use_service)
1203 		rte_service_runstate_set(adapter->service_id, start);
1204 
1205 	return 0;
1206 }
1207 
1208 int
1209 rte_event_crypto_adapter_start(uint8_t id)
1210 {
1211 	struct event_crypto_adapter *adapter;
1212 
1213 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1214 	adapter = eca_id_to_adapter(id);
1215 	if (adapter == NULL)
1216 		return -EINVAL;
1217 
1218 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1219 	return eca_adapter_ctrl(id, 1);
1220 }
1221 
1222 int
1223 rte_event_crypto_adapter_stop(uint8_t id)
1224 {
1225 	rte_eventdev_trace_crypto_adapter_stop(id);
1226 	return eca_adapter_ctrl(id, 0);
1227 }
1228 
1229 int
1230 rte_event_crypto_adapter_stats_get(uint8_t id,
1231 				struct rte_event_crypto_adapter_stats *stats)
1232 {
1233 	struct event_crypto_adapter *adapter;
1234 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1235 	struct rte_event_crypto_adapter_stats dev_stats;
1236 	struct rte_eventdev *dev;
1237 	struct crypto_device_info *dev_info;
1238 	uint32_t i;
1239 	int ret;
1240 
1241 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1242 
1243 	adapter = eca_id_to_adapter(id);
1244 	if (adapter == NULL || stats == NULL)
1245 		return -EINVAL;
1246 
1247 	dev = &rte_eventdevs[adapter->eventdev_id];
1248 	memset(stats, 0, sizeof(*stats));
1249 	for (i = 0; i < rte_cryptodev_count(); i++) {
1250 		dev_info = &adapter->cdevs[i];
1251 		if (dev_info->internal_event_port == 0 ||
1252 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1253 			continue;
1254 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1255 						dev_info->dev,
1256 						&dev_stats);
1257 		if (ret)
1258 			continue;
1259 
1260 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1261 		dev_stats_sum.event_enq_count +=
1262 			dev_stats.event_enq_count;
1263 	}
1264 
1265 	if (adapter->service_inited)
1266 		*stats = adapter->crypto_stats;
1267 
1268 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1269 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1270 
1271 	return 0;
1272 }
1273 
1274 int
1275 rte_event_crypto_adapter_stats_reset(uint8_t id)
1276 {
1277 	struct event_crypto_adapter *adapter;
1278 	struct crypto_device_info *dev_info;
1279 	struct rte_eventdev *dev;
1280 	uint32_t i;
1281 
1282 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1283 
1284 	adapter = eca_id_to_adapter(id);
1285 	if (adapter == NULL)
1286 		return -EINVAL;
1287 
1288 	dev = &rte_eventdevs[adapter->eventdev_id];
1289 	for (i = 0; i < rte_cryptodev_count(); i++) {
1290 		dev_info = &adapter->cdevs[i];
1291 		if (dev_info->internal_event_port == 0 ||
1292 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1293 			continue;
1294 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1295 						dev_info->dev);
1296 	}
1297 
1298 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1299 	return 0;
1300 }
1301 
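/* With the software (service) path, the application maps the returned service
 * to a service lcore. Illustrative sketch; "lcore_id" is a placeholder:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_lcore_add(lcore_id);
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 */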
1302 int
1303 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1304 {
1305 	struct event_crypto_adapter *adapter;
1306 
1307 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1308 
1309 	adapter = eca_id_to_adapter(id);
1310 	if (adapter == NULL || service_id == NULL)
1311 		return -EINVAL;
1312 
1313 	if (adapter->service_inited)
1314 		*service_id = adapter->service_id;
1315 
1316 	return adapter->service_inited ? 0 : -ESRCH;
1317 }
1318 
1319 int
1320 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1321 {
1322 	struct event_crypto_adapter *adapter;
1323 
1324 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1325 
1326 	adapter = eca_id_to_adapter(id);
1327 	if (adapter == NULL || event_port_id == NULL)
1328 		return -EINVAL;
1329 
1330 	*event_port_id = adapter->event_port_id;
1331 
1332 	return 0;
1333 }
1334 
1335 int
1336 rte_event_crypto_adapter_vector_limits_get(
1337 	uint8_t dev_id, uint16_t cdev_id,
1338 	struct rte_event_crypto_adapter_vector_limits *limits)
1339 {
1340 	struct rte_cryptodev *cdev;
1341 	struct rte_eventdev *dev;
1342 	uint32_t cap;
1343 	int ret;
1344 
1345 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1346 
1347 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1348 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1349 		return -EINVAL;
1350 	}
1351 
1352 	if (limits == NULL) {
1353 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1354 		return -EINVAL;
1355 	}
1356 
1357 	dev = &rte_eventdevs[dev_id];
1358 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1359 
1360 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1361 	if (ret) {
1362 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1363 				 " cdev %" PRIu16, dev_id, cdev_id);
1364 		return ret;
1365 	}
1366 
1367 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1368 		RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
1369 				 "dev %" PRIu8 " cdev %" PRIu16, dev_id, cdev_id);
1370 		return -ENOTSUP;
1371 	}
1372 
1373 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1374 		return -ENOTSUP;
1375 
1376 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1377 		dev, cdev, limits);
1378 }
1379