1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
29 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
30 
31 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
32  * iterations of eca_crypto_adapter_enq_run()
33  */
34 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
35 
36 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
37 
38 struct crypto_ops_circular_buffer {
39 	/* index of head element in circular buffer */
40 	uint16_t head;
41 	/* index of tail element in circular buffer */
42 	uint16_t tail;
43 	/* number of elements in buffer */
44 	uint16_t count;
45 	/* size of circular buffer */
46 	uint16_t size;
47 	/* Pointer to hold rte_crypto_ops for batching */
48 	struct rte_crypto_op **op_buffer;
49 } __rte_cache_aligned;
50 
51 struct event_crypto_adapter {
52 	/* Event device identifier */
53 	uint8_t eventdev_id;
54 	/* Event port identifier */
55 	uint8_t event_port_id;
56 	/* Store event device's implicit release capability */
57 	uint8_t implicit_release_disabled;
58 	/* Flag to indicate backpressure at cryptodev;
59 	 * stop further dequeuing of events from eventdev.
60 	 */
61 	bool stop_enq_to_cryptodev;
62 	/* Max crypto ops processed in any service function invocation */
63 	uint32_t max_nb;
64 	/* Lock to serialize config updates with service function */
65 	rte_spinlock_t lock;
66 	/* Next crypto device to be processed */
67 	uint16_t next_cdev_id;
68 	/* Per crypto device structure */
69 	struct crypto_device_info *cdevs;
70 	/* Loop counter to flush crypto ops */
71 	uint16_t transmit_loop_count;
72 	/* Circular buffer for batching crypto ops to eventdev */
73 	struct crypto_ops_circular_buffer ebuf;
74 	/* Per instance stats structure */
75 	struct rte_event_crypto_adapter_stats crypto_stats;
76 	/* Configuration callback for rte_service configuration */
77 	rte_event_crypto_adapter_conf_cb conf_cb;
78 	/* Configuration callback argument */
79 	void *conf_arg;
80 	/* Set if default_cb is being used */
81 	int default_cb_arg;
82 	/* Service initialization state */
83 	uint8_t service_inited;
84 	/* Memory allocation name */
85 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
86 	/* Socket identifier cached from eventdev */
87 	int socket_id;
88 	/* Per adapter EAL service */
89 	uint32_t service_id;
90 	/* No. of queue pairs configured */
91 	uint16_t nb_qps;
92 	/* Adapter mode */
93 	enum rte_event_crypto_adapter_mode mode;
94 } __rte_cache_aligned;
95 
96 /* Per crypto device information */
97 struct crypto_device_info {
98 	/* Pointer to cryptodev */
99 	struct rte_cryptodev *dev;
100 	/* Pointer to queue pair info */
101 	struct crypto_queue_pair_info *qpairs;
102 	/* Next queue pair to be processed */
103 	uint16_t next_queue_pair_id;
104 	/* Set to indicate cryptodev->eventdev packet
105 	 * transfer uses a hardware mechanism
106 	 */
107 	uint8_t internal_event_port;
108 	/* Set to indicate processing has been started */
109 	uint8_t dev_started;
110 	/* If num_qpairs > 0, the start callback will
111 	 * be invoked if not already invoked
112 	 */
113 	uint16_t num_qpairs;
114 } __rte_cache_aligned;
115 
116 /* Per queue pair information */
117 struct crypto_queue_pair_info {
118 	/* Set to indicate queue pair is enabled */
119 	bool qp_enabled;
120 	/* Circular buffer for batching crypto ops to cdev */
121 	struct crypto_ops_circular_buffer cbuf;
122 } __rte_cache_aligned;
123 
124 static struct event_crypto_adapter **event_crypto_adapter;
125 
126 /* Macros to check for valid adapter */
127 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
128 	if (!eca_valid_id(id)) { \
129 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
130 		return retval; \
131 	} \
132 } while (0)
133 
134 static inline int
135 eca_valid_id(uint8_t id)
136 {
137 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
138 }
139 
140 static int
141 eca_init(void)
142 {
143 	const struct rte_memzone *mz;
144 	unsigned int sz;
145 
146 	sz = sizeof(*event_crypto_adapter) *
147 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
148 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
149 
150 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
151 	if (mz == NULL) {
152 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
153 						 rte_socket_id(), 0,
154 						 RTE_CACHE_LINE_SIZE);
155 		if (mz == NULL) {
156 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
157 					PRId32, rte_errno);
158 			return -rte_errno;
159 		}
160 	}
161 
162 	event_crypto_adapter = mz->addr;
163 	return 0;
164 }
165 
166 static int
167 eca_memzone_lookup(void)
168 {
169 	const struct rte_memzone *mz;
170 
171 	if (event_crypto_adapter == NULL) {
172 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
173 		if (mz == NULL)
174 			return -ENOMEM;
175 
176 		event_crypto_adapter = mz->addr;
177 	}
178 
179 	return 0;
180 }
181 
182 static inline bool
183 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
184 {
185 	return bufp->count >= BATCH_SIZE;
186 }
187 
188 static inline bool
189 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
190 {
191 	return (bufp->size - bufp->count) >= BATCH_SIZE;
192 }
193 
194 static inline void
195 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
196 {
197 	rte_free(bufp->op_buffer);
198 }
199 
200 static inline int
201 eca_circular_buffer_init(const char *name,
202 			 struct crypto_ops_circular_buffer *bufp,
203 			 uint16_t sz)
204 {
205 	bufp->op_buffer = rte_zmalloc(name,
206 				      sizeof(struct rte_crypto_op *) * sz,
207 				      0);
208 	if (bufp->op_buffer == NULL)
209 		return -ENOMEM;
210 
211 	bufp->size = sz;
212 	return 0;
213 }
214 
215 static inline int
216 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
217 			struct rte_crypto_op *op)
218 {
219 	uint16_t *tailp = &bufp->tail;
220 
221 	bufp->op_buffer[*tailp] = op;
222 	/* circular buffer, go round */
223 	*tailp = (*tailp + 1) % bufp->size;
224 	bufp->count++;
225 
226 	return 0;
227 }
228 
229 static inline int
230 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
231 				  uint8_t cdev_id, uint16_t qp_id,
232 				  uint16_t *nb_ops_flushed)
233 {
234 	uint16_t n = 0;
235 	uint16_t *headp = &bufp->head;
236 	uint16_t *tailp = &bufp->tail;
237 	struct rte_crypto_op **ops = bufp->op_buffer;
238 
239 	if (*tailp > *headp)
240 		n = *tailp - *headp;
241 	else if (*tailp < *headp || bufp->count)
242 		n = bufp->size - *headp;
243 	else {
244 		*nb_ops_flushed = 0;
245 		return 0;  /* buffer empty */
246 	}
247 
248 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
249 						      &ops[*headp], n);
250 	bufp->count -= *nb_ops_flushed;
251 	if (!bufp->count) {
252 		*headp = 0;
253 		*tailp = 0;
254 	} else
255 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
256 
257 	return *nb_ops_flushed == n ? 0 : -1;
258 }
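
/*
 * Illustrative sketch, not part of the adapter code: how head, tail and
 * count evolve in struct crypto_ops_circular_buffer.  The op0..op2 names
 * stand for previously allocated struct rte_crypto_op pointers and the
 * buffer size of 4 is a placeholder.
 *
 *	struct crypto_ops_circular_buffer b;
 *
 *	eca_circular_buffer_init("example_buf", &b, 4);
 *	eca_circular_buffer_add(&b, op0);	// head 0, tail 1, count 1
 *	eca_circular_buffer_add(&b, op1);	// head 0, tail 2, count 2
 *	eca_circular_buffer_add(&b, op2);	// head 0, tail 3, count 3
 *	// a flush that enqueues 2 ops leaves head 2, tail 3, count 1
 *
 * Because tail wraps modulo size, head == tail can describe either an empty
 * or a completely full buffer, which is why the flush helpers above also
 * consult count before treating the buffer as empty.
 */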
259 
260 static inline struct event_crypto_adapter *
261 eca_id_to_adapter(uint8_t id)
262 {
263 	return event_crypto_adapter ?
264 		event_crypto_adapter[id] : NULL;
265 }
266 
267 static int
268 eca_default_config_cb(uint8_t id, uint8_t dev_id,
269 			struct rte_event_crypto_adapter_conf *conf, void *arg)
270 {
271 	struct rte_event_dev_config dev_conf;
272 	struct rte_eventdev *dev;
273 	uint8_t port_id;
274 	int started;
275 	int ret;
276 	struct rte_event_port_conf *port_conf = arg;
277 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
278 
279 	if (adapter == NULL)
280 		return -EINVAL;
281 
282 	dev = &rte_eventdevs[adapter->eventdev_id];
283 	dev_conf = dev->data->dev_conf;
284 
285 	started = dev->data->dev_started;
286 	if (started)
287 		rte_event_dev_stop(dev_id);
288 	port_id = dev_conf.nb_event_ports;
289 	dev_conf.nb_event_ports += 1;
290 	ret = rte_event_dev_configure(dev_id, &dev_conf);
291 	if (ret) {
292 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
293 		if (started) {
294 			if (rte_event_dev_start(dev_id))
295 				return -EIO;
296 		}
297 		return ret;
298 	}
299 
300 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
301 	if (ret) {
302 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
303 		return ret;
304 	}
305 
306 	conf->event_port_id = port_id;
307 	conf->max_nb = DEFAULT_MAX_NB;
308 	if (started)
309 		ret = rte_event_dev_start(dev_id);
310 
311 	adapter->default_cb_arg = 1;
312 	return ret;
313 }
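
/*
 * Illustrative sketch, not part of this file: an application-provided
 * rte_event_crypto_adapter_conf_cb passed to
 * rte_event_crypto_adapter_create_ext() can simply hand back an event port
 * the application has already set up, instead of reconfiguring the eventdev
 * as eca_default_config_cb() does.  The callback name, its argument and the
 * max_nb value below are assumptions for illustration only.
 *
 *	static int
 *	app_eca_conf_cb(uint8_t id, uint8_t evdev_id,
 *			struct rte_event_crypto_adapter_conf *conf, void *arg)
 *	{
 *		const uint8_t *port_id = arg;	// port created by the app
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(evdev_id);
 *		conf->event_port_id = *port_id;
 *		conf->max_nb = 128;		// ops per service invocation
 *		return 0;
 *	}
 */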
314 
315 int
316 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
317 				rte_event_crypto_adapter_conf_cb conf_cb,
318 				enum rte_event_crypto_adapter_mode mode,
319 				void *conf_arg)
320 {
321 	struct event_crypto_adapter *adapter;
322 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
323 	struct rte_event_dev_info dev_info;
324 	int socket_id;
325 	uint8_t i;
326 	int ret;
327 
328 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
329 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
330 	if (conf_cb == NULL)
331 		return -EINVAL;
332 
333 	if (event_crypto_adapter == NULL) {
334 		ret = eca_init();
335 		if (ret)
336 			return ret;
337 	}
338 
339 	adapter = eca_id_to_adapter(id);
340 	if (adapter != NULL) {
341 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
342 		return -EEXIST;
343 	}
344 
345 	socket_id = rte_event_dev_socket_id(dev_id);
346 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
347 		 "rte_event_crypto_adapter_%d", id);
348 
349 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
350 			RTE_CACHE_LINE_SIZE, socket_id);
351 	if (adapter == NULL) {
352 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
353 		return -ENOMEM;
354 	}
355 
356 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
357 				     &adapter->ebuf,
358 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
359 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
360 		rte_free(adapter);
361 		return -ENOMEM;
362 	}
363 
364 	ret = rte_event_dev_info_get(dev_id, &dev_info);
365 	if (ret < 0) {
366 		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
367 				 dev_id, dev_info.driver_name);
368 		eca_circular_buffer_free(&adapter->ebuf);
369 		rte_free(adapter);
370 		return ret;
371 	}
372 
373 	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
374 			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
375 	adapter->eventdev_id = dev_id;
376 	adapter->socket_id = socket_id;
377 	adapter->conf_cb = conf_cb;
378 	adapter->conf_arg = conf_arg;
379 	adapter->mode = mode;
380 	strcpy(adapter->mem_name, mem_name);
381 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
382 					rte_cryptodev_count() *
383 					sizeof(struct crypto_device_info), 0,
384 					socket_id);
385 	if (adapter->cdevs == NULL) {
386 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
387 		eca_circular_buffer_free(&adapter->ebuf);
388 		rte_free(adapter);
389 		return -ENOMEM;
390 	}
391 
392 	rte_spinlock_init(&adapter->lock);
393 	for (i = 0; i < rte_cryptodev_count(); i++)
394 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
395 
396 	event_crypto_adapter[id] = adapter;
397 
398 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
399 		mode);
400 	return 0;
401 }
402 
403 
404 int
405 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
406 				struct rte_event_port_conf *port_config,
407 				enum rte_event_crypto_adapter_mode mode)
408 {
409 	struct rte_event_port_conf *pc;
410 	int ret;
411 
412 	if (port_config == NULL)
413 		return -EINVAL;
414 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
415 
416 	pc = rte_malloc(NULL, sizeof(*pc), 0);
417 	if (pc == NULL)
418 		return -ENOMEM;
419 	*pc = *port_config;
420 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
421 						  eca_default_config_cb,
422 						  mode,
423 						  pc);
424 	if (ret)
425 		rte_free(pc);
426 
427 	return ret;
428 }
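
/*
 * Illustrative usage sketch (assumptions: eventdev 0 and cryptodev 0 are
 * already configured and started, and adapter id 0 is unused).  A typical
 * software adapter setup in OP_FORWARD mode looks roughly like:
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *
 *	ret = rte_event_crypto_adapter_create(0, 0, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_queue_pair_add(0, 0, -1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(0);
 *
 * Passing -1 adds every queue pair of the cryptodev; a NULL conf is only
 * rejected when the PMD reports the INTERNAL_PORT_QP_EV_BIND capability.
 */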
429 
430 int
431 rte_event_crypto_adapter_free(uint8_t id)
432 {
433 	struct event_crypto_adapter *adapter;
434 
435 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
436 
437 	adapter = eca_id_to_adapter(id);
438 	if (adapter == NULL)
439 		return -EINVAL;
440 
441 	if (adapter->nb_qps) {
442 		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
443 				adapter->nb_qps);
444 		return -EBUSY;
445 	}
446 
447 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
448 	if (adapter->default_cb_arg)
449 		rte_free(adapter->conf_arg);
450 	rte_free(adapter->cdevs);
451 	rte_free(adapter);
452 	event_crypto_adapter[id] = NULL;
453 
454 	return 0;
455 }
456 
457 static inline unsigned int
458 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
459 		     unsigned int cnt)
460 {
461 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
462 	union rte_event_crypto_metadata *m_data = NULL;
463 	struct crypto_queue_pair_info *qp_info = NULL;
464 	struct rte_crypto_op *crypto_op;
465 	unsigned int i, n;
466 	uint16_t qp_id, nb_enqueued = 0;
467 	uint8_t cdev_id;
468 	int ret;
469 
470 	ret = 0;
471 	n = 0;
472 	stats->event_deq_count += cnt;
473 
474 	for (i = 0; i < cnt; i++) {
475 		crypto_op = ev[i].event_ptr;
476 		if (crypto_op == NULL)
477 			continue;
478 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
479 		if (m_data == NULL) {
480 			rte_pktmbuf_free(crypto_op->sym->m_src);
481 			rte_crypto_op_free(crypto_op);
482 			continue;
483 		}
484 
485 		cdev_id = m_data->request_info.cdev_id;
486 		qp_id = m_data->request_info.queue_pair_id;
487 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
488 		if (!qp_info->qp_enabled) {
489 			rte_pktmbuf_free(crypto_op->sym->m_src);
490 			rte_crypto_op_free(crypto_op);
491 			continue;
492 		}
493 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
494 
495 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
496 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
497 								cdev_id,
498 								qp_id,
499 								&nb_enqueued);
500 			/*
501 			 * If some crypto ops failed to flush to cdev and
502 			 * space for another batch is not available, stop
503 			 * dequeue from eventdev momentarily
504 			 */
505 			if (unlikely(ret < 0 &&
506 				!eca_circular_buffer_space_for_batch(
507 							&qp_info->cbuf)))
508 				adapter->stop_enq_to_cryptodev = true;
509 		}
510 
511 		stats->crypto_enq_count += nb_enqueued;
512 		n += nb_enqueued;
513 	}
514 
515 	return n;
516 }
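
/*
 * Illustrative sketch of the metadata contract relied on above: for each
 * session the application fills a union rte_event_crypto_metadata so that
 * rte_cryptodev_session_event_mdata_get() returns the target queue pair
 * (request_info) and the completion event template (response_info).  The
 * identifiers sess, cdev_id, qp_id and ev_qid are assumptions; the helper
 * and argument order follow the session event metadata API added alongside
 * rte_cryptodev_session_event_mdata_get().
 *
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m_data.response_info.queue_id = ev_qid;		// post-crypto stage
 *	m_data.request_info.cdev_id = cdev_id;		// used by this file
 *	m_data.request_info.queue_pair_id = qp_id;	// to pick the qp
 *
 *	rte_cryptodev_session_event_mdata_set(cdev_id, sess,
 *					      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *					      RTE_CRYPTO_OP_WITH_SESSION,
 *					      &m_data, sizeof(m_data));
 *
 * For sessionless ops the same union is expected in the op's private data
 * area instead.
 */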
517 
518 static unsigned int
519 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
520 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
521 {
522 	struct crypto_device_info *curr_dev;
523 	struct crypto_queue_pair_info *curr_queue;
524 	struct rte_cryptodev *dev;
525 	uint16_t nb = 0, nb_enqueued = 0;
526 	uint16_t qp;
527 
528 	curr_dev = &adapter->cdevs[cdev_id];
529 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
530 
531 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
532 
533 		curr_queue = &curr_dev->qpairs[qp];
534 		if (unlikely(curr_dev->qpairs == NULL || !curr_queue->qp_enabled))
535 			continue;
536 
537 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
538 						  cdev_id,
539 						  qp,
540 						  &nb_enqueued);
541 		*nb_ops_flushed += curr_queue->cbuf.count;
542 		nb += nb_enqueued;
543 	}
544 
545 	return nb;
546 }
547 
548 static unsigned int
549 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
550 {
551 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
552 	uint8_t cdev_id;
553 	uint16_t nb_enqueued = 0;
554 	uint16_t nb_ops_flushed = 0;
555 	uint16_t num_cdev = rte_cryptodev_count();
556 
557 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
558 		nb_enqueued += eca_crypto_cdev_flush(adapter,
559 						    cdev_id,
560 						    &nb_ops_flushed);
561 	/*
562 	 * Enable dequeue from eventdev if all ops from circular
563 	 * buffer flushed to cdev
564 	 */
565 	if (!nb_ops_flushed)
566 		adapter->stop_enq_to_cryptodev = false;
567 
568 	stats->crypto_enq_count += nb_enqueued;
569 
570 	return nb_enqueued;
571 }
572 
573 static int
574 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
575 			   unsigned int max_enq)
576 {
577 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
578 	struct rte_event ev[BATCH_SIZE];
579 	unsigned int nb_enq, nb_enqueued;
580 	uint16_t n;
581 	uint8_t event_dev_id = adapter->eventdev_id;
582 	uint8_t event_port_id = adapter->event_port_id;
583 
584 	nb_enqueued = 0;
585 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
586 		return 0;
587 
588 	if (unlikely(adapter->stop_enq_to_cryptodev)) {
589 		nb_enqueued += eca_crypto_enq_flush(adapter);
590 
591 		if (unlikely(adapter->stop_enq_to_cryptodev))
592 			goto skip_event_dequeue_burst;
593 	}
594 
595 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
596 		stats->event_poll_count++;
597 		n = rte_event_dequeue_burst(event_dev_id,
598 					    event_port_id, ev, BATCH_SIZE, 0);
599 
600 		if (!n)
601 			break;
602 
603 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
604 	}
605 
606 skip_event_dequeue_burst:
607 
608 	if ((++adapter->transmit_loop_count &
609 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
610 		nb_enqueued += eca_crypto_enq_flush(adapter);
611 	}
612 
613 	return nb_enqueued;
614 }
615 
616 static inline uint16_t
617 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
618 		  struct rte_crypto_op **ops, uint16_t num)
619 {
620 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
621 	union rte_event_crypto_metadata *m_data = NULL;
622 	uint8_t event_dev_id = adapter->eventdev_id;
623 	uint8_t event_port_id = adapter->event_port_id;
624 	struct rte_event events[BATCH_SIZE];
625 	uint16_t nb_enqueued, nb_ev;
626 	uint8_t retry;
627 	uint8_t i;
628 
629 	nb_ev = 0;
630 	retry = 0;
631 	nb_enqueued = 0;
632 	num = RTE_MIN(num, BATCH_SIZE);
633 	for (i = 0; i < num; i++) {
634 		struct rte_event *ev = &events[nb_ev++];
635 
636 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
637 		if (unlikely(m_data == NULL)) {
638 			rte_pktmbuf_free(ops[i]->sym->m_src);
639 			rte_crypto_op_free(ops[i]);
640 			continue;
641 		}
642 
643 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
644 		ev->event_ptr = ops[i];
645 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
646 		if (adapter->implicit_release_disabled)
647 			ev->op = RTE_EVENT_OP_FORWARD;
648 		else
649 			ev->op = RTE_EVENT_OP_NEW;
650 	}
651 
652 	do {
653 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
654 						  event_port_id,
655 						  &events[nb_enqueued],
656 						  nb_ev - nb_enqueued);
657 
658 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
659 		 nb_enqueued < nb_ev);
660 
661 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
662 	stats->event_enq_count += nb_enqueued;
663 	stats->event_enq_retry_count += retry - 1;
664 
665 	return nb_enqueued;
666 }
667 
668 static int
669 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
670 				   struct crypto_ops_circular_buffer *bufp)
671 {
672 	uint16_t n = 0, nb_ops_flushed;
673 	uint16_t *headp = &bufp->head;
674 	uint16_t *tailp = &bufp->tail;
675 	struct rte_crypto_op **ops = bufp->op_buffer;
676 
677 	if (*tailp > *headp)
678 		n = *tailp - *headp;
679 	else if (*tailp < *headp || bufp->count)
680 		n = bufp->size - *headp;
681 	else
682 		return 0;  /* buffer empty */
683 
684 	nb_ops_flushed =  eca_ops_enqueue_burst(adapter, ops, n);
685 	bufp->count -= nb_ops_flushed;
686 	if (!bufp->count) {
687 		*headp = 0;
688 		*tailp = 0;
689 		return 0;  /* buffer empty */
690 	}
691 
692 	*headp = (*headp + nb_ops_flushed) % bufp->size;
693 	return 1;
694 }
695 
696 
697 static void
698 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
699 {
700 	if (likely(adapter->ebuf.count == 0))
701 		return;
702 
703 	while (eca_circular_buffer_flush_to_evdev(adapter,
704 						  &adapter->ebuf))
705 		;
706 }
707 static inline unsigned int
708 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
709 			   unsigned int max_deq)
710 {
711 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
712 	struct crypto_device_info *curr_dev;
713 	struct crypto_queue_pair_info *curr_queue;
714 	struct rte_crypto_op *ops[BATCH_SIZE];
715 	uint16_t n, nb_deq, nb_enqueued, i;
716 	struct rte_cryptodev *dev;
717 	uint8_t cdev_id;
718 	uint16_t qp, dev_qps;
719 	bool done;
720 	uint16_t num_cdev = rte_cryptodev_count();
721 
722 	nb_deq = 0;
723 	eca_ops_buffer_flush(adapter);
724 
725 	do {
726 		done = true;
727 
728 		for (cdev_id = adapter->next_cdev_id;
729 			cdev_id < num_cdev; cdev_id++) {
730 			uint16_t queues = 0;
731 
732 			curr_dev = &adapter->cdevs[cdev_id];
733 			dev = curr_dev->dev;
734 			if (unlikely(dev == NULL))
735 				continue;
736 
737 			dev_qps = dev->data->nb_queue_pairs;
738 
739 			for (qp = curr_dev->next_queue_pair_id;
740 				queues < dev_qps; qp = (qp + 1) % dev_qps,
741 				queues++) {
742 
743 				curr_queue = &curr_dev->qpairs[qp];
744 				if (unlikely(curr_dev->qpairs == NULL ||
745 				    !curr_queue->qp_enabled))
746 					continue;
747 
748 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
749 					ops, BATCH_SIZE);
750 				if (!n)
751 					continue;
752 
753 				done = false;
754 				nb_enqueued = 0;
755 
756 				stats->crypto_deq_count += n;
757 
758 				if (unlikely(!adapter->ebuf.count))
759 					nb_enqueued = eca_ops_enqueue_burst(
760 							adapter, ops, n);
761 
762 				if (likely(nb_enqueued == n))
763 					goto check;
764 
765 				/* Failed to enqueue events case */
766 				for (i = nb_enqueued; i < n; i++)
767 					eca_circular_buffer_add(
768 						&adapter->ebuf,
769 						ops[i]);
770 
771 check:
772 				nb_deq += n;
773 
774 				if (nb_deq >= max_deq) {
775 					if ((qp + 1) == dev_qps) {
776 						adapter->next_cdev_id =
777 							(cdev_id + 1)
778 							% num_cdev;
779 					}
780 					curr_dev->next_queue_pair_id = (qp + 1)
781 						% dev->data->nb_queue_pairs;
782 
783 					return nb_deq;
784 				}
785 			}
786 		}
787 		adapter->next_cdev_id = 0;
788 	} while (done == false);
789 	return nb_deq;
790 }
791 
792 static int
793 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
794 		       unsigned int max_ops)
795 {
796 	unsigned int ops_left = max_ops;
797 
798 	while (ops_left > 0) {
799 		unsigned int e_cnt, d_cnt;
800 
801 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
802 		ops_left -= RTE_MIN(ops_left, e_cnt);
803 
804 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
805 		ops_left -= RTE_MIN(ops_left, d_cnt);
806 
807 		if (e_cnt == 0 && d_cnt == 0)
808 			break;
809 
810 	}
811 
812 	if (ops_left == max_ops) {
813 		rte_event_maintain(adapter->eventdev_id,
814 				   adapter->event_port_id, 0);
815 		return -EAGAIN;
816 	} else
817 		return 0;
818 }
819 
820 static int
821 eca_service_func(void *args)
822 {
823 	struct event_crypto_adapter *adapter = args;
824 	int ret;
825 
826 	if (rte_spinlock_trylock(&adapter->lock) == 0)
827 		return 0;
828 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
829 	rte_spinlock_unlock(&adapter->lock);
830 
831 	return ret;
832 }
833 
834 static int
835 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
836 {
837 	struct rte_event_crypto_adapter_conf adapter_conf;
838 	struct rte_service_spec service;
839 	int ret;
840 
841 	if (adapter->service_inited)
842 		return 0;
843 
844 	memset(&service, 0, sizeof(service));
845 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
846 		"rte_event_crypto_adapter_%d", id);
847 	service.socket_id = adapter->socket_id;
848 	service.callback = eca_service_func;
849 	service.callback_userdata = adapter;
850 	/* Service function handles locking for queue add/del updates */
851 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
852 	ret = rte_service_component_register(&service, &adapter->service_id);
853 	if (ret) {
854 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
855 			service.name, ret);
856 		return ret;
857 	}
858 
859 	ret = adapter->conf_cb(id, adapter->eventdev_id,
860 		&adapter_conf, adapter->conf_arg);
861 	if (ret) {
862 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
863 			ret);
864 		return ret;
865 	}
866 
867 	adapter->max_nb = adapter_conf.max_nb;
868 	adapter->event_port_id = adapter_conf.event_port_id;
869 	adapter->service_inited = 1;
870 
871 	return ret;
872 }
873 
874 static void
875 eca_update_qp_info(struct event_crypto_adapter *adapter,
876 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
877 		   uint8_t add)
878 {
879 	struct crypto_queue_pair_info *qp_info;
880 	int enabled;
881 	uint16_t i;
882 
883 	if (dev_info->qpairs == NULL)
884 		return;
885 
886 	if (queue_pair_id == -1) {
887 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
888 			eca_update_qp_info(adapter, dev_info, i, add);
889 	} else {
890 		qp_info = &dev_info->qpairs[queue_pair_id];
891 		enabled = qp_info->qp_enabled;
892 		if (add) {
893 			adapter->nb_qps += !enabled;
894 			dev_info->num_qpairs += !enabled;
895 		} else {
896 			adapter->nb_qps -= enabled;
897 			dev_info->num_qpairs -= enabled;
898 		}
899 		qp_info->qp_enabled = !!add;
900 	}
901 }
902 
903 static int
904 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
905 		   int queue_pair_id)
906 {
907 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
908 	struct crypto_queue_pair_info *qpairs;
909 	uint32_t i;
910 
911 	if (dev_info->qpairs == NULL) {
912 		dev_info->qpairs =
913 		    rte_zmalloc_socket(adapter->mem_name,
914 					dev_info->dev->data->nb_queue_pairs *
915 					sizeof(struct crypto_queue_pair_info),
916 					0, adapter->socket_id);
917 		if (dev_info->qpairs == NULL)
918 			return -ENOMEM;
919 
920 		qpairs = dev_info->qpairs;
921 
922 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
923 					     &qpairs->cbuf,
924 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
925 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
926 					 "buffer");
927 			rte_free(qpairs);
928 			return -ENOMEM;
929 		}
930 	}
931 
932 	if (queue_pair_id == -1) {
933 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
934 			eca_update_qp_info(adapter, dev_info, i, 1);
935 	} else
936 		eca_update_qp_info(adapter, dev_info,
937 					(uint16_t)queue_pair_id, 1);
938 
939 	return 0;
940 }
941 
942 int
943 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
944 			uint8_t cdev_id,
945 			int32_t queue_pair_id,
946 			const struct rte_event_crypto_adapter_queue_conf *conf)
947 {
948 	struct rte_event_crypto_adapter_vector_limits limits;
949 	struct event_crypto_adapter *adapter;
950 	struct crypto_device_info *dev_info;
951 	struct rte_eventdev *dev;
952 	uint32_t cap;
953 	int ret;
954 
955 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
956 
957 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
958 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
959 		return -EINVAL;
960 	}
961 
962 	adapter = eca_id_to_adapter(id);
963 	if (adapter == NULL)
964 		return -EINVAL;
965 
966 	dev = &rte_eventdevs[adapter->eventdev_id];
967 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
968 						cdev_id,
969 						&cap);
970 	if (ret) {
971 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
972 			" cdev %" PRIu8, id, cdev_id);
973 		return ret;
974 	}
975 
976 	if (conf == NULL) {
977 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
978 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
979 					 cdev_id);
980 			return -EINVAL;
981 		}
982 	} else {
983 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
984 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
985 				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
986 						 " dev %" PRIu8 " cdev %" PRIu8, id,
987 						 cdev_id);
988 				return -ENOTSUP;
989 			}
990 
991 			ret = rte_event_crypto_adapter_vector_limits_get(
992 				adapter->eventdev_id, cdev_id, &limits);
993 			if (ret < 0) {
994 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
995 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
996 						 id, cdev_id);
997 				return -EINVAL;
998 			}
999 
1000 			if (conf->vector_sz < limits.min_sz ||
1001 			    conf->vector_sz > limits.max_sz ||
1002 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1003 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1004 			    conf->vector_mp == NULL) {
1005 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1006 						" dev %" PRIu8 " cdev %" PRIu8,
1007 						id, cdev_id);
1008 				return -EINVAL;
1009 			}
1010 
1011 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1012 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1013 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1014 						" dev %" PRIu8 " cdev %" PRIu8,
1015 						id, cdev_id);
1016 				return -EINVAL;
1017 			}
1018 		}
1019 	}
1020 
1021 	dev_info = &adapter->cdevs[cdev_id];
1022 
1023 	if (queue_pair_id != -1 &&
1024 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1025 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1026 				 (uint16_t)queue_pair_id);
1027 		return -EINVAL;
1028 	}
1029 
1030 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1031 	 * no need of service core as HW supports event forward capability.
1032 	 */
1033 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1034 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1035 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1036 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1037 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1038 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1039 			return -ENOTSUP;
1040 		if (dev_info->qpairs == NULL) {
1041 			dev_info->qpairs =
1042 			    rte_zmalloc_socket(adapter->mem_name,
1043 					dev_info->dev->data->nb_queue_pairs *
1044 					sizeof(struct crypto_queue_pair_info),
1045 					0, adapter->socket_id);
1046 			if (dev_info->qpairs == NULL)
1047 				return -ENOMEM;
1048 		}
1049 
1050 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1051 				dev_info->dev,
1052 				queue_pair_id,
1053 				conf);
1054 		if (ret)
1055 			return ret;
1056 
1057 		else
1058 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1059 					   queue_pair_id, 1);
1060 	}
1061 
1062 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1063 	 * or SW adapter, initiate services so the application can choose
1064 	 * whichever way it wants to use the adapter.
1065 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1066 	 *         Application may want to use one of the below two modes
1067 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1068 	 *          b. OP_NEW mode -> HW Dequeue
1069 	 * Case 2: No HW caps, use SW adapter
1070 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1071 	 *          b. OP_NEW mode -> SW Dequeue
1072 	 */
1073 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1074 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1075 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1076 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1077 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1078 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1079 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1080 		rte_spinlock_lock(&adapter->lock);
1081 		ret = eca_init_service(adapter, id);
1082 		if (ret == 0)
1083 			ret = eca_add_queue_pair(adapter, cdev_id,
1084 						 queue_pair_id);
1085 		rte_spinlock_unlock(&adapter->lock);
1086 
1087 		if (ret)
1088 			return ret;
1089 
1090 		rte_service_component_runstate_set(adapter->service_id, 1);
1091 	}
1092 
1093 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1094 		queue_pair_id, conf);
1095 	return 0;
1096 }
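
/*
 * Illustrative sketch of the event vector path validated above, under the
 * assumption that the PMD reports RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR.
 * The evdev_id, cdev_id, qp_id, id and mempool size values are placeholders.
 *
 *	struct rte_event_crypto_adapter_vector_limits lim;
 *	struct rte_event_crypto_adapter_queue_conf qconf;
 *
 *	memset(&qconf, 0, sizeof(qconf));
 *	rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id, &lim);
 *	qconf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
 *	qconf.vector_sz = lim.min_sz;
 *	qconf.vector_timeout_ns = lim.min_timeout_ns;
 *	qconf.vector_mp = rte_event_vector_pool_create("crypto_vec_pool",
 *						       1024, 0, lim.min_sz,
 *						       rte_socket_id());
 *	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &qconf);
 *
 * The conf is rejected above unless vector_sz and vector_timeout_ns fall
 * within the reported limits and vector_mp elements are large enough to
 * hold a rte_event_vector plus vector_sz pointers.
 */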
1097 
1098 int
1099 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1100 					int32_t queue_pair_id)
1101 {
1102 	struct event_crypto_adapter *adapter;
1103 	struct crypto_device_info *dev_info;
1104 	struct rte_eventdev *dev;
1105 	int ret;
1106 	uint32_t cap;
1107 	uint16_t i;
1108 
1109 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1110 
1111 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1112 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1113 		return -EINVAL;
1114 	}
1115 
1116 	adapter = eca_id_to_adapter(id);
1117 	if (adapter == NULL)
1118 		return -EINVAL;
1119 
1120 	dev = &rte_eventdevs[adapter->eventdev_id];
1121 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1122 						cdev_id,
1123 						&cap);
1124 	if (ret)
1125 		return ret;
1126 
1127 	dev_info = &adapter->cdevs[cdev_id];
1128 
1129 	if (queue_pair_id != -1 &&
1130 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1131 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1132 				 (uint16_t)queue_pair_id);
1133 		return -EINVAL;
1134 	}
1135 
1136 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1137 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1138 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1139 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1140 			return -ENOTSUP;
1141 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1142 						dev_info->dev,
1143 						queue_pair_id);
1144 		if (ret == 0) {
1145 			eca_update_qp_info(adapter,
1146 					&adapter->cdevs[cdev_id],
1147 					queue_pair_id,
1148 					0);
1149 			if (dev_info->num_qpairs == 0) {
1150 				rte_free(dev_info->qpairs);
1151 				dev_info->qpairs = NULL;
1152 			}
1153 		}
1154 	} else {
1155 		if (adapter->nb_qps == 0)
1156 			return 0;
1157 
1158 		rte_spinlock_lock(&adapter->lock);
1159 		if (queue_pair_id == -1) {
1160 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1161 				i++)
1162 				eca_update_qp_info(adapter, dev_info,
1163 							i, 0);
1164 		} else {
1165 			eca_update_qp_info(adapter, dev_info,
1166 						(uint16_t)queue_pair_id, 0);
1167 		}
1168 
1169 		if (dev_info->num_qpairs == 0) {
1170 			rte_free(dev_info->qpairs);
1171 			dev_info->qpairs = NULL;
1172 		}
1173 
1174 		rte_spinlock_unlock(&adapter->lock);
1175 		rte_service_component_runstate_set(adapter->service_id,
1176 				adapter->nb_qps);
1177 	}
1178 
1179 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1180 		queue_pair_id, ret);
1181 	return ret;
1182 }
1183 
1184 static int
1185 eca_adapter_ctrl(uint8_t id, int start)
1186 {
1187 	struct event_crypto_adapter *adapter;
1188 	struct crypto_device_info *dev_info;
1189 	struct rte_eventdev *dev;
1190 	uint32_t i;
1191 	int use_service;
1192 	int stop = !start;
1193 
1194 	use_service = 0;
1195 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1196 	adapter = eca_id_to_adapter(id);
1197 	if (adapter == NULL)
1198 		return -EINVAL;
1199 
1200 	dev = &rte_eventdevs[adapter->eventdev_id];
1201 
1202 	for (i = 0; i < rte_cryptodev_count(); i++) {
1203 		dev_info = &adapter->cdevs[i];
1204 		/* if start, check for num queue pairs */
1205 		if (start && !dev_info->num_qpairs)
1206 			continue;
1207 		/* if stop check if dev has been started */
1208 		if (stop && !dev_info->dev_started)
1209 			continue;
1210 		use_service |= !dev_info->internal_event_port;
1211 		dev_info->dev_started = start;
1212 		if (dev_info->internal_event_port == 0)
1213 			continue;
1214 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1215 						dev_info->dev) :
1216 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1217 						dev_info->dev);
1218 	}
1219 
1220 	if (use_service)
1221 		rte_service_runstate_set(adapter->service_id, start);
1222 
1223 	return 0;
1224 }
1225 
1226 int
1227 rte_event_crypto_adapter_start(uint8_t id)
1228 {
1229 	struct event_crypto_adapter *adapter;
1230 
1231 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1232 	adapter = eca_id_to_adapter(id);
1233 	if (adapter == NULL)
1234 		return -EINVAL;
1235 
1236 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1237 	return eca_adapter_ctrl(id, 1);
1238 }
1239 
1240 int
1241 rte_event_crypto_adapter_stop(uint8_t id)
1242 {
1243 	rte_eventdev_trace_crypto_adapter_stop(id);
1244 	return eca_adapter_ctrl(id, 0);
1245 }
1246 
1247 int
1248 rte_event_crypto_adapter_stats_get(uint8_t id,
1249 				struct rte_event_crypto_adapter_stats *stats)
1250 {
1251 	struct event_crypto_adapter *adapter;
1252 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1253 	struct rte_event_crypto_adapter_stats dev_stats;
1254 	struct rte_eventdev *dev;
1255 	struct crypto_device_info *dev_info;
1256 	uint32_t i;
1257 	int ret;
1258 
1259 	if (eca_memzone_lookup())
1260 		return -ENOMEM;
1261 
1262 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1263 
1264 	adapter = eca_id_to_adapter(id);
1265 	if (adapter == NULL || stats == NULL)
1266 		return -EINVAL;
1267 
1268 	dev = &rte_eventdevs[adapter->eventdev_id];
1269 	memset(stats, 0, sizeof(*stats));
1270 	for (i = 0; i < rte_cryptodev_count(); i++) {
1271 		dev_info = &adapter->cdevs[i];
1272 		if (dev_info->internal_event_port == 0 ||
1273 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1274 			continue;
1275 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1276 						dev_info->dev,
1277 						&dev_stats);
1278 		if (ret)
1279 			continue;
1280 
1281 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1282 		dev_stats_sum.event_enq_count +=
1283 			dev_stats.event_enq_count;
1284 	}
1285 
1286 	if (adapter->service_inited)
1287 		*stats = adapter->crypto_stats;
1288 
1289 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1290 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1291 
1292 	return 0;
1293 }
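
/*
 * Illustrative sketch: an application can poll the combined SW and PMD
 * counters collected above.  The adapter id and output format are
 * assumptions.
 *
 *	struct rte_event_crypto_adapter_stats stats;
 *
 *	if (rte_event_crypto_adapter_stats_get(id, &stats) == 0)
 *		printf("deq %" PRIu64 " enq %" PRIu64 " enq retries %" PRIu64 "\n",
 *		       stats.crypto_deq_count, stats.event_enq_count,
 *		       stats.event_enq_retry_count);
 */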
1294 
1295 int
1296 rte_event_crypto_adapter_stats_reset(uint8_t id)
1297 {
1298 	struct event_crypto_adapter *adapter;
1299 	struct crypto_device_info *dev_info;
1300 	struct rte_eventdev *dev;
1301 	uint32_t i;
1302 
1303 	if (eca_memzone_lookup())
1304 		return -ENOMEM;
1305 
1306 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1307 
1308 	adapter = eca_id_to_adapter(id);
1309 	if (adapter == NULL)
1310 		return -EINVAL;
1311 
1312 	dev = &rte_eventdevs[adapter->eventdev_id];
1313 	for (i = 0; i < rte_cryptodev_count(); i++) {
1314 		dev_info = &adapter->cdevs[i];
1315 		if (dev_info->internal_event_port == 0 ||
1316 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1317 			continue;
1318 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1319 						dev_info->dev);
1320 	}
1321 
1322 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1323 	return 0;
1324 }
1325 
1326 int
1327 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1328 {
1329 	struct event_crypto_adapter *adapter;
1330 
1331 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1332 
1333 	adapter = eca_id_to_adapter(id);
1334 	if (adapter == NULL || service_id == NULL)
1335 		return -EINVAL;
1336 
1337 	if (adapter->service_inited)
1338 		*service_id = adapter->service_id;
1339 
1340 	return adapter->service_inited ? 0 : -ESRCH;
1341 }
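
/*
 * Illustrative sketch: when the adapter uses the SW service path
 * (service_inited is set), the application must map that service to a
 * service lcore before the service will actually run.  The lcore id below
 * is an assumption.
 *
 *	uint32_t service_id;
 *	const uint32_t slcore = 2;	// lcore reserved for services
 *
 *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_lcore_add(slcore);
 *		rte_service_map_lcore_set(service_id, slcore, 1);
 *		rte_service_lcore_start(slcore);
 *	}
 */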
1342 
1343 int
1344 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1345 {
1346 	struct event_crypto_adapter *adapter;
1347 
1348 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1349 
1350 	adapter = eca_id_to_adapter(id);
1351 	if (adapter == NULL || event_port_id == NULL)
1352 		return -EINVAL;
1353 
1354 	*event_port_id = adapter->event_port_id;
1355 
1356 	return 0;
1357 }
1358 
1359 int
1360 rte_event_crypto_adapter_vector_limits_get(
1361 	uint8_t dev_id, uint16_t cdev_id,
1362 	struct rte_event_crypto_adapter_vector_limits *limits)
1363 {
1364 	struct rte_cryptodev *cdev;
1365 	struct rte_eventdev *dev;
1366 	uint32_t cap;
1367 	int ret;
1368 
1369 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1370 
1371 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1372 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu16, cdev_id);
1373 		return -EINVAL;
1374 	}
1375 
1376 	if (limits == NULL) {
1377 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1378 		return -EINVAL;
1379 	}
1380 
1381 	dev = &rte_eventdevs[dev_id];
1382 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1383 
1384 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1385 	if (ret) {
1386 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1387 				 " cdev %" PRIu16, dev_id, cdev_id);
1388 		return ret;
1389 	}
1390 
1391 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1392 		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1393 				 " dev %" PRIu8 " cdev %" PRIu16, dev_id, cdev_id);
1394 		return -ENOTSUP;
1395 	}
1396 
1397 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1398 		return -ENOTSUP;
1399 
1400 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1401 		dev, cdev, limits);
1402 }
1403