xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision 62774b78a84e9fa5df56d04cffed69bef8c901f1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 /* MAX_OPS_IN_BUFFER is the size of a batch of dequeued events */
29 #define MAX_OPS_IN_BUFFER BATCH_SIZE
30 
31 /* CRYPTO_ADAPTER_OPS_BUFFER_SZ accommodates MAX_OPS_IN_BUFFER ops plus
32  * additional space for one more batch
33  */
34 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (MAX_OPS_IN_BUFFER + BATCH_SIZE)
35 
36 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
37 
38 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
39  * iterations of eca_crypto_adapter_enq_run()
40  */
41 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
42 
43 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
44 
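/* Ops are added at the tail and drained from the head. The flush helpers
 * below only drain the contiguous region up to the wrap point, so a wrapped
 * buffer may need more than one flush call to empty; head and tail are reset
 * to zero once count drops back to zero.
 */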
45 struct crypto_ops_circular_buffer {
46 	/* index of head element in circular buffer */
47 	uint16_t head;
48 	/* index of tail element in circular buffer */
49 	uint16_t tail;
50 	/* number of elements in buffer */
51 	uint16_t count;
52 	/* size of circular buffer */
53 	uint16_t size;
54 	/* Pointer to hold rte_crypto_ops for batching */
55 	struct rte_crypto_op **op_buffer;
56 } __rte_cache_aligned;
57 
58 struct event_crypto_adapter {
59 	/* Event device identifier */
60 	uint8_t eventdev_id;
61 	/* Event port identifier */
62 	uint8_t event_port_id;
63 	/* Store event port's implicit release capability */
64 	uint8_t implicit_release_disabled;
65 	/* Flag to indicate backpressure at cryptodev
66 	 * Stop further dequeuing events from eventdev
67 	 */
68 	bool stop_enq_to_cryptodev;
69 	/* Max crypto ops processed in any service function invocation */
70 	uint32_t max_nb;
71 	/* Lock to serialize config updates with service function */
72 	rte_spinlock_t lock;
73 	/* Next crypto device to be processed */
74 	uint16_t next_cdev_id;
75 	/* Per crypto device structure */
76 	struct crypto_device_info *cdevs;
77 	/* Loop counter to flush crypto ops */
78 	uint16_t transmit_loop_count;
79 	/* Circular buffer for batching crypto ops to eventdev */
80 	struct crypto_ops_circular_buffer ebuf;
81 	/* Per instance stats structure */
82 	struct rte_event_crypto_adapter_stats crypto_stats;
83 	/* Configuration callback for rte_service configuration */
84 	rte_event_crypto_adapter_conf_cb conf_cb;
85 	/* Configuration callback argument */
86 	void *conf_arg;
87 	/* Set if default_cb is being used */
88 	int default_cb_arg;
89 	/* Service initialization state */
90 	uint8_t service_inited;
91 	/* Memory allocation name */
92 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
93 	/* Socket identifier cached from eventdev */
94 	int socket_id;
95 	/* Per adapter EAL service */
96 	uint32_t service_id;
97 	/* No. of queue pairs configured */
98 	uint16_t nb_qps;
99 	/* Adapter mode */
100 	enum rte_event_crypto_adapter_mode mode;
101 } __rte_cache_aligned;
102 
103 /* Per crypto device information */
104 struct crypto_device_info {
105 	/* Pointer to cryptodev */
106 	struct rte_cryptodev *dev;
107 	/* Pointer to queue pair info */
108 	struct crypto_queue_pair_info *qpairs;
109 	/* Next queue pair to be processed */
110 	uint16_t next_queue_pair_id;
111 	/* Set to indicate cryptodev->eventdev packet
112 	 * transfer uses a hardware mechanism
113 	 */
114 	uint8_t internal_event_port;
115 	/* Set to indicate processing has been started */
116 	uint8_t dev_started;
117 	/* If num_qpairs > 0, the start callback will
118 	 * be invoked if not already invoked
119 	 */
120 	uint16_t num_qpairs;
121 } __rte_cache_aligned;
122 
123 /* Per queue pair information */
124 struct crypto_queue_pair_info {
125 	/* Set to indicate queue pair is enabled */
126 	bool qp_enabled;
127 	/* Circular buffer for batching crypto ops to cdev */
128 	struct crypto_ops_circular_buffer cbuf;
129 } __rte_cache_aligned;
130 
131 static struct event_crypto_adapter **event_crypto_adapter;
132 
133 /* Macros to check for valid adapter */
134 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
135 	if (!eca_valid_id(id)) { \
136 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
137 		return retval; \
138 	} \
139 } while (0)
140 
141 static inline int
142 eca_valid_id(uint8_t id)
143 {
144 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
145 }
146 
147 static int
148 eca_init(void)
149 {
150 	const struct rte_memzone *mz;
151 	unsigned int sz;
152 
153 	sz = sizeof(*event_crypto_adapter) *
154 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
155 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
156 
157 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
158 	if (mz == NULL) {
159 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
160 						 rte_socket_id(), 0,
161 						 RTE_CACHE_LINE_SIZE);
162 		if (mz == NULL) {
163 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
164 					PRId32, rte_errno);
165 			return -rte_errno;
166 		}
167 	}
168 
169 	event_crypto_adapter = mz->addr;
170 	return 0;
171 }
172 
173 static int
174 eca_memzone_lookup(void)
175 {
176 	const struct rte_memzone *mz;
177 
178 	if (event_crypto_adapter == NULL) {
179 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
180 		if (mz == NULL)
181 			return -ENOMEM;
182 
183 		event_crypto_adapter = mz->addr;
184 	}
185 
186 	return 0;
187 }
188 
189 static inline bool
190 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
191 {
192 	return bufp->count >= BATCH_SIZE;
193 }
194 
195 static inline bool
196 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
197 {
198 	/* circular buffer can have at most MAX_OPS_IN_BUFFER ops */
199 	return (bufp->size - bufp->count) >= MAX_OPS_IN_BUFFER;
200 }
201 
202 static inline void
203 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
204 {
205 	rte_free(bufp->op_buffer);
206 }
207 
208 static inline int
209 eca_circular_buffer_init(const char *name,
210 			 struct crypto_ops_circular_buffer *bufp,
211 			 uint16_t sz)
212 {
213 	bufp->op_buffer = rte_zmalloc(name,
214 				      sizeof(struct rte_crypto_op *) * sz,
215 				      0);
216 	if (bufp->op_buffer == NULL)
217 		return -ENOMEM;
218 
219 	bufp->size = sz;
220 	return 0;
221 }
222 
223 static inline int
224 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
225 			struct rte_crypto_op *op)
226 {
227 	uint16_t *tailp = &bufp->tail;
228 
229 	bufp->op_buffer[*tailp] = op;
230 	/* circular buffer, go round */
231 	*tailp = (*tailp + 1) % bufp->size;
232 	bufp->count++;
233 
234 	return 0;
235 }
236 
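/* Flush the contiguous region starting at the head of the circular buffer to
 * the given cryptodev queue pair. *nb_ops_flushed is set to the number of ops
 * the cryptodev accepted; returns 0 when all of them were accepted and -1
 * when the cryptodev back-pressured.
 */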
237 static inline int
238 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
239 				  uint8_t cdev_id, uint16_t qp_id,
240 				  uint16_t *nb_ops_flushed)
241 {
242 	uint16_t n = 0;
243 	uint16_t *headp = &bufp->head;
244 	uint16_t *tailp = &bufp->tail;
245 	struct rte_crypto_op **ops = bufp->op_buffer;
246 
247 	if (*tailp > *headp)
248 		n = *tailp - *headp;
249 	else if (*tailp < *headp)
250 		n = bufp->size - *headp;
251 	else {
252 		*nb_ops_flushed = 0;
253 		return 0;  /* buffer empty */
254 	}
255 
256 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
257 						      &ops[*headp], n);
258 	bufp->count -= *nb_ops_flushed;
259 	if (!bufp->count) {
260 		*headp = 0;
261 		*tailp = 0;
262 	} else
263 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
264 
265 	return *nb_ops_flushed == n ? 0 : -1;
266 }
267 
268 static inline struct event_crypto_adapter *
269 eca_id_to_adapter(uint8_t id)
270 {
271 	return event_crypto_adapter ?
272 		event_crypto_adapter[id] : NULL;
273 }
274 
275 static int
276 eca_default_config_cb(uint8_t id, uint8_t dev_id,
277 			struct rte_event_crypto_adapter_conf *conf, void *arg)
278 {
279 	struct rte_event_dev_config dev_conf;
280 	struct rte_eventdev *dev;
281 	uint8_t port_id;
282 	int started;
283 	int ret;
284 	struct rte_event_port_conf *port_conf = arg;
285 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
286 
287 	if (adapter == NULL)
288 		return -EINVAL;
289 
290 	dev = &rte_eventdevs[adapter->eventdev_id];
291 	dev_conf = dev->data->dev_conf;
292 
293 	started = dev->data->dev_started;
294 	if (started)
295 		rte_event_dev_stop(dev_id);
296 	port_id = dev_conf.nb_event_ports;
297 	dev_conf.nb_event_ports += 1;
298 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
299 		dev_conf.nb_single_link_event_port_queues += 1;
300 
301 	ret = rte_event_dev_configure(dev_id, &dev_conf);
302 	if (ret) {
303 		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
304 		if (started) {
305 			if (rte_event_dev_start(dev_id))
306 				return -EIO;
307 		}
308 		return ret;
309 	}
310 
311 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
312 	if (ret) {
313 		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
314 		return ret;
315 	}
316 
317 	conf->event_port_id = port_id;
318 	conf->max_nb = DEFAULT_MAX_NB;
319 	if (started)
320 		ret = rte_event_dev_start(dev_id);
321 
322 	adapter->default_cb_arg = 1;
323 	return ret;
324 }
325 
326 int
327 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
328 				rte_event_crypto_adapter_conf_cb conf_cb,
329 				enum rte_event_crypto_adapter_mode mode,
330 				void *conf_arg)
331 {
332 	struct event_crypto_adapter *adapter;
333 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
334 	int socket_id;
335 	uint8_t i;
336 	int ret;
337 
338 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
339 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
340 	if (conf_cb == NULL)
341 		return -EINVAL;
342 
343 	if (event_crypto_adapter == NULL) {
344 		ret = eca_init();
345 		if (ret)
346 			return ret;
347 	}
348 
349 	adapter = eca_id_to_adapter(id);
350 	if (adapter != NULL) {
351 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
352 		return -EEXIST;
353 	}
354 
355 	socket_id = rte_event_dev_socket_id(dev_id);
356 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
357 		 "rte_event_crypto_adapter_%d", id);
358 
359 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
360 			RTE_CACHE_LINE_SIZE, socket_id);
361 	if (adapter == NULL) {
362 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
363 		return -ENOMEM;
364 	}
365 
366 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
367 				     &adapter->ebuf,
368 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
369 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
370 		rte_free(adapter);
371 		return -ENOMEM;
372 	}
373 
374 	adapter->eventdev_id = dev_id;
375 	adapter->socket_id = socket_id;
376 	adapter->conf_cb = conf_cb;
377 	adapter->conf_arg = conf_arg;
378 	adapter->mode = mode;
379 	strcpy(adapter->mem_name, mem_name);
380 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
381 					rte_cryptodev_count() *
382 					sizeof(struct crypto_device_info), 0,
383 					socket_id);
384 	if (adapter->cdevs == NULL) {
385 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
386 		eca_circular_buffer_free(&adapter->ebuf);
387 		rte_free(adapter);
388 		return -ENOMEM;
389 	}
390 
391 	rte_spinlock_init(&adapter->lock);
392 	for (i = 0; i < rte_cryptodev_count(); i++)
393 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
394 
395 	event_crypto_adapter[id] = adapter;
396 
397 	return 0;
398 }
399 
400 
401 int
402 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
403 				struct rte_event_port_conf *port_config,
404 				enum rte_event_crypto_adapter_mode mode)
405 {
406 	struct rte_event_port_conf *pc;
407 	int ret;
408 
409 	if (port_config == NULL)
410 		return -EINVAL;
411 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
412 
413 	pc = rte_malloc(NULL, sizeof(*pc), 0);
414 	if (pc == NULL)
415 		return -ENOMEM;
416 	*pc = *port_config;
417 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
418 						  eca_default_config_cb,
419 						  mode,
420 						  pc);
421 	if (ret)
422 		rte_free(pc);
423 
424 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, port_config, mode, ret);
425 
426 	return ret;
427 }
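
/* A minimal usage sketch (not part of the library). It assumes the
 * eventdev/cryptodev pair does not advertise
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND, so the queue pair
 * configuration may be NULL:
 *
 *	struct rte_event_port_conf port_conf = { .new_event_threshold = 4096,
 *						 .dequeue_depth = 16,
 *						 .enqueue_depth = 16 };
 *
 *	rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(0, cdev_id, -1, NULL);
 *	rte_event_crypto_adapter_start(0);
 */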
428 
429 int
430 rte_event_crypto_adapter_free(uint8_t id)
431 {
432 	struct event_crypto_adapter *adapter;
433 
434 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
435 
436 	adapter = eca_id_to_adapter(id);
437 	if (adapter == NULL)
438 		return -EINVAL;
439 
440 	if (adapter->nb_qps) {
441 		RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
442 				adapter->nb_qps);
443 		return -EBUSY;
444 	}
445 
446 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
447 	if (adapter->default_cb_arg)
448 		rte_free(adapter->conf_arg);
449 	rte_free(adapter->cdevs);
450 	rte_free(adapter);
451 	event_crypto_adapter[id] = NULL;
452 
453 	return 0;
454 }
455 
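/* Take the crypto ops carried by events dequeued from the event port, buffer
 * them in the per-queue-pair circular buffers and flush full batches to the
 * cryptodevs. Ops without session event metadata, or destined to a queue pair
 * that is not enabled, are freed and dropped. Returns the number of ops
 * actually enqueued to the cryptodevs.
 */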
456 static inline unsigned int
457 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
458 		     unsigned int cnt)
459 {
460 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
461 	union rte_event_crypto_metadata *m_data = NULL;
462 	struct crypto_queue_pair_info *qp_info = NULL;
463 	struct rte_crypto_op *crypto_op;
464 	unsigned int i, n;
465 	uint16_t qp_id, nb_enqueued = 0;
466 	uint8_t cdev_id;
467 	int ret;
468 
469 	ret = 0;
470 	n = 0;
471 	stats->event_deq_count += cnt;
472 
473 	for (i = 0; i < cnt; i++) {
474 		crypto_op = ev[i].event_ptr;
475 		if (crypto_op == NULL)
476 			continue;
477 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
478 		if (m_data == NULL) {
479 			rte_pktmbuf_free(crypto_op->sym->m_src);
480 			rte_crypto_op_free(crypto_op);
481 			continue;
482 		}
483 
484 		cdev_id = m_data->request_info.cdev_id;
485 		qp_id = m_data->request_info.queue_pair_id;
486 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
487 		if (!qp_info->qp_enabled) {
488 			rte_pktmbuf_free(crypto_op->sym->m_src);
489 			rte_crypto_op_free(crypto_op);
490 			continue;
491 		}
492 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
493 
494 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
495 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
496 								cdev_id,
497 								qp_id,
498 								&nb_enqueued);
499 			stats->crypto_enq_count += nb_enqueued;
500 			n += nb_enqueued;
501 
502 			/*
503 			 * If some crypto ops failed to flush to cdev and
504 			 * space for another batch is not available, stop
505 			 * dequeue from eventdev momentarily
506 			 */
507 			if (unlikely(ret < 0 &&
508 				!eca_circular_buffer_space_for_batch(
509 							&qp_info->cbuf)))
510 				adapter->stop_enq_to_cryptodev = true;
511 		}
512 	}
513 
514 	return n;
515 }
516 
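/* Flush the buffered ops of every enabled queue pair of one cryptodev.
 * Returns the number of ops enqueued to the device; *nb_ops_flushed
 * accumulates the ops still left in the circular buffers after the flush,
 * which the caller uses to decide whether eventdev dequeue can resume.
 */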
517 static unsigned int
518 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
519 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
520 {
521 	struct crypto_device_info *curr_dev;
522 	struct crypto_queue_pair_info *curr_queue;
523 	struct rte_cryptodev *dev;
524 	uint16_t nb = 0, nb_enqueued = 0;
525 	uint16_t qp;
526 
527 	curr_dev = &adapter->cdevs[cdev_id];
528 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
529 
530 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
531 
532 		curr_queue = &curr_dev->qpairs[qp];
533 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
534 			continue;
535 
536 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
537 						  cdev_id,
538 						  qp,
539 						  &nb_enqueued);
540 		*nb_ops_flushed += curr_queue->cbuf.count;
541 		nb += nb_enqueued;
542 	}
543 
544 	return nb;
545 }
546 
547 static unsigned int
548 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
549 {
550 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
551 	uint8_t cdev_id;
552 	uint16_t nb_enqueued = 0;
553 	uint16_t nb_ops_flushed = 0;
554 	uint16_t num_cdev = rte_cryptodev_count();
555 
556 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
557 		nb_enqueued += eca_crypto_cdev_flush(adapter,
558 						    cdev_id,
559 						    &nb_ops_flushed);
560 	/*
561 	 * Enable dequeue from eventdev if all ops from the circular
562 	 * buffers have been flushed to the cdevs
563 	 */
564 	if (!nb_ops_flushed)
565 		adapter->stop_enq_to_cryptodev = false;
566 
567 	stats->crypto_enq_count += nb_enqueued;
568 
569 	return nb_enqueued;
570 }
571 
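/* Dequeue up to max_enq events from the adapter's event port and move the
 * crypto ops they carry towards the cryptodevs. Only used in OP_FORWARD mode;
 * in OP_NEW mode the application enqueues ops to the cryptodev itself, so the
 * function returns immediately.
 */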
572 static int
573 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
574 			   unsigned int max_enq)
575 {
576 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
577 	struct rte_event ev[BATCH_SIZE];
578 	unsigned int nb_enq, nb_enqueued;
579 	uint16_t n;
580 	uint8_t event_dev_id = adapter->eventdev_id;
581 	uint8_t event_port_id = adapter->event_port_id;
582 
583 	nb_enqueued = 0;
584 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
585 		return 0;
586 
587 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
588 
589 		if (unlikely(adapter->stop_enq_to_cryptodev)) {
590 			nb_enqueued += eca_crypto_enq_flush(adapter);
591 
592 			if (unlikely(adapter->stop_enq_to_cryptodev))
593 				break;
594 		}
595 
596 		stats->event_poll_count++;
597 		n = rte_event_dequeue_burst(event_dev_id,
598 					    event_port_id, ev, BATCH_SIZE, 0);
599 
600 		if (!n)
601 			break;
602 
603 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
604 	}
605 
606 	if ((++adapter->transmit_loop_count &
607 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
608 		nb_enqueued += eca_crypto_enq_flush(adapter);
609 	}
610 
611 	return nb_enqueued;
612 }
613 
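/* Build events from the response metadata stored with each completed crypto
 * op and enqueue them to the adapter's event port, retrying a bounded number
 * of times on back-pressure. Ops without metadata are freed and dropped.
 */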
614 static inline uint16_t
615 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
616 		  struct rte_crypto_op **ops, uint16_t num)
617 {
618 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
619 	union rte_event_crypto_metadata *m_data = NULL;
620 	uint8_t event_dev_id = adapter->eventdev_id;
621 	uint8_t event_port_id = adapter->event_port_id;
622 	struct rte_event events[BATCH_SIZE];
623 	uint16_t nb_enqueued, nb_ev;
624 	uint8_t retry;
625 	uint8_t i;
626 
627 	nb_ev = 0;
628 	retry = 0;
629 	nb_enqueued = 0;
630 	num = RTE_MIN(num, BATCH_SIZE);
631 	for (i = 0; i < num; i++) {
632 		struct rte_event *ev = &events[nb_ev];
633 
634 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
635 		if (unlikely(m_data == NULL)) {
636 			rte_pktmbuf_free(ops[i]->sym->m_src);
637 			rte_crypto_op_free(ops[i]);
638 			continue;
639 		}
640 
641 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
642 		ev->event_ptr = ops[i];
643 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
644 		if (adapter->implicit_release_disabled)
645 			ev->op = RTE_EVENT_OP_FORWARD;
646 		else
647 			ev->op = RTE_EVENT_OP_NEW;
		nb_ev++;	/* count the event only once it is fully populated */
648 	}
649 
650 	do {
651 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
652 						  event_port_id,
653 						  &events[nb_enqueued],
654 						  nb_ev - nb_enqueued);
655 
656 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
657 		 nb_enqueued < nb_ev);
658 
659 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
660 	stats->event_enq_count += nb_enqueued;
661 	stats->event_enq_retry_count += retry - 1;
662 
663 	return nb_enqueued;
664 }
665 
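/* Drain the contiguous region at the head of the circular buffer to the event
 * device. Returns 1 while ops remain buffered (the caller keeps calling) and
 * 0 once the buffer is empty.
 */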
666 static int
667 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
668 				   struct crypto_ops_circular_buffer *bufp)
669 {
670 	uint16_t n = 0, nb_ops_flushed;
671 	uint16_t *headp = &bufp->head;
672 	uint16_t *tailp = &bufp->tail;
673 	struct rte_crypto_op **ops = bufp->op_buffer;
674 
675 	if (*tailp > *headp)
676 		n = *tailp - *headp;
677 	else if (*tailp < *headp)
678 		n = bufp->size - *headp;
679 	else
680 		return 0;  /* buffer empty */
681 
682 	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
683 	bufp->count -= nb_ops_flushed;
684 	if (!bufp->count) {
685 		*headp = 0;
686 		*tailp = 0;
687 		return 0;  /* buffer empty */
688 	}
689 
690 	*headp = (*headp + nb_ops_flushed) % bufp->size;
691 	return 1;
692 }
693 
694 
695 static void
696 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
697 {
698 	if (likely(adapter->ebuf.count == 0))
699 		return;
700 
701 	while (eca_circular_buffer_flush_to_evdev(adapter,
702 						  &adapter->ebuf))
703 		;
704 }
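
/* Poll completed crypto ops from every enabled queue pair of each cryptodev
 * in round-robin order and forward them to the eventdev. Ops that cannot be
 * enqueued immediately are parked in the adapter's ebuf and flushed on the
 * next invocation.
 */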
705 static inline unsigned int
706 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
707 			   unsigned int max_deq)
708 {
709 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
710 	struct crypto_device_info *curr_dev;
711 	struct crypto_queue_pair_info *curr_queue;
712 	struct rte_crypto_op *ops[BATCH_SIZE];
713 	uint16_t n, nb_deq, nb_enqueued, i;
714 	struct rte_cryptodev *dev;
715 	uint8_t cdev_id;
716 	uint16_t qp, dev_qps;
717 	bool done;
718 	uint16_t num_cdev = rte_cryptodev_count();
719 
720 	nb_deq = 0;
721 	eca_ops_buffer_flush(adapter);
722 
723 	do {
724 		done = true;
725 
726 		for (cdev_id = adapter->next_cdev_id;
727 			cdev_id < num_cdev; cdev_id++) {
728 			uint16_t queues = 0;
729 
730 			curr_dev = &adapter->cdevs[cdev_id];
731 			dev = curr_dev->dev;
732 			if (unlikely(dev == NULL))
733 				continue;
734 
735 			dev_qps = dev->data->nb_queue_pairs;
736 
737 			for (qp = curr_dev->next_queue_pair_id;
738 				queues < dev_qps; qp = (qp + 1) % dev_qps,
739 				queues++) {
740 
741 				curr_queue = &curr_dev->qpairs[qp];
742 				if (unlikely(curr_queue == NULL ||
743 				    !curr_queue->qp_enabled))
744 					continue;
745 
746 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
747 					ops, BATCH_SIZE);
748 				if (!n)
749 					continue;
750 
751 				done = false;
752 				nb_enqueued = 0;
753 
754 				stats->crypto_deq_count += n;
755 
756 				if (unlikely(!adapter->ebuf.count))
757 					nb_enqueued = eca_ops_enqueue_burst(
758 							adapter, ops, n);
759 
760 				if (likely(nb_enqueued == n))
761 					goto check;
762 
763 				/* Failed to enqueue events case */
764 				for (i = nb_enqueued; i < n; i++)
765 					eca_circular_buffer_add(
766 						&adapter->ebuf,
767 						ops[i]);
768 
769 check:
770 				nb_deq += n;
771 
772 				if (nb_deq >= max_deq) {
773 					if ((qp + 1) == dev_qps) {
774 						adapter->next_cdev_id =
775 							(cdev_id + 1)
776 							% num_cdev;
777 					}
778 					curr_dev->next_queue_pair_id = (qp + 1)
779 						% dev->data->nb_queue_pairs;
780 
781 					return nb_deq;
782 				}
783 			}
784 		}
785 		adapter->next_cdev_id = 0;
786 	} while (done == false);
787 	return nb_deq;
788 }
789 
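/* Alternate between the dequeue (cryptodev to eventdev) and enqueue (eventdev
 * to cryptodev) paths until the max_ops budget is consumed. When no work was
 * found at all, rte_event_maintain() is called and -EAGAIN returned so that
 * the service core can back off.
 */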
790 static int
791 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
792 		       unsigned int max_ops)
793 {
794 	unsigned int ops_left = max_ops;
795 
796 	while (ops_left > 0) {
797 		unsigned int e_cnt, d_cnt;
798 
799 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
800 		ops_left -= RTE_MIN(ops_left, e_cnt);
801 
802 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
803 		ops_left -= RTE_MIN(ops_left, d_cnt);
804 
805 		if (e_cnt == 0 && d_cnt == 0)
806 			break;
807 
808 	}
809 
810 	if (ops_left == max_ops) {
811 		rte_event_maintain(adapter->eventdev_id,
812 				   adapter->event_port_id, 0);
813 		return -EAGAIN;
814 	} else
815 		return 0;
816 }
817 
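/* Service function executed by the service core. The iteration is skipped if
 * another lcore already holds the adapter lock.
 */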
818 static int
819 eca_service_func(void *args)
820 {
821 	struct event_crypto_adapter *adapter = args;
822 	int ret;
823 
824 	if (rte_spinlock_trylock(&adapter->lock) == 0)
825 		return 0;
826 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
827 	rte_spinlock_unlock(&adapter->lock);
828 
829 	return ret;
830 }
831 
832 static int
833 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
834 {
835 	struct rte_event_crypto_adapter_conf adapter_conf;
836 	struct rte_service_spec service;
837 	int ret;
838 	uint32_t impl_rel;
839 
840 	if (adapter->service_inited)
841 		return 0;
842 
843 	memset(&service, 0, sizeof(service));
844 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
845 		"rte_event_crypto_adapter_%d", id);
846 	service.socket_id = adapter->socket_id;
847 	service.callback = eca_service_func;
848 	service.callback_userdata = adapter;
849 	/* Service function handles locking for queue add/del updates */
850 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
851 	ret = rte_service_component_register(&service, &adapter->service_id);
852 	if (ret) {
853 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
854 			service.name, ret);
855 		return ret;
856 	}
857 
858 	ret = adapter->conf_cb(id, adapter->eventdev_id,
859 		&adapter_conf, adapter->conf_arg);
860 	if (ret) {
861 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
862 			ret);
863 		return ret;
864 	}
865 
866 	adapter->max_nb = adapter_conf.max_nb;
867 	adapter->event_port_id = adapter_conf.event_port_id;
868 
869 	if (rte_event_port_attr_get(adapter->eventdev_id,
870 				adapter->event_port_id,
871 				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
872 				&impl_rel)) {
873 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
874 				 adapter->eventdev_id);
875 		eca_circular_buffer_free(&adapter->ebuf);
876 		rte_free(adapter);
877 		return -EINVAL;
878 	}
879 
880 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
881 	adapter->service_inited = 1;
882 
883 	return ret;
884 }
885 
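/* Enable or disable one queue pair (or all of them when queue_pair_id is -1)
 * and keep the per-device and per-adapter queue pair counts in sync.
 */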
886 static void
887 eca_update_qp_info(struct event_crypto_adapter *adapter,
888 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
889 		   uint8_t add)
890 {
891 	struct crypto_queue_pair_info *qp_info;
892 	int enabled;
893 	uint16_t i;
894 
895 	if (dev_info->qpairs == NULL)
896 		return;
897 
898 	if (queue_pair_id == -1) {
899 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
900 			eca_update_qp_info(adapter, dev_info, i, add);
901 	} else {
902 		qp_info = &dev_info->qpairs[queue_pair_id];
903 		enabled = qp_info->qp_enabled;
904 		if (add) {
905 			adapter->nb_qps += !enabled;
906 			dev_info->num_qpairs += !enabled;
907 		} else {
908 			adapter->nb_qps -= enabled;
909 			dev_info->num_qpairs -= enabled;
910 		}
911 		qp_info->qp_enabled = !!add;
912 	}
913 }
914 
915 static int
916 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
917 		   int queue_pair_id)
918 {
919 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
920 	struct crypto_queue_pair_info *qpairs;
921 	uint32_t i;
922 
923 	if (dev_info->qpairs == NULL) {
924 		dev_info->qpairs =
925 		    rte_zmalloc_socket(adapter->mem_name,
926 					dev_info->dev->data->nb_queue_pairs *
927 					sizeof(struct crypto_queue_pair_info),
928 					0, adapter->socket_id);
929 		if (dev_info->qpairs == NULL)
930 			return -ENOMEM;
931 
932 		qpairs = dev_info->qpairs;
933 
934 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
935 					     &qpairs->cbuf,
936 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
937 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
938 					 "buffer");
939 			rte_free(qpairs);
940 			return -ENOMEM;
941 		}
942 	}
943 
944 	if (queue_pair_id == -1) {
945 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
946 			eca_update_qp_info(adapter, dev_info, i, 1);
947 	} else
948 		eca_update_qp_info(adapter, dev_info,
949 					(uint16_t)queue_pair_id, 1);
950 
951 	return 0;
952 }
953 
954 int
955 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
956 			uint8_t cdev_id,
957 			int32_t queue_pair_id,
958 			const struct rte_event_crypto_adapter_queue_conf *conf)
959 {
960 	struct rte_event_crypto_adapter_vector_limits limits;
961 	struct event_crypto_adapter *adapter;
962 	struct crypto_device_info *dev_info;
963 	struct rte_eventdev *dev;
964 	uint32_t cap;
965 	int ret;
966 
967 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
968 
969 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
970 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
971 		return -EINVAL;
972 	}
973 
974 	adapter = eca_id_to_adapter(id);
975 	if (adapter == NULL)
976 		return -EINVAL;
977 
978 	dev = &rte_eventdevs[adapter->eventdev_id];
979 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
980 						cdev_id,
981 						&cap);
982 	if (ret) {
983 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
984 			" cdev %" PRIu8, id, cdev_id);
985 		return ret;
986 	}
987 
988 	if (conf == NULL) {
989 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
990 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
991 					 cdev_id);
992 			return -EINVAL;
993 		}
994 	} else {
995 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
996 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
997 				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
998 						 "dev %" PRIu8 " cdev %" PRIu8, id,
999 						 cdev_id);
1000 				return -ENOTSUP;
1001 			}
1002 
1003 			ret = rte_event_crypto_adapter_vector_limits_get(
1004 				adapter->eventdev_id, cdev_id, &limits);
1005 			if (ret < 0) {
1006 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
1007 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
1008 						 id, cdev_id);
1009 				return -EINVAL;
1010 			}
1011 
1012 			if (conf->vector_sz < limits.min_sz ||
1013 			    conf->vector_sz > limits.max_sz ||
1014 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1015 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1016 			    conf->vector_mp == NULL) {
1017 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1018 						" dev %" PRIu8 " cdev %" PRIu8,
1019 						id, cdev_id);
1020 				return -EINVAL;
1021 			}
1022 
1023 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1024 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1025 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1026 						" dev %" PRIu8 " cdev %" PRIu8,
1027 						id, cdev_id);
1028 				return -EINVAL;
1029 			}
1030 		}
1031 	}
1032 
1033 	dev_info = &adapter->cdevs[cdev_id];
1034 
1035 	if (queue_pair_id != -1 &&
1036 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1037 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1038 				 (uint16_t)queue_pair_id);
1039 		return -EINVAL;
1040 	}
1041 
1042 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1043 	 * there is no need for a service core as HW supports event forwarding.
1044 	 */
1045 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1046 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1047 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1048 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1049 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1050 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1051 			return -ENOTSUP;
1052 		if (dev_info->qpairs == NULL) {
1053 			dev_info->qpairs =
1054 			    rte_zmalloc_socket(adapter->mem_name,
1055 					dev_info->dev->data->nb_queue_pairs *
1056 					sizeof(struct crypto_queue_pair_info),
1057 					0, adapter->socket_id);
1058 			if (dev_info->qpairs == NULL)
1059 				return -ENOMEM;
1060 		}
1061 
1062 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1063 				dev_info->dev,
1064 				queue_pair_id,
1065 				conf);
1066 		if (ret)
1067 			return ret;
1068 
1069 		else
1070 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1071 					   queue_pair_id, 1);
1072 	}
1073 
1074 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1075 	 * or SW adapter, initiate services so the application can choose
1076 	 * whichever way it wants to use the adapter.
1077 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1078 	 *         Application may want to use one of the two modes below
1079 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1080 	 *          b. OP_NEW mode -> HW Dequeue
1081 	 * Case 2: No HW caps, use SW adapter
1082 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1083 	 *          b. OP_NEW mode -> SW Dequeue
1084 	 */
1085 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1086 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1087 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1088 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1089 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1090 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1091 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1092 		rte_spinlock_lock(&adapter->lock);
1093 		ret = eca_init_service(adapter, id);
1094 		if (ret == 0)
1095 			ret = eca_add_queue_pair(adapter, cdev_id,
1096 						 queue_pair_id);
1097 		rte_spinlock_unlock(&adapter->lock);
1098 
1099 		if (ret)
1100 			return ret;
1101 
1102 		rte_service_component_runstate_set(adapter->service_id, 1);
1103 	}
1104 
1105 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1106 		queue_pair_id, conf);
1107 	return 0;
1108 }
1109 
1110 int
1111 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1112 					int32_t queue_pair_id)
1113 {
1114 	struct event_crypto_adapter *adapter;
1115 	struct crypto_device_info *dev_info;
1116 	struct rte_eventdev *dev;
1117 	int ret;
1118 	uint32_t cap;
1119 	uint16_t i;
1120 
1121 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1122 
1123 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1124 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1125 		return -EINVAL;
1126 	}
1127 
1128 	adapter = eca_id_to_adapter(id);
1129 	if (adapter == NULL)
1130 		return -EINVAL;
1131 
1132 	dev = &rte_eventdevs[adapter->eventdev_id];
1133 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1134 						cdev_id,
1135 						&cap);
1136 	if (ret)
1137 		return ret;
1138 
1139 	dev_info = &adapter->cdevs[cdev_id];
1140 
1141 	if (queue_pair_id != -1 &&
1142 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1143 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1144 				 (uint16_t)queue_pair_id);
1145 		return -EINVAL;
1146 	}
1147 
1148 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1149 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1150 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1151 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1152 			return -ENOTSUP;
1153 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1154 						dev_info->dev,
1155 						queue_pair_id);
1156 		if (ret == 0) {
1157 			eca_update_qp_info(adapter,
1158 					&adapter->cdevs[cdev_id],
1159 					queue_pair_id,
1160 					0);
1161 			if (dev_info->num_qpairs == 0) {
1162 				rte_free(dev_info->qpairs);
1163 				dev_info->qpairs = NULL;
1164 			}
1165 		}
1166 	} else {
1167 		if (adapter->nb_qps == 0)
1168 			return 0;
1169 
1170 		rte_spinlock_lock(&adapter->lock);
1171 		if (queue_pair_id == -1) {
1172 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1173 				i++)
1174 				eca_update_qp_info(adapter, dev_info,
1175 							i, 0);
1176 		} else {
1177 			eca_update_qp_info(adapter, dev_info,
1178 						(uint16_t)queue_pair_id, 0);
1179 		}
1180 
1181 		if (dev_info->num_qpairs == 0) {
1182 			rte_free(dev_info->qpairs);
1183 			dev_info->qpairs = NULL;
1184 		}
1185 
1186 		rte_spinlock_unlock(&adapter->lock);
1187 		rte_service_component_runstate_set(adapter->service_id,
1188 				adapter->nb_qps);
1189 	}
1190 
1191 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1192 		queue_pair_id, ret);
1193 	return ret;
1194 }
1195 
1196 static int
1197 eca_adapter_ctrl(uint8_t id, int start)
1198 {
1199 	struct event_crypto_adapter *adapter;
1200 	struct crypto_device_info *dev_info;
1201 	struct rte_eventdev *dev;
1202 	uint32_t i;
1203 	int use_service;
1204 	int stop = !start;
1205 
1206 	use_service = 0;
1207 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1208 	adapter = eca_id_to_adapter(id);
1209 	if (adapter == NULL)
1210 		return -EINVAL;
1211 
1212 	dev = &rte_eventdevs[adapter->eventdev_id];
1213 
1214 	for (i = 0; i < rte_cryptodev_count(); i++) {
1215 		dev_info = &adapter->cdevs[i];
1216 		/* if starting, skip cdevs with no queue pairs added */
1217 		if (start && !dev_info->num_qpairs)
1218 			continue;
1219 		/* if stopping, skip cdevs that were never started */
1220 		if (stop && !dev_info->dev_started)
1221 			continue;
1222 		use_service |= !dev_info->internal_event_port;
1223 		dev_info->dev_started = start;
1224 		if (dev_info->internal_event_port == 0)
1225 			continue;
1226 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1227 						dev_info->dev) :
1228 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1229 						dev_info->dev);
1230 	}
1231 
1232 	if (use_service)
1233 		rte_service_runstate_set(adapter->service_id, start);
1234 
1235 	return 0;
1236 }
1237 
1238 int
1239 rte_event_crypto_adapter_start(uint8_t id)
1240 {
1241 	struct event_crypto_adapter *adapter;
1242 
1243 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1244 	adapter = eca_id_to_adapter(id);
1245 	if (adapter == NULL)
1246 		return -EINVAL;
1247 
1248 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1249 	return eca_adapter_ctrl(id, 1);
1250 }
1251 
1252 int
1253 rte_event_crypto_adapter_stop(uint8_t id)
1254 {
1255 	rte_eventdev_trace_crypto_adapter_stop(id);
1256 	return eca_adapter_ctrl(id, 0);
1257 }
1258 
1259 int
1260 rte_event_crypto_adapter_stats_get(uint8_t id,
1261 				struct rte_event_crypto_adapter_stats *stats)
1262 {
1263 	struct event_crypto_adapter *adapter;
1264 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1265 	struct rte_event_crypto_adapter_stats dev_stats;
1266 	struct rte_eventdev *dev;
1267 	struct crypto_device_info *dev_info;
1268 	uint32_t i;
1269 	int ret;
1270 
1271 	if (eca_memzone_lookup())
1272 		return -ENOMEM;
1273 
1274 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1275 
1276 	adapter = eca_id_to_adapter(id);
1277 	if (adapter == NULL || stats == NULL)
1278 		return -EINVAL;
1279 
1280 	dev = &rte_eventdevs[adapter->eventdev_id];
1281 	memset(stats, 0, sizeof(*stats));
1282 	for (i = 0; i < rte_cryptodev_count(); i++) {
1283 		dev_info = &adapter->cdevs[i];
1284 		if (dev_info->internal_event_port == 0 ||
1285 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1286 			continue;
1287 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1288 						dev_info->dev,
1289 						&dev_stats);
1290 		if (ret)
1291 			continue;
1292 
1293 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1294 		dev_stats_sum.event_enq_count +=
1295 			dev_stats.event_enq_count;
1296 	}
1297 
1298 	if (adapter->service_inited)
1299 		*stats = adapter->crypto_stats;
1300 
1301 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1302 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1303 
1304 	rte_eventdev_trace_crypto_adapter_stats_get(id, stats,
1305 		stats->event_poll_count, stats->event_deq_count,
1306 		stats->crypto_enq_count, stats->crypto_enq_fail,
1307 		stats->crypto_deq_count, stats->event_enq_count,
1308 		stats->event_enq_retry_count, stats->event_enq_fail_count);
1309 
1310 	return 0;
1311 }
1312 
1313 int
1314 rte_event_crypto_adapter_stats_reset(uint8_t id)
1315 {
1316 	struct event_crypto_adapter *adapter;
1317 	struct crypto_device_info *dev_info;
1318 	struct rte_eventdev *dev;
1319 	uint32_t i;
1320 
1321 	rte_eventdev_trace_crypto_adapter_stats_reset(id);
1322 
1323 	if (eca_memzone_lookup())
1324 		return -ENOMEM;
1325 
1326 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1327 
1328 	adapter = eca_id_to_adapter(id);
1329 	if (adapter == NULL)
1330 		return -EINVAL;
1331 
1332 	dev = &rte_eventdevs[adapter->eventdev_id];
1333 	for (i = 0; i < rte_cryptodev_count(); i++) {
1334 		dev_info = &adapter->cdevs[i];
1335 		if (dev_info->internal_event_port == 0 ||
1336 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1337 			continue;
1338 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1339 						dev_info->dev);
1340 	}
1341 
1342 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1343 	return 0;
1344 }
1345 
1346 int
1347 rte_event_crypto_adapter_runtime_params_init(
1348 		struct rte_event_crypto_adapter_runtime_params *params)
1349 {
1350 	if (params == NULL)
1351 		return -EINVAL;
1352 
1353 	memset(params, 0, sizeof(*params));
1354 	params->max_nb = DEFAULT_MAX_NB;
1355 
1356 	return 0;
1357 }
1358 
1359 static int
1360 crypto_adapter_cap_check(struct event_crypto_adapter *adapter)
1361 {
1362 	int ret;
1363 	uint32_t caps;
1364 
1365 	if (!adapter->nb_qps)
1366 		return -EINVAL;
1367 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1368 						adapter->next_cdev_id,
1369 						&caps);
1370 	if (ret) {
1371 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1372 			" cdev %" PRIu8, adapter->eventdev_id,
1373 			adapter->next_cdev_id);
1374 		return ret;
1375 	}
1376 
1377 	if ((caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1378 	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1379 		return -ENOTSUP;
1380 
1381 	return 0;
1382 }
1383 
1384 int
1385 rte_event_crypto_adapter_runtime_params_set(uint8_t id,
1386 		struct rte_event_crypto_adapter_runtime_params *params)
1387 {
1388 	struct event_crypto_adapter *adapter;
1389 	int ret;
1390 
1391 	if (eca_memzone_lookup())
1392 		return -ENOMEM;
1393 
1394 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1395 
1396 	if (params == NULL) {
1397 		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
1398 		return -EINVAL;
1399 	}
1400 
1401 	adapter = eca_id_to_adapter(id);
1402 	if (adapter == NULL)
1403 		return -EINVAL;
1404 
1405 	ret = crypto_adapter_cap_check(adapter);
1406 	if (ret)
1407 		return ret;
1408 
1409 	rte_spinlock_lock(&adapter->lock);
1410 	adapter->max_nb = params->max_nb;
1411 	rte_spinlock_unlock(&adapter->lock);
1412 
1413 	return 0;
1414 }
1415 
1416 int
1417 rte_event_crypto_adapter_runtime_params_get(uint8_t id,
1418 		struct rte_event_crypto_adapter_runtime_params *params)
1419 {
1420 	struct event_crypto_adapter *adapter;
1421 	int ret;
1422 
1423 	if (eca_memzone_lookup())
1424 		return -ENOMEM;
1425 
1426 
1427 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1428 
1429 	if (params == NULL) {
1430 		RTE_EDEV_LOG_ERR("params pointer is NULL\n");
1431 		return -EINVAL;
1432 	}
1433 
1434 	adapter = eca_id_to_adapter(id);
1435 	if (adapter == NULL)
1436 		return -EINVAL;
1437 
1438 	ret = crypto_adapter_cap_check(adapter);
1439 	if (ret)
1440 		return ret;
1441 
1442 	params->max_nb = adapter->max_nb;
1443 
1444 	return 0;
1445 }
1446 
1447 int
1448 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1449 {
1450 	struct event_crypto_adapter *adapter;
1451 
1452 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1453 
1454 	adapter = eca_id_to_adapter(id);
1455 	if (adapter == NULL || service_id == NULL)
1456 		return -EINVAL;
1457 
1458 	if (adapter->service_inited)
1459 		*service_id = adapter->service_id;
1460 
1461 	rte_eventdev_trace_crypto_adapter_service_id_get(id, *service_id);
1462 
1463 	return adapter->service_inited ? 0 : -ESRCH;
1464 }
1465 
1466 int
1467 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1468 {
1469 	struct event_crypto_adapter *adapter;
1470 
1471 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1472 
1473 	adapter = eca_id_to_adapter(id);
1474 	if (adapter == NULL || event_port_id == NULL)
1475 		return -EINVAL;
1476 
1477 	*event_port_id = adapter->event_port_id;
1478 
1479 	rte_eventdev_trace_crypto_adapter_event_port_get(id, *event_port_id);
1480 
1481 	return 0;
1482 }
1483 
1484 int
1485 rte_event_crypto_adapter_vector_limits_get(
1486 	uint8_t dev_id, uint16_t cdev_id,
1487 	struct rte_event_crypto_adapter_vector_limits *limits)
1488 {
1489 	struct rte_cryptodev *cdev;
1490 	struct rte_eventdev *dev;
1491 	uint32_t cap;
1492 	int ret;
1493 
1494 	rte_eventdev_trace_crypto_adapter_vector_limits_get(dev_id, cdev_id, limits);
1495 
1496 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1497 
1498 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1499 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1500 		return -EINVAL;
1501 	}
1502 
1503 	if (limits == NULL) {
1504 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1505 		return -EINVAL;
1506 	}
1507 
1508 	dev = &rte_eventdevs[dev_id];
1509 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1510 
1511 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1512 	if (ret) {
1513 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1514 				 "cdev %" PRIu16, dev_id, cdev_id);
1515 		return ret;
1516 	}
1517 
1518 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1519 		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1520 				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
1521 		return -ENOTSUP;
1522 	}
1523 
1524 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1525 		return -ENOTSUP;
1526 
1527 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1528 		dev, cdev, limits);
1529 }
1530