xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision 2d9c7e56e52ceb2e14b5134dcd9673dd227e3072)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16 
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21 
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 
28 /* MAX_OPS_IN_BUFFER is the size of a batch of dequeued events */
29 #define MAX_OPS_IN_BUFFER BATCH_SIZE
30 
31 /* CRYPTO_ADAPTER_OPS_BUFFER_SZ accommodates MAX_OPS_IN_BUFFER ops plus
32  * additional space for one batch
33  */
34 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (MAX_OPS_IN_BUFFER + BATCH_SIZE)
35 
36 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
37 
38 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
39  * iterations of eca_crypto_adapter_enq_run()
40  */
41 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
42 
43 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
44 
45 struct crypto_ops_circular_buffer {
46 	/* index of head element in circular buffer */
47 	uint16_t head;
48 	/* index of tail element in circular buffer */
49 	uint16_t tail;
50 	/* number of elements in buffer */
51 	uint16_t count;
52 	/* size of circular buffer */
53 	uint16_t size;
54 	/* Pointer to hold rte_crypto_ops for batching */
55 	struct rte_crypto_op **op_buffer;
56 } __rte_cache_aligned;
57 
58 struct event_crypto_adapter {
59 	/* Event device identifier */
60 	uint8_t eventdev_id;
61 	/* Event port identifier */
62 	uint8_t event_port_id;
63 	/* Store event port's implicit release capability */
64 	uint8_t implicit_release_disabled;
65 	/* Flag to indicate backpressure at the cryptodev;
66 	 * stop further dequeuing of events from the eventdev
67 	 */
68 	bool stop_enq_to_cryptodev;
69 	/* Max crypto ops processed in any service function invocation */
70 	uint32_t max_nb;
71 	/* Lock to serialize config updates with service function */
72 	rte_spinlock_t lock;
73 	/* Next crypto device to be processed */
74 	uint16_t next_cdev_id;
75 	/* Per crypto device structure */
76 	struct crypto_device_info *cdevs;
77 	/* Loop counter to flush crypto ops */
78 	uint16_t transmit_loop_count;
79 	/* Circular buffer for batching crypto ops to eventdev */
80 	struct crypto_ops_circular_buffer ebuf;
81 	/* Per instance stats structure */
82 	struct rte_event_crypto_adapter_stats crypto_stats;
83 	/* Configuration callback for rte_service configuration */
84 	rte_event_crypto_adapter_conf_cb conf_cb;
85 	/* Configuration callback argument */
86 	void *conf_arg;
87 	/* Set if default_cb is being used */
88 	int default_cb_arg;
89 	/* Service initialization state */
90 	uint8_t service_inited;
91 	/* Memory allocation name */
92 	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
93 	/* Socket identifier cached from eventdev */
94 	int socket_id;
95 	/* Per adapter EAL service */
96 	uint32_t service_id;
97 	/* No. of queue pairs configured */
98 	uint16_t nb_qps;
99 	/* Adapter mode */
100 	enum rte_event_crypto_adapter_mode mode;
101 } __rte_cache_aligned;
102 
103 /* Per crypto device information */
104 struct crypto_device_info {
105 	/* Pointer to cryptodev */
106 	struct rte_cryptodev *dev;
107 	/* Pointer to queue pair info */
108 	struct crypto_queue_pair_info *qpairs;
109 	/* Next queue pair to be processed */
110 	uint16_t next_queue_pair_id;
111 	/* Set to indicate cryptodev->eventdev packet
112 	 * transfer uses a hardware mechanism
113 	 */
114 	uint8_t internal_event_port;
115 	/* Set to indicate processing has been started */
116 	uint8_t dev_started;
117 	/* Number of queue pairs added to the adapter; if num_qpairs > 0,
118 	 * the start callback will be invoked if not already invoked
119 	 */
120 	uint16_t num_qpairs;
121 } __rte_cache_aligned;
122 
123 /* Per queue pair information */
124 struct crypto_queue_pair_info {
125 	/* Set to indicate queue pair is enabled */
126 	bool qp_enabled;
127 	/* Circular buffer for batching crypto ops to cdev */
128 	struct crypto_ops_circular_buffer cbuf;
129 } __rte_cache_aligned;
130 
131 static struct event_crypto_adapter **event_crypto_adapter;
132 
133 /* Macros to check for valid adapter */
134 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
135 	if (!eca_valid_id(id)) { \
136 		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
137 		return retval; \
138 	} \
139 } while (0)
140 
141 static inline int
142 eca_valid_id(uint8_t id)
143 {
144 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
145 }
146 
147 static int
148 eca_init(void)
149 {
150 	const struct rte_memzone *mz;
151 	unsigned int sz;
152 
153 	sz = sizeof(*event_crypto_adapter) *
154 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
155 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
156 
157 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
158 	if (mz == NULL) {
159 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
160 						 rte_socket_id(), 0,
161 						 RTE_CACHE_LINE_SIZE);
162 		if (mz == NULL) {
163 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
164 					PRId32, rte_errno);
165 			return -rte_errno;
166 		}
167 	}
168 
169 	event_crypto_adapter = mz->addr;
170 	return 0;
171 }
172 
173 static int
174 eca_memzone_lookup(void)
175 {
176 	const struct rte_memzone *mz;
177 
178 	if (event_crypto_adapter == NULL) {
179 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
180 		if (mz == NULL)
181 			return -ENOMEM;
182 
183 		event_crypto_adapter = mz->addr;
184 	}
185 
186 	return 0;
187 }
188 
189 static inline bool
190 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
191 {
192 	return bufp->count >= BATCH_SIZE;
193 }
194 
195 static inline bool
196 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
197 {
198 	/* circular buffer can hold at most MAX_OPS_IN_BUFFER ops */
199 	return (bufp->size - bufp->count) >= MAX_OPS_IN_BUFFER;
200 }
201 
202 static inline void
203 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
204 {
205 	rte_free(bufp->op_buffer);
206 }
207 
208 static inline int
209 eca_circular_buffer_init(const char *name,
210 			 struct crypto_ops_circular_buffer *bufp,
211 			 uint16_t sz)
212 {
213 	bufp->op_buffer = rte_zmalloc(name,
214 				      sizeof(struct rte_crypto_op *) * sz,
215 				      0);
216 	if (bufp->op_buffer == NULL)
217 		return -ENOMEM;
218 
219 	bufp->size = sz;
220 	return 0;
221 }
222 
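/* Append one op at the buffer tail. Note that no overflow check is done
 * here; callers are expected to guarantee free space beforehand (see
 * eca_circular_buffer_space_for_batch()).
 */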
223 static inline int
224 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
225 			struct rte_crypto_op *op)
226 {
227 	uint16_t *tailp = &bufp->tail;
228 
229 	bufp->op_buffer[*tailp] = op;
230 	/* circular buffer, go round */
231 	*tailp = (*tailp + 1) % bufp->size;
232 	bufp->count++;
233 
234 	return 0;
235 }
236 
237 static inline int
238 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
239 				  uint8_t cdev_id, uint16_t qp_id,
240 				  uint16_t *nb_ops_flushed)
241 {
242 	uint16_t n = 0;
243 	uint16_t *headp = &bufp->head;
244 	uint16_t *tailp = &bufp->tail;
245 	struct rte_crypto_op **ops = bufp->op_buffer;
246 
247 	if (*tailp > *headp)
248 		/* Flush (tail - head) OPs starting from the head pointer */
249 		n = *tailp - *headp;
250 	else if (*tailp < *headp)
251 		/* Circular buffer rollover:
252 		 * flush OPs from head to the end of the buffer.
253 		 * The rest of the OPs will be flushed in the next iteration.
254 		 */
255 		n = bufp->size - *headp;
256 	else { /* head == tail case */
257 		/* When head == tail, the circular buffer is either
258 		 * full (tail pointer rolled over) or empty
259 		 */
260 		if (bufp->count != 0) {
261 			/* Circular buffer full:
262 			 * flush OPs from head to the end of the buffer.
263 			 * The rest of the OPs will be flushed in the next iteration.
264 			 */
265 			n = bufp->size - *headp;
266 		} else {
267 			/* Circ buffer - Empty */
268 			*nb_ops_flushed = 0;
269 			return 0;
270 		}
271 	}
272 
273 	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
274 						      &ops[*headp], n);
275 	bufp->count -= *nb_ops_flushed;
276 	if (!bufp->count) {
277 		*headp = 0;
278 		*tailp = 0;
279 	} else
280 		*headp = (*headp + *nb_ops_flushed) % bufp->size;
281 
282 	return *nb_ops_flushed == n ? 0 : -1;
283 }
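/* Worked example of the flush arithmetic above (illustrative values only):
 * with size = 64, head = 60, tail = 4 and count = 8, tail < head, so this
 * call flushes n = size - head = 4 ops (indices 60..63); the remaining
 * 4 ops (indices 0..3) are flushed on the next call. If the cryptodev
 * accepts fewer than n ops, head advances only by the number actually
 * enqueued and -1 is returned to signal a partial flush.
 */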
284 
285 static inline struct event_crypto_adapter *
286 eca_id_to_adapter(uint8_t id)
287 {
288 	return event_crypto_adapter ?
289 		event_crypto_adapter[id] : NULL;
290 }
291 
292 static int
293 eca_default_config_cb(uint8_t id, uint8_t dev_id,
294 			struct rte_event_crypto_adapter_conf *conf, void *arg)
295 {
296 	struct rte_event_dev_config dev_conf;
297 	struct rte_eventdev *dev;
298 	uint8_t port_id;
299 	int started;
300 	int ret;
301 	struct rte_event_port_conf *port_conf = arg;
302 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
303 
304 	if (adapter == NULL)
305 		return -EINVAL;
306 
307 	dev = &rte_eventdevs[adapter->eventdev_id];
308 	dev_conf = dev->data->dev_conf;
309 
310 	started = dev->data->dev_started;
311 	if (started)
312 		rte_event_dev_stop(dev_id);
313 	port_id = dev_conf.nb_event_ports;
314 	dev_conf.nb_event_ports += 1;
315 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
316 		dev_conf.nb_single_link_event_port_queues += 1;
317 
318 	ret = rte_event_dev_configure(dev_id, &dev_conf);
319 	if (ret) {
320 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
321 		if (started) {
322 			if (rte_event_dev_start(dev_id))
323 				return -EIO;
324 		}
325 		return ret;
326 	}
327 
328 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
329 	if (ret) {
330 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
331 		return ret;
332 	}
333 
334 	conf->event_port_id = port_id;
335 	conf->max_nb = DEFAULT_MAX_NB;
336 	if (started)
337 		ret = rte_event_dev_start(dev_id);
338 
339 	adapter->default_cb_arg = 1;
340 	return ret;
341 }
342 
343 int
344 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
345 				rte_event_crypto_adapter_conf_cb conf_cb,
346 				enum rte_event_crypto_adapter_mode mode,
347 				void *conf_arg)
348 {
349 	struct event_crypto_adapter *adapter;
350 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
351 	int socket_id;
352 	uint8_t i;
353 	int ret;
354 
355 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
356 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
357 	if (conf_cb == NULL)
358 		return -EINVAL;
359 
360 	if (event_crypto_adapter == NULL) {
361 		ret = eca_init();
362 		if (ret)
363 			return ret;
364 	}
365 
366 	adapter = eca_id_to_adapter(id);
367 	if (adapter != NULL) {
368 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
369 		return -EEXIST;
370 	}
371 
372 	socket_id = rte_event_dev_socket_id(dev_id);
373 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
374 		 "rte_event_crypto_adapter_%d", id);
375 
376 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
377 			RTE_CACHE_LINE_SIZE, socket_id);
378 	if (adapter == NULL) {
379 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
380 		return -ENOMEM;
381 	}
382 
383 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
384 				     &adapter->ebuf,
385 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
386 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
387 		rte_free(adapter);
388 		return -ENOMEM;
389 	}
390 
391 	adapter->eventdev_id = dev_id;
392 	adapter->socket_id = socket_id;
393 	adapter->conf_cb = conf_cb;
394 	adapter->conf_arg = conf_arg;
395 	adapter->mode = mode;
396 	strcpy(adapter->mem_name, mem_name);
397 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
398 					rte_cryptodev_count() *
399 					sizeof(struct crypto_device_info), 0,
400 					socket_id);
401 	if (adapter->cdevs == NULL) {
402 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
403 		eca_circular_buffer_free(&adapter->ebuf);
404 		rte_free(adapter);
405 		return -ENOMEM;
406 	}
407 
408 	rte_spinlock_init(&adapter->lock);
409 	for (i = 0; i < rte_cryptodev_count(); i++)
410 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
411 
412 	event_crypto_adapter[id] = adapter;
413 
414 	return 0;
415 }
416 
417 
418 int
419 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
420 				struct rte_event_port_conf *port_config,
421 				enum rte_event_crypto_adapter_mode mode)
422 {
423 	struct rte_event_port_conf *pc;
424 	int ret;
425 
426 	if (port_config == NULL)
427 		return -EINVAL;
428 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
429 
430 	pc = rte_malloc(NULL, sizeof(*pc), 0);
431 	if (pc == NULL)
432 		return -ENOMEM;
433 	*pc = *port_config;
434 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
435 						  eca_default_config_cb,
436 						  mode,
437 						  pc);
438 	if (ret)
439 		rte_free(pc);
440 
441 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, port_config, mode, ret);
442 
443 	return ret;
444 }
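/* Illustrative usage sketch (not part of this file): with a SW adapter
 * (no INTERNAL_PORT capability), an application would typically create the
 * adapter with the default configuration callback, bind the queue pairs of
 * a cryptodev and start it. The identifiers and port_conf values below are
 * placeholders for the example only.
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_queue_pair_add(0, cdev_id,
 *							      -1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(0);
 */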
445 
446 int
447 rte_event_crypto_adapter_free(uint8_t id)
448 {
449 	struct event_crypto_adapter *adapter;
450 
451 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
452 
453 	adapter = eca_id_to_adapter(id);
454 	if (adapter == NULL)
455 		return -EINVAL;
456 
457 	if (adapter->nb_qps) {
458 		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
459 				adapter->nb_qps);
460 		return -EBUSY;
461 	}
462 
463 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
464 	if (adapter->default_cb_arg)
465 		rte_free(adapter->conf_arg);
466 	rte_free(adapter->cdevs);
467 	rte_free(adapter);
468 	event_crypto_adapter[id] = NULL;
469 
470 	return 0;
471 }
472 
473 static inline unsigned int
474 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
475 		     unsigned int cnt)
476 {
477 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
478 	union rte_event_crypto_metadata *m_data = NULL;
479 	struct crypto_queue_pair_info *qp_info = NULL;
480 	struct rte_crypto_op *crypto_op;
481 	unsigned int i, n;
482 	uint16_t qp_id, nb_enqueued = 0;
483 	uint8_t cdev_id;
484 	int ret;
485 
486 	ret = 0;
487 	n = 0;
488 	stats->event_deq_count += cnt;
489 
490 	for (i = 0; i < cnt; i++) {
491 		crypto_op = ev[i].event_ptr;
492 		if (crypto_op == NULL)
493 			continue;
494 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
495 		if (m_data == NULL) {
496 			rte_pktmbuf_free(crypto_op->sym->m_src);
497 			rte_crypto_op_free(crypto_op);
498 			continue;
499 		}
500 
501 		cdev_id = m_data->request_info.cdev_id;
502 		qp_id = m_data->request_info.queue_pair_id;
503 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
504 		if (!qp_info->qp_enabled) {
505 			rte_pktmbuf_free(crypto_op->sym->m_src);
506 			rte_crypto_op_free(crypto_op);
507 			continue;
508 		}
509 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
510 
511 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
512 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
513 								cdev_id,
514 								qp_id,
515 								&nb_enqueued);
516 			stats->crypto_enq_count += nb_enqueued;
517 			n += nb_enqueued;
518 
519 			/*
520 			 * If some crypto ops failed to flush to the cdev and
521 			 * space for another batch is not available, stop
522 			 * dequeuing from the eventdev momentarily
523 			 */
524 			if (unlikely(ret < 0 &&
525 				!eca_circular_buffer_space_for_batch(
526 							&qp_info->cbuf)))
527 				adapter->stop_enq_to_cryptodev = true;
528 		}
529 	}
530 
531 	return n;
532 }
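/* The request_info/response_info metadata consumed above (and again in
 * eca_ops_enqueue_burst() below) is supplied by the application, typically
 * attached to the crypto session, e.g. via
 * rte_cryptodev_session_event_mdata_set(). A minimal sketch of filling it,
 * with placeholder identifiers:
 *
 *	union rte_event_crypto_metadata m;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.request_info.cdev_id = cdev_id;
 *	m.request_info.queue_pair_id = qp_id;
 *	m.response_info.event_type = RTE_EVENT_TYPE_CRYPTODEV;
 *	m.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m.response_info.queue_id = resp_queue_id;
 */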
533 
534 static unsigned int
535 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
536 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
537 {
538 	struct crypto_device_info *curr_dev;
539 	struct crypto_queue_pair_info *curr_queue;
540 	struct rte_cryptodev *dev;
541 	uint16_t nb = 0, nb_enqueued = 0;
542 	uint16_t qp;
543 
544 	curr_dev = &adapter->cdevs[cdev_id];
545 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
546 
547 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
548 
549 		curr_queue = &curr_dev->qpairs[qp];
550 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
551 			continue;
552 
553 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
554 						  cdev_id,
555 						  qp,
556 						  &nb_enqueued);
557 		*nb_ops_flushed += curr_queue->cbuf.count;
558 		nb += nb_enqueued;
559 	}
560 
561 	return nb;
562 }
563 
564 static unsigned int
565 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
566 {
567 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
568 	uint8_t cdev_id;
569 	uint16_t nb_enqueued = 0;
570 	uint16_t nb_ops_flushed = 0;
571 	uint16_t num_cdev = rte_cryptodev_count();
572 
573 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
574 		nb_enqueued += eca_crypto_cdev_flush(adapter,
575 						    cdev_id,
576 						    &nb_ops_flushed);
577 	/*
578 	 * Enable dequeue from the eventdev once all ops from the circular
579 	 * buffer have been flushed to the cdev
580 	 */
581 	if (!nb_ops_flushed)
582 		adapter->stop_enq_to_cryptodev = false;
583 
584 	stats->crypto_enq_count += nb_enqueued;
585 
586 	return nb_enqueued;
587 }
588 
589 static int
590 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
591 			   unsigned int max_enq)
592 {
593 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
594 	struct rte_event ev[BATCH_SIZE];
595 	unsigned int nb_enq, nb_enqueued;
596 	uint16_t n;
597 	uint8_t event_dev_id = adapter->eventdev_id;
598 	uint8_t event_port_id = adapter->event_port_id;
599 
600 	nb_enqueued = 0;
601 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
602 		return 0;
603 
604 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
605 
606 		if (unlikely(adapter->stop_enq_to_cryptodev)) {
607 			nb_enqueued += eca_crypto_enq_flush(adapter);
608 
609 			if (unlikely(adapter->stop_enq_to_cryptodev))
610 				break;
611 		}
612 
613 		stats->event_poll_count++;
614 		n = rte_event_dequeue_burst(event_dev_id,
615 					    event_port_id, ev, BATCH_SIZE, 0);
616 
617 		if (!n)
618 			break;
619 
620 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
621 	}
622 
623 	if ((++adapter->transmit_loop_count &
624 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
625 		nb_enqueued += eca_crypto_enq_flush(adapter);
626 	}
627 
628 	return nb_enqueued;
629 }
630 
631 static inline uint16_t
632 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
633 		  struct rte_crypto_op **ops, uint16_t num)
634 {
635 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
636 	union rte_event_crypto_metadata *m_data = NULL;
637 	uint8_t event_dev_id = adapter->eventdev_id;
638 	uint8_t event_port_id = adapter->event_port_id;
639 	struct rte_event events[BATCH_SIZE];
640 	uint16_t nb_enqueued, nb_ev;
641 	uint8_t retry;
642 	uint8_t i;
643 
644 	nb_ev = 0;
645 	retry = 0;
646 	nb_enqueued = 0;
647 	num = RTE_MIN(num, BATCH_SIZE);
648 	for (i = 0; i < num; i++) {
649 		struct rte_event *ev = &events[nb_ev++];
650 
651 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
652 		if (unlikely(m_data == NULL)) {
653 			rte_pktmbuf_free(ops[i]->sym->m_src);
654 			rte_crypto_op_free(ops[i]);
655 			continue;
656 		}
657 
658 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
659 		ev->event_ptr = ops[i];
660 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
661 		if (adapter->implicit_release_disabled)
662 			ev->op = RTE_EVENT_OP_FORWARD;
663 		else
664 			ev->op = RTE_EVENT_OP_NEW;
665 	}
666 
667 	do {
668 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
669 						  event_port_id,
670 						  &events[nb_enqueued],
671 						  nb_ev - nb_enqueued);
672 
673 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
674 		 nb_enqueued < nb_ev);
675 
676 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
677 	stats->event_enq_count += nb_enqueued;
678 	stats->event_enq_retry_count += retry - 1;
679 
680 	return nb_enqueued;
681 }
682 
683 static int
684 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
685 				   struct crypto_ops_circular_buffer *bufp)
686 {
687 	uint16_t n = 0, nb_ops_flushed;
688 	uint16_t *headp = &bufp->head;
689 	uint16_t *tailp = &bufp->tail;
690 	struct rte_crypto_op **ops = bufp->op_buffer;
691 
692 	if (*tailp > *headp)
693 		n = *tailp - *headp;
694 	else if (*tailp < *headp)
695 		n = bufp->size - *headp;
696 	else
697 		return 0;  /* buffer empty */
698 
699 	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
700 	bufp->count -= nb_ops_flushed;
701 	if (!bufp->count) {
702 		*headp = 0;
703 		*tailp = 0;
704 		return 0;  /* buffer empty */
705 	}
706 
707 	*headp = (*headp + nb_ops_flushed) % bufp->size;
708 	return 1;
709 }
710 
711 
712 static void
713 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
714 {
715 	if (likely(adapter->ebuf.count == 0))
716 		return;
717 
718 	while (eca_circular_buffer_flush_to_evdev(adapter,
719 						  &adapter->ebuf))
720 		;
721 }
722 static inline unsigned int
723 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
724 			   unsigned int max_deq)
725 {
726 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
727 	struct crypto_device_info *curr_dev;
728 	struct crypto_queue_pair_info *curr_queue;
729 	struct rte_crypto_op *ops[BATCH_SIZE];
730 	uint16_t n, nb_deq, nb_enqueued, i;
731 	struct rte_cryptodev *dev;
732 	uint8_t cdev_id;
733 	uint16_t qp, dev_qps;
734 	bool done;
735 	uint16_t num_cdev = rte_cryptodev_count();
736 
737 	nb_deq = 0;
738 	eca_ops_buffer_flush(adapter);
739 
740 	do {
741 		done = true;
742 
743 		for (cdev_id = adapter->next_cdev_id;
744 			cdev_id < num_cdev; cdev_id++) {
745 			uint16_t queues = 0;
746 
747 			curr_dev = &adapter->cdevs[cdev_id];
748 			dev = curr_dev->dev;
749 			if (unlikely(dev == NULL))
750 				continue;
751 
752 			dev_qps = dev->data->nb_queue_pairs;
753 
754 			for (qp = curr_dev->next_queue_pair_id;
755 				queues < dev_qps; qp = (qp + 1) % dev_qps,
756 				queues++) {
757 
758 				curr_queue = &curr_dev->qpairs[qp];
759 				if (unlikely(curr_queue == NULL ||
760 				    !curr_queue->qp_enabled))
761 					continue;
762 
763 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
764 					ops, BATCH_SIZE);
765 				if (!n)
766 					continue;
767 
768 				done = false;
769 				nb_enqueued = 0;
770 
771 				stats->crypto_deq_count += n;
772 
773 				if (unlikely(!adapter->ebuf.count))
774 					nb_enqueued = eca_ops_enqueue_burst(
775 							adapter, ops, n);
776 
777 				if (likely(nb_enqueued == n))
778 					goto check;
779 
780 				/* Failed to enqueue events case */
781 				for (i = nb_enqueued; i < n; i++)
782 					eca_circular_buffer_add(
783 						&adapter->ebuf,
784 						ops[i]);
785 
786 check:
787 				nb_deq += n;
788 
789 				if (nb_deq >= max_deq) {
790 					if ((qp + 1) == dev_qps) {
791 						adapter->next_cdev_id =
792 							(cdev_id + 1)
793 							% num_cdev;
794 					}
795 					curr_dev->next_queue_pair_id = (qp + 1)
796 						% dev->data->nb_queue_pairs;
797 
798 					return nb_deq;
799 				}
800 			}
801 		}
802 		adapter->next_cdev_id = 0;
803 	} while (done == false);
804 	return nb_deq;
805 }
806 
807 static int
808 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
809 		       unsigned int max_ops)
810 {
811 	unsigned int ops_left = max_ops;
812 
813 	while (ops_left > 0) {
814 		unsigned int e_cnt, d_cnt;
815 
816 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
817 		ops_left -= RTE_MIN(ops_left, e_cnt);
818 
819 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
820 		ops_left -= RTE_MIN(ops_left, d_cnt);
821 
822 		if (e_cnt == 0 && d_cnt == 0)
823 			break;
824 
825 	}
826 
827 	if (ops_left == max_ops) {
828 		rte_event_maintain(adapter->eventdev_id,
829 				   adapter->event_port_id, 0);
830 		return -EAGAIN;
831 	} else
832 		return 0;
833 }
834 
835 static int
836 eca_service_func(void *args)
837 {
838 	struct event_crypto_adapter *adapter = args;
839 	int ret;
840 
841 	if (rte_spinlock_trylock(&adapter->lock) == 0)
842 		return 0;
843 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
844 	rte_spinlock_unlock(&adapter->lock);
845 
846 	return ret;
847 }
848 
849 static int
850 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
851 {
852 	struct rte_event_crypto_adapter_conf adapter_conf;
853 	struct rte_service_spec service;
854 	int ret;
855 	uint32_t impl_rel;
856 
857 	if (adapter->service_inited)
858 		return 0;
859 
860 	memset(&service, 0, sizeof(service));
861 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
862 		"rte_event_crypto_adapter_%d", id);
863 	service.socket_id = adapter->socket_id;
864 	service.callback = eca_service_func;
865 	service.callback_userdata = adapter;
866 	/* Service function handles locking for queue add/del updates */
867 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
868 	ret = rte_service_component_register(&service, &adapter->service_id);
869 	if (ret) {
870 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
871 			service.name, ret);
872 		return ret;
873 	}
874 
875 	ret = adapter->conf_cb(id, adapter->eventdev_id,
876 		&adapter_conf, adapter->conf_arg);
877 	if (ret) {
878 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
879 			ret);
880 		return ret;
881 	}
882 
883 	adapter->max_nb = adapter_conf.max_nb;
884 	adapter->event_port_id = adapter_conf.event_port_id;
885 
886 	if (rte_event_port_attr_get(adapter->eventdev_id,
887 				adapter->event_port_id,
888 				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
889 				&impl_rel)) {
890 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
891 				 adapter->eventdev_id);
892 		eca_circular_buffer_free(&adapter->ebuf);
893 		rte_free(adapter);
894 		return -EINVAL;
895 	}
896 
897 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
898 	adapter->service_inited = 1;
899 
900 	return ret;
901 }
902 
903 static void
904 eca_update_qp_info(struct event_crypto_adapter *adapter,
905 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
906 		   uint8_t add)
907 {
908 	struct crypto_queue_pair_info *qp_info;
909 	int enabled;
910 	uint16_t i;
911 
912 	if (dev_info->qpairs == NULL)
913 		return;
914 
915 	if (queue_pair_id == -1) {
916 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
917 			eca_update_qp_info(adapter, dev_info, i, add);
918 	} else {
919 		qp_info = &dev_info->qpairs[queue_pair_id];
920 		enabled = qp_info->qp_enabled;
921 		if (add) {
922 			adapter->nb_qps += !enabled;
923 			dev_info->num_qpairs += !enabled;
924 		} else {
925 			adapter->nb_qps -= enabled;
926 			dev_info->num_qpairs -= enabled;
927 		}
928 		qp_info->qp_enabled = !!add;
929 	}
930 }
931 
932 static int
933 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
934 		   int queue_pair_id)
935 {
936 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
937 	struct crypto_queue_pair_info *qpairs;
938 	uint32_t i;
939 
940 	if (dev_info->qpairs == NULL) {
941 		dev_info->qpairs =
942 		    rte_zmalloc_socket(adapter->mem_name,
943 					dev_info->dev->data->nb_queue_pairs *
944 					sizeof(struct crypto_queue_pair_info),
945 					0, adapter->socket_id);
946 		if (dev_info->qpairs == NULL)
947 			return -ENOMEM;
948 
949 		qpairs = dev_info->qpairs;
950 
951 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
952 					     &qpairs->cbuf,
953 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
954 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
955 					 "buffer");
956 			rte_free(qpairs);
957 			return -ENOMEM;
958 		}
959 	}
960 
961 	if (queue_pair_id == -1) {
962 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
963 			eca_update_qp_info(adapter, dev_info, i, 1);
964 	} else
965 		eca_update_qp_info(adapter, dev_info,
966 					(uint16_t)queue_pair_id, 1);
967 
968 	return 0;
969 }
970 
971 int
972 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
973 			uint8_t cdev_id,
974 			int32_t queue_pair_id,
975 			const struct rte_event_crypto_adapter_queue_conf *conf)
976 {
977 	struct rte_event_crypto_adapter_vector_limits limits;
978 	struct event_crypto_adapter *adapter;
979 	struct crypto_device_info *dev_info;
980 	struct rte_eventdev *dev;
981 	uint32_t cap;
982 	int ret;
983 
984 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
985 
986 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
987 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
988 		return -EINVAL;
989 	}
990 
991 	adapter = eca_id_to_adapter(id);
992 	if (adapter == NULL)
993 		return -EINVAL;
994 
995 	dev = &rte_eventdevs[adapter->eventdev_id];
996 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
997 						cdev_id,
998 						&cap);
999 	if (ret) {
1000 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1001 			" cdev %" PRIu8, id, cdev_id);
1002 		return ret;
1003 	}
1004 
1005 	if (conf == NULL) {
1006 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
1007 			RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
1008 					 cdev_id);
1009 			return -EINVAL;
1010 		}
1011 	} else {
1012 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
1013 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
1014 				RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
1015 						 "dev %" PRIu8 " cdev %" PRIu8, id,
1016 						 cdev_id);
1017 				return -ENOTSUP;
1018 			}
1019 
1020 			ret = rte_event_crypto_adapter_vector_limits_get(
1021 				adapter->eventdev_id, cdev_id, &limits);
1022 			if (ret < 0) {
1023 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
1024 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
1025 						 id, cdev_id);
1026 				return -EINVAL;
1027 			}
1028 
1029 			if (conf->vector_sz < limits.min_sz ||
1030 			    conf->vector_sz > limits.max_sz ||
1031 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1032 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1033 			    conf->vector_mp == NULL) {
1034 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1035 						" dev %" PRIu8 " cdev %" PRIu8,
1036 						id, cdev_id);
1037 				return -EINVAL;
1038 			}
1039 
1040 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1041 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1042 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1043 						" dev %" PRIu8 " cdev %" PRIu8,
1044 						id, cdev_id);
1045 				return -EINVAL;
1046 			}
1047 		}
1048 	}
1049 
1050 	dev_info = &adapter->cdevs[cdev_id];
1051 
1052 	if (queue_pair_id != -1 &&
1053 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1054 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1055 				 (uint16_t)queue_pair_id);
1056 		return -EINVAL;
1057 	}
1058 
1059 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1060 	 * there is no need for a service core as HW supports event forwarding.
1061 	 */
1062 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1063 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1064 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1065 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1066 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1067 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1068 			return -ENOTSUP;
1069 		if (dev_info->qpairs == NULL) {
1070 			dev_info->qpairs =
1071 			    rte_zmalloc_socket(adapter->mem_name,
1072 					dev_info->dev->data->nb_queue_pairs *
1073 					sizeof(struct crypto_queue_pair_info),
1074 					0, adapter->socket_id);
1075 			if (dev_info->qpairs == NULL)
1076 				return -ENOMEM;
1077 		}
1078 
1079 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1080 				dev_info->dev,
1081 				queue_pair_id,
1082 				conf);
1083 		if (ret)
1084 			return ret;
1085 
1086 		else
1087 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1088 					   queue_pair_id, 1);
1089 	}
1090 
1091 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1092 	 * or SW adapter, initiate services so the application can choose
1093 	 * whichever way it wants to use the adapter.
1094 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1095 	 *         Application may want to use one of the two modes below:
1096 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1097 	 *          b. OP_NEW mode -> HW Dequeue
1098 	 * Case 2: No HW caps, use SW adapter
1099 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
1100 	 *          b. OP_NEW mode -> SW Dequeue
1101 	 */
1102 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1103 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1104 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1105 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1106 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1107 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1108 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1109 		rte_spinlock_lock(&adapter->lock);
1110 		ret = eca_init_service(adapter, id);
1111 		if (ret == 0)
1112 			ret = eca_add_queue_pair(adapter, cdev_id,
1113 						 queue_pair_id);
1114 		rte_spinlock_unlock(&adapter->lock);
1115 
1116 		if (ret)
1117 			return ret;
1118 
1119 		rte_service_component_runstate_set(adapter->service_id, 1);
1120 	}
1121 
1122 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1123 		queue_pair_id, conf);
1124 	return 0;
1125 }
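/* Illustrative sketch of the event-vector path validated above (assumes the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability and a vector mempool
 * created beforehand, e.g. with rte_event_vector_pool_create()); names are
 * placeholders:
 *
 *	struct rte_event_crypto_adapter_vector_limits lim;
 *	struct rte_event_crypto_adapter_queue_conf qconf = {0};
 *
 *	rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id, &lim);
 *	qconf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
 *	qconf.vector_sz = lim.min_sz;
 *	qconf.vector_timeout_ns = lim.min_timeout_ns;
 *	qconf.vector_mp = vec_mp;
 *	ret = rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
 *						      &qconf);
 */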
1126 
1127 int
1128 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1129 					int32_t queue_pair_id)
1130 {
1131 	struct event_crypto_adapter *adapter;
1132 	struct crypto_device_info *dev_info;
1133 	struct rte_eventdev *dev;
1134 	int ret;
1135 	uint32_t cap;
1136 	uint16_t i;
1137 
1138 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1139 
1140 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1141 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1142 		return -EINVAL;
1143 	}
1144 
1145 	adapter = eca_id_to_adapter(id);
1146 	if (adapter == NULL)
1147 		return -EINVAL;
1148 
1149 	dev = &rte_eventdevs[adapter->eventdev_id];
1150 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1151 						cdev_id,
1152 						&cap);
1153 	if (ret)
1154 		return ret;
1155 
1156 	dev_info = &adapter->cdevs[cdev_id];
1157 
1158 	if (queue_pair_id != -1 &&
1159 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1160 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1161 				 (uint16_t)queue_pair_id);
1162 		return -EINVAL;
1163 	}
1164 
1165 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1166 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1167 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1168 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1169 			return -ENOTSUP;
1170 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1171 						dev_info->dev,
1172 						queue_pair_id);
1173 		if (ret == 0) {
1174 			eca_update_qp_info(adapter,
1175 					&adapter->cdevs[cdev_id],
1176 					queue_pair_id,
1177 					0);
1178 			if (dev_info->num_qpairs == 0) {
1179 				rte_free(dev_info->qpairs);
1180 				dev_info->qpairs = NULL;
1181 			}
1182 		}
1183 	} else {
1184 		if (adapter->nb_qps == 0)
1185 			return 0;
1186 
1187 		rte_spinlock_lock(&adapter->lock);
1188 		if (queue_pair_id == -1) {
1189 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1190 				i++)
1191 				eca_update_qp_info(adapter, dev_info,
1192 							i, 0);
1193 		} else {
1194 			eca_update_qp_info(adapter, dev_info,
1195 						(uint16_t)queue_pair_id, 0);
1196 		}
1197 
1198 		if (dev_info->num_qpairs == 0) {
1199 			rte_free(dev_info->qpairs);
1200 			dev_info->qpairs = NULL;
1201 		}
1202 
1203 		rte_spinlock_unlock(&adapter->lock);
1204 		rte_service_component_runstate_set(adapter->service_id,
1205 				adapter->nb_qps);
1206 	}
1207 
1208 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1209 		queue_pair_id, ret);
1210 	return ret;
1211 }
1212 
1213 static int
1214 eca_adapter_ctrl(uint8_t id, int start)
1215 {
1216 	struct event_crypto_adapter *adapter;
1217 	struct crypto_device_info *dev_info;
1218 	struct rte_eventdev *dev;
1219 	uint32_t i;
1220 	int use_service;
1221 	int stop = !start;
1222 
1223 	use_service = 0;
1224 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1225 	adapter = eca_id_to_adapter(id);
1226 	if (adapter == NULL)
1227 		return -EINVAL;
1228 
1229 	dev = &rte_eventdevs[adapter->eventdev_id];
1230 
1231 	for (i = 0; i < rte_cryptodev_count(); i++) {
1232 		dev_info = &adapter->cdevs[i];
1233 		/* if starting, check the number of queue pairs */
1234 		if (start && !dev_info->num_qpairs)
1235 			continue;
1236 		/* if stopping, check whether the dev has been started */
1237 		if (stop && !dev_info->dev_started)
1238 			continue;
1239 		use_service |= !dev_info->internal_event_port;
1240 		dev_info->dev_started = start;
1241 		if (dev_info->internal_event_port == 0)
1242 			continue;
1243 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1244 						&dev_info->dev[i]) :
1245 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1246 						&dev_info->dev[i]);
1247 	}
1248 
1249 	if (use_service)
1250 		rte_service_runstate_set(adapter->service_id, start);
1251 
1252 	return 0;
1253 }
1254 
1255 int
1256 rte_event_crypto_adapter_start(uint8_t id)
1257 {
1258 	struct event_crypto_adapter *adapter;
1259 
1260 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1261 	adapter = eca_id_to_adapter(id);
1262 	if (adapter == NULL)
1263 		return -EINVAL;
1264 
1265 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
1266 	return eca_adapter_ctrl(id, 1);
1267 }
1268 
1269 int
1270 rte_event_crypto_adapter_stop(uint8_t id)
1271 {
1272 	rte_eventdev_trace_crypto_adapter_stop(id);
1273 	return eca_adapter_ctrl(id, 0);
1274 }
1275 
1276 int
1277 rte_event_crypto_adapter_stats_get(uint8_t id,
1278 				struct rte_event_crypto_adapter_stats *stats)
1279 {
1280 	struct event_crypto_adapter *adapter;
1281 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1282 	struct rte_event_crypto_adapter_stats dev_stats;
1283 	struct rte_eventdev *dev;
1284 	struct crypto_device_info *dev_info;
1285 	uint32_t i;
1286 	int ret;
1287 
1288 	if (eca_memzone_lookup())
1289 		return -ENOMEM;
1290 
1291 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1292 
1293 	adapter = eca_id_to_adapter(id);
1294 	if (adapter == NULL || stats == NULL)
1295 		return -EINVAL;
1296 
1297 	dev = &rte_eventdevs[adapter->eventdev_id];
1298 	memset(stats, 0, sizeof(*stats));
1299 	for (i = 0; i < rte_cryptodev_count(); i++) {
1300 		dev_info = &adapter->cdevs[i];
1301 		if (dev_info->internal_event_port == 0 ||
1302 			dev->dev_ops->crypto_adapter_stats_get == NULL)
1303 			continue;
1304 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1305 						dev_info->dev,
1306 						&dev_stats);
1307 		if (ret)
1308 			continue;
1309 
1310 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1311 		dev_stats_sum.event_enq_count +=
1312 			dev_stats.event_enq_count;
1313 	}
1314 
1315 	if (adapter->service_inited)
1316 		*stats = adapter->crypto_stats;
1317 
1318 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1319 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1320 
1321 	rte_eventdev_trace_crypto_adapter_stats_get(id, stats,
1322 		stats->event_poll_count, stats->event_deq_count,
1323 		stats->crypto_enq_count, stats->crypto_enq_fail,
1324 		stats->crypto_deq_count, stats->event_enq_count,
1325 		stats->event_enq_retry_count, stats->event_enq_fail_count);
1326 
1327 	return 0;
1328 }
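/* Example of polling the aggregated counters (illustrative only):
 *
 *	struct rte_event_crypto_adapter_stats s;
 *
 *	if (rte_event_crypto_adapter_stats_get(id, &s) == 0)
 *		printf("crypto enq %" PRIu64 " deq %" PRIu64 "\n",
 *		       s.crypto_enq_count, s.crypto_deq_count);
 */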
1329 
1330 int
1331 rte_event_crypto_adapter_stats_reset(uint8_t id)
1332 {
1333 	struct event_crypto_adapter *adapter;
1334 	struct crypto_device_info *dev_info;
1335 	struct rte_eventdev *dev;
1336 	uint32_t i;
1337 
1338 	rte_eventdev_trace_crypto_adapter_stats_reset(id);
1339 
1340 	if (eca_memzone_lookup())
1341 		return -ENOMEM;
1342 
1343 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1344 
1345 	adapter = eca_id_to_adapter(id);
1346 	if (adapter == NULL)
1347 		return -EINVAL;
1348 
1349 	dev = &rte_eventdevs[adapter->eventdev_id];
1350 	for (i = 0; i < rte_cryptodev_count(); i++) {
1351 		dev_info = &adapter->cdevs[i];
1352 		if (dev_info->internal_event_port == 0 ||
1353 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
1354 			continue;
1355 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1356 						dev_info->dev);
1357 	}
1358 
1359 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1360 	return 0;
1361 }
1362 
1363 int
1364 rte_event_crypto_adapter_runtime_params_init(
1365 		struct rte_event_crypto_adapter_runtime_params *params)
1366 {
1367 	if (params == NULL)
1368 		return -EINVAL;
1369 
1370 	memset(params, 0, sizeof(*params));
1371 	params->max_nb = DEFAULT_MAX_NB;
1372 
1373 	return 0;
1374 }
1375 
1376 static int
1377 crypto_adapter_cap_check(struct event_crypto_adapter *adapter)
1378 {
1379 	int ret;
1380 	uint32_t caps;
1381 
1382 	if (!adapter->nb_qps)
1383 		return -EINVAL;
1384 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1385 						adapter->next_cdev_id,
1386 						&caps);
1387 	if (ret) {
1388 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1389 			" cdev %" PRIu8, adapter->eventdev_id,
1390 			adapter->next_cdev_id);
1391 		return ret;
1392 	}
1393 
1394 	if ((caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1395 	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1396 		return -ENOTSUP;
1397 
1398 	return 0;
1399 }
1400 
1401 int
1402 rte_event_crypto_adapter_runtime_params_set(uint8_t id,
1403 		struct rte_event_crypto_adapter_runtime_params *params)
1404 {
1405 	struct event_crypto_adapter *adapter;
1406 	int ret;
1407 
1408 	if (eca_memzone_lookup())
1409 		return -ENOMEM;
1410 
1411 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1412 
1413 	if (params == NULL) {
1414 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1415 		return -EINVAL;
1416 	}
1417 
1418 	adapter = eca_id_to_adapter(id);
1419 	if (adapter == NULL)
1420 		return -EINVAL;
1421 
1422 	ret = crypto_adapter_cap_check(adapter);
1423 	if (ret)
1424 		return ret;
1425 
1426 	rte_spinlock_lock(&adapter->lock);
1427 	adapter->max_nb = params->max_nb;
1428 	rte_spinlock_unlock(&adapter->lock);
1429 
1430 	return 0;
1431 }
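/* A minimal sketch of adjusting max_nb at runtime (SW adapter only, as
 * enforced by crypto_adapter_cap_check() above); the value is illustrative:
 *
 *	struct rte_event_crypto_adapter_runtime_params p;
 *
 *	rte_event_crypto_adapter_runtime_params_init(&p);
 *	p.max_nb = 256;
 *	ret = rte_event_crypto_adapter_runtime_params_set(id, &p);
 */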
1432 
1433 int
1434 rte_event_crypto_adapter_runtime_params_get(uint8_t id,
1435 		struct rte_event_crypto_adapter_runtime_params *params)
1436 {
1437 	struct event_crypto_adapter *adapter;
1438 	int ret;
1439 
1440 	if (eca_memzone_lookup())
1441 		return -ENOMEM;
1442 
1443 
1444 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1445 
1446 	if (params == NULL) {
1447 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1448 		return -EINVAL;
1449 	}
1450 
1451 	adapter = eca_id_to_adapter(id);
1452 	if (adapter == NULL)
1453 		return -EINVAL;
1454 
1455 	ret = crypto_adapter_cap_check(adapter);
1456 	if (ret)
1457 		return ret;
1458 
1459 	params->max_nb = adapter->max_nb;
1460 
1461 	return 0;
1462 }
1463 
1464 int
1465 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1466 {
1467 	struct event_crypto_adapter *adapter;
1468 
1469 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1470 
1471 	adapter = eca_id_to_adapter(id);
1472 	if (adapter == NULL || service_id == NULL)
1473 		return -EINVAL;
1474 
1475 	if (adapter->service_inited)
1476 		*service_id = adapter->service_id;
1477 
1478 	rte_eventdev_trace_crypto_adapter_service_id_get(id, *service_id);
1479 
1480 	return adapter->service_inited ? 0 : -ESRCH;
1481 }
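/* When the adapter uses a service (no internal port), the application maps
 * the returned service id to a service lcore so the adapter actually runs.
 * A minimal sketch, assuming lcore_id has been added as a service core:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 */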
1482 
1483 int
1484 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1485 {
1486 	struct event_crypto_adapter *adapter;
1487 
1488 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1489 
1490 	adapter = eca_id_to_adapter(id);
1491 	if (adapter == NULL || event_port_id == NULL)
1492 		return -EINVAL;
1493 
1494 	*event_port_id = adapter->event_port_id;
1495 
1496 	rte_eventdev_trace_crypto_adapter_event_port_get(id, *event_port_id);
1497 
1498 	return 0;
1499 }
1500 
1501 int
1502 rte_event_crypto_adapter_vector_limits_get(
1503 	uint8_t dev_id, uint16_t cdev_id,
1504 	struct rte_event_crypto_adapter_vector_limits *limits)
1505 {
1506 	struct rte_cryptodev *cdev;
1507 	struct rte_eventdev *dev;
1508 	uint32_t cap;
1509 	int ret;
1510 
1511 	rte_eventdev_trace_crypto_adapter_vector_limits_get(dev_id, cdev_id, limits);
1512 
1513 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1514 
1515 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1516 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu16, cdev_id);
1517 		return -EINVAL;
1518 	}
1519 
1520 	if (limits == NULL) {
1521 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1522 		return -EINVAL;
1523 	}
1524 
1525 	dev = &rte_eventdevs[dev_id];
1526 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1527 
1528 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1529 	if (ret) {
1530 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1531 				 " cdev %" PRIu16, dev_id, cdev_id);
1532 		return ret;
1533 	}
1534 
1535 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1536 		RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
1537 				 "dev %" PRIu8 " cdev %" PRIu16, dev_id, cdev_id);
1538 		return -ENOTSUP;
1539 	}
1540 
1541 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1542 		return -ENOTSUP;
1543 
1544 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1545 		dev, cdev, limits);
1546 }
1547