xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision ad12d08f05db8323ac1fb655b75dcef69e536a04)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2018 Intel Corporation.
399a2dd95SBruce Richardson  * All rights reserved.
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson 
699a2dd95SBruce Richardson #include <string.h>
799a2dd95SBruce Richardson #include <stdbool.h>
899a2dd95SBruce Richardson #include <rte_common.h>
91acb7f54SDavid Marchand #include <dev_driver.h>
1099a2dd95SBruce Richardson #include <rte_errno.h>
1199a2dd95SBruce Richardson #include <rte_cryptodev.h>
12af668035SAkhil Goyal #include <cryptodev_pmd.h>
1399a2dd95SBruce Richardson #include <rte_log.h>
1499a2dd95SBruce Richardson #include <rte_malloc.h>
1599a2dd95SBruce Richardson #include <rte_service_component.h>
1699a2dd95SBruce Richardson 
1799a2dd95SBruce Richardson #include "rte_eventdev.h"
1899a2dd95SBruce Richardson #include "eventdev_pmd.h"
19f26f2ca6SPavan Nikhilesh #include "eventdev_trace.h"
2099a2dd95SBruce Richardson #include "rte_event_crypto_adapter.h"
2199a2dd95SBruce Richardson 
/* Number of events/ops moved per burst by the service function */
#define BATCH_SIZE 32
/* Default cap on ops processed per service invocation (see conf->max_nb) */
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
/* Max attempts to enqueue a response event back to the eventdev */
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* MAX_OPS_IN_BUFFER contains size for batch of dequeued events */
#define MAX_OPS_IN_BUFFER BATCH_SIZE

/* CRYPTO_ADAPTER_OPS_BUFFER_SZ to accommodate MAX_OPS_IN_BUFFER +
 * additional space for one batch
 */
#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (MAX_OPS_IN_BUFFER + BATCH_SIZE)

/* Size of the circular buffer staging ops toward the eventdev */
#define CRYPTO_ADAPTER_BUFFER_SZ 1024

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

/* Name of the memzone holding the per-process adapter pointer array */
#define ECA_ADAPTER_ARRAY "crypto_adapter_array"
448f4ff7deSGanapati Kundapura 
/* Circular buffer used to batch rte_crypto_op pointers, either toward a
 * cryptodev queue pair or back toward the eventdev. head/tail/count are
 * maintained by eca_circular_buffer_add()/..._flush_to_cdev().
 */
struct __rte_cache_aligned crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
};
572ae84b39SGanapati Kundapura 
/* Per-adapter instance state; instances live in the ECA_ADAPTER_ARRAY
 * memzone so they are shared across primary/secondary processes.
 */
struct __rte_cache_aligned event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event port's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Flag to indicate backpressure at cryptodev
	 * Stop further dequeuing events from eventdev
	 */
	bool stop_enq_to_cryptodev;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Circular buffer for batching crypto ops to eventdev */
	struct crypto_ops_circular_buffer ebuf;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if  default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
};
10299a2dd95SBruce Richardson 
/* Per crypto device information */
struct __rte_cache_aligned crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
};
12299a2dd95SBruce Richardson 
/* Per queue pair information */
struct __rte_cache_aligned crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Circular buffer for batching crypto ops to cdev */
	struct crypto_ops_circular_buffer cbuf;
};
13099a2dd95SBruce Richardson 
/* Per-process pointer to the adapter array stored in the
 * ECA_ADAPTER_ARRAY memzone; set by eca_init()/eca_memzone_lookup().
 */
static struct event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
		return retval; \
	} \
} while (0)
14099a2dd95SBruce Richardson 
#define ECA_DYNFIELD_NAME "eca_ev_opaque_data"
/* Device-specific metadata field type */
typedef uint8_t eca_dynfield_t;

/* mbuf dynamic field offset for device-specific metadata.
 * -1 until eca_dynfield_register() succeeds.
 * NOTE(review): not declared static although it appears internal to this
 * file — confirm no other translation unit references it before narrowing
 * linkage.
 */
int eca_dynfield_offset = -1;

/* Register the mbuf dynamic field used to stash
 * "struct rte_event::impl_opaque" across a crypto operation; returns the
 * field offset (>= 0) or a negative value from
 * rte_mbuf_dynfield_register() on failure.
 */
static int
eca_dynfield_register(void)
{
	static const struct rte_mbuf_dynfield eca_dynfield_desc = {
		.name = ECA_DYNFIELD_NAME,
		.size = sizeof(eca_dynfield_t),
		.align = alignof(eca_dynfield_t),
		.flags = 0,
	};

	eca_dynfield_offset =
		rte_mbuf_dynfield_register(&eca_dynfield_desc);
	return eca_dynfield_offset;
}
162*ad12d08fSGanapati Kundapura 
16399a2dd95SBruce Richardson static inline int
eca_valid_id(uint8_t id)16499a2dd95SBruce Richardson eca_valid_id(uint8_t id)
16599a2dd95SBruce Richardson {
16699a2dd95SBruce Richardson 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
16799a2dd95SBruce Richardson }
16899a2dd95SBruce Richardson 
16999a2dd95SBruce Richardson static int
eca_init(void)17099a2dd95SBruce Richardson eca_init(void)
17199a2dd95SBruce Richardson {
17299a2dd95SBruce Richardson 	const struct rte_memzone *mz;
17399a2dd95SBruce Richardson 	unsigned int sz;
17499a2dd95SBruce Richardson 
17599a2dd95SBruce Richardson 	sz = sizeof(*event_crypto_adapter) *
17699a2dd95SBruce Richardson 	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
17799a2dd95SBruce Richardson 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
17899a2dd95SBruce Richardson 
1798f4ff7deSGanapati Kundapura 	mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
18099a2dd95SBruce Richardson 	if (mz == NULL) {
1818f4ff7deSGanapati Kundapura 		mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
1828f4ff7deSGanapati Kundapura 						 rte_socket_id(), 0,
18399a2dd95SBruce Richardson 						 RTE_CACHE_LINE_SIZE);
18499a2dd95SBruce Richardson 		if (mz == NULL) {
18599a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
18699a2dd95SBruce Richardson 					PRId32, rte_errno);
18799a2dd95SBruce Richardson 			return -rte_errno;
18899a2dd95SBruce Richardson 		}
18999a2dd95SBruce Richardson 	}
19099a2dd95SBruce Richardson 
19199a2dd95SBruce Richardson 	event_crypto_adapter = mz->addr;
19299a2dd95SBruce Richardson 	return 0;
19399a2dd95SBruce Richardson }
19499a2dd95SBruce Richardson 
1958f4ff7deSGanapati Kundapura static int
eca_memzone_lookup(void)1968f4ff7deSGanapati Kundapura eca_memzone_lookup(void)
1978f4ff7deSGanapati Kundapura {
1988f4ff7deSGanapati Kundapura 	const struct rte_memzone *mz;
1998f4ff7deSGanapati Kundapura 
2008f4ff7deSGanapati Kundapura 	if (event_crypto_adapter == NULL) {
2018f4ff7deSGanapati Kundapura 		mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
2028f4ff7deSGanapati Kundapura 		if (mz == NULL)
2038f4ff7deSGanapati Kundapura 			return -ENOMEM;
2048f4ff7deSGanapati Kundapura 
2058f4ff7deSGanapati Kundapura 		event_crypto_adapter = mz->addr;
2068f4ff7deSGanapati Kundapura 	}
2078f4ff7deSGanapati Kundapura 
2088f4ff7deSGanapati Kundapura 	return 0;
2098f4ff7deSGanapati Kundapura }
2108f4ff7deSGanapati Kundapura 
2112ae84b39SGanapati Kundapura static inline bool
eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer * bufp)2122ae84b39SGanapati Kundapura eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
2132ae84b39SGanapati Kundapura {
2142ae84b39SGanapati Kundapura 	return bufp->count >= BATCH_SIZE;
2152ae84b39SGanapati Kundapura }
2162ae84b39SGanapati Kundapura 
2172ae84b39SGanapati Kundapura static inline bool
eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer * bufp)2182ae84b39SGanapati Kundapura eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
2192ae84b39SGanapati Kundapura {
22022505558SGanapati Kundapura 	/* circular buffer can have atmost MAX_OPS_IN_BUFFER */
22122505558SGanapati Kundapura 	return (bufp->size - bufp->count) >= MAX_OPS_IN_BUFFER;
2222ae84b39SGanapati Kundapura }
2232ae84b39SGanapati Kundapura 
2242ae84b39SGanapati Kundapura static inline void
eca_circular_buffer_free(struct crypto_ops_circular_buffer * bufp)2252ae84b39SGanapati Kundapura eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
2262ae84b39SGanapati Kundapura {
2272ae84b39SGanapati Kundapura 	rte_free(bufp->op_buffer);
2282ae84b39SGanapati Kundapura }
2292ae84b39SGanapati Kundapura 
2302ae84b39SGanapati Kundapura static inline int
eca_circular_buffer_init(const char * name,struct crypto_ops_circular_buffer * bufp,uint16_t sz)2312ae84b39SGanapati Kundapura eca_circular_buffer_init(const char *name,
2322ae84b39SGanapati Kundapura 			 struct crypto_ops_circular_buffer *bufp,
2332ae84b39SGanapati Kundapura 			 uint16_t sz)
2342ae84b39SGanapati Kundapura {
2352ae84b39SGanapati Kundapura 	bufp->op_buffer = rte_zmalloc(name,
2362ae84b39SGanapati Kundapura 				      sizeof(struct rte_crypto_op *) * sz,
2372ae84b39SGanapati Kundapura 				      0);
2382ae84b39SGanapati Kundapura 	if (bufp->op_buffer == NULL)
2392ae84b39SGanapati Kundapura 		return -ENOMEM;
2402ae84b39SGanapati Kundapura 
2412ae84b39SGanapati Kundapura 	bufp->size = sz;
2422ae84b39SGanapati Kundapura 	return 0;
2432ae84b39SGanapati Kundapura }
2442ae84b39SGanapati Kundapura 
2452ae84b39SGanapati Kundapura static inline int
eca_circular_buffer_add(struct crypto_ops_circular_buffer * bufp,struct rte_crypto_op * op)2462ae84b39SGanapati Kundapura eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
2472ae84b39SGanapati Kundapura 			struct rte_crypto_op *op)
2482ae84b39SGanapati Kundapura {
2492ae84b39SGanapati Kundapura 	uint16_t *tailp = &bufp->tail;
2502ae84b39SGanapati Kundapura 
2512ae84b39SGanapati Kundapura 	bufp->op_buffer[*tailp] = op;
2522ae84b39SGanapati Kundapura 	/* circular buffer, go round */
2532ae84b39SGanapati Kundapura 	*tailp = (*tailp + 1) % bufp->size;
2542ae84b39SGanapati Kundapura 	bufp->count++;
2552ae84b39SGanapati Kundapura 
2562ae84b39SGanapati Kundapura 	return 0;
2572ae84b39SGanapati Kundapura }
2582ae84b39SGanapati Kundapura 
/* Enqueue one contiguous run of buffered ops to cryptodev cdev_id/qp_id.
 * Only the linear region from head up to tail or up to the end of the
 * array is submitted per call; a wrapped buffer needs a second call for
 * the remainder. *nb_ops_flushed is set to the number actually accepted
 * by the cryptodev. Returns 0 when the whole run was accepted, -1 when
 * the cryptodev took fewer ops than offered (backpressure).
 */
static inline int
eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
				  uint8_t cdev_id, uint16_t qp_id,
				  uint16_t *nb_ops_flushed)
{
	uint16_t n = 0;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		/* Flush ops from head pointer to (tail - head) OPs */
		n = *tailp - *headp;
	else if (*tailp < *headp)
		/* Circ buffer - Rollover.
		 * Flush OPs from head to max size of buffer.
		 * Rest of the OPs will be flushed in next iteration.
		 */
		n = bufp->size - *headp;
	else { /* head == tail case */
		/* when head == tail,
		 * circ buff is either full(tail pointer roll over) or empty
		 */
		if (bufp->count != 0) {
			/* Circ buffer - FULL.
			 * Flush OPs from head to max size of buffer.
			 * Rest of the OPS will be flushed in next iteration.
			 */
			n = bufp->size - *headp;
		} else {
			/* Circ buffer - Empty */
			*nb_ops_flushed = 0;
			return 0;
		}
	}

	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
						      &ops[*headp], n);
	bufp->count -= *nb_ops_flushed;
	/* Reset indices on empty so the next fill starts unwrapped */
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
	} else
		*headp = (*headp + *nb_ops_flushed) % bufp->size;

	return *nb_ops_flushed == n ? 0 : -1;
}
3062ae84b39SGanapati Kundapura 
307a256a743SPavan Nikhilesh static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)30899a2dd95SBruce Richardson eca_id_to_adapter(uint8_t id)
30999a2dd95SBruce Richardson {
31099a2dd95SBruce Richardson 	return event_crypto_adapter ?
31199a2dd95SBruce Richardson 		event_crypto_adapter[id] : NULL;
31299a2dd95SBruce Richardson }
31399a2dd95SBruce Richardson 
31499a2dd95SBruce Richardson static int
eca_default_config_cb(uint8_t id,uint8_t dev_id,struct rte_event_crypto_adapter_conf * conf,void * arg)31599a2dd95SBruce Richardson eca_default_config_cb(uint8_t id, uint8_t dev_id,
31699a2dd95SBruce Richardson 			struct rte_event_crypto_adapter_conf *conf, void *arg)
31799a2dd95SBruce Richardson {
31899a2dd95SBruce Richardson 	struct rte_event_dev_config dev_conf;
31999a2dd95SBruce Richardson 	struct rte_eventdev *dev;
32099a2dd95SBruce Richardson 	uint8_t port_id;
32199a2dd95SBruce Richardson 	int started;
32299a2dd95SBruce Richardson 	int ret;
32399a2dd95SBruce Richardson 	struct rte_event_port_conf *port_conf = arg;
324a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
32599a2dd95SBruce Richardson 
32699a2dd95SBruce Richardson 	if (adapter == NULL)
32799a2dd95SBruce Richardson 		return -EINVAL;
32899a2dd95SBruce Richardson 
32999a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
33099a2dd95SBruce Richardson 	dev_conf = dev->data->dev_conf;
33199a2dd95SBruce Richardson 
33299a2dd95SBruce Richardson 	started = dev->data->dev_started;
33399a2dd95SBruce Richardson 	if (started)
33499a2dd95SBruce Richardson 		rte_event_dev_stop(dev_id);
33599a2dd95SBruce Richardson 	port_id = dev_conf.nb_event_ports;
33699a2dd95SBruce Richardson 	dev_conf.nb_event_ports += 1;
337e55d9dcaSNaga Harish K S V 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
338e55d9dcaSNaga Harish K S V 		dev_conf.nb_single_link_event_port_queues += 1;
339e55d9dcaSNaga Harish K S V 
34099a2dd95SBruce Richardson 	ret = rte_event_dev_configure(dev_id, &dev_conf);
34199a2dd95SBruce Richardson 	if (ret) {
342ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
34399a2dd95SBruce Richardson 		if (started) {
34499a2dd95SBruce Richardson 			if (rte_event_dev_start(dev_id))
34599a2dd95SBruce Richardson 				return -EIO;
34699a2dd95SBruce Richardson 		}
34799a2dd95SBruce Richardson 		return ret;
34899a2dd95SBruce Richardson 	}
34999a2dd95SBruce Richardson 
35099a2dd95SBruce Richardson 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
35199a2dd95SBruce Richardson 	if (ret) {
352ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
35399a2dd95SBruce Richardson 		return ret;
35499a2dd95SBruce Richardson 	}
35599a2dd95SBruce Richardson 
35699a2dd95SBruce Richardson 	conf->event_port_id = port_id;
35799a2dd95SBruce Richardson 	conf->max_nb = DEFAULT_MAX_NB;
35899a2dd95SBruce Richardson 	if (started)
35999a2dd95SBruce Richardson 		ret = rte_event_dev_start(dev_id);
36099a2dd95SBruce Richardson 
36199a2dd95SBruce Richardson 	adapter->default_cb_arg = 1;
36299a2dd95SBruce Richardson 	return ret;
36399a2dd95SBruce Richardson }
36499a2dd95SBruce Richardson 
36599a2dd95SBruce Richardson int
rte_event_crypto_adapter_create_ext(uint8_t id,uint8_t dev_id,rte_event_crypto_adapter_conf_cb conf_cb,enum rte_event_crypto_adapter_mode mode,void * conf_arg)36699a2dd95SBruce Richardson rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
36799a2dd95SBruce Richardson 				rte_event_crypto_adapter_conf_cb conf_cb,
36899a2dd95SBruce Richardson 				enum rte_event_crypto_adapter_mode mode,
36999a2dd95SBruce Richardson 				void *conf_arg)
37099a2dd95SBruce Richardson {
371a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
37299a2dd95SBruce Richardson 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
37399a2dd95SBruce Richardson 	int socket_id;
37499a2dd95SBruce Richardson 	uint8_t i;
37599a2dd95SBruce Richardson 	int ret;
37699a2dd95SBruce Richardson 
37799a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
37899a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
37999a2dd95SBruce Richardson 	if (conf_cb == NULL)
38099a2dd95SBruce Richardson 		return -EINVAL;
38199a2dd95SBruce Richardson 
38299a2dd95SBruce Richardson 	if (event_crypto_adapter == NULL) {
38399a2dd95SBruce Richardson 		ret = eca_init();
38499a2dd95SBruce Richardson 		if (ret)
38599a2dd95SBruce Richardson 			return ret;
38699a2dd95SBruce Richardson 	}
38799a2dd95SBruce Richardson 
38899a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
38999a2dd95SBruce Richardson 	if (adapter != NULL) {
39099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
39199a2dd95SBruce Richardson 		return -EEXIST;
39299a2dd95SBruce Richardson 	}
39399a2dd95SBruce Richardson 
39499a2dd95SBruce Richardson 	socket_id = rte_event_dev_socket_id(dev_id);
39599a2dd95SBruce Richardson 	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
39699a2dd95SBruce Richardson 		 "rte_event_crypto_adapter_%d", id);
39799a2dd95SBruce Richardson 
39899a2dd95SBruce Richardson 	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
39999a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE, socket_id);
40099a2dd95SBruce Richardson 	if (adapter == NULL) {
40199a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
40299a2dd95SBruce Richardson 		return -ENOMEM;
40399a2dd95SBruce Richardson 	}
40499a2dd95SBruce Richardson 
4052ae84b39SGanapati Kundapura 	if (eca_circular_buffer_init("eca_edev_circular_buffer",
4062ae84b39SGanapati Kundapura 				     &adapter->ebuf,
4072ae84b39SGanapati Kundapura 				     CRYPTO_ADAPTER_BUFFER_SZ)) {
4082ae84b39SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
4092ae84b39SGanapati Kundapura 		rte_free(adapter);
4102ae84b39SGanapati Kundapura 		return -ENOMEM;
4112ae84b39SGanapati Kundapura 	}
4122ae84b39SGanapati Kundapura 
41399a2dd95SBruce Richardson 	adapter->eventdev_id = dev_id;
41499a2dd95SBruce Richardson 	adapter->socket_id = socket_id;
41599a2dd95SBruce Richardson 	adapter->conf_cb = conf_cb;
41699a2dd95SBruce Richardson 	adapter->conf_arg = conf_arg;
41799a2dd95SBruce Richardson 	adapter->mode = mode;
41899a2dd95SBruce Richardson 	strcpy(adapter->mem_name, mem_name);
41999a2dd95SBruce Richardson 	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
42099a2dd95SBruce Richardson 					rte_cryptodev_count() *
42199a2dd95SBruce Richardson 					sizeof(struct crypto_device_info), 0,
42299a2dd95SBruce Richardson 					socket_id);
42399a2dd95SBruce Richardson 	if (adapter->cdevs == NULL) {
424ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
4252ae84b39SGanapati Kundapura 		eca_circular_buffer_free(&adapter->ebuf);
42699a2dd95SBruce Richardson 		rte_free(adapter);
42799a2dd95SBruce Richardson 		return -ENOMEM;
42899a2dd95SBruce Richardson 	}
42999a2dd95SBruce Richardson 
43099a2dd95SBruce Richardson 	rte_spinlock_init(&adapter->lock);
43199a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++)
43299a2dd95SBruce Richardson 		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
43399a2dd95SBruce Richardson 
43499a2dd95SBruce Richardson 	event_crypto_adapter[id] = adapter;
43599a2dd95SBruce Richardson 
43699a2dd95SBruce Richardson 	return 0;
43799a2dd95SBruce Richardson }
43899a2dd95SBruce Richardson 
43999a2dd95SBruce Richardson 
44099a2dd95SBruce Richardson int
rte_event_crypto_adapter_create(uint8_t id,uint8_t dev_id,struct rte_event_port_conf * port_config,enum rte_event_crypto_adapter_mode mode)44199a2dd95SBruce Richardson rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
44299a2dd95SBruce Richardson 				struct rte_event_port_conf *port_config,
44399a2dd95SBruce Richardson 				enum rte_event_crypto_adapter_mode mode)
44499a2dd95SBruce Richardson {
44599a2dd95SBruce Richardson 	struct rte_event_port_conf *pc;
44699a2dd95SBruce Richardson 	int ret;
44799a2dd95SBruce Richardson 
44899a2dd95SBruce Richardson 	if (port_config == NULL)
44999a2dd95SBruce Richardson 		return -EINVAL;
45099a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
45199a2dd95SBruce Richardson 
45299a2dd95SBruce Richardson 	pc = rte_malloc(NULL, sizeof(*pc), 0);
45399a2dd95SBruce Richardson 	if (pc == NULL)
45499a2dd95SBruce Richardson 		return -ENOMEM;
45599a2dd95SBruce Richardson 	*pc = *port_config;
45699a2dd95SBruce Richardson 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
45799a2dd95SBruce Richardson 						  eca_default_config_cb,
45899a2dd95SBruce Richardson 						  mode,
45999a2dd95SBruce Richardson 						  pc);
46099a2dd95SBruce Richardson 	if (ret)
46199a2dd95SBruce Richardson 		rte_free(pc);
46299a2dd95SBruce Richardson 
4637f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_create(id, dev_id, port_config, mode,	ret);
4647f2d9df6SAmit Prakash Shukla 
46599a2dd95SBruce Richardson 	return ret;
46699a2dd95SBruce Richardson }
46799a2dd95SBruce Richardson 
46899a2dd95SBruce Richardson int
rte_event_crypto_adapter_free(uint8_t id)46999a2dd95SBruce Richardson rte_event_crypto_adapter_free(uint8_t id)
47099a2dd95SBruce Richardson {
471a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
47299a2dd95SBruce Richardson 
47399a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
47499a2dd95SBruce Richardson 
47599a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
47699a2dd95SBruce Richardson 	if (adapter == NULL)
47799a2dd95SBruce Richardson 		return -EINVAL;
47899a2dd95SBruce Richardson 
47999a2dd95SBruce Richardson 	if (adapter->nb_qps) {
48099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
48199a2dd95SBruce Richardson 				adapter->nb_qps);
48299a2dd95SBruce Richardson 		return -EBUSY;
48399a2dd95SBruce Richardson 	}
48499a2dd95SBruce Richardson 
48599a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
48699a2dd95SBruce Richardson 	if (adapter->default_cb_arg)
48799a2dd95SBruce Richardson 		rte_free(adapter->conf_arg);
48899a2dd95SBruce Richardson 	rte_free(adapter->cdevs);
48999a2dd95SBruce Richardson 	rte_free(adapter);
49099a2dd95SBruce Richardson 	event_crypto_adapter[id] = NULL;
49199a2dd95SBruce Richardson 
49299a2dd95SBruce Richardson 	return 0;
49399a2dd95SBruce Richardson }
49499a2dd95SBruce Richardson 
/* Move up to cnt dequeued events' crypto ops into the per-queue-pair
 * circular buffers and flush full batches to the cryptodevs.
 * Events without an op, without session event metadata, or targeting a
 * disabled queue pair are dropped (mbuf and op freed). Returns the number
 * of ops actually enqueued to cryptodevs; ops left buffered are flushed
 * on a later iteration.
 */
static inline unsigned int
eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
		     unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, nb_enqueued = 0;
	uint8_t cdev_id;
	int ret;

	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;

		/** "struct rte_event::impl_opaque" field passed on from
		 *  eventdev PMD could have different value per event.
		 *  For session-based crypto operations retain
		 *  "struct rte_event::impl_opaque" into mbuf dynamic field and
		 *  restore it back after copying event information from
		 *  session event metadata.
		 *  For session-less, each crypto operation carries event
		 *  metadata and retains "struct rte_event:impl_opaque"
		 *  information to be passed back to eventdev PMD.
		 */
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_mbuf *mbuf = crypto_op->sym->m_src;

			/* NOTE(review): assumes eca_dynfield_register() has
			 * succeeded (eca_dynfield_offset >= 0) — registration
			 * is not visible in this block; confirm at the call
			 * site that registers the field.
			 */
			*RTE_MBUF_DYNFIELD(mbuf,
					eca_dynfield_offset,
					eca_dynfield_t *) = ev[i].impl_opaque;
		}

		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
		if (m_data == NULL) {
			/* No routing info available: drop the op */
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		/* Destination cryptodev/queue pair comes from the session
		 * event metadata
		 */
		cdev_id = m_data->request_info.cdev_id;
		qp_id = m_data->request_info.queue_pair_id;
		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
		if (!qp_info->qp_enabled) {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}
		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);

		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
								cdev_id,
								qp_id,
								&nb_enqueued);
			stats->crypto_enq_count += nb_enqueued;
			n += nb_enqueued;

			/**
			 * If some crypto ops failed to flush to cdev and
			 * space for another batch is not available, stop
			 * dequeue from eventdev momentarily
			 */
			if (unlikely(ret < 0 &&
				!eca_circular_buffer_space_for_batch(
							&qp_info->cbuf)))
				adapter->stop_enq_to_cryptodev = true;
		}
	}

	return n;
}
57499a2dd95SBruce Richardson 
57599a2dd95SBruce Richardson static unsigned int
eca_crypto_cdev_flush(struct event_crypto_adapter * adapter,uint8_t cdev_id,uint16_t * nb_ops_flushed)5762ae84b39SGanapati Kundapura eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
5772ae84b39SGanapati Kundapura 		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
57899a2dd95SBruce Richardson {
57999a2dd95SBruce Richardson 	struct crypto_device_info *curr_dev;
58099a2dd95SBruce Richardson 	struct crypto_queue_pair_info *curr_queue;
58199a2dd95SBruce Richardson 	struct rte_cryptodev *dev;
5822ae84b39SGanapati Kundapura 	uint16_t nb = 0, nb_enqueued = 0;
58399a2dd95SBruce Richardson 	uint16_t qp;
58499a2dd95SBruce Richardson 
58599a2dd95SBruce Richardson 	curr_dev = &adapter->cdevs[cdev_id];
5862ae84b39SGanapati Kundapura 	dev = rte_cryptodev_pmd_get_dev(cdev_id);
587cc08c0b0SGanapati Kundapura 
58899a2dd95SBruce Richardson 	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
58999a2dd95SBruce Richardson 
59099a2dd95SBruce Richardson 		curr_queue = &curr_dev->qpairs[qp];
5912ae84b39SGanapati Kundapura 		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
59299a2dd95SBruce Richardson 			continue;
59399a2dd95SBruce Richardson 
5942ae84b39SGanapati Kundapura 		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
5952ae84b39SGanapati Kundapura 						  cdev_id,
59699a2dd95SBruce Richardson 						  qp,
5972ae84b39SGanapati Kundapura 						  &nb_enqueued);
5982ae84b39SGanapati Kundapura 		*nb_ops_flushed += curr_queue->cbuf.count;
5992ae84b39SGanapati Kundapura 		nb += nb_enqueued;
60099a2dd95SBruce Richardson 	}
60199a2dd95SBruce Richardson 
6022ae84b39SGanapati Kundapura 	return nb;
6032ae84b39SGanapati Kundapura }
6042ae84b39SGanapati Kundapura 
6052ae84b39SGanapati Kundapura static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter * adapter)6062ae84b39SGanapati Kundapura eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
6072ae84b39SGanapati Kundapura {
6082ae84b39SGanapati Kundapura 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
6092ae84b39SGanapati Kundapura 	uint8_t cdev_id;
6102ae84b39SGanapati Kundapura 	uint16_t nb_enqueued = 0;
6112ae84b39SGanapati Kundapura 	uint16_t nb_ops_flushed = 0;
6122ae84b39SGanapati Kundapura 	uint16_t num_cdev = rte_cryptodev_count();
6132ae84b39SGanapati Kundapura 
6142ae84b39SGanapati Kundapura 	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
6152ae84b39SGanapati Kundapura 		nb_enqueued += eca_crypto_cdev_flush(adapter,
6162ae84b39SGanapati Kundapura 						    cdev_id,
6172ae84b39SGanapati Kundapura 						    &nb_ops_flushed);
6182ae84b39SGanapati Kundapura 	/**
6192ae84b39SGanapati Kundapura 	 * Enable dequeue from eventdev if all ops from circular
6202ae84b39SGanapati Kundapura 	 * buffer flushed to cdev
6212ae84b39SGanapati Kundapura 	 */
6222ae84b39SGanapati Kundapura 	if (!nb_ops_flushed)
6232ae84b39SGanapati Kundapura 		adapter->stop_enq_to_cryptodev = false;
6242ae84b39SGanapati Kundapura 
6252ae84b39SGanapati Kundapura 	stats->crypto_enq_count += nb_enqueued;
6262ae84b39SGanapati Kundapura 
6272ae84b39SGanapati Kundapura 	return nb_enqueued;
62899a2dd95SBruce Richardson }
62999a2dd95SBruce Richardson 
63099a2dd95SBruce Richardson static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter * adapter,unsigned int max_enq)631a256a743SPavan Nikhilesh eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
63299a2dd95SBruce Richardson 			   unsigned int max_enq)
63399a2dd95SBruce Richardson {
63499a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
63599a2dd95SBruce Richardson 	struct rte_event ev[BATCH_SIZE];
63699a2dd95SBruce Richardson 	unsigned int nb_enq, nb_enqueued;
63799a2dd95SBruce Richardson 	uint16_t n;
63899a2dd95SBruce Richardson 	uint8_t event_dev_id = adapter->eventdev_id;
63999a2dd95SBruce Richardson 	uint8_t event_port_id = adapter->event_port_id;
64099a2dd95SBruce Richardson 
64199a2dd95SBruce Richardson 	nb_enqueued = 0;
64299a2dd95SBruce Richardson 	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
64399a2dd95SBruce Richardson 		return 0;
64499a2dd95SBruce Richardson 
645f442c040SGanapati Kundapura 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
646f442c040SGanapati Kundapura 
6472ae84b39SGanapati Kundapura 		if (unlikely(adapter->stop_enq_to_cryptodev)) {
6482ae84b39SGanapati Kundapura 			nb_enqueued += eca_crypto_enq_flush(adapter);
6492ae84b39SGanapati Kundapura 
6502ae84b39SGanapati Kundapura 			if (unlikely(adapter->stop_enq_to_cryptodev))
651f442c040SGanapati Kundapura 				break;
6522ae84b39SGanapati Kundapura 		}
6532ae84b39SGanapati Kundapura 
65499a2dd95SBruce Richardson 		stats->event_poll_count++;
65599a2dd95SBruce Richardson 		n = rte_event_dequeue_burst(event_dev_id,
65699a2dd95SBruce Richardson 					    event_port_id, ev, BATCH_SIZE, 0);
65799a2dd95SBruce Richardson 
65899a2dd95SBruce Richardson 		if (!n)
65999a2dd95SBruce Richardson 			break;
66099a2dd95SBruce Richardson 
66199a2dd95SBruce Richardson 		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
66299a2dd95SBruce Richardson 	}
66399a2dd95SBruce Richardson 
66499a2dd95SBruce Richardson 	if ((++adapter->transmit_loop_count &
66599a2dd95SBruce Richardson 		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
66699a2dd95SBruce Richardson 		nb_enqueued += eca_crypto_enq_flush(adapter);
66799a2dd95SBruce Richardson 	}
66899a2dd95SBruce Richardson 
66999a2dd95SBruce Richardson 	return nb_enqueued;
67099a2dd95SBruce Richardson }
67199a2dd95SBruce Richardson 
6722ae84b39SGanapati Kundapura static inline uint16_t
eca_ops_enqueue_burst(struct event_crypto_adapter * adapter,struct rte_crypto_op ** ops,uint16_t num)673a256a743SPavan Nikhilesh eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
67499a2dd95SBruce Richardson 		  struct rte_crypto_op **ops, uint16_t num)
67599a2dd95SBruce Richardson {
67699a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
67799a2dd95SBruce Richardson 	union rte_event_crypto_metadata *m_data = NULL;
67899a2dd95SBruce Richardson 	uint8_t event_dev_id = adapter->eventdev_id;
67999a2dd95SBruce Richardson 	uint8_t event_port_id = adapter->event_port_id;
68099a2dd95SBruce Richardson 	struct rte_event events[BATCH_SIZE];
68199a2dd95SBruce Richardson 	uint16_t nb_enqueued, nb_ev;
68299a2dd95SBruce Richardson 	uint8_t retry;
68399a2dd95SBruce Richardson 	uint8_t i;
68499a2dd95SBruce Richardson 
68599a2dd95SBruce Richardson 	nb_ev = 0;
68699a2dd95SBruce Richardson 	retry = 0;
68799a2dd95SBruce Richardson 	nb_enqueued = 0;
68899a2dd95SBruce Richardson 	num = RTE_MIN(num, BATCH_SIZE);
68999a2dd95SBruce Richardson 	for (i = 0; i < num; i++) {
69099a2dd95SBruce Richardson 		struct rte_event *ev = &events[nb_ev++];
6912ae84b39SGanapati Kundapura 
692b8c8a6ddSAkhil Goyal 		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
69399a2dd95SBruce Richardson 		if (unlikely(m_data == NULL)) {
69499a2dd95SBruce Richardson 			rte_pktmbuf_free(ops[i]->sym->m_src);
69599a2dd95SBruce Richardson 			rte_crypto_op_free(ops[i]);
69699a2dd95SBruce Richardson 			continue;
69799a2dd95SBruce Richardson 		}
69899a2dd95SBruce Richardson 
69999a2dd95SBruce Richardson 		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
70099a2dd95SBruce Richardson 		ev->event_ptr = ops[i];
701*ad12d08fSGanapati Kundapura 
702*ad12d08fSGanapati Kundapura 		/** Restore "struct rte_event::impl_opaque" from mbuf
703*ad12d08fSGanapati Kundapura 		 *  dynamic field for session based crypto operation.
704*ad12d08fSGanapati Kundapura 		 *  For session-less, each crypto operations carries event
705*ad12d08fSGanapati Kundapura 		 *  metadata and retains "struct rte_event::impl_opaque"
706*ad12d08fSGanapati Kundapura 		 *  information to be passed back to eventdev PMD.
707*ad12d08fSGanapati Kundapura 		 */
708*ad12d08fSGanapati Kundapura 		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
709*ad12d08fSGanapati Kundapura 			struct rte_mbuf *mbuf = ops[i]->sym->m_src;
710*ad12d08fSGanapati Kundapura 
711*ad12d08fSGanapati Kundapura 			ev->impl_opaque = *RTE_MBUF_DYNFIELD(mbuf,
712*ad12d08fSGanapati Kundapura 							eca_dynfield_offset,
713*ad12d08fSGanapati Kundapura 							eca_dynfield_t *);
714*ad12d08fSGanapati Kundapura 		}
715*ad12d08fSGanapati Kundapura 
71699a2dd95SBruce Richardson 		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
71799a2dd95SBruce Richardson 		if (adapter->implicit_release_disabled)
71899a2dd95SBruce Richardson 			ev->op = RTE_EVENT_OP_FORWARD;
71999a2dd95SBruce Richardson 		else
72099a2dd95SBruce Richardson 			ev->op = RTE_EVENT_OP_NEW;
72199a2dd95SBruce Richardson 	}
72299a2dd95SBruce Richardson 
72399a2dd95SBruce Richardson 	do {
72499a2dd95SBruce Richardson 		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
72599a2dd95SBruce Richardson 						  event_port_id,
72699a2dd95SBruce Richardson 						  &events[nb_enqueued],
72799a2dd95SBruce Richardson 						  nb_ev - nb_enqueued);
7282ae84b39SGanapati Kundapura 
72999a2dd95SBruce Richardson 	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
73099a2dd95SBruce Richardson 		 nb_enqueued < nb_ev);
73199a2dd95SBruce Richardson 
73299a2dd95SBruce Richardson 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
73399a2dd95SBruce Richardson 	stats->event_enq_count += nb_enqueued;
73499a2dd95SBruce Richardson 	stats->event_enq_retry_count += retry - 1;
7352ae84b39SGanapati Kundapura 
7362ae84b39SGanapati Kundapura 	return nb_enqueued;
73799a2dd95SBruce Richardson }
73899a2dd95SBruce Richardson 
7392ae84b39SGanapati Kundapura static int
eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter * adapter,struct crypto_ops_circular_buffer * bufp)7402ae84b39SGanapati Kundapura eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
7412ae84b39SGanapati Kundapura 				   struct crypto_ops_circular_buffer *bufp)
7422ae84b39SGanapati Kundapura {
7432ae84b39SGanapati Kundapura 	uint16_t n = 0, nb_ops_flushed;
7442ae84b39SGanapati Kundapura 	uint16_t *headp = &bufp->head;
7452ae84b39SGanapati Kundapura 	uint16_t *tailp = &bufp->tail;
7462ae84b39SGanapati Kundapura 	struct rte_crypto_op **ops = bufp->op_buffer;
7472ae84b39SGanapati Kundapura 
7482ae84b39SGanapati Kundapura 	if (*tailp > *headp)
7492ae84b39SGanapati Kundapura 		n = *tailp - *headp;
7502ae84b39SGanapati Kundapura 	else if (*tailp < *headp)
7512ae84b39SGanapati Kundapura 		n = bufp->size - *headp;
7522ae84b39SGanapati Kundapura 	else
7532ae84b39SGanapati Kundapura 		return 0;  /* buffer empty */
7542ae84b39SGanapati Kundapura 
755da73a2a0SGanapati Kundapura 	nb_ops_flushed =  eca_ops_enqueue_burst(adapter, &ops[*headp], n);
7562ae84b39SGanapati Kundapura 	bufp->count -= nb_ops_flushed;
7572ae84b39SGanapati Kundapura 	if (!bufp->count) {
7582ae84b39SGanapati Kundapura 		*headp = 0;
7592ae84b39SGanapati Kundapura 		*tailp = 0;
7602ae84b39SGanapati Kundapura 		return 0;  /* buffer empty */
7612ae84b39SGanapati Kundapura 	}
7622ae84b39SGanapati Kundapura 
7632ae84b39SGanapati Kundapura 	*headp = (*headp + nb_ops_flushed) % bufp->size;
7642ae84b39SGanapati Kundapura 	return 1;
7652ae84b39SGanapati Kundapura }
7662ae84b39SGanapati Kundapura 
7672ae84b39SGanapati Kundapura 
7682ae84b39SGanapati Kundapura static void
eca_ops_buffer_flush(struct event_crypto_adapter * adapter)7692ae84b39SGanapati Kundapura eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
7702ae84b39SGanapati Kundapura {
7712ae84b39SGanapati Kundapura 	if (likely(adapter->ebuf.count == 0))
7722ae84b39SGanapati Kundapura 		return;
7732ae84b39SGanapati Kundapura 
7742ae84b39SGanapati Kundapura 	while (eca_circular_buffer_flush_to_evdev(adapter,
7752ae84b39SGanapati Kundapura 						  &adapter->ebuf))
7762ae84b39SGanapati Kundapura 		;
7772ae84b39SGanapati Kundapura }
/* Dequeue completed crypto ops from all cryptodev queue pairs in a
 * round-robin fashion and forward them to the eventdev, up to max_deq ops.
 *
 * Round-robin position (adapter->next_cdev_id and each device's
 * next_queue_pair_id) persists across calls so that service iterations
 * resume where the previous one left off. Ops that cannot be enqueued to
 * the eventdev are parked in adapter->ebuf and re-flushed on the next call.
 * Returns the number of ops dequeued from cryptodevs.
 */
static inline unsigned int
eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq, nb_enqueued, i;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	/* First retry any ops left over from a previous failed enqueue */
	eca_ops_buffer_flush(adapter);

	do {
		done = true;

		for (cdev_id = adapter->next_cdev_id;
			cdev_id < num_cdev; cdev_id++) {
			uint16_t queues = 0;

			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			/* Device not tracked by this adapter */
			if (unlikely(dev == NULL))
				continue;

			dev_qps = dev->data->nb_queue_pairs;

			/* Walk all qps once, starting after the qp served
			 * on the previous call (wraps via modulo).
			 */
			for (qp = curr_dev->next_queue_pair_id;
				queues < dev_qps; qp = (qp + 1) % dev_qps,
				queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (unlikely(curr_queue == NULL ||
				    !curr_queue->qp_enabled))
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				/* Work was found; poll all devices again */
				done = false;
				nb_enqueued = 0;

				stats->crypto_deq_count += n;

				/* Only try a direct eventdev enqueue when
				 * ebuf is empty, to preserve op ordering.
				 */
				if (unlikely(!adapter->ebuf.count))
					nb_enqueued = eca_ops_enqueue_burst(
							adapter, ops, n);

				if (likely(nb_enqueued == n))
					goto check;

				/* Failed to enqueue events case */
				for (i = nb_enqueued; i < n; i++)
					eca_circular_buffer_add(
						&adapter->ebuf,
						ops[i]);

check:
				nb_deq += n;

				/* Budget exhausted: record the round-robin
				 * position so the next call resumes here.
				 */
				if (nb_deq >= max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
		adapter->next_cdev_id = 0;
	} while (done == false);
	return nb_deq;
}
86299a2dd95SBruce Richardson 
86334d78557SMattias Rönnblom static int
eca_crypto_adapter_run(struct event_crypto_adapter * adapter,unsigned int max_ops)864a256a743SPavan Nikhilesh eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
86599a2dd95SBruce Richardson 		       unsigned int max_ops)
86699a2dd95SBruce Richardson {
867578402f2SMattias Rönnblom 	unsigned int ops_left = max_ops;
868578402f2SMattias Rönnblom 
869578402f2SMattias Rönnblom 	while (ops_left > 0) {
87099a2dd95SBruce Richardson 		unsigned int e_cnt, d_cnt;
87199a2dd95SBruce Richardson 
872578402f2SMattias Rönnblom 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
873578402f2SMattias Rönnblom 		ops_left -= RTE_MIN(ops_left, e_cnt);
87499a2dd95SBruce Richardson 
875578402f2SMattias Rönnblom 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
876578402f2SMattias Rönnblom 		ops_left -= RTE_MIN(ops_left, d_cnt);
87799a2dd95SBruce Richardson 
87899a2dd95SBruce Richardson 		if (e_cnt == 0 && d_cnt == 0)
87999a2dd95SBruce Richardson 			break;
88099a2dd95SBruce Richardson 
88199a2dd95SBruce Richardson 	}
882578402f2SMattias Rönnblom 
88334d78557SMattias Rönnblom 	if (ops_left == max_ops) {
884578402f2SMattias Rönnblom 		rte_event_maintain(adapter->eventdev_id,
885578402f2SMattias Rönnblom 				   adapter->event_port_id, 0);
88634d78557SMattias Rönnblom 		return -EAGAIN;
88734d78557SMattias Rönnblom 	} else
88834d78557SMattias Rönnblom 		return 0;
88999a2dd95SBruce Richardson }
89099a2dd95SBruce Richardson 
89199a2dd95SBruce Richardson static int
eca_service_func(void * args)89299a2dd95SBruce Richardson eca_service_func(void *args)
89399a2dd95SBruce Richardson {
894a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter = args;
89534d78557SMattias Rönnblom 	int ret;
89699a2dd95SBruce Richardson 
89799a2dd95SBruce Richardson 	if (rte_spinlock_trylock(&adapter->lock) == 0)
89899a2dd95SBruce Richardson 		return 0;
89934d78557SMattias Rönnblom 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
90099a2dd95SBruce Richardson 	rte_spinlock_unlock(&adapter->lock);
90199a2dd95SBruce Richardson 
90234d78557SMattias Rönnblom 	return ret;
90399a2dd95SBruce Richardson }
90499a2dd95SBruce Richardson 
/* One-time service setup for a software-mode adapter: register the service
 * component, run the application's configuration callback to obtain the
 * event port and op budget, cache the port's implicit-release attribute and
 * register the mbuf dynamic field used to preserve rte_event::impl_opaque.
 * Returns 0 on success (idempotent once service_inited is set), negative
 * errno on failure.
 */
static int
eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;
	uint32_t impl_rel;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		"rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	/* Application callback supplies the event port and max op budget */
	ret = adapter->conf_cb(id, adapter->eventdev_id,
		&adapter_conf, adapter->conf_arg);
	if (ret) {
		/* NOTE(review): the service component registered above is
		 * not unregistered on this path — confirm whether that slot
		 * leaks, and whether later cleanup handles it.
		 */
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;

	/* Implicit-release setting decides FORWARD vs NEW for response
	 * events in eca_ops_enqueue_burst.
	 */
	if (rte_event_port_attr_get(adapter->eventdev_id,
				adapter->event_port_id,
				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
				&impl_rel)) {
		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
				 adapter->eventdev_id);
		/* NOTE(review): freeing the adapter here presumably leaves a
		 * dangling pointer in the adapter lookup table used by
		 * eca_id_to_adapter, and the registered service component is
		 * not unregistered — verify against the create/free paths.
		 */
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return -EINVAL;
	}

	adapter->implicit_release_disabled = (uint8_t)impl_rel;

	/** Register for mbuf dyn field to store/restore
	 *  "struct rte_event::impl_opaque"
	 */
	eca_dynfield_offset = eca_dynfield_register();
	if (eca_dynfield_offset  < 0) {
		RTE_EDEV_LOG_ERR("Failed to register eca mbuf dyn field");
		/* NOTE(review): same dangling-pointer concern as above */
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return -EINVAL;
	}

	adapter->service_inited = 1;

	return ret;
}
97099a2dd95SBruce Richardson 
97199a2dd95SBruce Richardson static void
eca_update_qp_info(struct event_crypto_adapter * adapter,struct crypto_device_info * dev_info,int32_t queue_pair_id,uint8_t add)972a256a743SPavan Nikhilesh eca_update_qp_info(struct event_crypto_adapter *adapter,
973a256a743SPavan Nikhilesh 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
97499a2dd95SBruce Richardson 		   uint8_t add)
97599a2dd95SBruce Richardson {
97699a2dd95SBruce Richardson 	struct crypto_queue_pair_info *qp_info;
97799a2dd95SBruce Richardson 	int enabled;
97899a2dd95SBruce Richardson 	uint16_t i;
97999a2dd95SBruce Richardson 
98099a2dd95SBruce Richardson 	if (dev_info->qpairs == NULL)
98199a2dd95SBruce Richardson 		return;
98299a2dd95SBruce Richardson 
98399a2dd95SBruce Richardson 	if (queue_pair_id == -1) {
98499a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
98599a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info, i, add);
98699a2dd95SBruce Richardson 	} else {
98799a2dd95SBruce Richardson 		qp_info = &dev_info->qpairs[queue_pair_id];
98899a2dd95SBruce Richardson 		enabled = qp_info->qp_enabled;
98999a2dd95SBruce Richardson 		if (add) {
99099a2dd95SBruce Richardson 			adapter->nb_qps += !enabled;
99199a2dd95SBruce Richardson 			dev_info->num_qpairs += !enabled;
99299a2dd95SBruce Richardson 		} else {
99399a2dd95SBruce Richardson 			adapter->nb_qps -= enabled;
99499a2dd95SBruce Richardson 			dev_info->num_qpairs -= enabled;
99599a2dd95SBruce Richardson 		}
99699a2dd95SBruce Richardson 		qp_info->qp_enabled = !!add;
99799a2dd95SBruce Richardson 	}
99899a2dd95SBruce Richardson }
99999a2dd95SBruce Richardson 
100099a2dd95SBruce Richardson static int
eca_add_queue_pair(struct event_crypto_adapter * adapter,uint8_t cdev_id,int queue_pair_id)1001a256a743SPavan Nikhilesh eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
100299a2dd95SBruce Richardson 		   int queue_pair_id)
100399a2dd95SBruce Richardson {
100499a2dd95SBruce Richardson 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
100599a2dd95SBruce Richardson 	struct crypto_queue_pair_info *qpairs;
100699a2dd95SBruce Richardson 	uint32_t i;
100799a2dd95SBruce Richardson 
100899a2dd95SBruce Richardson 	if (dev_info->qpairs == NULL) {
100999a2dd95SBruce Richardson 		dev_info->qpairs =
101099a2dd95SBruce Richardson 		    rte_zmalloc_socket(adapter->mem_name,
101199a2dd95SBruce Richardson 					dev_info->dev->data->nb_queue_pairs *
101299a2dd95SBruce Richardson 					sizeof(struct crypto_queue_pair_info),
101399a2dd95SBruce Richardson 					0, adapter->socket_id);
101499a2dd95SBruce Richardson 		if (dev_info->qpairs == NULL)
101599a2dd95SBruce Richardson 			return -ENOMEM;
101699a2dd95SBruce Richardson 
101799a2dd95SBruce Richardson 		qpairs = dev_info->qpairs;
10182ae84b39SGanapati Kundapura 
10192ae84b39SGanapati Kundapura 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
10202ae84b39SGanapati Kundapura 					     &qpairs->cbuf,
10212ae84b39SGanapati Kundapura 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
10222ae84b39SGanapati Kundapura 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
10232ae84b39SGanapati Kundapura 					 "buffer");
102499a2dd95SBruce Richardson 			rte_free(qpairs);
102599a2dd95SBruce Richardson 			return -ENOMEM;
102699a2dd95SBruce Richardson 		}
102799a2dd95SBruce Richardson 	}
102899a2dd95SBruce Richardson 
102999a2dd95SBruce Richardson 	if (queue_pair_id == -1) {
103099a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
103199a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info, i, 1);
103299a2dd95SBruce Richardson 	} else
103399a2dd95SBruce Richardson 		eca_update_qp_info(adapter, dev_info,
103499a2dd95SBruce Richardson 					(uint16_t)queue_pair_id, 1);
103599a2dd95SBruce Richardson 
103699a2dd95SBruce Richardson 	return 0;
103799a2dd95SBruce Richardson }
103899a2dd95SBruce Richardson 
103999a2dd95SBruce Richardson int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,uint8_t cdev_id,int32_t queue_pair_id,const struct rte_event_crypto_adapter_queue_conf * conf)104099a2dd95SBruce Richardson rte_event_crypto_adapter_queue_pair_add(uint8_t id,
104199a2dd95SBruce Richardson 			uint8_t cdev_id,
104299a2dd95SBruce Richardson 			int32_t queue_pair_id,
1043c1749bc5SVolodymyr Fialko 			const struct rte_event_crypto_adapter_queue_conf *conf)
104499a2dd95SBruce Richardson {
1045c1749bc5SVolodymyr Fialko 	struct rte_event_crypto_adapter_vector_limits limits;
1046a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
104799a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
1048c1749bc5SVolodymyr Fialko 	struct rte_eventdev *dev;
104999a2dd95SBruce Richardson 	uint32_t cap;
105099a2dd95SBruce Richardson 	int ret;
105199a2dd95SBruce Richardson 
105299a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
105399a2dd95SBruce Richardson 
1054e74abd48SAkhil Goyal 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
105599a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
105699a2dd95SBruce Richardson 		return -EINVAL;
105799a2dd95SBruce Richardson 	}
105899a2dd95SBruce Richardson 
105999a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
106099a2dd95SBruce Richardson 	if (adapter == NULL)
106199a2dd95SBruce Richardson 		return -EINVAL;
106299a2dd95SBruce Richardson 
106399a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
106499a2dd95SBruce Richardson 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
106599a2dd95SBruce Richardson 						cdev_id,
106699a2dd95SBruce Richardson 						&cap);
106799a2dd95SBruce Richardson 	if (ret) {
106899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
106999a2dd95SBruce Richardson 			" cdev %" PRIu8, id, cdev_id);
107099a2dd95SBruce Richardson 		return ret;
107199a2dd95SBruce Richardson 	}
107299a2dd95SBruce Richardson 
1073c1749bc5SVolodymyr Fialko 	if (conf == NULL) {
1074c1749bc5SVolodymyr Fialko 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
107599a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
107699a2dd95SBruce Richardson 					 cdev_id);
107799a2dd95SBruce Richardson 			return -EINVAL;
107899a2dd95SBruce Richardson 		}
1079c1749bc5SVolodymyr Fialko 	} else {
1080c1749bc5SVolodymyr Fialko 		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
1081c1749bc5SVolodymyr Fialko 			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
1082c1749bc5SVolodymyr Fialko 				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1083c1749bc5SVolodymyr Fialko 						 "dev %" PRIu8 " cdev %" PRIu8, id,
1084c1749bc5SVolodymyr Fialko 						 cdev_id);
1085c1749bc5SVolodymyr Fialko 				return -ENOTSUP;
1086c1749bc5SVolodymyr Fialko 			}
1087c1749bc5SVolodymyr Fialko 
1088c1749bc5SVolodymyr Fialko 			ret = rte_event_crypto_adapter_vector_limits_get(
1089c1749bc5SVolodymyr Fialko 				adapter->eventdev_id, cdev_id, &limits);
1090c1749bc5SVolodymyr Fialko 			if (ret < 0) {
1091c1749bc5SVolodymyr Fialko 				RTE_EDEV_LOG_ERR("Failed to get event device vector "
1092c1749bc5SVolodymyr Fialko 						 "limits, dev %" PRIu8 " cdev %" PRIu8,
1093c1749bc5SVolodymyr Fialko 						 id, cdev_id);
1094c1749bc5SVolodymyr Fialko 				return -EINVAL;
1095c1749bc5SVolodymyr Fialko 			}
1096c1749bc5SVolodymyr Fialko 
1097c1749bc5SVolodymyr Fialko 			if (conf->vector_sz < limits.min_sz ||
1098c1749bc5SVolodymyr Fialko 			    conf->vector_sz > limits.max_sz ||
1099c1749bc5SVolodymyr Fialko 			    conf->vector_timeout_ns < limits.min_timeout_ns ||
1100c1749bc5SVolodymyr Fialko 			    conf->vector_timeout_ns > limits.max_timeout_ns ||
1101c1749bc5SVolodymyr Fialko 			    conf->vector_mp == NULL) {
1102c1749bc5SVolodymyr Fialko 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1103c1749bc5SVolodymyr Fialko 						" dev %" PRIu8 " cdev %" PRIu8,
1104c1749bc5SVolodymyr Fialko 						id, cdev_id);
1105c1749bc5SVolodymyr Fialko 				return -EINVAL;
1106c1749bc5SVolodymyr Fialko 			}
1107c1749bc5SVolodymyr Fialko 
1108c1749bc5SVolodymyr Fialko 			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1109c1749bc5SVolodymyr Fialko 			    (sizeof(uintptr_t) * conf->vector_sz))) {
1110c1749bc5SVolodymyr Fialko 				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1111c1749bc5SVolodymyr Fialko 						" dev %" PRIu8 " cdev %" PRIu8,
1112c1749bc5SVolodymyr Fialko 						id, cdev_id);
1113c1749bc5SVolodymyr Fialko 				return -EINVAL;
1114c1749bc5SVolodymyr Fialko 			}
1115c1749bc5SVolodymyr Fialko 		}
1116c1749bc5SVolodymyr Fialko 	}
111799a2dd95SBruce Richardson 
111899a2dd95SBruce Richardson 	dev_info = &adapter->cdevs[cdev_id];
111999a2dd95SBruce Richardson 
112099a2dd95SBruce Richardson 	if (queue_pair_id != -1 &&
112199a2dd95SBruce Richardson 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
112299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
112399a2dd95SBruce Richardson 				 (uint16_t)queue_pair_id);
112499a2dd95SBruce Richardson 		return -EINVAL;
112599a2dd95SBruce Richardson 	}
112699a2dd95SBruce Richardson 
112799a2dd95SBruce Richardson 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
112899a2dd95SBruce Richardson 	 * no need of service core as HW supports event forward capability.
112999a2dd95SBruce Richardson 	 */
113099a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
113199a2dd95SBruce Richardson 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
113299a2dd95SBruce Richardson 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
113399a2dd95SBruce Richardson 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
113499a2dd95SBruce Richardson 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
11358f1d23ecSDavid Marchand 		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
11368f1d23ecSDavid Marchand 			return -ENOTSUP;
113799a2dd95SBruce Richardson 		if (dev_info->qpairs == NULL) {
113899a2dd95SBruce Richardson 			dev_info->qpairs =
113999a2dd95SBruce Richardson 			    rte_zmalloc_socket(adapter->mem_name,
114099a2dd95SBruce Richardson 					dev_info->dev->data->nb_queue_pairs *
114199a2dd95SBruce Richardson 					sizeof(struct crypto_queue_pair_info),
114299a2dd95SBruce Richardson 					0, adapter->socket_id);
114399a2dd95SBruce Richardson 			if (dev_info->qpairs == NULL)
114499a2dd95SBruce Richardson 				return -ENOMEM;
114599a2dd95SBruce Richardson 		}
114699a2dd95SBruce Richardson 
114799a2dd95SBruce Richardson 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
114899a2dd95SBruce Richardson 				dev_info->dev,
114999a2dd95SBruce Richardson 				queue_pair_id,
1150c1749bc5SVolodymyr Fialko 				conf);
115199a2dd95SBruce Richardson 		if (ret)
115299a2dd95SBruce Richardson 			return ret;
115399a2dd95SBruce Richardson 
115499a2dd95SBruce Richardson 		else
115599a2dd95SBruce Richardson 			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
115699a2dd95SBruce Richardson 					   queue_pair_id, 1);
115799a2dd95SBruce Richardson 	}
115899a2dd95SBruce Richardson 
115999a2dd95SBruce Richardson 	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
116099a2dd95SBruce Richardson 	 * or SW adapter, initiate services so the application can choose
116199a2dd95SBruce Richardson 	 * which ever way it wants to use the adapter.
116299a2dd95SBruce Richardson 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
116399a2dd95SBruce Richardson 	 *         Application may wants to use one of below two mode
116499a2dd95SBruce Richardson 	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
116599a2dd95SBruce Richardson 	 *          b. OP_NEW mode -> HW Dequeue
116699a2dd95SBruce Richardson 	 * Case 2: No HW caps, use SW adapter
116799a2dd95SBruce Richardson 	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
116899a2dd95SBruce Richardson 	 *          b. OP_NEW mode -> SW Dequeue
116999a2dd95SBruce Richardson 	 */
117099a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
117199a2dd95SBruce Richardson 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
117299a2dd95SBruce Richardson 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
117399a2dd95SBruce Richardson 	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
117499a2dd95SBruce Richardson 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
117599a2dd95SBruce Richardson 	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
117699a2dd95SBruce Richardson 	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
117799a2dd95SBruce Richardson 		rte_spinlock_lock(&adapter->lock);
117899a2dd95SBruce Richardson 		ret = eca_init_service(adapter, id);
117999a2dd95SBruce Richardson 		if (ret == 0)
118099a2dd95SBruce Richardson 			ret = eca_add_queue_pair(adapter, cdev_id,
118199a2dd95SBruce Richardson 						 queue_pair_id);
118299a2dd95SBruce Richardson 		rte_spinlock_unlock(&adapter->lock);
118399a2dd95SBruce Richardson 
118499a2dd95SBruce Richardson 		if (ret)
118599a2dd95SBruce Richardson 			return ret;
118699a2dd95SBruce Richardson 
118799a2dd95SBruce Richardson 		rte_service_component_runstate_set(adapter->service_id, 1);
118899a2dd95SBruce Richardson 	}
118999a2dd95SBruce Richardson 
1190c1749bc5SVolodymyr Fialko 	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1191c1749bc5SVolodymyr Fialko 		queue_pair_id, conf);
119299a2dd95SBruce Richardson 	return 0;
119399a2dd95SBruce Richardson }
119499a2dd95SBruce Richardson 
119599a2dd95SBruce Richardson int
rte_event_crypto_adapter_queue_pair_del(uint8_t id,uint8_t cdev_id,int32_t queue_pair_id)119699a2dd95SBruce Richardson rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
119799a2dd95SBruce Richardson 					int32_t queue_pair_id)
119899a2dd95SBruce Richardson {
1199a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
120099a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
120199a2dd95SBruce Richardson 	struct rte_eventdev *dev;
120299a2dd95SBruce Richardson 	int ret;
120399a2dd95SBruce Richardson 	uint32_t cap;
120499a2dd95SBruce Richardson 	uint16_t i;
120599a2dd95SBruce Richardson 
120699a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
120799a2dd95SBruce Richardson 
1208e74abd48SAkhil Goyal 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
120999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
121099a2dd95SBruce Richardson 		return -EINVAL;
121199a2dd95SBruce Richardson 	}
121299a2dd95SBruce Richardson 
121399a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
121499a2dd95SBruce Richardson 	if (adapter == NULL)
121599a2dd95SBruce Richardson 		return -EINVAL;
121699a2dd95SBruce Richardson 
121799a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
121899a2dd95SBruce Richardson 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
121999a2dd95SBruce Richardson 						cdev_id,
122099a2dd95SBruce Richardson 						&cap);
122199a2dd95SBruce Richardson 	if (ret)
122299a2dd95SBruce Richardson 		return ret;
122399a2dd95SBruce Richardson 
122499a2dd95SBruce Richardson 	dev_info = &adapter->cdevs[cdev_id];
122599a2dd95SBruce Richardson 
122699a2dd95SBruce Richardson 	if (queue_pair_id != -1 &&
122799a2dd95SBruce Richardson 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
122899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
122999a2dd95SBruce Richardson 				 (uint16_t)queue_pair_id);
123099a2dd95SBruce Richardson 		return -EINVAL;
123199a2dd95SBruce Richardson 	}
123299a2dd95SBruce Richardson 
123399a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
123499a2dd95SBruce Richardson 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
123599a2dd95SBruce Richardson 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
12368f1d23ecSDavid Marchand 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
12378f1d23ecSDavid Marchand 			return -ENOTSUP;
123899a2dd95SBruce Richardson 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
123999a2dd95SBruce Richardson 						dev_info->dev,
124099a2dd95SBruce Richardson 						queue_pair_id);
124199a2dd95SBruce Richardson 		if (ret == 0) {
124299a2dd95SBruce Richardson 			eca_update_qp_info(adapter,
124399a2dd95SBruce Richardson 					&adapter->cdevs[cdev_id],
124499a2dd95SBruce Richardson 					queue_pair_id,
124599a2dd95SBruce Richardson 					0);
124699a2dd95SBruce Richardson 			if (dev_info->num_qpairs == 0) {
124799a2dd95SBruce Richardson 				rte_free(dev_info->qpairs);
124899a2dd95SBruce Richardson 				dev_info->qpairs = NULL;
124999a2dd95SBruce Richardson 			}
125099a2dd95SBruce Richardson 		}
125199a2dd95SBruce Richardson 	} else {
125299a2dd95SBruce Richardson 		if (adapter->nb_qps == 0)
125399a2dd95SBruce Richardson 			return 0;
125499a2dd95SBruce Richardson 
125599a2dd95SBruce Richardson 		rte_spinlock_lock(&adapter->lock);
125699a2dd95SBruce Richardson 		if (queue_pair_id == -1) {
125799a2dd95SBruce Richardson 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
125899a2dd95SBruce Richardson 				i++)
125999a2dd95SBruce Richardson 				eca_update_qp_info(adapter, dev_info,
126099a2dd95SBruce Richardson 							queue_pair_id, 0);
126199a2dd95SBruce Richardson 		} else {
126299a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info,
126399a2dd95SBruce Richardson 						(uint16_t)queue_pair_id, 0);
126499a2dd95SBruce Richardson 		}
126599a2dd95SBruce Richardson 
126699a2dd95SBruce Richardson 		if (dev_info->num_qpairs == 0) {
126799a2dd95SBruce Richardson 			rte_free(dev_info->qpairs);
126899a2dd95SBruce Richardson 			dev_info->qpairs = NULL;
126999a2dd95SBruce Richardson 		}
127099a2dd95SBruce Richardson 
127199a2dd95SBruce Richardson 		rte_spinlock_unlock(&adapter->lock);
127299a2dd95SBruce Richardson 		rte_service_component_runstate_set(adapter->service_id,
127399a2dd95SBruce Richardson 				adapter->nb_qps);
127499a2dd95SBruce Richardson 	}
127599a2dd95SBruce Richardson 
127699a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
127799a2dd95SBruce Richardson 		queue_pair_id, ret);
127899a2dd95SBruce Richardson 	return ret;
127999a2dd95SBruce Richardson }
128099a2dd95SBruce Richardson 
128199a2dd95SBruce Richardson static int
eca_adapter_ctrl(uint8_t id,int start)128299a2dd95SBruce Richardson eca_adapter_ctrl(uint8_t id, int start)
128399a2dd95SBruce Richardson {
1284a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
128599a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
128699a2dd95SBruce Richardson 	struct rte_eventdev *dev;
128799a2dd95SBruce Richardson 	uint32_t i;
128899a2dd95SBruce Richardson 	int use_service;
128999a2dd95SBruce Richardson 	int stop = !start;
129099a2dd95SBruce Richardson 
129199a2dd95SBruce Richardson 	use_service = 0;
129299a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
129399a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
129499a2dd95SBruce Richardson 	if (adapter == NULL)
129599a2dd95SBruce Richardson 		return -EINVAL;
129699a2dd95SBruce Richardson 
129799a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
129899a2dd95SBruce Richardson 
129999a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
130099a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
130199a2dd95SBruce Richardson 		/* if start  check for num queue pairs */
130299a2dd95SBruce Richardson 		if (start && !dev_info->num_qpairs)
130399a2dd95SBruce Richardson 			continue;
130499a2dd95SBruce Richardson 		/* if stop check if dev has been started */
130599a2dd95SBruce Richardson 		if (stop && !dev_info->dev_started)
130699a2dd95SBruce Richardson 			continue;
130799a2dd95SBruce Richardson 		use_service |= !dev_info->internal_event_port;
130899a2dd95SBruce Richardson 		dev_info->dev_started = start;
130999a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0)
131099a2dd95SBruce Richardson 			continue;
131199a2dd95SBruce Richardson 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
131299a2dd95SBruce Richardson 						&dev_info->dev[i]) :
131399a2dd95SBruce Richardson 			(*dev->dev_ops->crypto_adapter_stop)(dev,
131499a2dd95SBruce Richardson 						&dev_info->dev[i]);
131599a2dd95SBruce Richardson 	}
131699a2dd95SBruce Richardson 
131799a2dd95SBruce Richardson 	if (use_service)
131899a2dd95SBruce Richardson 		rte_service_runstate_set(adapter->service_id, start);
131999a2dd95SBruce Richardson 
132099a2dd95SBruce Richardson 	return 0;
132199a2dd95SBruce Richardson }
132299a2dd95SBruce Richardson 
132399a2dd95SBruce Richardson int
rte_event_crypto_adapter_start(uint8_t id)132499a2dd95SBruce Richardson rte_event_crypto_adapter_start(uint8_t id)
132599a2dd95SBruce Richardson {
1326a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
132799a2dd95SBruce Richardson 
132899a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
132999a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
133099a2dd95SBruce Richardson 	if (adapter == NULL)
133199a2dd95SBruce Richardson 		return -EINVAL;
133299a2dd95SBruce Richardson 
133399a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
133499a2dd95SBruce Richardson 	return eca_adapter_ctrl(id, 1);
133599a2dd95SBruce Richardson }
133699a2dd95SBruce Richardson 
int
rte_event_crypto_adapter_stop(uint8_t id)
{
	/* Trace first; eca_adapter_ctrl() performs its own id validation. */
	rte_eventdev_trace_crypto_adapter_stop(id);

	return eca_adapter_ctrl(id, 0);
}
134399a2dd95SBruce Richardson 
134499a2dd95SBruce Richardson int
rte_event_crypto_adapter_stats_get(uint8_t id,struct rte_event_crypto_adapter_stats * stats)134599a2dd95SBruce Richardson rte_event_crypto_adapter_stats_get(uint8_t id,
134699a2dd95SBruce Richardson 				struct rte_event_crypto_adapter_stats *stats)
134799a2dd95SBruce Richardson {
1348a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
134999a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
135099a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats dev_stats;
135199a2dd95SBruce Richardson 	struct rte_eventdev *dev;
135299a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
135399a2dd95SBruce Richardson 	uint32_t i;
135499a2dd95SBruce Richardson 	int ret;
135599a2dd95SBruce Richardson 
13568f4ff7deSGanapati Kundapura 	if (eca_memzone_lookup())
13578f4ff7deSGanapati Kundapura 		return -ENOMEM;
13588f4ff7deSGanapati Kundapura 
135999a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
136099a2dd95SBruce Richardson 
136199a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
136299a2dd95SBruce Richardson 	if (adapter == NULL || stats == NULL)
136399a2dd95SBruce Richardson 		return -EINVAL;
136499a2dd95SBruce Richardson 
136599a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
136699a2dd95SBruce Richardson 	memset(stats, 0, sizeof(*stats));
136799a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
136899a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
136999a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
137099a2dd95SBruce Richardson 			dev->dev_ops->crypto_adapter_stats_get == NULL)
137199a2dd95SBruce Richardson 			continue;
137299a2dd95SBruce Richardson 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
137399a2dd95SBruce Richardson 						dev_info->dev,
137499a2dd95SBruce Richardson 						&dev_stats);
137599a2dd95SBruce Richardson 		if (ret)
137699a2dd95SBruce Richardson 			continue;
137799a2dd95SBruce Richardson 
137899a2dd95SBruce Richardson 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
137999a2dd95SBruce Richardson 		dev_stats_sum.event_enq_count +=
138099a2dd95SBruce Richardson 			dev_stats.event_enq_count;
138199a2dd95SBruce Richardson 	}
138299a2dd95SBruce Richardson 
138399a2dd95SBruce Richardson 	if (adapter->service_inited)
138499a2dd95SBruce Richardson 		*stats = adapter->crypto_stats;
138599a2dd95SBruce Richardson 
138699a2dd95SBruce Richardson 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
138799a2dd95SBruce Richardson 	stats->event_enq_count += dev_stats_sum.event_enq_count;
138899a2dd95SBruce Richardson 
13897f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_stats_get(id, stats,
13907f2d9df6SAmit Prakash Shukla 		stats->event_poll_count, stats->event_deq_count,
13917f2d9df6SAmit Prakash Shukla 		stats->crypto_enq_count, stats->crypto_enq_fail,
13927f2d9df6SAmit Prakash Shukla 		stats->crypto_deq_count, stats->event_enq_count,
13937f2d9df6SAmit Prakash Shukla 		stats->event_enq_retry_count, stats->event_enq_fail_count);
13947f2d9df6SAmit Prakash Shukla 
139599a2dd95SBruce Richardson 	return 0;
139699a2dd95SBruce Richardson }
139799a2dd95SBruce Richardson 
139899a2dd95SBruce Richardson int
rte_event_crypto_adapter_stats_reset(uint8_t id)139999a2dd95SBruce Richardson rte_event_crypto_adapter_stats_reset(uint8_t id)
140099a2dd95SBruce Richardson {
1401a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
140299a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
140399a2dd95SBruce Richardson 	struct rte_eventdev *dev;
140499a2dd95SBruce Richardson 	uint32_t i;
140599a2dd95SBruce Richardson 
14067f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_stats_reset(id);
14077f2d9df6SAmit Prakash Shukla 
14088f4ff7deSGanapati Kundapura 	if (eca_memzone_lookup())
14098f4ff7deSGanapati Kundapura 		return -ENOMEM;
14108f4ff7deSGanapati Kundapura 
141199a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
141299a2dd95SBruce Richardson 
141399a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
141499a2dd95SBruce Richardson 	if (adapter == NULL)
141599a2dd95SBruce Richardson 		return -EINVAL;
141699a2dd95SBruce Richardson 
141799a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
141899a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
141999a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
142099a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
142199a2dd95SBruce Richardson 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
142299a2dd95SBruce Richardson 			continue;
142399a2dd95SBruce Richardson 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
142499a2dd95SBruce Richardson 						dev_info->dev);
142599a2dd95SBruce Richardson 	}
142699a2dd95SBruce Richardson 
142799a2dd95SBruce Richardson 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
142899a2dd95SBruce Richardson 	return 0;
142999a2dd95SBruce Richardson }
143099a2dd95SBruce Richardson 
143199a2dd95SBruce Richardson int
rte_event_crypto_adapter_runtime_params_init(struct rte_event_crypto_adapter_runtime_params * params)143204ed18cdSNaga Harish K S V rte_event_crypto_adapter_runtime_params_init(
143304ed18cdSNaga Harish K S V 		struct rte_event_crypto_adapter_runtime_params *params)
143404ed18cdSNaga Harish K S V {
143504ed18cdSNaga Harish K S V 	if (params == NULL)
143604ed18cdSNaga Harish K S V 		return -EINVAL;
143704ed18cdSNaga Harish K S V 
143804ed18cdSNaga Harish K S V 	memset(params, 0, sizeof(*params));
143904ed18cdSNaga Harish K S V 	params->max_nb = DEFAULT_MAX_NB;
144004ed18cdSNaga Harish K S V 
144104ed18cdSNaga Harish K S V 	return 0;
144204ed18cdSNaga Harish K S V }
144304ed18cdSNaga Harish K S V 
144404ed18cdSNaga Harish K S V static int
crypto_adapter_cap_check(struct event_crypto_adapter * adapter)144504ed18cdSNaga Harish K S V crypto_adapter_cap_check(struct event_crypto_adapter *adapter)
144604ed18cdSNaga Harish K S V {
144704ed18cdSNaga Harish K S V 	int ret;
144804ed18cdSNaga Harish K S V 	uint32_t caps;
144904ed18cdSNaga Harish K S V 
145004ed18cdSNaga Harish K S V 	if (!adapter->nb_qps)
145104ed18cdSNaga Harish K S V 		return -EINVAL;
145204ed18cdSNaga Harish K S V 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
145304ed18cdSNaga Harish K S V 						adapter->next_cdev_id,
145404ed18cdSNaga Harish K S V 						&caps);
145504ed18cdSNaga Harish K S V 	if (ret) {
145604ed18cdSNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
145704ed18cdSNaga Harish K S V 			" cdev %" PRIu8, adapter->eventdev_id,
145804ed18cdSNaga Harish K S V 			adapter->next_cdev_id);
145904ed18cdSNaga Harish K S V 		return ret;
146004ed18cdSNaga Harish K S V 	}
146104ed18cdSNaga Harish K S V 
146204ed18cdSNaga Harish K S V 	if ((caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
146304ed18cdSNaga Harish K S V 	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
146404ed18cdSNaga Harish K S V 		return -ENOTSUP;
146504ed18cdSNaga Harish K S V 
146604ed18cdSNaga Harish K S V 	return 0;
146704ed18cdSNaga Harish K S V }
146804ed18cdSNaga Harish K S V 
146904ed18cdSNaga Harish K S V int
rte_event_crypto_adapter_runtime_params_set(uint8_t id,struct rte_event_crypto_adapter_runtime_params * params)147004ed18cdSNaga Harish K S V rte_event_crypto_adapter_runtime_params_set(uint8_t id,
147104ed18cdSNaga Harish K S V 		struct rte_event_crypto_adapter_runtime_params *params)
147204ed18cdSNaga Harish K S V {
147304ed18cdSNaga Harish K S V 	struct event_crypto_adapter *adapter;
147404ed18cdSNaga Harish K S V 	int ret;
147504ed18cdSNaga Harish K S V 
147604ed18cdSNaga Harish K S V 	if (eca_memzone_lookup())
147704ed18cdSNaga Harish K S V 		return -ENOMEM;
147804ed18cdSNaga Harish K S V 
147904ed18cdSNaga Harish K S V 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
148004ed18cdSNaga Harish K S V 
148104ed18cdSNaga Harish K S V 	if (params == NULL) {
1482ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("params pointer is NULL");
148304ed18cdSNaga Harish K S V 		return -EINVAL;
148404ed18cdSNaga Harish K S V 	}
148504ed18cdSNaga Harish K S V 
148604ed18cdSNaga Harish K S V 	adapter = eca_id_to_adapter(id);
148704ed18cdSNaga Harish K S V 	if (adapter == NULL)
148804ed18cdSNaga Harish K S V 		return -EINVAL;
148904ed18cdSNaga Harish K S V 
149004ed18cdSNaga Harish K S V 	ret = crypto_adapter_cap_check(adapter);
149104ed18cdSNaga Harish K S V 	if (ret)
149204ed18cdSNaga Harish K S V 		return ret;
149304ed18cdSNaga Harish K S V 
149404ed18cdSNaga Harish K S V 	rte_spinlock_lock(&adapter->lock);
149504ed18cdSNaga Harish K S V 	adapter->max_nb = params->max_nb;
149604ed18cdSNaga Harish K S V 	rte_spinlock_unlock(&adapter->lock);
149704ed18cdSNaga Harish K S V 
149804ed18cdSNaga Harish K S V 	return 0;
149904ed18cdSNaga Harish K S V }
150004ed18cdSNaga Harish K S V 
150104ed18cdSNaga Harish K S V int
rte_event_crypto_adapter_runtime_params_get(uint8_t id,struct rte_event_crypto_adapter_runtime_params * params)150204ed18cdSNaga Harish K S V rte_event_crypto_adapter_runtime_params_get(uint8_t id,
150304ed18cdSNaga Harish K S V 		struct rte_event_crypto_adapter_runtime_params *params)
150404ed18cdSNaga Harish K S V {
150504ed18cdSNaga Harish K S V 	struct event_crypto_adapter *adapter;
150604ed18cdSNaga Harish K S V 	int ret;
150704ed18cdSNaga Harish K S V 
150804ed18cdSNaga Harish K S V 	if (eca_memzone_lookup())
150904ed18cdSNaga Harish K S V 		return -ENOMEM;
151004ed18cdSNaga Harish K S V 
151104ed18cdSNaga Harish K S V 
151204ed18cdSNaga Harish K S V 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
151304ed18cdSNaga Harish K S V 
151404ed18cdSNaga Harish K S V 	if (params == NULL) {
1515ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("params pointer is NULL");
151604ed18cdSNaga Harish K S V 		return -EINVAL;
151704ed18cdSNaga Harish K S V 	}
151804ed18cdSNaga Harish K S V 
151904ed18cdSNaga Harish K S V 	adapter = eca_id_to_adapter(id);
152004ed18cdSNaga Harish K S V 	if (adapter == NULL)
152104ed18cdSNaga Harish K S V 		return -EINVAL;
152204ed18cdSNaga Harish K S V 
152304ed18cdSNaga Harish K S V 	ret = crypto_adapter_cap_check(adapter);
152404ed18cdSNaga Harish K S V 	if (ret)
152504ed18cdSNaga Harish K S V 		return ret;
152604ed18cdSNaga Harish K S V 
152704ed18cdSNaga Harish K S V 	params->max_nb = adapter->max_nb;
152804ed18cdSNaga Harish K S V 
152904ed18cdSNaga Harish K S V 	return 0;
153004ed18cdSNaga Harish K S V }
153104ed18cdSNaga Harish K S V 
153204ed18cdSNaga Harish K S V int
rte_event_crypto_adapter_service_id_get(uint8_t id,uint32_t * service_id)153399a2dd95SBruce Richardson rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
153499a2dd95SBruce Richardson {
1535a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
153699a2dd95SBruce Richardson 
153799a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
153899a2dd95SBruce Richardson 
153999a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
154099a2dd95SBruce Richardson 	if (adapter == NULL || service_id == NULL)
154199a2dd95SBruce Richardson 		return -EINVAL;
154299a2dd95SBruce Richardson 
154399a2dd95SBruce Richardson 	if (adapter->service_inited)
154499a2dd95SBruce Richardson 		*service_id = adapter->service_id;
154599a2dd95SBruce Richardson 
15467f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_service_id_get(id, *service_id);
15477f2d9df6SAmit Prakash Shukla 
154899a2dd95SBruce Richardson 	return adapter->service_inited ? 0 : -ESRCH;
154999a2dd95SBruce Richardson }
155099a2dd95SBruce Richardson 
155199a2dd95SBruce Richardson int
rte_event_crypto_adapter_event_port_get(uint8_t id,uint8_t * event_port_id)155299a2dd95SBruce Richardson rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
155399a2dd95SBruce Richardson {
1554a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
155599a2dd95SBruce Richardson 
155699a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
155799a2dd95SBruce Richardson 
155899a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
155999a2dd95SBruce Richardson 	if (adapter == NULL || event_port_id == NULL)
156099a2dd95SBruce Richardson 		return -EINVAL;
156199a2dd95SBruce Richardson 
156299a2dd95SBruce Richardson 	*event_port_id = adapter->event_port_id;
156399a2dd95SBruce Richardson 
15647f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_event_port_get(id, *event_port_id);
15657f2d9df6SAmit Prakash Shukla 
156699a2dd95SBruce Richardson 	return 0;
156799a2dd95SBruce Richardson }
1568c1749bc5SVolodymyr Fialko 
1569c1749bc5SVolodymyr Fialko int
rte_event_crypto_adapter_vector_limits_get(uint8_t dev_id,uint16_t cdev_id,struct rte_event_crypto_adapter_vector_limits * limits)1570c1749bc5SVolodymyr Fialko rte_event_crypto_adapter_vector_limits_get(
1571c1749bc5SVolodymyr Fialko 	uint8_t dev_id, uint16_t cdev_id,
1572c1749bc5SVolodymyr Fialko 	struct rte_event_crypto_adapter_vector_limits *limits)
1573c1749bc5SVolodymyr Fialko {
1574c1749bc5SVolodymyr Fialko 	struct rte_cryptodev *cdev;
1575c1749bc5SVolodymyr Fialko 	struct rte_eventdev *dev;
1576c1749bc5SVolodymyr Fialko 	uint32_t cap;
1577c1749bc5SVolodymyr Fialko 	int ret;
1578c1749bc5SVolodymyr Fialko 
15797f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_crypto_adapter_vector_limits_get(dev_id, cdev_id, limits);
15807f2d9df6SAmit Prakash Shukla 
1581c1749bc5SVolodymyr Fialko 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1582c1749bc5SVolodymyr Fialko 
1583c1749bc5SVolodymyr Fialko 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1584c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1585c1749bc5SVolodymyr Fialko 		return -EINVAL;
1586c1749bc5SVolodymyr Fialko 	}
1587c1749bc5SVolodymyr Fialko 
1588c1749bc5SVolodymyr Fialko 	if (limits == NULL) {
1589c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1590c1749bc5SVolodymyr Fialko 		return -EINVAL;
1591c1749bc5SVolodymyr Fialko 	}
1592c1749bc5SVolodymyr Fialko 
1593c1749bc5SVolodymyr Fialko 	dev = &rte_eventdevs[dev_id];
1594c1749bc5SVolodymyr Fialko 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1595c1749bc5SVolodymyr Fialko 
1596c1749bc5SVolodymyr Fialko 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1597c1749bc5SVolodymyr Fialko 	if (ret) {
1598c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1599c1749bc5SVolodymyr Fialko 				 "cdev %" PRIu16, dev_id, cdev_id);
1600c1749bc5SVolodymyr Fialko 		return ret;
1601c1749bc5SVolodymyr Fialko 	}
1602c1749bc5SVolodymyr Fialko 
1603c1749bc5SVolodymyr Fialko 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1604c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1605c1749bc5SVolodymyr Fialko 				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
1606c1749bc5SVolodymyr Fialko 		return -ENOTSUP;
1607c1749bc5SVolodymyr Fialko 	}
1608c1749bc5SVolodymyr Fialko 
1609c1749bc5SVolodymyr Fialko 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1610c1749bc5SVolodymyr Fialko 		return -ENOTSUP;
1611c1749bc5SVolodymyr Fialko 
1612c1749bc5SVolodymyr Fialko 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1613c1749bc5SVolodymyr Fialko 		dev, cdev, limits);
1614c1749bc5SVolodymyr Fialko }
1615