xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision 34d785571fb1083e2fc6ca00fc32bae7dd76425e)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2018 Intel Corporation.
399a2dd95SBruce Richardson  * All rights reserved.
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson 
699a2dd95SBruce Richardson #include <string.h>
799a2dd95SBruce Richardson #include <stdbool.h>
899a2dd95SBruce Richardson #include <rte_common.h>
91acb7f54SDavid Marchand #include <dev_driver.h>
1099a2dd95SBruce Richardson #include <rte_errno.h>
1199a2dd95SBruce Richardson #include <rte_cryptodev.h>
12af668035SAkhil Goyal #include <cryptodev_pmd.h>
1399a2dd95SBruce Richardson #include <rte_log.h>
1499a2dd95SBruce Richardson #include <rte_malloc.h>
1599a2dd95SBruce Richardson #include <rte_service_component.h>
1699a2dd95SBruce Richardson 
1799a2dd95SBruce Richardson #include "rte_eventdev.h"
1899a2dd95SBruce Richardson #include "eventdev_pmd.h"
19f26f2ca6SPavan Nikhilesh #include "eventdev_trace.h"
2099a2dd95SBruce Richardson #include "rte_event_crypto_adapter.h"
2199a2dd95SBruce Richardson 
2299a2dd95SBruce Richardson #define BATCH_SIZE 32
2399a2dd95SBruce Richardson #define DEFAULT_MAX_NB 128
2499a2dd95SBruce Richardson #define CRYPTO_ADAPTER_NAME_LEN 32
2599a2dd95SBruce Richardson #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
2699a2dd95SBruce Richardson #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
2799a2dd95SBruce Richardson 
282ae84b39SGanapati Kundapura #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
292ae84b39SGanapati Kundapura #define CRYPTO_ADAPTER_BUFFER_SZ 1024
302ae84b39SGanapati Kundapura 
3199a2dd95SBruce Richardson /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
3299a2dd95SBruce Richardson  * iterations of eca_crypto_adapter_enq_run()
3399a2dd95SBruce Richardson  */
3499a2dd95SBruce Richardson #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
3599a2dd95SBruce Richardson 
/* Circular buffer of rte_crypto_op pointers, used to batch ops either
 * toward a cryptodev queue pair (per-qp 'cbuf') or back toward the
 * eventdev (per-adapter 'ebuf').  'count' tracks occupancy so that
 * head == tail alone does not have to disambiguate empty vs. full.
 */
struct crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
} __rte_cache_aligned;
482ae84b39SGanapati Kundapura 
/* Per-adapter instance state; allocated in
 * rte_event_crypto_adapter_create_ext() and indexed by adapter id.
 */
struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Flag to indicate backpressure at cryptodev
	 * Stop further dequeuing events from eventdev
	 */
	bool stop_enq_to_cryptodev;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Circular buffer for batching crypto ops to eventdev */
	struct crypto_ops_circular_buffer ebuf;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if the default config callback is used; conf_arg is then
	 * owned by the adapter and freed in rte_event_crypto_adapter_free()
	 */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;
9399a2dd95SBruce Richardson 
/* Per crypto device information, one entry per cryptodev id in
 * event_crypto_adapter::cdevs.
 */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;
11399a2dd95SBruce Richardson 
/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Circular buffer for batching crypto ops to cdev */
	struct crypto_ops_circular_buffer cbuf;
} __rte_cache_aligned;
12199a2dd95SBruce Richardson 
122a256a743SPavan Nikhilesh static struct event_crypto_adapter **event_crypto_adapter;
12399a2dd95SBruce Richardson 
12499a2dd95SBruce Richardson /* Macros to check for valid adapter */
/* Macros to check for valid adapter */
/* Log an error and return 'retval' from the enclosing function when 'id'
 * is not a valid adapter id (see eca_valid_id()).
 * NOTE(review): the trailing "\n" in the message looks redundant if
 * RTE_EDEV_LOG_ERR already appends a newline -- confirm against the
 * log macro definition before removing.
 */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
13199a2dd95SBruce Richardson 
13299a2dd95SBruce Richardson static inline int
13399a2dd95SBruce Richardson eca_valid_id(uint8_t id)
13499a2dd95SBruce Richardson {
13599a2dd95SBruce Richardson 	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
13699a2dd95SBruce Richardson }
13799a2dd95SBruce Richardson 
/* Set up the shared memzone backing the adapter pointer array.
 * The memzone is looked up by name first so that a process attaching
 * after the zone was created reuses the existing array instead of
 * reserving a new one.
 * Returns 0 on success, -rte_errno if the reservation fails.
 */
static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	/* One pointer slot per possible adapter instance, rounded up to a
	 * whole cache line.
	 */
	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}
16399a2dd95SBruce Richardson 
1642ae84b39SGanapati Kundapura static inline bool
1652ae84b39SGanapati Kundapura eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
1662ae84b39SGanapati Kundapura {
1672ae84b39SGanapati Kundapura 	return bufp->count >= BATCH_SIZE;
1682ae84b39SGanapati Kundapura }
1692ae84b39SGanapati Kundapura 
1702ae84b39SGanapati Kundapura static inline bool
1712ae84b39SGanapati Kundapura eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
1722ae84b39SGanapati Kundapura {
1732ae84b39SGanapati Kundapura 	return (bufp->size - bufp->count) >= BATCH_SIZE;
1742ae84b39SGanapati Kundapura }
1752ae84b39SGanapati Kundapura 
/* Release the op pointer array backing the circular buffer.
 * Safe on a zero-initialized buffer: rte_free() accepts NULL.
 */
static inline void
eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
{
	rte_free(bufp->op_buffer);
}
1812ae84b39SGanapati Kundapura 
1822ae84b39SGanapati Kundapura static inline int
1832ae84b39SGanapati Kundapura eca_circular_buffer_init(const char *name,
1842ae84b39SGanapati Kundapura 			 struct crypto_ops_circular_buffer *bufp,
1852ae84b39SGanapati Kundapura 			 uint16_t sz)
1862ae84b39SGanapati Kundapura {
1872ae84b39SGanapati Kundapura 	bufp->op_buffer = rte_zmalloc(name,
1882ae84b39SGanapati Kundapura 				      sizeof(struct rte_crypto_op *) * sz,
1892ae84b39SGanapati Kundapura 				      0);
1902ae84b39SGanapati Kundapura 	if (bufp->op_buffer == NULL)
1912ae84b39SGanapati Kundapura 		return -ENOMEM;
1922ae84b39SGanapati Kundapura 
1932ae84b39SGanapati Kundapura 	bufp->size = sz;
1942ae84b39SGanapati Kundapura 	return 0;
1952ae84b39SGanapati Kundapura }
1962ae84b39SGanapati Kundapura 
1972ae84b39SGanapati Kundapura static inline int
1982ae84b39SGanapati Kundapura eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
1992ae84b39SGanapati Kundapura 			struct rte_crypto_op *op)
2002ae84b39SGanapati Kundapura {
2012ae84b39SGanapati Kundapura 	uint16_t *tailp = &bufp->tail;
2022ae84b39SGanapati Kundapura 
2032ae84b39SGanapati Kundapura 	bufp->op_buffer[*tailp] = op;
2042ae84b39SGanapati Kundapura 	/* circular buffer, go round */
2052ae84b39SGanapati Kundapura 	*tailp = (*tailp + 1) % bufp->size;
2062ae84b39SGanapati Kundapura 	bufp->count++;
2072ae84b39SGanapati Kundapura 
2082ae84b39SGanapati Kundapura 	return 0;
2092ae84b39SGanapati Kundapura }
2102ae84b39SGanapati Kundapura 
/* Enqueue buffered crypto ops to cryptodev 'cdev_id' queue pair 'qp_id'.
 *
 * Only the contiguous region starting at 'head' is attempted:
 *  - tail > head: everything between head and tail;
 *  - tail < head (wrapped): only the ops up to the end of the array;
 *    the wrapped remainder is flushed by a subsequent call;
 *  - tail == head: treated as empty, nothing is flushed.
 *    NOTE(review): a completely full buffer (count == size) also has
 *    tail == head and would be misreported as empty here -- confirm
 *    callers always flush before the buffer fills completely.
 *
 * '*nb_ops_flushed' is set to the number of ops the cryptodev accepted.
 * Indices are reset to 0 when the buffer drains, otherwise head advances
 * past the accepted ops.  Returns 0 when the whole attempted region was
 * accepted, -1 when the cryptodev took fewer ops (backpressure).
 */
static inline int
eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
				  uint8_t cdev_id, uint16_t qp_id,
				  uint16_t *nb_ops_flushed)
{
	uint16_t n = 0;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		n = *tailp - *headp;
	else if (*tailp < *headp)
		n = bufp->size - *headp;
	else {
		*nb_ops_flushed = 0;
		return 0;  /* buffer empty */
	}

	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
						      &ops[*headp], n);
	bufp->count -= *nb_ops_flushed;
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
	} else
		*headp = (*headp + *nb_ops_flushed) % bufp->size;

	return *nb_ops_flushed == n ? 0 : -1;
}
2412ae84b39SGanapati Kundapura 
242a256a743SPavan Nikhilesh static inline struct event_crypto_adapter *
24399a2dd95SBruce Richardson eca_id_to_adapter(uint8_t id)
24499a2dd95SBruce Richardson {
24599a2dd95SBruce Richardson 	return event_crypto_adapter ?
24699a2dd95SBruce Richardson 		event_crypto_adapter[id] : NULL;
24799a2dd95SBruce Richardson }
24899a2dd95SBruce Richardson 
24999a2dd95SBruce Richardson static int
25099a2dd95SBruce Richardson eca_default_config_cb(uint8_t id, uint8_t dev_id,
25199a2dd95SBruce Richardson 			struct rte_event_crypto_adapter_conf *conf, void *arg)
25299a2dd95SBruce Richardson {
25399a2dd95SBruce Richardson 	struct rte_event_dev_config dev_conf;
25499a2dd95SBruce Richardson 	struct rte_eventdev *dev;
25599a2dd95SBruce Richardson 	uint8_t port_id;
25699a2dd95SBruce Richardson 	int started;
25799a2dd95SBruce Richardson 	int ret;
25899a2dd95SBruce Richardson 	struct rte_event_port_conf *port_conf = arg;
259a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
26099a2dd95SBruce Richardson 
26199a2dd95SBruce Richardson 	if (adapter == NULL)
26299a2dd95SBruce Richardson 		return -EINVAL;
26399a2dd95SBruce Richardson 
26499a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
26599a2dd95SBruce Richardson 	dev_conf = dev->data->dev_conf;
26699a2dd95SBruce Richardson 
26799a2dd95SBruce Richardson 	started = dev->data->dev_started;
26899a2dd95SBruce Richardson 	if (started)
26999a2dd95SBruce Richardson 		rte_event_dev_stop(dev_id);
27099a2dd95SBruce Richardson 	port_id = dev_conf.nb_event_ports;
27199a2dd95SBruce Richardson 	dev_conf.nb_event_ports += 1;
27299a2dd95SBruce Richardson 	ret = rte_event_dev_configure(dev_id, &dev_conf);
27399a2dd95SBruce Richardson 	if (ret) {
27499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
27599a2dd95SBruce Richardson 		if (started) {
27699a2dd95SBruce Richardson 			if (rte_event_dev_start(dev_id))
27799a2dd95SBruce Richardson 				return -EIO;
27899a2dd95SBruce Richardson 		}
27999a2dd95SBruce Richardson 		return ret;
28099a2dd95SBruce Richardson 	}
28199a2dd95SBruce Richardson 
28299a2dd95SBruce Richardson 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
28399a2dd95SBruce Richardson 	if (ret) {
28499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
28599a2dd95SBruce Richardson 		return ret;
28699a2dd95SBruce Richardson 	}
28799a2dd95SBruce Richardson 
28899a2dd95SBruce Richardson 	conf->event_port_id = port_id;
28999a2dd95SBruce Richardson 	conf->max_nb = DEFAULT_MAX_NB;
29099a2dd95SBruce Richardson 	if (started)
29199a2dd95SBruce Richardson 		ret = rte_event_dev_start(dev_id);
29299a2dd95SBruce Richardson 
29399a2dd95SBruce Richardson 	adapter->default_cb_arg = 1;
29499a2dd95SBruce Richardson 	return ret;
29599a2dd95SBruce Richardson }
29699a2dd95SBruce Richardson 
/* Create a crypto adapter instance with a caller-supplied configuration
 * callback.
 *
 * Lazily initializes the shared adapter array, rejects duplicate ids,
 * then allocates the adapter, its eventdev-side circular buffer and the
 * per-cryptodev info array on the eventdev's socket.  On any failure all
 * partially allocated resources are released.
 * Returns 0 on success; -EINVAL for bad id/dev_id/conf_cb, -EEXIST if
 * the id is already in use, -ENOMEM on allocation failure.
 */
int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	/* First adapter created attaches/creates the shared array */
	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	if (eca_circular_buffer_init("eca_edev_circular_buffer",
				     &adapter->ebuf,
				     CRYPTO_ADAPTER_BUFFER_SZ)) {
		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
		rte_free(adapter);
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	/* Safe: both buffers are CRYPTO_ADAPTER_(MEM_)NAME_LEN == 32 bytes
	 * and mem_name was produced by a bounded snprintf above.
	 */
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	/* Cache the cryptodev handles present at creation time */
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}
38499a2dd95SBruce Richardson 
38599a2dd95SBruce Richardson 
38699a2dd95SBruce Richardson int
38799a2dd95SBruce Richardson rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
38899a2dd95SBruce Richardson 				struct rte_event_port_conf *port_config,
38999a2dd95SBruce Richardson 				enum rte_event_crypto_adapter_mode mode)
39099a2dd95SBruce Richardson {
39199a2dd95SBruce Richardson 	struct rte_event_port_conf *pc;
39299a2dd95SBruce Richardson 	int ret;
39399a2dd95SBruce Richardson 
39499a2dd95SBruce Richardson 	if (port_config == NULL)
39599a2dd95SBruce Richardson 		return -EINVAL;
39699a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
39799a2dd95SBruce Richardson 
39899a2dd95SBruce Richardson 	pc = rte_malloc(NULL, sizeof(*pc), 0);
39999a2dd95SBruce Richardson 	if (pc == NULL)
40099a2dd95SBruce Richardson 		return -ENOMEM;
40199a2dd95SBruce Richardson 	*pc = *port_config;
40299a2dd95SBruce Richardson 	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
40399a2dd95SBruce Richardson 						  eca_default_config_cb,
40499a2dd95SBruce Richardson 						  mode,
40599a2dd95SBruce Richardson 						  pc);
40699a2dd95SBruce Richardson 	if (ret)
40799a2dd95SBruce Richardson 		rte_free(pc);
40899a2dd95SBruce Richardson 
40999a2dd95SBruce Richardson 	return ret;
41099a2dd95SBruce Richardson }
41199a2dd95SBruce Richardson 
41299a2dd95SBruce Richardson int
41399a2dd95SBruce Richardson rte_event_crypto_adapter_free(uint8_t id)
41499a2dd95SBruce Richardson {
415a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
41699a2dd95SBruce Richardson 
41799a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
41899a2dd95SBruce Richardson 
41999a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
42099a2dd95SBruce Richardson 	if (adapter == NULL)
42199a2dd95SBruce Richardson 		return -EINVAL;
42299a2dd95SBruce Richardson 
42399a2dd95SBruce Richardson 	if (adapter->nb_qps) {
42499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
42599a2dd95SBruce Richardson 				adapter->nb_qps);
42699a2dd95SBruce Richardson 		return -EBUSY;
42799a2dd95SBruce Richardson 	}
42899a2dd95SBruce Richardson 
42999a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_free(id, adapter);
43099a2dd95SBruce Richardson 	if (adapter->default_cb_arg)
43199a2dd95SBruce Richardson 		rte_free(adapter->conf_arg);
43299a2dd95SBruce Richardson 	rte_free(adapter->cdevs);
43399a2dd95SBruce Richardson 	rte_free(adapter);
43499a2dd95SBruce Richardson 	event_crypto_adapter[id] = NULL;
43599a2dd95SBruce Richardson 
43699a2dd95SBruce Richardson 	return 0;
43799a2dd95SBruce Richardson }
43899a2dd95SBruce Richardson 
43999a2dd95SBruce Richardson static inline unsigned int
440a256a743SPavan Nikhilesh eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
441a256a743SPavan Nikhilesh 		     unsigned int cnt)
44299a2dd95SBruce Richardson {
44399a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
44499a2dd95SBruce Richardson 	union rte_event_crypto_metadata *m_data = NULL;
44599a2dd95SBruce Richardson 	struct crypto_queue_pair_info *qp_info = NULL;
44699a2dd95SBruce Richardson 	struct rte_crypto_op *crypto_op;
44799a2dd95SBruce Richardson 	unsigned int i, n;
4482ae84b39SGanapati Kundapura 	uint16_t qp_id, nb_enqueued = 0;
44999a2dd95SBruce Richardson 	uint8_t cdev_id;
4502ae84b39SGanapati Kundapura 	int ret;
45199a2dd95SBruce Richardson 
45299a2dd95SBruce Richardson 	ret = 0;
45399a2dd95SBruce Richardson 	n = 0;
45499a2dd95SBruce Richardson 	stats->event_deq_count += cnt;
45599a2dd95SBruce Richardson 
45699a2dd95SBruce Richardson 	for (i = 0; i < cnt; i++) {
45799a2dd95SBruce Richardson 		crypto_op = ev[i].event_ptr;
45899a2dd95SBruce Richardson 		if (crypto_op == NULL)
45999a2dd95SBruce Richardson 			continue;
460b8c8a6ddSAkhil Goyal 		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
46199a2dd95SBruce Richardson 		if (m_data == NULL) {
46299a2dd95SBruce Richardson 			rte_pktmbuf_free(crypto_op->sym->m_src);
46399a2dd95SBruce Richardson 			rte_crypto_op_free(crypto_op);
46499a2dd95SBruce Richardson 			continue;
46599a2dd95SBruce Richardson 		}
46699a2dd95SBruce Richardson 
46799a2dd95SBruce Richardson 		cdev_id = m_data->request_info.cdev_id;
46899a2dd95SBruce Richardson 		qp_id = m_data->request_info.queue_pair_id;
46999a2dd95SBruce Richardson 		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
47099a2dd95SBruce Richardson 		if (!qp_info->qp_enabled) {
47199a2dd95SBruce Richardson 			rte_pktmbuf_free(crypto_op->sym->m_src);
47299a2dd95SBruce Richardson 			rte_crypto_op_free(crypto_op);
47399a2dd95SBruce Richardson 			continue;
47499a2dd95SBruce Richardson 		}
4752ae84b39SGanapati Kundapura 		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
47699a2dd95SBruce Richardson 
4772ae84b39SGanapati Kundapura 		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
4782ae84b39SGanapati Kundapura 			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
4792ae84b39SGanapati Kundapura 								cdev_id,
48099a2dd95SBruce Richardson 								qp_id,
4812ae84b39SGanapati Kundapura 								&nb_enqueued);
4822ae84b39SGanapati Kundapura 			/**
4832ae84b39SGanapati Kundapura 			 * If some crypto ops failed to flush to cdev and
4842ae84b39SGanapati Kundapura 			 * space for another batch is not available, stop
4852ae84b39SGanapati Kundapura 			 * dequeue from eventdev momentarily
4862ae84b39SGanapati Kundapura 			 */
4872ae84b39SGanapati Kundapura 			if (unlikely(ret < 0 &&
4882ae84b39SGanapati Kundapura 				!eca_circular_buffer_space_for_batch(
4892ae84b39SGanapati Kundapura 							&qp_info->cbuf)))
4902ae84b39SGanapati Kundapura 				adapter->stop_enq_to_cryptodev = true;
49199a2dd95SBruce Richardson 		}
49299a2dd95SBruce Richardson 
4932ae84b39SGanapati Kundapura 		stats->crypto_enq_count += nb_enqueued;
4942ae84b39SGanapati Kundapura 		n += nb_enqueued;
49599a2dd95SBruce Richardson 	}
49699a2dd95SBruce Richardson 
49799a2dd95SBruce Richardson 	return n;
49899a2dd95SBruce Richardson }
49999a2dd95SBruce Richardson 
/* Flush the buffered ops of every enabled queue pair of one cryptodev.
 *
 * NOTE(review): despite its name, '*nb_ops_flushed' is incremented by
 * the number of ops still PENDING in each queue pair buffer after the
 * flush attempt (cbuf.count), not by the number flushed; the caller
 * uses it to decide whether eventdev dequeue can be re-enabled.
 * Returns the number of ops actually enqueued to the cryptodev.
 */
static unsigned int
eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
{
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_cryptodev *dev;
	uint16_t nb = 0, nb_enqueued = 0;
	uint16_t qp;

	curr_dev = &adapter->cdevs[cdev_id];
	dev = rte_cryptodev_pmd_get_dev(cdev_id);

	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

		curr_queue = &curr_dev->qpairs[qp];
		/* Skip queue pairs never configured or currently disabled */
		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
			continue;

		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
						  cdev_id,
						  qp,
						  &nb_enqueued);
		/* Accumulate what is still buffered, see note above */
		*nb_ops_flushed += curr_queue->cbuf.count;
		nb += nb_enqueued;
	}

	return nb;
}
5292ae84b39SGanapati Kundapura 
/* Flush buffered ops across all cryptodevs and update enqueue stats.
 *
 * 'nb_ops_flushed' accumulates the ops still pending after the flush
 * (see eca_crypto_cdev_flush()); when it is zero every circular buffer
 * drained, so backpressure is lifted and eventdev dequeue resumes.
 * Returns the total number of ops enqueued to cryptodevs.
 */
static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	uint8_t cdev_id;
	uint16_t nb_enqueued = 0;
	uint16_t nb_ops_flushed = 0;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
		nb_enqueued += eca_crypto_cdev_flush(adapter,
						    cdev_id,
						    &nb_ops_flushed);
	/**
	 * Enable dequeue from eventdev if all ops from circular
	 * buffer flushed to cdev
	 */
	if (!nb_ops_flushed)
		adapter->stop_enq_to_cryptodev = false;

	stats->crypto_enq_count += nb_enqueued;

	return nb_enqueued;
}
55499a2dd95SBruce Richardson 
/* Service-side enqueue loop: dequeue up to 'max_enq' events from the
 * adapter's event port and hand their crypto ops to cryptodevs.
 *
 * No-op in OP_NEW mode (applications enqueue ops directly).  While
 * cryptodev backpressure is active, only flushing is attempted and
 * event dequeue is skipped until the buffers drain.  Independently of
 * that, a full flush is forced every CRYPTO_ENQ_FLUSH_THRESHOLD
 * invocations so partially filled batches are not held indefinitely.
 * Returns the number of ops enqueued to cryptodevs.
 */
static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	if (unlikely(adapter->stop_enq_to_cryptodev)) {
		nb_enqueued += eca_crypto_enq_flush(adapter);

		/* Still backpressured after the flush: do not pull more
		 * events this round.
		 */
		if (unlikely(adapter->stop_enq_to_cryptodev))
			goto skip_event_dequeue_burst;
	}

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

skip_event_dequeue_burst:

	/* Periodic flush; threshold is a power of two, so the mask test
	 * fires once every CRYPTO_ENQ_FLUSH_THRESHOLD iterations.
	 */
	if ((++adapter->transmit_loop_count &
		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}
59799a2dd95SBruce Richardson 
/* Convert up to BATCH_SIZE completed crypto ops into events (using the
 * response_info stored in each op's session/op metadata) and enqueue
 * them to the adapter's event port, retrying a bounded number of times
 * on back-pressure. Ops without metadata are freed and dropped.
 * Returns the number of events accepted by the event device.
 */
static inline uint16_t
eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
		  struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		/* NOTE(review): the events[] slot is claimed (nb_ev++) before
		 * the op is validated; when m_data is NULL below, the claimed
		 * slot is never filled yet stays counted in nb_ev, so an
		 * uninitialized event can be enqueued. Confirm whether a
		 * dropped op should consume a slot at all.
		 */
		struct rte_event *ev = &events[nb_ev++];

		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
		if (unlikely(m_data == NULL)) {
			/* No response metadata: drop the op and its mbuf */
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		/* The event template comes from the op's response_info */
		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		/* FORWARD releases the originating event implicitly held by
		 * the port; NEW is used when implicit release is enabled.
		 */
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	/* Retry on event device back-pressure, up to
	 * CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES attempts.
	 */
	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						  event_port_id,
						  &events[nb_enqueued],
						  nb_ev - nb_enqueued);

	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;

	return nb_enqueued;
}
64999a2dd95SBruce Richardson 
6502ae84b39SGanapati Kundapura static int
6512ae84b39SGanapati Kundapura eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
6522ae84b39SGanapati Kundapura 				   struct crypto_ops_circular_buffer *bufp)
6532ae84b39SGanapati Kundapura {
6542ae84b39SGanapati Kundapura 	uint16_t n = 0, nb_ops_flushed;
6552ae84b39SGanapati Kundapura 	uint16_t *headp = &bufp->head;
6562ae84b39SGanapati Kundapura 	uint16_t *tailp = &bufp->tail;
6572ae84b39SGanapati Kundapura 	struct rte_crypto_op **ops = bufp->op_buffer;
6582ae84b39SGanapati Kundapura 
6592ae84b39SGanapati Kundapura 	if (*tailp > *headp)
6602ae84b39SGanapati Kundapura 		n = *tailp - *headp;
6612ae84b39SGanapati Kundapura 	else if (*tailp < *headp)
6622ae84b39SGanapati Kundapura 		n = bufp->size - *headp;
6632ae84b39SGanapati Kundapura 	else
6642ae84b39SGanapati Kundapura 		return 0;  /* buffer empty */
6652ae84b39SGanapati Kundapura 
6662ae84b39SGanapati Kundapura 	nb_ops_flushed =  eca_ops_enqueue_burst(adapter, ops, n);
6672ae84b39SGanapati Kundapura 	bufp->count -= nb_ops_flushed;
6682ae84b39SGanapati Kundapura 	if (!bufp->count) {
6692ae84b39SGanapati Kundapura 		*headp = 0;
6702ae84b39SGanapati Kundapura 		*tailp = 0;
6712ae84b39SGanapati Kundapura 		return 0;  /* buffer empty */
6722ae84b39SGanapati Kundapura 	}
6732ae84b39SGanapati Kundapura 
6742ae84b39SGanapati Kundapura 	*headp = (*headp + nb_ops_flushed) % bufp->size;
6752ae84b39SGanapati Kundapura 	return 1;
6762ae84b39SGanapati Kundapura }
6772ae84b39SGanapati Kundapura 
6782ae84b39SGanapati Kundapura 
6792ae84b39SGanapati Kundapura static void
6802ae84b39SGanapati Kundapura eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
6812ae84b39SGanapati Kundapura {
6822ae84b39SGanapati Kundapura 	if (likely(adapter->ebuf.count == 0))
6832ae84b39SGanapati Kundapura 		return;
6842ae84b39SGanapati Kundapura 
6852ae84b39SGanapati Kundapura 	while (eca_circular_buffer_flush_to_evdev(adapter,
6862ae84b39SGanapati Kundapura 						  &adapter->ebuf))
6872ae84b39SGanapati Kundapura 		;
6882ae84b39SGanapati Kundapura }
68999a2dd95SBruce Richardson static inline unsigned int
690a256a743SPavan Nikhilesh eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
69199a2dd95SBruce Richardson 			   unsigned int max_deq)
69299a2dd95SBruce Richardson {
69399a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
69499a2dd95SBruce Richardson 	struct crypto_device_info *curr_dev;
69599a2dd95SBruce Richardson 	struct crypto_queue_pair_info *curr_queue;
69699a2dd95SBruce Richardson 	struct rte_crypto_op *ops[BATCH_SIZE];
6972ae84b39SGanapati Kundapura 	uint16_t n, nb_deq, nb_enqueued, i;
69899a2dd95SBruce Richardson 	struct rte_cryptodev *dev;
69999a2dd95SBruce Richardson 	uint8_t cdev_id;
70099a2dd95SBruce Richardson 	uint16_t qp, dev_qps;
70199a2dd95SBruce Richardson 	bool done;
70299a2dd95SBruce Richardson 	uint16_t num_cdev = rte_cryptodev_count();
70399a2dd95SBruce Richardson 
70499a2dd95SBruce Richardson 	nb_deq = 0;
7052ae84b39SGanapati Kundapura 	eca_ops_buffer_flush(adapter);
7062ae84b39SGanapati Kundapura 
70799a2dd95SBruce Richardson 	do {
70899a2dd95SBruce Richardson 		done = true;
70999a2dd95SBruce Richardson 
71099a2dd95SBruce Richardson 		for (cdev_id = adapter->next_cdev_id;
71199a2dd95SBruce Richardson 			cdev_id < num_cdev; cdev_id++) {
7122ae84b39SGanapati Kundapura 			uint16_t queues = 0;
7132ae84b39SGanapati Kundapura 
71499a2dd95SBruce Richardson 			curr_dev = &adapter->cdevs[cdev_id];
71599a2dd95SBruce Richardson 			dev = curr_dev->dev;
7162ae84b39SGanapati Kundapura 			if (unlikely(dev == NULL))
71799a2dd95SBruce Richardson 				continue;
7182ae84b39SGanapati Kundapura 
71999a2dd95SBruce Richardson 			dev_qps = dev->data->nb_queue_pairs;
72099a2dd95SBruce Richardson 
72199a2dd95SBruce Richardson 			for (qp = curr_dev->next_queue_pair_id;
72299a2dd95SBruce Richardson 				queues < dev_qps; qp = (qp + 1) % dev_qps,
72399a2dd95SBruce Richardson 				queues++) {
72499a2dd95SBruce Richardson 
72599a2dd95SBruce Richardson 				curr_queue = &curr_dev->qpairs[qp];
7262ae84b39SGanapati Kundapura 				if (unlikely(curr_queue == NULL ||
7272ae84b39SGanapati Kundapura 				    !curr_queue->qp_enabled))
72899a2dd95SBruce Richardson 					continue;
72999a2dd95SBruce Richardson 
73099a2dd95SBruce Richardson 				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
73199a2dd95SBruce Richardson 					ops, BATCH_SIZE);
73299a2dd95SBruce Richardson 				if (!n)
73399a2dd95SBruce Richardson 					continue;
73499a2dd95SBruce Richardson 
73599a2dd95SBruce Richardson 				done = false;
7362ae84b39SGanapati Kundapura 				nb_enqueued = 0;
7372ae84b39SGanapati Kundapura 
73899a2dd95SBruce Richardson 				stats->crypto_deq_count += n;
7392ae84b39SGanapati Kundapura 
7402ae84b39SGanapati Kundapura 				if (unlikely(!adapter->ebuf.count))
7412ae84b39SGanapati Kundapura 					nb_enqueued = eca_ops_enqueue_burst(
7422ae84b39SGanapati Kundapura 							adapter, ops, n);
7432ae84b39SGanapati Kundapura 
7442ae84b39SGanapati Kundapura 				if (likely(nb_enqueued == n))
7452ae84b39SGanapati Kundapura 					goto check;
7462ae84b39SGanapati Kundapura 
7472ae84b39SGanapati Kundapura 				/* Failed to enqueue events case */
7482ae84b39SGanapati Kundapura 				for (i = nb_enqueued; i < n; i++)
7492ae84b39SGanapati Kundapura 					eca_circular_buffer_add(
7502ae84b39SGanapati Kundapura 						&adapter->ebuf,
7512ae84b39SGanapati Kundapura 						ops[nb_enqueued]);
7522ae84b39SGanapati Kundapura 
7532ae84b39SGanapati Kundapura check:
75499a2dd95SBruce Richardson 				nb_deq += n;
75599a2dd95SBruce Richardson 
7562ae84b39SGanapati Kundapura 				if (nb_deq >= max_deq) {
75799a2dd95SBruce Richardson 					if ((qp + 1) == dev_qps) {
75899a2dd95SBruce Richardson 						adapter->next_cdev_id =
75999a2dd95SBruce Richardson 							(cdev_id + 1)
76099a2dd95SBruce Richardson 							% num_cdev;
76199a2dd95SBruce Richardson 					}
76299a2dd95SBruce Richardson 					curr_dev->next_queue_pair_id = (qp + 1)
76399a2dd95SBruce Richardson 						% dev->data->nb_queue_pairs;
76499a2dd95SBruce Richardson 
76599a2dd95SBruce Richardson 					return nb_deq;
76699a2dd95SBruce Richardson 				}
76799a2dd95SBruce Richardson 			}
76899a2dd95SBruce Richardson 		}
7692ae84b39SGanapati Kundapura 		adapter->next_cdev_id = 0;
77099a2dd95SBruce Richardson 	} while (done == false);
77199a2dd95SBruce Richardson 	return nb_deq;
77299a2dd95SBruce Richardson }
77399a2dd95SBruce Richardson 
774*34d78557SMattias Rönnblom static int
775a256a743SPavan Nikhilesh eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
77699a2dd95SBruce Richardson 		       unsigned int max_ops)
77799a2dd95SBruce Richardson {
778578402f2SMattias Rönnblom 	unsigned int ops_left = max_ops;
779578402f2SMattias Rönnblom 
780578402f2SMattias Rönnblom 	while (ops_left > 0) {
78199a2dd95SBruce Richardson 		unsigned int e_cnt, d_cnt;
78299a2dd95SBruce Richardson 
783578402f2SMattias Rönnblom 		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
784578402f2SMattias Rönnblom 		ops_left -= RTE_MIN(ops_left, e_cnt);
78599a2dd95SBruce Richardson 
786578402f2SMattias Rönnblom 		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
787578402f2SMattias Rönnblom 		ops_left -= RTE_MIN(ops_left, d_cnt);
78899a2dd95SBruce Richardson 
78999a2dd95SBruce Richardson 		if (e_cnt == 0 && d_cnt == 0)
79099a2dd95SBruce Richardson 			break;
79199a2dd95SBruce Richardson 
79299a2dd95SBruce Richardson 	}
793578402f2SMattias Rönnblom 
794*34d78557SMattias Rönnblom 	if (ops_left == max_ops) {
795578402f2SMattias Rönnblom 		rte_event_maintain(adapter->eventdev_id,
796578402f2SMattias Rönnblom 				   adapter->event_port_id, 0);
797*34d78557SMattias Rönnblom 		return -EAGAIN;
798*34d78557SMattias Rönnblom 	} else
799*34d78557SMattias Rönnblom 		return 0;
80099a2dd95SBruce Richardson }
80199a2dd95SBruce Richardson 
80299a2dd95SBruce Richardson static int
80399a2dd95SBruce Richardson eca_service_func(void *args)
80499a2dd95SBruce Richardson {
805a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter = args;
806*34d78557SMattias Rönnblom 	int ret;
80799a2dd95SBruce Richardson 
80899a2dd95SBruce Richardson 	if (rte_spinlock_trylock(&adapter->lock) == 0)
80999a2dd95SBruce Richardson 		return 0;
810*34d78557SMattias Rönnblom 	ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
81199a2dd95SBruce Richardson 	rte_spinlock_unlock(&adapter->lock);
81299a2dd95SBruce Richardson 
813*34d78557SMattias Rönnblom 	return ret;
81499a2dd95SBruce Richardson }
81599a2dd95SBruce Richardson 
81699a2dd95SBruce Richardson static int
817a256a743SPavan Nikhilesh eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
81899a2dd95SBruce Richardson {
81999a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_conf adapter_conf;
82099a2dd95SBruce Richardson 	struct rte_service_spec service;
82199a2dd95SBruce Richardson 	int ret;
82299a2dd95SBruce Richardson 
82399a2dd95SBruce Richardson 	if (adapter->service_inited)
82499a2dd95SBruce Richardson 		return 0;
82599a2dd95SBruce Richardson 
82699a2dd95SBruce Richardson 	memset(&service, 0, sizeof(service));
82799a2dd95SBruce Richardson 	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
82899a2dd95SBruce Richardson 		"rte_event_crypto_adapter_%d", id);
82999a2dd95SBruce Richardson 	service.socket_id = adapter->socket_id;
83099a2dd95SBruce Richardson 	service.callback = eca_service_func;
83199a2dd95SBruce Richardson 	service.callback_userdata = adapter;
83299a2dd95SBruce Richardson 	/* Service function handles locking for queue add/del updates */
83399a2dd95SBruce Richardson 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
83499a2dd95SBruce Richardson 	ret = rte_service_component_register(&service, &adapter->service_id);
83599a2dd95SBruce Richardson 	if (ret) {
83699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
83799a2dd95SBruce Richardson 			service.name, ret);
83899a2dd95SBruce Richardson 		return ret;
83999a2dd95SBruce Richardson 	}
84099a2dd95SBruce Richardson 
84199a2dd95SBruce Richardson 	ret = adapter->conf_cb(id, adapter->eventdev_id,
84299a2dd95SBruce Richardson 		&adapter_conf, adapter->conf_arg);
84399a2dd95SBruce Richardson 	if (ret) {
84499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
84599a2dd95SBruce Richardson 			ret);
84699a2dd95SBruce Richardson 		return ret;
84799a2dd95SBruce Richardson 	}
84899a2dd95SBruce Richardson 
84999a2dd95SBruce Richardson 	adapter->max_nb = adapter_conf.max_nb;
85099a2dd95SBruce Richardson 	adapter->event_port_id = adapter_conf.event_port_id;
85199a2dd95SBruce Richardson 	adapter->service_inited = 1;
85299a2dd95SBruce Richardson 
85399a2dd95SBruce Richardson 	return ret;
85499a2dd95SBruce Richardson }
85599a2dd95SBruce Richardson 
85699a2dd95SBruce Richardson static void
857a256a743SPavan Nikhilesh eca_update_qp_info(struct event_crypto_adapter *adapter,
858a256a743SPavan Nikhilesh 		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
85999a2dd95SBruce Richardson 		   uint8_t add)
86099a2dd95SBruce Richardson {
86199a2dd95SBruce Richardson 	struct crypto_queue_pair_info *qp_info;
86299a2dd95SBruce Richardson 	int enabled;
86399a2dd95SBruce Richardson 	uint16_t i;
86499a2dd95SBruce Richardson 
86599a2dd95SBruce Richardson 	if (dev_info->qpairs == NULL)
86699a2dd95SBruce Richardson 		return;
86799a2dd95SBruce Richardson 
86899a2dd95SBruce Richardson 	if (queue_pair_id == -1) {
86999a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
87099a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info, i, add);
87199a2dd95SBruce Richardson 	} else {
87299a2dd95SBruce Richardson 		qp_info = &dev_info->qpairs[queue_pair_id];
87399a2dd95SBruce Richardson 		enabled = qp_info->qp_enabled;
87499a2dd95SBruce Richardson 		if (add) {
87599a2dd95SBruce Richardson 			adapter->nb_qps += !enabled;
87699a2dd95SBruce Richardson 			dev_info->num_qpairs += !enabled;
87799a2dd95SBruce Richardson 		} else {
87899a2dd95SBruce Richardson 			adapter->nb_qps -= enabled;
87999a2dd95SBruce Richardson 			dev_info->num_qpairs -= enabled;
88099a2dd95SBruce Richardson 		}
88199a2dd95SBruce Richardson 		qp_info->qp_enabled = !!add;
88299a2dd95SBruce Richardson 	}
88399a2dd95SBruce Richardson }
88499a2dd95SBruce Richardson 
88599a2dd95SBruce Richardson static int
886a256a743SPavan Nikhilesh eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
88799a2dd95SBruce Richardson 		   int queue_pair_id)
88899a2dd95SBruce Richardson {
88999a2dd95SBruce Richardson 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
89099a2dd95SBruce Richardson 	struct crypto_queue_pair_info *qpairs;
89199a2dd95SBruce Richardson 	uint32_t i;
89299a2dd95SBruce Richardson 
89399a2dd95SBruce Richardson 	if (dev_info->qpairs == NULL) {
89499a2dd95SBruce Richardson 		dev_info->qpairs =
89599a2dd95SBruce Richardson 		    rte_zmalloc_socket(adapter->mem_name,
89699a2dd95SBruce Richardson 					dev_info->dev->data->nb_queue_pairs *
89799a2dd95SBruce Richardson 					sizeof(struct crypto_queue_pair_info),
89899a2dd95SBruce Richardson 					0, adapter->socket_id);
89999a2dd95SBruce Richardson 		if (dev_info->qpairs == NULL)
90099a2dd95SBruce Richardson 			return -ENOMEM;
90199a2dd95SBruce Richardson 
90299a2dd95SBruce Richardson 		qpairs = dev_info->qpairs;
9032ae84b39SGanapati Kundapura 
9042ae84b39SGanapati Kundapura 		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
9052ae84b39SGanapati Kundapura 					     &qpairs->cbuf,
9062ae84b39SGanapati Kundapura 					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
9072ae84b39SGanapati Kundapura 			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
9082ae84b39SGanapati Kundapura 					 "buffer");
90999a2dd95SBruce Richardson 			rte_free(qpairs);
91099a2dd95SBruce Richardson 			return -ENOMEM;
91199a2dd95SBruce Richardson 		}
91299a2dd95SBruce Richardson 	}
91399a2dd95SBruce Richardson 
91499a2dd95SBruce Richardson 	if (queue_pair_id == -1) {
91599a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
91699a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info, i, 1);
91799a2dd95SBruce Richardson 	} else
91899a2dd95SBruce Richardson 		eca_update_qp_info(adapter, dev_info,
91999a2dd95SBruce Richardson 					(uint16_t)queue_pair_id, 1);
92099a2dd95SBruce Richardson 
92199a2dd95SBruce Richardson 	return 0;
92299a2dd95SBruce Richardson }
92399a2dd95SBruce Richardson 
/* Add a cryptodev queue pair (or all queue pairs when queue_pair_id is
 * -1) to the adapter. Depending on the event device capabilities the
 * queue pair is bound through the PMD callback (internal event port)
 * and/or registered with the SW service data path. conf carries the
 * optional per-queue configuration, including event vectorization.
 */
int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct rte_event_crypto_adapter_vector_limits limits;
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
			" cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	/* QP_EV_BIND capability needs per-queue event info, so conf is
	 * mandatory in that case; otherwise a NULL conf is acceptable.
	 */
	if (conf == NULL) {
		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
					 cdev_id);
			return -EINVAL;
		}
	} else {
		/* Validate vector configuration against the device limits */
		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
						 "dev %" PRIu8 " cdev %" PRIu8, id,
						 cdev_id);
				return -ENOTSUP;
			}

			ret = rte_event_crypto_adapter_vector_limits_get(
				adapter->eventdev_id, cdev_id, &limits);
			if (ret < 0) {
				RTE_EDEV_LOG_ERR("Failed to get event device vector "
						 "limits, dev %" PRIu8 " cdev %" PRIu8,
						 id, cdev_id);
				return -EINVAL;
			}

			if (conf->vector_sz < limits.min_sz ||
			    conf->vector_sz > limits.max_sz ||
			    conf->vector_timeout_ns < limits.min_timeout_ns ||
			    conf->vector_timeout_ns > limits.max_timeout_ns ||
			    conf->vector_mp == NULL) {
				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
						" dev %" PRIu8 " cdev %" PRIu8,
						id, cdev_id);
				return -EINVAL;
			}

			/* Mempool elements must be able to hold a vector of
			 * vector_sz pointers plus the vector header.
			 */
			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
			    (sizeof(uintptr_t) * conf->vector_sz))) {
				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
						" dev %" PRIu8 " cdev %" PRIu8,
						id, cdev_id);
				return -EINVAL;
			}
		}
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no need of service core as HW supports event forward capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
			return -ENOTSUP;
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				conf);
		if (ret)
			return ret;

		else
			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
					   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or SW adapter, initiate services so the application can choose
	 * which ever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 *         Application may wants to use one of below two mode
	 *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
	 *          b. OP_NEW mode -> HW Dequeue
	 * Case 2: No HW caps, use SW adapter
	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
	 *          b. OP_NEW mode -> SW Dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
		queue_pair_id, conf);
	return 0;
}
107999a2dd95SBruce Richardson 
108099a2dd95SBruce Richardson int
108199a2dd95SBruce Richardson rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
108299a2dd95SBruce Richardson 					int32_t queue_pair_id)
108399a2dd95SBruce Richardson {
1084a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
108599a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
108699a2dd95SBruce Richardson 	struct rte_eventdev *dev;
108799a2dd95SBruce Richardson 	int ret;
108899a2dd95SBruce Richardson 	uint32_t cap;
108999a2dd95SBruce Richardson 	uint16_t i;
109099a2dd95SBruce Richardson 
109199a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
109299a2dd95SBruce Richardson 
1093e74abd48SAkhil Goyal 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
109499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
109599a2dd95SBruce Richardson 		return -EINVAL;
109699a2dd95SBruce Richardson 	}
109799a2dd95SBruce Richardson 
109899a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
109999a2dd95SBruce Richardson 	if (adapter == NULL)
110099a2dd95SBruce Richardson 		return -EINVAL;
110199a2dd95SBruce Richardson 
110299a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
110399a2dd95SBruce Richardson 	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
110499a2dd95SBruce Richardson 						cdev_id,
110599a2dd95SBruce Richardson 						&cap);
110699a2dd95SBruce Richardson 	if (ret)
110799a2dd95SBruce Richardson 		return ret;
110899a2dd95SBruce Richardson 
110999a2dd95SBruce Richardson 	dev_info = &adapter->cdevs[cdev_id];
111099a2dd95SBruce Richardson 
111199a2dd95SBruce Richardson 	if (queue_pair_id != -1 &&
111299a2dd95SBruce Richardson 	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
111399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
111499a2dd95SBruce Richardson 				 (uint16_t)queue_pair_id);
111599a2dd95SBruce Richardson 		return -EINVAL;
111699a2dd95SBruce Richardson 	}
111799a2dd95SBruce Richardson 
111899a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
111999a2dd95SBruce Richardson 	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
112099a2dd95SBruce Richardson 	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
11218f1d23ecSDavid Marchand 		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
11228f1d23ecSDavid Marchand 			return -ENOTSUP;
112399a2dd95SBruce Richardson 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
112499a2dd95SBruce Richardson 						dev_info->dev,
112599a2dd95SBruce Richardson 						queue_pair_id);
112699a2dd95SBruce Richardson 		if (ret == 0) {
112799a2dd95SBruce Richardson 			eca_update_qp_info(adapter,
112899a2dd95SBruce Richardson 					&adapter->cdevs[cdev_id],
112999a2dd95SBruce Richardson 					queue_pair_id,
113099a2dd95SBruce Richardson 					0);
113199a2dd95SBruce Richardson 			if (dev_info->num_qpairs == 0) {
113299a2dd95SBruce Richardson 				rte_free(dev_info->qpairs);
113399a2dd95SBruce Richardson 				dev_info->qpairs = NULL;
113499a2dd95SBruce Richardson 			}
113599a2dd95SBruce Richardson 		}
113699a2dd95SBruce Richardson 	} else {
113799a2dd95SBruce Richardson 		if (adapter->nb_qps == 0)
113899a2dd95SBruce Richardson 			return 0;
113999a2dd95SBruce Richardson 
114099a2dd95SBruce Richardson 		rte_spinlock_lock(&adapter->lock);
114199a2dd95SBruce Richardson 		if (queue_pair_id == -1) {
114299a2dd95SBruce Richardson 			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
114399a2dd95SBruce Richardson 				i++)
114499a2dd95SBruce Richardson 				eca_update_qp_info(adapter, dev_info,
114599a2dd95SBruce Richardson 							queue_pair_id, 0);
114699a2dd95SBruce Richardson 		} else {
114799a2dd95SBruce Richardson 			eca_update_qp_info(adapter, dev_info,
114899a2dd95SBruce Richardson 						(uint16_t)queue_pair_id, 0);
114999a2dd95SBruce Richardson 		}
115099a2dd95SBruce Richardson 
115199a2dd95SBruce Richardson 		if (dev_info->num_qpairs == 0) {
115299a2dd95SBruce Richardson 			rte_free(dev_info->qpairs);
115399a2dd95SBruce Richardson 			dev_info->qpairs = NULL;
115499a2dd95SBruce Richardson 		}
115599a2dd95SBruce Richardson 
115699a2dd95SBruce Richardson 		rte_spinlock_unlock(&adapter->lock);
115799a2dd95SBruce Richardson 		rte_service_component_runstate_set(adapter->service_id,
115899a2dd95SBruce Richardson 				adapter->nb_qps);
115999a2dd95SBruce Richardson 	}
116099a2dd95SBruce Richardson 
116199a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
116299a2dd95SBruce Richardson 		queue_pair_id, ret);
116399a2dd95SBruce Richardson 	return ret;
116499a2dd95SBruce Richardson }
116599a2dd95SBruce Richardson 
116699a2dd95SBruce Richardson static int
116799a2dd95SBruce Richardson eca_adapter_ctrl(uint8_t id, int start)
116899a2dd95SBruce Richardson {
1169a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
117099a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
117199a2dd95SBruce Richardson 	struct rte_eventdev *dev;
117299a2dd95SBruce Richardson 	uint32_t i;
117399a2dd95SBruce Richardson 	int use_service;
117499a2dd95SBruce Richardson 	int stop = !start;
117599a2dd95SBruce Richardson 
117699a2dd95SBruce Richardson 	use_service = 0;
117799a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
117899a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
117999a2dd95SBruce Richardson 	if (adapter == NULL)
118099a2dd95SBruce Richardson 		return -EINVAL;
118199a2dd95SBruce Richardson 
118299a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
118399a2dd95SBruce Richardson 
118499a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
118599a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
118699a2dd95SBruce Richardson 		/* if start  check for num queue pairs */
118799a2dd95SBruce Richardson 		if (start && !dev_info->num_qpairs)
118899a2dd95SBruce Richardson 			continue;
118999a2dd95SBruce Richardson 		/* if stop check if dev has been started */
119099a2dd95SBruce Richardson 		if (stop && !dev_info->dev_started)
119199a2dd95SBruce Richardson 			continue;
119299a2dd95SBruce Richardson 		use_service |= !dev_info->internal_event_port;
119399a2dd95SBruce Richardson 		dev_info->dev_started = start;
119499a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0)
119599a2dd95SBruce Richardson 			continue;
119699a2dd95SBruce Richardson 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
119799a2dd95SBruce Richardson 						&dev_info->dev[i]) :
119899a2dd95SBruce Richardson 			(*dev->dev_ops->crypto_adapter_stop)(dev,
119999a2dd95SBruce Richardson 						&dev_info->dev[i]);
120099a2dd95SBruce Richardson 	}
120199a2dd95SBruce Richardson 
120299a2dd95SBruce Richardson 	if (use_service)
120399a2dd95SBruce Richardson 		rte_service_runstate_set(adapter->service_id, start);
120499a2dd95SBruce Richardson 
120599a2dd95SBruce Richardson 	return 0;
120699a2dd95SBruce Richardson }
120799a2dd95SBruce Richardson 
120899a2dd95SBruce Richardson int
120999a2dd95SBruce Richardson rte_event_crypto_adapter_start(uint8_t id)
121099a2dd95SBruce Richardson {
1211a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
121299a2dd95SBruce Richardson 
121399a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
121499a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
121599a2dd95SBruce Richardson 	if (adapter == NULL)
121699a2dd95SBruce Richardson 		return -EINVAL;
121799a2dd95SBruce Richardson 
121899a2dd95SBruce Richardson 	rte_eventdev_trace_crypto_adapter_start(id, adapter);
121999a2dd95SBruce Richardson 	return eca_adapter_ctrl(id, 1);
122099a2dd95SBruce Richardson }
122199a2dd95SBruce Richardson 
int
rte_event_crypto_adapter_stop(uint8_t id)
{
	/* Trace first; eca_adapter_ctrl() performs the id validation. */
	rte_eventdev_trace_crypto_adapter_stop(id);

	return eca_adapter_ctrl(id, 0);
}
122899a2dd95SBruce Richardson 
122999a2dd95SBruce Richardson int
123099a2dd95SBruce Richardson rte_event_crypto_adapter_stats_get(uint8_t id,
123199a2dd95SBruce Richardson 				struct rte_event_crypto_adapter_stats *stats)
123299a2dd95SBruce Richardson {
1233a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
123499a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
123599a2dd95SBruce Richardson 	struct rte_event_crypto_adapter_stats dev_stats;
123699a2dd95SBruce Richardson 	struct rte_eventdev *dev;
123799a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
123899a2dd95SBruce Richardson 	uint32_t i;
123999a2dd95SBruce Richardson 	int ret;
124099a2dd95SBruce Richardson 
124199a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
124299a2dd95SBruce Richardson 
124399a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
124499a2dd95SBruce Richardson 	if (adapter == NULL || stats == NULL)
124599a2dd95SBruce Richardson 		return -EINVAL;
124699a2dd95SBruce Richardson 
124799a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
124899a2dd95SBruce Richardson 	memset(stats, 0, sizeof(*stats));
124999a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
125099a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
125199a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
125299a2dd95SBruce Richardson 			dev->dev_ops->crypto_adapter_stats_get == NULL)
125399a2dd95SBruce Richardson 			continue;
125499a2dd95SBruce Richardson 		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
125599a2dd95SBruce Richardson 						dev_info->dev,
125699a2dd95SBruce Richardson 						&dev_stats);
125799a2dd95SBruce Richardson 		if (ret)
125899a2dd95SBruce Richardson 			continue;
125999a2dd95SBruce Richardson 
126099a2dd95SBruce Richardson 		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
126199a2dd95SBruce Richardson 		dev_stats_sum.event_enq_count +=
126299a2dd95SBruce Richardson 			dev_stats.event_enq_count;
126399a2dd95SBruce Richardson 	}
126499a2dd95SBruce Richardson 
126599a2dd95SBruce Richardson 	if (adapter->service_inited)
126699a2dd95SBruce Richardson 		*stats = adapter->crypto_stats;
126799a2dd95SBruce Richardson 
126899a2dd95SBruce Richardson 	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
126999a2dd95SBruce Richardson 	stats->event_enq_count += dev_stats_sum.event_enq_count;
127099a2dd95SBruce Richardson 
127199a2dd95SBruce Richardson 	return 0;
127299a2dd95SBruce Richardson }
127399a2dd95SBruce Richardson 
127499a2dd95SBruce Richardson int
127599a2dd95SBruce Richardson rte_event_crypto_adapter_stats_reset(uint8_t id)
127699a2dd95SBruce Richardson {
1277a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
127899a2dd95SBruce Richardson 	struct crypto_device_info *dev_info;
127999a2dd95SBruce Richardson 	struct rte_eventdev *dev;
128099a2dd95SBruce Richardson 	uint32_t i;
128199a2dd95SBruce Richardson 
128299a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
128399a2dd95SBruce Richardson 
128499a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
128599a2dd95SBruce Richardson 	if (adapter == NULL)
128699a2dd95SBruce Richardson 		return -EINVAL;
128799a2dd95SBruce Richardson 
128899a2dd95SBruce Richardson 	dev = &rte_eventdevs[adapter->eventdev_id];
128999a2dd95SBruce Richardson 	for (i = 0; i < rte_cryptodev_count(); i++) {
129099a2dd95SBruce Richardson 		dev_info = &adapter->cdevs[i];
129199a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
129299a2dd95SBruce Richardson 			dev->dev_ops->crypto_adapter_stats_reset == NULL)
129399a2dd95SBruce Richardson 			continue;
129499a2dd95SBruce Richardson 		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
129599a2dd95SBruce Richardson 						dev_info->dev);
129699a2dd95SBruce Richardson 	}
129799a2dd95SBruce Richardson 
129899a2dd95SBruce Richardson 	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
129999a2dd95SBruce Richardson 	return 0;
130099a2dd95SBruce Richardson }
130199a2dd95SBruce Richardson 
130299a2dd95SBruce Richardson int
130399a2dd95SBruce Richardson rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
130499a2dd95SBruce Richardson {
1305a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
130699a2dd95SBruce Richardson 
130799a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
130899a2dd95SBruce Richardson 
130999a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
131099a2dd95SBruce Richardson 	if (adapter == NULL || service_id == NULL)
131199a2dd95SBruce Richardson 		return -EINVAL;
131299a2dd95SBruce Richardson 
131399a2dd95SBruce Richardson 	if (adapter->service_inited)
131499a2dd95SBruce Richardson 		*service_id = adapter->service_id;
131599a2dd95SBruce Richardson 
131699a2dd95SBruce Richardson 	return adapter->service_inited ? 0 : -ESRCH;
131799a2dd95SBruce Richardson }
131899a2dd95SBruce Richardson 
131999a2dd95SBruce Richardson int
132099a2dd95SBruce Richardson rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
132199a2dd95SBruce Richardson {
1322a256a743SPavan Nikhilesh 	struct event_crypto_adapter *adapter;
132399a2dd95SBruce Richardson 
132499a2dd95SBruce Richardson 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
132599a2dd95SBruce Richardson 
132699a2dd95SBruce Richardson 	adapter = eca_id_to_adapter(id);
132799a2dd95SBruce Richardson 	if (adapter == NULL || event_port_id == NULL)
132899a2dd95SBruce Richardson 		return -EINVAL;
132999a2dd95SBruce Richardson 
133099a2dd95SBruce Richardson 	*event_port_id = adapter->event_port_id;
133199a2dd95SBruce Richardson 
133299a2dd95SBruce Richardson 	return 0;
133399a2dd95SBruce Richardson }
1334c1749bc5SVolodymyr Fialko 
1335c1749bc5SVolodymyr Fialko int
1336c1749bc5SVolodymyr Fialko rte_event_crypto_adapter_vector_limits_get(
1337c1749bc5SVolodymyr Fialko 	uint8_t dev_id, uint16_t cdev_id,
1338c1749bc5SVolodymyr Fialko 	struct rte_event_crypto_adapter_vector_limits *limits)
1339c1749bc5SVolodymyr Fialko {
1340c1749bc5SVolodymyr Fialko 	struct rte_cryptodev *cdev;
1341c1749bc5SVolodymyr Fialko 	struct rte_eventdev *dev;
1342c1749bc5SVolodymyr Fialko 	uint32_t cap;
1343c1749bc5SVolodymyr Fialko 	int ret;
1344c1749bc5SVolodymyr Fialko 
1345c1749bc5SVolodymyr Fialko 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1346c1749bc5SVolodymyr Fialko 
1347c1749bc5SVolodymyr Fialko 	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1348c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1349c1749bc5SVolodymyr Fialko 		return -EINVAL;
1350c1749bc5SVolodymyr Fialko 	}
1351c1749bc5SVolodymyr Fialko 
1352c1749bc5SVolodymyr Fialko 	if (limits == NULL) {
1353c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1354c1749bc5SVolodymyr Fialko 		return -EINVAL;
1355c1749bc5SVolodymyr Fialko 	}
1356c1749bc5SVolodymyr Fialko 
1357c1749bc5SVolodymyr Fialko 	dev = &rte_eventdevs[dev_id];
1358c1749bc5SVolodymyr Fialko 	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1359c1749bc5SVolodymyr Fialko 
1360c1749bc5SVolodymyr Fialko 	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1361c1749bc5SVolodymyr Fialko 	if (ret) {
1362c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1363c1749bc5SVolodymyr Fialko 				 "cdev %" PRIu16, dev_id, cdev_id);
1364c1749bc5SVolodymyr Fialko 		return ret;
1365c1749bc5SVolodymyr Fialko 	}
1366c1749bc5SVolodymyr Fialko 
1367c1749bc5SVolodymyr Fialko 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1368c1749bc5SVolodymyr Fialko 		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1369c1749bc5SVolodymyr Fialko 				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
1370c1749bc5SVolodymyr Fialko 		return -ENOTSUP;
1371c1749bc5SVolodymyr Fialko 	}
1372c1749bc5SVolodymyr Fialko 
1373c1749bc5SVolodymyr Fialko 	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1374c1749bc5SVolodymyr Fialko 		return -ENOTSUP;
1375c1749bc5SVolodymyr Fialko 
1376c1749bc5SVolodymyr Fialko 	return dev->dev_ops->crypto_adapter_vector_limits_get(
1377c1749bc5SVolodymyr Fialko 		dev, cdev, limits);
1378c1749bc5SVolodymyr Fialko }
1379