/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

/* Max events dequeued from the event device in one burst */
#define BATCH_SIZE 32
/* Default limit of crypto ops processed per service invocation */
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Per-queue-pair circular buffer holds up to two bursts of pending ops */
#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
/* Size of the adapter-level circular buffer (eventdev side) */
#define CRYPTO_ADAPTER_BUFFER_SZ 1024

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

/* FIFO of rte_crypto_op pointers, used to batch ops before enqueuing
 * them to a cryptodev queue pair or back to the event device.
 */
struct crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
} __rte_cache_aligned;

/* Per-instance state of an event crypto adapter */
struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Flag to indicate backpressure at cryptodev
	 * Stop further dequeuing events from eventdev
	 */
	bool stop_enq_to_cryptodev;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Circular buffer for batching crypto ops to eventdev */
	struct crypto_ops_circular_buffer ebuf;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Circular buffer for batching crypto ops to cdev */
	struct crypto_ops_circular_buffer cbuf;
} __rte_cache_aligned;

/* Array of adapter instances, indexed by adapter id; backed by a memzone
 * so secondary processes can attach to the same array.
 */
static struct event_crypto_adapter **event_crypto_adapter;
12399a2dd95SBruce Richardson 12499a2dd95SBruce Richardson /* Macros to check for valid adapter */ 12599a2dd95SBruce Richardson #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ 12699a2dd95SBruce Richardson if (!eca_valid_id(id)) { \ 12799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \ 12899a2dd95SBruce Richardson return retval; \ 12999a2dd95SBruce Richardson } \ 13099a2dd95SBruce Richardson } while (0) 13199a2dd95SBruce Richardson 13299a2dd95SBruce Richardson static inline int 13399a2dd95SBruce Richardson eca_valid_id(uint8_t id) 13499a2dd95SBruce Richardson { 13599a2dd95SBruce Richardson return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE; 13699a2dd95SBruce Richardson } 13799a2dd95SBruce Richardson 13899a2dd95SBruce Richardson static int 13999a2dd95SBruce Richardson eca_init(void) 14099a2dd95SBruce Richardson { 14199a2dd95SBruce Richardson const char *name = "crypto_adapter_array"; 14299a2dd95SBruce Richardson const struct rte_memzone *mz; 14399a2dd95SBruce Richardson unsigned int sz; 14499a2dd95SBruce Richardson 14599a2dd95SBruce Richardson sz = sizeof(*event_crypto_adapter) * 14699a2dd95SBruce Richardson RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE; 14799a2dd95SBruce Richardson sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE); 14899a2dd95SBruce Richardson 14999a2dd95SBruce Richardson mz = rte_memzone_lookup(name); 15099a2dd95SBruce Richardson if (mz == NULL) { 15199a2dd95SBruce Richardson mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0, 15299a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 15399a2dd95SBruce Richardson if (mz == NULL) { 15499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to reserve memzone err = %" 15599a2dd95SBruce Richardson PRId32, rte_errno); 15699a2dd95SBruce Richardson return -rte_errno; 15799a2dd95SBruce Richardson } 15899a2dd95SBruce Richardson } 15999a2dd95SBruce Richardson 16099a2dd95SBruce Richardson event_crypto_adapter = mz->addr; 16199a2dd95SBruce Richardson return 0; 
16299a2dd95SBruce Richardson } 16399a2dd95SBruce Richardson 164*2ae84b39SGanapati Kundapura static inline bool 165*2ae84b39SGanapati Kundapura eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp) 166*2ae84b39SGanapati Kundapura { 167*2ae84b39SGanapati Kundapura return bufp->count >= BATCH_SIZE; 168*2ae84b39SGanapati Kundapura } 169*2ae84b39SGanapati Kundapura 170*2ae84b39SGanapati Kundapura static inline bool 171*2ae84b39SGanapati Kundapura eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp) 172*2ae84b39SGanapati Kundapura { 173*2ae84b39SGanapati Kundapura return (bufp->size - bufp->count) >= BATCH_SIZE; 174*2ae84b39SGanapati Kundapura } 175*2ae84b39SGanapati Kundapura 176*2ae84b39SGanapati Kundapura static inline void 177*2ae84b39SGanapati Kundapura eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp) 178*2ae84b39SGanapati Kundapura { 179*2ae84b39SGanapati Kundapura rte_free(bufp->op_buffer); 180*2ae84b39SGanapati Kundapura } 181*2ae84b39SGanapati Kundapura 182*2ae84b39SGanapati Kundapura static inline int 183*2ae84b39SGanapati Kundapura eca_circular_buffer_init(const char *name, 184*2ae84b39SGanapati Kundapura struct crypto_ops_circular_buffer *bufp, 185*2ae84b39SGanapati Kundapura uint16_t sz) 186*2ae84b39SGanapati Kundapura { 187*2ae84b39SGanapati Kundapura bufp->op_buffer = rte_zmalloc(name, 188*2ae84b39SGanapati Kundapura sizeof(struct rte_crypto_op *) * sz, 189*2ae84b39SGanapati Kundapura 0); 190*2ae84b39SGanapati Kundapura if (bufp->op_buffer == NULL) 191*2ae84b39SGanapati Kundapura return -ENOMEM; 192*2ae84b39SGanapati Kundapura 193*2ae84b39SGanapati Kundapura bufp->size = sz; 194*2ae84b39SGanapati Kundapura return 0; 195*2ae84b39SGanapati Kundapura } 196*2ae84b39SGanapati Kundapura 197*2ae84b39SGanapati Kundapura static inline int 198*2ae84b39SGanapati Kundapura eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp, 199*2ae84b39SGanapati Kundapura struct rte_crypto_op *op) 
200*2ae84b39SGanapati Kundapura { 201*2ae84b39SGanapati Kundapura uint16_t *tailp = &bufp->tail; 202*2ae84b39SGanapati Kundapura 203*2ae84b39SGanapati Kundapura bufp->op_buffer[*tailp] = op; 204*2ae84b39SGanapati Kundapura /* circular buffer, go round */ 205*2ae84b39SGanapati Kundapura *tailp = (*tailp + 1) % bufp->size; 206*2ae84b39SGanapati Kundapura bufp->count++; 207*2ae84b39SGanapati Kundapura 208*2ae84b39SGanapati Kundapura return 0; 209*2ae84b39SGanapati Kundapura } 210*2ae84b39SGanapati Kundapura 211*2ae84b39SGanapati Kundapura static inline int 212*2ae84b39SGanapati Kundapura eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, 213*2ae84b39SGanapati Kundapura uint8_t cdev_id, uint16_t qp_id, 214*2ae84b39SGanapati Kundapura uint16_t *nb_ops_flushed) 215*2ae84b39SGanapati Kundapura { 216*2ae84b39SGanapati Kundapura uint16_t n = 0; 217*2ae84b39SGanapati Kundapura uint16_t *headp = &bufp->head; 218*2ae84b39SGanapati Kundapura uint16_t *tailp = &bufp->tail; 219*2ae84b39SGanapati Kundapura struct rte_crypto_op **ops = bufp->op_buffer; 220*2ae84b39SGanapati Kundapura 221*2ae84b39SGanapati Kundapura if (*tailp > *headp) 222*2ae84b39SGanapati Kundapura n = *tailp - *headp; 223*2ae84b39SGanapati Kundapura else if (*tailp < *headp) 224*2ae84b39SGanapati Kundapura n = bufp->size - *headp; 225*2ae84b39SGanapati Kundapura else { 226*2ae84b39SGanapati Kundapura *nb_ops_flushed = 0; 227*2ae84b39SGanapati Kundapura return 0; /* buffer empty */ 228*2ae84b39SGanapati Kundapura } 229*2ae84b39SGanapati Kundapura 230*2ae84b39SGanapati Kundapura *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, 231*2ae84b39SGanapati Kundapura &ops[*headp], n); 232*2ae84b39SGanapati Kundapura bufp->count -= *nb_ops_flushed; 233*2ae84b39SGanapati Kundapura if (!bufp->count) { 234*2ae84b39SGanapati Kundapura *headp = 0; 235*2ae84b39SGanapati Kundapura *tailp = 0; 236*2ae84b39SGanapati Kundapura } else 237*2ae84b39SGanapati Kundapura *headp = (*headp + 
*nb_ops_flushed) % bufp->size; 238*2ae84b39SGanapati Kundapura 239*2ae84b39SGanapati Kundapura return *nb_ops_flushed == n ? 0 : -1; 240*2ae84b39SGanapati Kundapura } 241*2ae84b39SGanapati Kundapura 242a256a743SPavan Nikhilesh static inline struct event_crypto_adapter * 24399a2dd95SBruce Richardson eca_id_to_adapter(uint8_t id) 24499a2dd95SBruce Richardson { 24599a2dd95SBruce Richardson return event_crypto_adapter ? 24699a2dd95SBruce Richardson event_crypto_adapter[id] : NULL; 24799a2dd95SBruce Richardson } 24899a2dd95SBruce Richardson 24999a2dd95SBruce Richardson static int 25099a2dd95SBruce Richardson eca_default_config_cb(uint8_t id, uint8_t dev_id, 25199a2dd95SBruce Richardson struct rte_event_crypto_adapter_conf *conf, void *arg) 25299a2dd95SBruce Richardson { 25399a2dd95SBruce Richardson struct rte_event_dev_config dev_conf; 25499a2dd95SBruce Richardson struct rte_eventdev *dev; 25599a2dd95SBruce Richardson uint8_t port_id; 25699a2dd95SBruce Richardson int started; 25799a2dd95SBruce Richardson int ret; 25899a2dd95SBruce Richardson struct rte_event_port_conf *port_conf = arg; 259a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter = eca_id_to_adapter(id); 26099a2dd95SBruce Richardson 26199a2dd95SBruce Richardson if (adapter == NULL) 26299a2dd95SBruce Richardson return -EINVAL; 26399a2dd95SBruce Richardson 26499a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 26599a2dd95SBruce Richardson dev_conf = dev->data->dev_conf; 26699a2dd95SBruce Richardson 26799a2dd95SBruce Richardson started = dev->data->dev_started; 26899a2dd95SBruce Richardson if (started) 26999a2dd95SBruce Richardson rte_event_dev_stop(dev_id); 27099a2dd95SBruce Richardson port_id = dev_conf.nb_event_ports; 27199a2dd95SBruce Richardson dev_conf.nb_event_ports += 1; 27299a2dd95SBruce Richardson ret = rte_event_dev_configure(dev_id, &dev_conf); 27399a2dd95SBruce Richardson if (ret) { 27499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", 
dev_id); 27599a2dd95SBruce Richardson if (started) { 27699a2dd95SBruce Richardson if (rte_event_dev_start(dev_id)) 27799a2dd95SBruce Richardson return -EIO; 27899a2dd95SBruce Richardson } 27999a2dd95SBruce Richardson return ret; 28099a2dd95SBruce Richardson } 28199a2dd95SBruce Richardson 28299a2dd95SBruce Richardson ret = rte_event_port_setup(dev_id, port_id, port_conf); 28399a2dd95SBruce Richardson if (ret) { 28499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id); 28599a2dd95SBruce Richardson return ret; 28699a2dd95SBruce Richardson } 28799a2dd95SBruce Richardson 28899a2dd95SBruce Richardson conf->event_port_id = port_id; 28999a2dd95SBruce Richardson conf->max_nb = DEFAULT_MAX_NB; 29099a2dd95SBruce Richardson if (started) 29199a2dd95SBruce Richardson ret = rte_event_dev_start(dev_id); 29299a2dd95SBruce Richardson 29399a2dd95SBruce Richardson adapter->default_cb_arg = 1; 29499a2dd95SBruce Richardson return ret; 29599a2dd95SBruce Richardson } 29699a2dd95SBruce Richardson 29799a2dd95SBruce Richardson int 29899a2dd95SBruce Richardson rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id, 29999a2dd95SBruce Richardson rte_event_crypto_adapter_conf_cb conf_cb, 30099a2dd95SBruce Richardson enum rte_event_crypto_adapter_mode mode, 30199a2dd95SBruce Richardson void *conf_arg) 30299a2dd95SBruce Richardson { 303a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 30499a2dd95SBruce Richardson char mem_name[CRYPTO_ADAPTER_NAME_LEN]; 30599a2dd95SBruce Richardson struct rte_event_dev_info dev_info; 30699a2dd95SBruce Richardson int socket_id; 30799a2dd95SBruce Richardson uint8_t i; 30899a2dd95SBruce Richardson int ret; 30999a2dd95SBruce Richardson 31099a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 31199a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 31299a2dd95SBruce Richardson if (conf_cb == NULL) 31399a2dd95SBruce Richardson return -EINVAL; 31499a2dd95SBruce 
Richardson 31599a2dd95SBruce Richardson if (event_crypto_adapter == NULL) { 31699a2dd95SBruce Richardson ret = eca_init(); 31799a2dd95SBruce Richardson if (ret) 31899a2dd95SBruce Richardson return ret; 31999a2dd95SBruce Richardson } 32099a2dd95SBruce Richardson 32199a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 32299a2dd95SBruce Richardson if (adapter != NULL) { 32399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id); 32499a2dd95SBruce Richardson return -EEXIST; 32599a2dd95SBruce Richardson } 32699a2dd95SBruce Richardson 32799a2dd95SBruce Richardson socket_id = rte_event_dev_socket_id(dev_id); 32899a2dd95SBruce Richardson snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN, 32999a2dd95SBruce Richardson "rte_event_crypto_adapter_%d", id); 33099a2dd95SBruce Richardson 33199a2dd95SBruce Richardson adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter), 33299a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, socket_id); 33399a2dd95SBruce Richardson if (adapter == NULL) { 33499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!"); 33599a2dd95SBruce Richardson return -ENOMEM; 33699a2dd95SBruce Richardson } 33799a2dd95SBruce Richardson 338*2ae84b39SGanapati Kundapura if (eca_circular_buffer_init("eca_edev_circular_buffer", 339*2ae84b39SGanapati Kundapura &adapter->ebuf, 340*2ae84b39SGanapati Kundapura CRYPTO_ADAPTER_BUFFER_SZ)) { 341*2ae84b39SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer"); 342*2ae84b39SGanapati Kundapura rte_free(adapter); 343*2ae84b39SGanapati Kundapura return -ENOMEM; 344*2ae84b39SGanapati Kundapura } 345*2ae84b39SGanapati Kundapura 34699a2dd95SBruce Richardson ret = rte_event_dev_info_get(dev_id, &dev_info); 34799a2dd95SBruce Richardson if (ret < 0) { 34899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!", 34999a2dd95SBruce Richardson dev_id, dev_info.driver_name); 350*2ae84b39SGanapati Kundapura 
eca_circular_buffer_free(&adapter->ebuf); 35199a2dd95SBruce Richardson rte_free(adapter); 35299a2dd95SBruce Richardson return ret; 35399a2dd95SBruce Richardson } 35499a2dd95SBruce Richardson 35599a2dd95SBruce Richardson adapter->implicit_release_disabled = (dev_info.event_dev_cap & 35699a2dd95SBruce Richardson RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE); 35799a2dd95SBruce Richardson adapter->eventdev_id = dev_id; 35899a2dd95SBruce Richardson adapter->socket_id = socket_id; 35999a2dd95SBruce Richardson adapter->conf_cb = conf_cb; 36099a2dd95SBruce Richardson adapter->conf_arg = conf_arg; 36199a2dd95SBruce Richardson adapter->mode = mode; 36299a2dd95SBruce Richardson strcpy(adapter->mem_name, mem_name); 36399a2dd95SBruce Richardson adapter->cdevs = rte_zmalloc_socket(adapter->mem_name, 36499a2dd95SBruce Richardson rte_cryptodev_count() * 36599a2dd95SBruce Richardson sizeof(struct crypto_device_info), 0, 36699a2dd95SBruce Richardson socket_id); 36799a2dd95SBruce Richardson if (adapter->cdevs == NULL) { 36899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n"); 369*2ae84b39SGanapati Kundapura eca_circular_buffer_free(&adapter->ebuf); 37099a2dd95SBruce Richardson rte_free(adapter); 37199a2dd95SBruce Richardson return -ENOMEM; 37299a2dd95SBruce Richardson } 37399a2dd95SBruce Richardson 37499a2dd95SBruce Richardson rte_spinlock_init(&adapter->lock); 37599a2dd95SBruce Richardson for (i = 0; i < rte_cryptodev_count(); i++) 37699a2dd95SBruce Richardson adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i); 37799a2dd95SBruce Richardson 37899a2dd95SBruce Richardson event_crypto_adapter[id] = adapter; 37999a2dd95SBruce Richardson 38099a2dd95SBruce Richardson rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg, 38199a2dd95SBruce Richardson mode); 38299a2dd95SBruce Richardson return 0; 38399a2dd95SBruce Richardson } 38499a2dd95SBruce Richardson 38599a2dd95SBruce Richardson 38699a2dd95SBruce Richardson int 38799a2dd95SBruce 
Richardson rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id, 38899a2dd95SBruce Richardson struct rte_event_port_conf *port_config, 38999a2dd95SBruce Richardson enum rte_event_crypto_adapter_mode mode) 39099a2dd95SBruce Richardson { 39199a2dd95SBruce Richardson struct rte_event_port_conf *pc; 39299a2dd95SBruce Richardson int ret; 39399a2dd95SBruce Richardson 39499a2dd95SBruce Richardson if (port_config == NULL) 39599a2dd95SBruce Richardson return -EINVAL; 39699a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 39799a2dd95SBruce Richardson 39899a2dd95SBruce Richardson pc = rte_malloc(NULL, sizeof(*pc), 0); 39999a2dd95SBruce Richardson if (pc == NULL) 40099a2dd95SBruce Richardson return -ENOMEM; 40199a2dd95SBruce Richardson *pc = *port_config; 40299a2dd95SBruce Richardson ret = rte_event_crypto_adapter_create_ext(id, dev_id, 40399a2dd95SBruce Richardson eca_default_config_cb, 40499a2dd95SBruce Richardson mode, 40599a2dd95SBruce Richardson pc); 40699a2dd95SBruce Richardson if (ret) 40799a2dd95SBruce Richardson rte_free(pc); 40899a2dd95SBruce Richardson 40999a2dd95SBruce Richardson return ret; 41099a2dd95SBruce Richardson } 41199a2dd95SBruce Richardson 41299a2dd95SBruce Richardson int 41399a2dd95SBruce Richardson rte_event_crypto_adapter_free(uint8_t id) 41499a2dd95SBruce Richardson { 415a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 41699a2dd95SBruce Richardson 41799a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 41899a2dd95SBruce Richardson 41999a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 42099a2dd95SBruce Richardson if (adapter == NULL) 42199a2dd95SBruce Richardson return -EINVAL; 42299a2dd95SBruce Richardson 42399a2dd95SBruce Richardson if (adapter->nb_qps) { 42499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted", 42599a2dd95SBruce Richardson adapter->nb_qps); 42699a2dd95SBruce Richardson return -EBUSY; 42799a2dd95SBruce Richardson } 
42899a2dd95SBruce Richardson 42999a2dd95SBruce Richardson rte_eventdev_trace_crypto_adapter_free(id, adapter); 43099a2dd95SBruce Richardson if (adapter->default_cb_arg) 43199a2dd95SBruce Richardson rte_free(adapter->conf_arg); 43299a2dd95SBruce Richardson rte_free(adapter->cdevs); 43399a2dd95SBruce Richardson rte_free(adapter); 43499a2dd95SBruce Richardson event_crypto_adapter[id] = NULL; 43599a2dd95SBruce Richardson 43699a2dd95SBruce Richardson return 0; 43799a2dd95SBruce Richardson } 43899a2dd95SBruce Richardson 43999a2dd95SBruce Richardson static inline unsigned int 440a256a743SPavan Nikhilesh eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, 441a256a743SPavan Nikhilesh unsigned int cnt) 44299a2dd95SBruce Richardson { 44399a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats; 44499a2dd95SBruce Richardson union rte_event_crypto_metadata *m_data = NULL; 44599a2dd95SBruce Richardson struct crypto_queue_pair_info *qp_info = NULL; 44699a2dd95SBruce Richardson struct rte_crypto_op *crypto_op; 44799a2dd95SBruce Richardson unsigned int i, n; 448*2ae84b39SGanapati Kundapura uint16_t qp_id, nb_enqueued = 0; 44999a2dd95SBruce Richardson uint8_t cdev_id; 450*2ae84b39SGanapati Kundapura int ret; 45199a2dd95SBruce Richardson 45299a2dd95SBruce Richardson ret = 0; 45399a2dd95SBruce Richardson n = 0; 45499a2dd95SBruce Richardson stats->event_deq_count += cnt; 45599a2dd95SBruce Richardson 45699a2dd95SBruce Richardson for (i = 0; i < cnt; i++) { 45799a2dd95SBruce Richardson crypto_op = ev[i].event_ptr; 45899a2dd95SBruce Richardson if (crypto_op == NULL) 45999a2dd95SBruce Richardson continue; 46099a2dd95SBruce Richardson if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { 46199a2dd95SBruce Richardson m_data = rte_cryptodev_sym_session_get_user_data( 46299a2dd95SBruce Richardson crypto_op->sym->session); 46399a2dd95SBruce Richardson if (m_data == NULL) { 46499a2dd95SBruce Richardson 
rte_pktmbuf_free(crypto_op->sym->m_src); 46599a2dd95SBruce Richardson rte_crypto_op_free(crypto_op); 46699a2dd95SBruce Richardson continue; 46799a2dd95SBruce Richardson } 46899a2dd95SBruce Richardson 46999a2dd95SBruce Richardson cdev_id = m_data->request_info.cdev_id; 47099a2dd95SBruce Richardson qp_id = m_data->request_info.queue_pair_id; 47199a2dd95SBruce Richardson qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id]; 47299a2dd95SBruce Richardson if (!qp_info->qp_enabled) { 47399a2dd95SBruce Richardson rte_pktmbuf_free(crypto_op->sym->m_src); 47499a2dd95SBruce Richardson rte_crypto_op_free(crypto_op); 47599a2dd95SBruce Richardson continue; 47699a2dd95SBruce Richardson } 477*2ae84b39SGanapati Kundapura eca_circular_buffer_add(&qp_info->cbuf, crypto_op); 47899a2dd95SBruce Richardson } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS && 47999a2dd95SBruce Richardson crypto_op->private_data_offset) { 48099a2dd95SBruce Richardson m_data = (union rte_event_crypto_metadata *) 48199a2dd95SBruce Richardson ((uint8_t *)crypto_op + 48299a2dd95SBruce Richardson crypto_op->private_data_offset); 48399a2dd95SBruce Richardson cdev_id = m_data->request_info.cdev_id; 48499a2dd95SBruce Richardson qp_id = m_data->request_info.queue_pair_id; 48599a2dd95SBruce Richardson qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id]; 48699a2dd95SBruce Richardson if (!qp_info->qp_enabled) { 48799a2dd95SBruce Richardson rte_pktmbuf_free(crypto_op->sym->m_src); 48899a2dd95SBruce Richardson rte_crypto_op_free(crypto_op); 48999a2dd95SBruce Richardson continue; 49099a2dd95SBruce Richardson } 491*2ae84b39SGanapati Kundapura eca_circular_buffer_add(&qp_info->cbuf, crypto_op); 49299a2dd95SBruce Richardson } else { 49399a2dd95SBruce Richardson rte_pktmbuf_free(crypto_op->sym->m_src); 49499a2dd95SBruce Richardson rte_crypto_op_free(crypto_op); 49599a2dd95SBruce Richardson continue; 49699a2dd95SBruce Richardson } 49799a2dd95SBruce Richardson 498*2ae84b39SGanapati Kundapura if 
(eca_circular_buffer_batch_ready(&qp_info->cbuf)) { 499*2ae84b39SGanapati Kundapura ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf, 500*2ae84b39SGanapati Kundapura cdev_id, 50199a2dd95SBruce Richardson qp_id, 502*2ae84b39SGanapati Kundapura &nb_enqueued); 503*2ae84b39SGanapati Kundapura /** 504*2ae84b39SGanapati Kundapura * If some crypto ops failed to flush to cdev and 505*2ae84b39SGanapati Kundapura * space for another batch is not available, stop 506*2ae84b39SGanapati Kundapura * dequeue from eventdev momentarily 507*2ae84b39SGanapati Kundapura */ 508*2ae84b39SGanapati Kundapura if (unlikely(ret < 0 && 509*2ae84b39SGanapati Kundapura !eca_circular_buffer_space_for_batch( 510*2ae84b39SGanapati Kundapura &qp_info->cbuf))) 511*2ae84b39SGanapati Kundapura adapter->stop_enq_to_cryptodev = true; 51299a2dd95SBruce Richardson } 51399a2dd95SBruce Richardson 514*2ae84b39SGanapati Kundapura stats->crypto_enq_count += nb_enqueued; 515*2ae84b39SGanapati Kundapura n += nb_enqueued; 51699a2dd95SBruce Richardson } 51799a2dd95SBruce Richardson 51899a2dd95SBruce Richardson return n; 51999a2dd95SBruce Richardson } 52099a2dd95SBruce Richardson 52199a2dd95SBruce Richardson static unsigned int 522*2ae84b39SGanapati Kundapura eca_crypto_cdev_flush(struct event_crypto_adapter *adapter, 523*2ae84b39SGanapati Kundapura uint8_t cdev_id, uint16_t *nb_ops_flushed) 52499a2dd95SBruce Richardson { 52599a2dd95SBruce Richardson struct crypto_device_info *curr_dev; 52699a2dd95SBruce Richardson struct crypto_queue_pair_info *curr_queue; 52799a2dd95SBruce Richardson struct rte_cryptodev *dev; 528*2ae84b39SGanapati Kundapura uint16_t nb = 0, nb_enqueued = 0; 52999a2dd95SBruce Richardson uint16_t qp; 53099a2dd95SBruce Richardson 53199a2dd95SBruce Richardson curr_dev = &adapter->cdevs[cdev_id]; 532*2ae84b39SGanapati Kundapura if (unlikely(curr_dev == NULL)) 533*2ae84b39SGanapati Kundapura return 0; 534*2ae84b39SGanapati Kundapura 535*2ae84b39SGanapati Kundapura dev = 
rte_cryptodev_pmd_get_dev(cdev_id); 53699a2dd95SBruce Richardson for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) { 53799a2dd95SBruce Richardson 53899a2dd95SBruce Richardson curr_queue = &curr_dev->qpairs[qp]; 539*2ae84b39SGanapati Kundapura if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled)) 54099a2dd95SBruce Richardson continue; 54199a2dd95SBruce Richardson 542*2ae84b39SGanapati Kundapura eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf, 543*2ae84b39SGanapati Kundapura cdev_id, 54499a2dd95SBruce Richardson qp, 545*2ae84b39SGanapati Kundapura &nb_enqueued); 546*2ae84b39SGanapati Kundapura *nb_ops_flushed += curr_queue->cbuf.count; 547*2ae84b39SGanapati Kundapura nb += nb_enqueued; 54899a2dd95SBruce Richardson } 54999a2dd95SBruce Richardson 550*2ae84b39SGanapati Kundapura return nb; 551*2ae84b39SGanapati Kundapura } 552*2ae84b39SGanapati Kundapura 553*2ae84b39SGanapati Kundapura static unsigned int 554*2ae84b39SGanapati Kundapura eca_crypto_enq_flush(struct event_crypto_adapter *adapter) 555*2ae84b39SGanapati Kundapura { 556*2ae84b39SGanapati Kundapura struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats; 557*2ae84b39SGanapati Kundapura uint8_t cdev_id; 558*2ae84b39SGanapati Kundapura uint16_t nb_enqueued = 0; 559*2ae84b39SGanapati Kundapura uint16_t nb_ops_flushed = 0; 560*2ae84b39SGanapati Kundapura uint16_t num_cdev = rte_cryptodev_count(); 561*2ae84b39SGanapati Kundapura 562*2ae84b39SGanapati Kundapura for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) 563*2ae84b39SGanapati Kundapura nb_enqueued += eca_crypto_cdev_flush(adapter, 564*2ae84b39SGanapati Kundapura cdev_id, 565*2ae84b39SGanapati Kundapura &nb_ops_flushed); 566*2ae84b39SGanapati Kundapura /** 567*2ae84b39SGanapati Kundapura * Enable dequeue from eventdev if all ops from circular 568*2ae84b39SGanapati Kundapura * buffer flushed to cdev 569*2ae84b39SGanapati Kundapura */ 570*2ae84b39SGanapati Kundapura if (!nb_ops_flushed) 571*2ae84b39SGanapati Kundapura 
adapter->stop_enq_to_cryptodev = false; 572*2ae84b39SGanapati Kundapura 573*2ae84b39SGanapati Kundapura stats->crypto_enq_count += nb_enqueued; 574*2ae84b39SGanapati Kundapura 575*2ae84b39SGanapati Kundapura return nb_enqueued; 57699a2dd95SBruce Richardson } 57799a2dd95SBruce Richardson 57899a2dd95SBruce Richardson static int 579a256a743SPavan Nikhilesh eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, 58099a2dd95SBruce Richardson unsigned int max_enq) 58199a2dd95SBruce Richardson { 58299a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats; 58399a2dd95SBruce Richardson struct rte_event ev[BATCH_SIZE]; 58499a2dd95SBruce Richardson unsigned int nb_enq, nb_enqueued; 58599a2dd95SBruce Richardson uint16_t n; 58699a2dd95SBruce Richardson uint8_t event_dev_id = adapter->eventdev_id; 58799a2dd95SBruce Richardson uint8_t event_port_id = adapter->event_port_id; 58899a2dd95SBruce Richardson 58999a2dd95SBruce Richardson nb_enqueued = 0; 59099a2dd95SBruce Richardson if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) 59199a2dd95SBruce Richardson return 0; 59299a2dd95SBruce Richardson 593*2ae84b39SGanapati Kundapura if (unlikely(adapter->stop_enq_to_cryptodev)) { 594*2ae84b39SGanapati Kundapura nb_enqueued += eca_crypto_enq_flush(adapter); 595*2ae84b39SGanapati Kundapura 596*2ae84b39SGanapati Kundapura if (unlikely(adapter->stop_enq_to_cryptodev)) 597*2ae84b39SGanapati Kundapura goto skip_event_dequeue_burst; 598*2ae84b39SGanapati Kundapura } 599*2ae84b39SGanapati Kundapura 60099a2dd95SBruce Richardson for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) { 60199a2dd95SBruce Richardson stats->event_poll_count++; 60299a2dd95SBruce Richardson n = rte_event_dequeue_burst(event_dev_id, 60399a2dd95SBruce Richardson event_port_id, ev, BATCH_SIZE, 0); 60499a2dd95SBruce Richardson 60599a2dd95SBruce Richardson if (!n) 60699a2dd95SBruce Richardson break; 60799a2dd95SBruce Richardson 60899a2dd95SBruce Richardson nb_enqueued += 
eca_enq_to_cryptodev(adapter, ev, n); 60999a2dd95SBruce Richardson } 61099a2dd95SBruce Richardson 611*2ae84b39SGanapati Kundapura skip_event_dequeue_burst: 612*2ae84b39SGanapati Kundapura 61399a2dd95SBruce Richardson if ((++adapter->transmit_loop_count & 61499a2dd95SBruce Richardson (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) { 61599a2dd95SBruce Richardson nb_enqueued += eca_crypto_enq_flush(adapter); 61699a2dd95SBruce Richardson } 61799a2dd95SBruce Richardson 61899a2dd95SBruce Richardson return nb_enqueued; 61999a2dd95SBruce Richardson } 62099a2dd95SBruce Richardson 621*2ae84b39SGanapati Kundapura static inline uint16_t 622a256a743SPavan Nikhilesh eca_ops_enqueue_burst(struct event_crypto_adapter *adapter, 62399a2dd95SBruce Richardson struct rte_crypto_op **ops, uint16_t num) 62499a2dd95SBruce Richardson { 62599a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats; 62699a2dd95SBruce Richardson union rte_event_crypto_metadata *m_data = NULL; 62799a2dd95SBruce Richardson uint8_t event_dev_id = adapter->eventdev_id; 62899a2dd95SBruce Richardson uint8_t event_port_id = adapter->event_port_id; 62999a2dd95SBruce Richardson struct rte_event events[BATCH_SIZE]; 63099a2dd95SBruce Richardson uint16_t nb_enqueued, nb_ev; 63199a2dd95SBruce Richardson uint8_t retry; 63299a2dd95SBruce Richardson uint8_t i; 63399a2dd95SBruce Richardson 63499a2dd95SBruce Richardson nb_ev = 0; 63599a2dd95SBruce Richardson retry = 0; 63699a2dd95SBruce Richardson nb_enqueued = 0; 63799a2dd95SBruce Richardson num = RTE_MIN(num, BATCH_SIZE); 63899a2dd95SBruce Richardson for (i = 0; i < num; i++) { 63999a2dd95SBruce Richardson struct rte_event *ev = &events[nb_ev++]; 640*2ae84b39SGanapati Kundapura 641*2ae84b39SGanapati Kundapura m_data = NULL; 64299a2dd95SBruce Richardson if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { 64399a2dd95SBruce Richardson m_data = rte_cryptodev_sym_session_get_user_data( 64499a2dd95SBruce Richardson ops[i]->sym->session); 
64599a2dd95SBruce Richardson } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS && 64699a2dd95SBruce Richardson ops[i]->private_data_offset) { 64799a2dd95SBruce Richardson m_data = (union rte_event_crypto_metadata *) 64899a2dd95SBruce Richardson ((uint8_t *)ops[i] + 64999a2dd95SBruce Richardson ops[i]->private_data_offset); 65099a2dd95SBruce Richardson } 65199a2dd95SBruce Richardson 65299a2dd95SBruce Richardson if (unlikely(m_data == NULL)) { 65399a2dd95SBruce Richardson rte_pktmbuf_free(ops[i]->sym->m_src); 65499a2dd95SBruce Richardson rte_crypto_op_free(ops[i]); 65599a2dd95SBruce Richardson continue; 65699a2dd95SBruce Richardson } 65799a2dd95SBruce Richardson 65899a2dd95SBruce Richardson rte_memcpy(ev, &m_data->response_info, sizeof(*ev)); 65999a2dd95SBruce Richardson ev->event_ptr = ops[i]; 66099a2dd95SBruce Richardson ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 66199a2dd95SBruce Richardson if (adapter->implicit_release_disabled) 66299a2dd95SBruce Richardson ev->op = RTE_EVENT_OP_FORWARD; 66399a2dd95SBruce Richardson else 66499a2dd95SBruce Richardson ev->op = RTE_EVENT_OP_NEW; 66599a2dd95SBruce Richardson } 66699a2dd95SBruce Richardson 66799a2dd95SBruce Richardson do { 66899a2dd95SBruce Richardson nb_enqueued += rte_event_enqueue_burst(event_dev_id, 66999a2dd95SBruce Richardson event_port_id, 67099a2dd95SBruce Richardson &events[nb_enqueued], 67199a2dd95SBruce Richardson nb_ev - nb_enqueued); 672*2ae84b39SGanapati Kundapura 67399a2dd95SBruce Richardson } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES && 67499a2dd95SBruce Richardson nb_enqueued < nb_ev); 67599a2dd95SBruce Richardson 67699a2dd95SBruce Richardson stats->event_enq_fail_count += nb_ev - nb_enqueued; 67799a2dd95SBruce Richardson stats->event_enq_count += nb_enqueued; 67899a2dd95SBruce Richardson stats->event_enq_retry_count += retry - 1; 679*2ae84b39SGanapati Kundapura 680*2ae84b39SGanapati Kundapura return nb_enqueued; 68199a2dd95SBruce Richardson } 68299a2dd95SBruce Richardson 
683*2ae84b39SGanapati Kundapura static int 684*2ae84b39SGanapati Kundapura eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter, 685*2ae84b39SGanapati Kundapura struct crypto_ops_circular_buffer *bufp) 686*2ae84b39SGanapati Kundapura { 687*2ae84b39SGanapati Kundapura uint16_t n = 0, nb_ops_flushed; 688*2ae84b39SGanapati Kundapura uint16_t *headp = &bufp->head; 689*2ae84b39SGanapati Kundapura uint16_t *tailp = &bufp->tail; 690*2ae84b39SGanapati Kundapura struct rte_crypto_op **ops = bufp->op_buffer; 691*2ae84b39SGanapati Kundapura 692*2ae84b39SGanapati Kundapura if (*tailp > *headp) 693*2ae84b39SGanapati Kundapura n = *tailp - *headp; 694*2ae84b39SGanapati Kundapura else if (*tailp < *headp) 695*2ae84b39SGanapati Kundapura n = bufp->size - *headp; 696*2ae84b39SGanapati Kundapura else 697*2ae84b39SGanapati Kundapura return 0; /* buffer empty */ 698*2ae84b39SGanapati Kundapura 699*2ae84b39SGanapati Kundapura nb_ops_flushed = eca_ops_enqueue_burst(adapter, ops, n); 700*2ae84b39SGanapati Kundapura bufp->count -= nb_ops_flushed; 701*2ae84b39SGanapati Kundapura if (!bufp->count) { 702*2ae84b39SGanapati Kundapura *headp = 0; 703*2ae84b39SGanapati Kundapura *tailp = 0; 704*2ae84b39SGanapati Kundapura return 0; /* buffer empty */ 705*2ae84b39SGanapati Kundapura } 706*2ae84b39SGanapati Kundapura 707*2ae84b39SGanapati Kundapura *headp = (*headp + nb_ops_flushed) % bufp->size; 708*2ae84b39SGanapati Kundapura return 1; 709*2ae84b39SGanapati Kundapura } 710*2ae84b39SGanapati Kundapura 711*2ae84b39SGanapati Kundapura 712*2ae84b39SGanapati Kundapura static void 713*2ae84b39SGanapati Kundapura eca_ops_buffer_flush(struct event_crypto_adapter *adapter) 714*2ae84b39SGanapati Kundapura { 715*2ae84b39SGanapati Kundapura if (likely(adapter->ebuf.count == 0)) 716*2ae84b39SGanapati Kundapura return; 717*2ae84b39SGanapati Kundapura 718*2ae84b39SGanapati Kundapura while (eca_circular_buffer_flush_to_evdev(adapter, 719*2ae84b39SGanapati Kundapura &adapter->ebuf)) 
720*2ae84b39SGanapati Kundapura ; 721*2ae84b39SGanapati Kundapura } 72299a2dd95SBruce Richardson static inline unsigned int 723a256a743SPavan Nikhilesh eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, 72499a2dd95SBruce Richardson unsigned int max_deq) 72599a2dd95SBruce Richardson { 72699a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats; 72799a2dd95SBruce Richardson struct crypto_device_info *curr_dev; 72899a2dd95SBruce Richardson struct crypto_queue_pair_info *curr_queue; 72999a2dd95SBruce Richardson struct rte_crypto_op *ops[BATCH_SIZE]; 730*2ae84b39SGanapati Kundapura uint16_t n, nb_deq, nb_enqueued, i; 73199a2dd95SBruce Richardson struct rte_cryptodev *dev; 73299a2dd95SBruce Richardson uint8_t cdev_id; 73399a2dd95SBruce Richardson uint16_t qp, dev_qps; 73499a2dd95SBruce Richardson bool done; 73599a2dd95SBruce Richardson uint16_t num_cdev = rte_cryptodev_count(); 73699a2dd95SBruce Richardson 73799a2dd95SBruce Richardson nb_deq = 0; 738*2ae84b39SGanapati Kundapura eca_ops_buffer_flush(adapter); 739*2ae84b39SGanapati Kundapura 74099a2dd95SBruce Richardson do { 74199a2dd95SBruce Richardson done = true; 74299a2dd95SBruce Richardson 74399a2dd95SBruce Richardson for (cdev_id = adapter->next_cdev_id; 74499a2dd95SBruce Richardson cdev_id < num_cdev; cdev_id++) { 745*2ae84b39SGanapati Kundapura uint16_t queues = 0; 746*2ae84b39SGanapati Kundapura 74799a2dd95SBruce Richardson curr_dev = &adapter->cdevs[cdev_id]; 74899a2dd95SBruce Richardson dev = curr_dev->dev; 749*2ae84b39SGanapati Kundapura if (unlikely(dev == NULL)) 75099a2dd95SBruce Richardson continue; 751*2ae84b39SGanapati Kundapura 75299a2dd95SBruce Richardson dev_qps = dev->data->nb_queue_pairs; 75399a2dd95SBruce Richardson 75499a2dd95SBruce Richardson for (qp = curr_dev->next_queue_pair_id; 75599a2dd95SBruce Richardson queues < dev_qps; qp = (qp + 1) % dev_qps, 75699a2dd95SBruce Richardson queues++) { 75799a2dd95SBruce Richardson 75899a2dd95SBruce 
Richardson curr_queue = &curr_dev->qpairs[qp]; 759*2ae84b39SGanapati Kundapura if (unlikely(curr_queue == NULL || 760*2ae84b39SGanapati Kundapura !curr_queue->qp_enabled)) 76199a2dd95SBruce Richardson continue; 76299a2dd95SBruce Richardson 76399a2dd95SBruce Richardson n = rte_cryptodev_dequeue_burst(cdev_id, qp, 76499a2dd95SBruce Richardson ops, BATCH_SIZE); 76599a2dd95SBruce Richardson if (!n) 76699a2dd95SBruce Richardson continue; 76799a2dd95SBruce Richardson 76899a2dd95SBruce Richardson done = false; 769*2ae84b39SGanapati Kundapura nb_enqueued = 0; 770*2ae84b39SGanapati Kundapura 77199a2dd95SBruce Richardson stats->crypto_deq_count += n; 772*2ae84b39SGanapati Kundapura 773*2ae84b39SGanapati Kundapura if (unlikely(!adapter->ebuf.count)) 774*2ae84b39SGanapati Kundapura nb_enqueued = eca_ops_enqueue_burst( 775*2ae84b39SGanapati Kundapura adapter, ops, n); 776*2ae84b39SGanapati Kundapura 777*2ae84b39SGanapati Kundapura if (likely(nb_enqueued == n)) 778*2ae84b39SGanapati Kundapura goto check; 779*2ae84b39SGanapati Kundapura 780*2ae84b39SGanapati Kundapura /* Failed to enqueue events case */ 781*2ae84b39SGanapati Kundapura for (i = nb_enqueued; i < n; i++) 782*2ae84b39SGanapati Kundapura eca_circular_buffer_add( 783*2ae84b39SGanapati Kundapura &adapter->ebuf, 784*2ae84b39SGanapati Kundapura ops[nb_enqueued]); 785*2ae84b39SGanapati Kundapura 786*2ae84b39SGanapati Kundapura check: 78799a2dd95SBruce Richardson nb_deq += n; 78899a2dd95SBruce Richardson 789*2ae84b39SGanapati Kundapura if (nb_deq >= max_deq) { 79099a2dd95SBruce Richardson if ((qp + 1) == dev_qps) { 79199a2dd95SBruce Richardson adapter->next_cdev_id = 79299a2dd95SBruce Richardson (cdev_id + 1) 79399a2dd95SBruce Richardson % num_cdev; 79499a2dd95SBruce Richardson } 79599a2dd95SBruce Richardson curr_dev->next_queue_pair_id = (qp + 1) 79699a2dd95SBruce Richardson % dev->data->nb_queue_pairs; 79799a2dd95SBruce Richardson 79899a2dd95SBruce Richardson return nb_deq; 79999a2dd95SBruce Richardson } 80099a2dd95SBruce 
Richardson } 80199a2dd95SBruce Richardson } 802*2ae84b39SGanapati Kundapura adapter->next_cdev_id = 0; 80399a2dd95SBruce Richardson } while (done == false); 80499a2dd95SBruce Richardson return nb_deq; 80599a2dd95SBruce Richardson } 80699a2dd95SBruce Richardson 80799a2dd95SBruce Richardson static void 808a256a743SPavan Nikhilesh eca_crypto_adapter_run(struct event_crypto_adapter *adapter, 80999a2dd95SBruce Richardson unsigned int max_ops) 81099a2dd95SBruce Richardson { 811578402f2SMattias Rönnblom unsigned int ops_left = max_ops; 812578402f2SMattias Rönnblom 813578402f2SMattias Rönnblom while (ops_left > 0) { 81499a2dd95SBruce Richardson unsigned int e_cnt, d_cnt; 81599a2dd95SBruce Richardson 816578402f2SMattias Rönnblom e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left); 817578402f2SMattias Rönnblom ops_left -= RTE_MIN(ops_left, e_cnt); 81899a2dd95SBruce Richardson 819578402f2SMattias Rönnblom d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left); 820578402f2SMattias Rönnblom ops_left -= RTE_MIN(ops_left, d_cnt); 82199a2dd95SBruce Richardson 82299a2dd95SBruce Richardson if (e_cnt == 0 && d_cnt == 0) 82399a2dd95SBruce Richardson break; 82499a2dd95SBruce Richardson 82599a2dd95SBruce Richardson } 826578402f2SMattias Rönnblom 827578402f2SMattias Rönnblom if (ops_left == max_ops) 828578402f2SMattias Rönnblom rte_event_maintain(adapter->eventdev_id, 829578402f2SMattias Rönnblom adapter->event_port_id, 0); 83099a2dd95SBruce Richardson } 83199a2dd95SBruce Richardson 83299a2dd95SBruce Richardson static int 83399a2dd95SBruce Richardson eca_service_func(void *args) 83499a2dd95SBruce Richardson { 835a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter = args; 83699a2dd95SBruce Richardson 83799a2dd95SBruce Richardson if (rte_spinlock_trylock(&adapter->lock) == 0) 83899a2dd95SBruce Richardson return 0; 83999a2dd95SBruce Richardson eca_crypto_adapter_run(adapter, adapter->max_nb); 84099a2dd95SBruce Richardson rte_spinlock_unlock(&adapter->lock); 84199a2dd95SBruce 
Richardson 84299a2dd95SBruce Richardson return 0; 84399a2dd95SBruce Richardson } 84499a2dd95SBruce Richardson 84599a2dd95SBruce Richardson static int 846a256a743SPavan Nikhilesh eca_init_service(struct event_crypto_adapter *adapter, uint8_t id) 84799a2dd95SBruce Richardson { 84899a2dd95SBruce Richardson struct rte_event_crypto_adapter_conf adapter_conf; 84999a2dd95SBruce Richardson struct rte_service_spec service; 85099a2dd95SBruce Richardson int ret; 85199a2dd95SBruce Richardson 85299a2dd95SBruce Richardson if (adapter->service_inited) 85399a2dd95SBruce Richardson return 0; 85499a2dd95SBruce Richardson 85599a2dd95SBruce Richardson memset(&service, 0, sizeof(service)); 85699a2dd95SBruce Richardson snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN, 85799a2dd95SBruce Richardson "rte_event_crypto_adapter_%d", id); 85899a2dd95SBruce Richardson service.socket_id = adapter->socket_id; 85999a2dd95SBruce Richardson service.callback = eca_service_func; 86099a2dd95SBruce Richardson service.callback_userdata = adapter; 86199a2dd95SBruce Richardson /* Service function handles locking for queue add/del updates */ 86299a2dd95SBruce Richardson service.capabilities = RTE_SERVICE_CAP_MT_SAFE; 86399a2dd95SBruce Richardson ret = rte_service_component_register(&service, &adapter->service_id); 86499a2dd95SBruce Richardson if (ret) { 86599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, 86699a2dd95SBruce Richardson service.name, ret); 86799a2dd95SBruce Richardson return ret; 86899a2dd95SBruce Richardson } 86999a2dd95SBruce Richardson 87099a2dd95SBruce Richardson ret = adapter->conf_cb(id, adapter->eventdev_id, 87199a2dd95SBruce Richardson &adapter_conf, adapter->conf_arg); 87299a2dd95SBruce Richardson if (ret) { 87399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, 87499a2dd95SBruce Richardson ret); 87599a2dd95SBruce Richardson return ret; 87699a2dd95SBruce Richardson } 87799a2dd95SBruce Richardson 
87899a2dd95SBruce Richardson adapter->max_nb = adapter_conf.max_nb; 87999a2dd95SBruce Richardson adapter->event_port_id = adapter_conf.event_port_id; 88099a2dd95SBruce Richardson adapter->service_inited = 1; 88199a2dd95SBruce Richardson 88299a2dd95SBruce Richardson return ret; 88399a2dd95SBruce Richardson } 88499a2dd95SBruce Richardson 88599a2dd95SBruce Richardson static void 886a256a743SPavan Nikhilesh eca_update_qp_info(struct event_crypto_adapter *adapter, 887a256a743SPavan Nikhilesh struct crypto_device_info *dev_info, int32_t queue_pair_id, 88899a2dd95SBruce Richardson uint8_t add) 88999a2dd95SBruce Richardson { 89099a2dd95SBruce Richardson struct crypto_queue_pair_info *qp_info; 89199a2dd95SBruce Richardson int enabled; 89299a2dd95SBruce Richardson uint16_t i; 89399a2dd95SBruce Richardson 89499a2dd95SBruce Richardson if (dev_info->qpairs == NULL) 89599a2dd95SBruce Richardson return; 89699a2dd95SBruce Richardson 89799a2dd95SBruce Richardson if (queue_pair_id == -1) { 89899a2dd95SBruce Richardson for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++) 89999a2dd95SBruce Richardson eca_update_qp_info(adapter, dev_info, i, add); 90099a2dd95SBruce Richardson } else { 90199a2dd95SBruce Richardson qp_info = &dev_info->qpairs[queue_pair_id]; 90299a2dd95SBruce Richardson enabled = qp_info->qp_enabled; 90399a2dd95SBruce Richardson if (add) { 90499a2dd95SBruce Richardson adapter->nb_qps += !enabled; 90599a2dd95SBruce Richardson dev_info->num_qpairs += !enabled; 90699a2dd95SBruce Richardson } else { 90799a2dd95SBruce Richardson adapter->nb_qps -= enabled; 90899a2dd95SBruce Richardson dev_info->num_qpairs -= enabled; 90999a2dd95SBruce Richardson } 91099a2dd95SBruce Richardson qp_info->qp_enabled = !!add; 91199a2dd95SBruce Richardson } 91299a2dd95SBruce Richardson } 91399a2dd95SBruce Richardson 91499a2dd95SBruce Richardson static int 915a256a743SPavan Nikhilesh eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id, 91699a2dd95SBruce Richardson int 
queue_pair_id) 91799a2dd95SBruce Richardson { 91899a2dd95SBruce Richardson struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id]; 91999a2dd95SBruce Richardson struct crypto_queue_pair_info *qpairs; 92099a2dd95SBruce Richardson uint32_t i; 92199a2dd95SBruce Richardson 92299a2dd95SBruce Richardson if (dev_info->qpairs == NULL) { 92399a2dd95SBruce Richardson dev_info->qpairs = 92499a2dd95SBruce Richardson rte_zmalloc_socket(adapter->mem_name, 92599a2dd95SBruce Richardson dev_info->dev->data->nb_queue_pairs * 92699a2dd95SBruce Richardson sizeof(struct crypto_queue_pair_info), 92799a2dd95SBruce Richardson 0, adapter->socket_id); 92899a2dd95SBruce Richardson if (dev_info->qpairs == NULL) 92999a2dd95SBruce Richardson return -ENOMEM; 93099a2dd95SBruce Richardson 93199a2dd95SBruce Richardson qpairs = dev_info->qpairs; 932*2ae84b39SGanapati Kundapura 933*2ae84b39SGanapati Kundapura if (eca_circular_buffer_init("eca_cdev_circular_buffer", 934*2ae84b39SGanapati Kundapura &qpairs->cbuf, 935*2ae84b39SGanapati Kundapura CRYPTO_ADAPTER_OPS_BUFFER_SZ)) { 936*2ae84b39SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev " 937*2ae84b39SGanapati Kundapura "buffer"); 93899a2dd95SBruce Richardson rte_free(qpairs); 93999a2dd95SBruce Richardson return -ENOMEM; 94099a2dd95SBruce Richardson } 94199a2dd95SBruce Richardson } 94299a2dd95SBruce Richardson 94399a2dd95SBruce Richardson if (queue_pair_id == -1) { 94499a2dd95SBruce Richardson for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++) 94599a2dd95SBruce Richardson eca_update_qp_info(adapter, dev_info, i, 1); 94699a2dd95SBruce Richardson } else 94799a2dd95SBruce Richardson eca_update_qp_info(adapter, dev_info, 94899a2dd95SBruce Richardson (uint16_t)queue_pair_id, 1); 94999a2dd95SBruce Richardson 95099a2dd95SBruce Richardson return 0; 95199a2dd95SBruce Richardson } 95299a2dd95SBruce Richardson 95399a2dd95SBruce Richardson int 95499a2dd95SBruce Richardson rte_event_crypto_adapter_queue_pair_add(uint8_t id, 
95599a2dd95SBruce Richardson uint8_t cdev_id, 95699a2dd95SBruce Richardson int32_t queue_pair_id, 95799a2dd95SBruce Richardson const struct rte_event *event) 95899a2dd95SBruce Richardson { 959a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 96099a2dd95SBruce Richardson struct rte_eventdev *dev; 96199a2dd95SBruce Richardson struct crypto_device_info *dev_info; 96299a2dd95SBruce Richardson uint32_t cap; 96399a2dd95SBruce Richardson int ret; 96499a2dd95SBruce Richardson 96599a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 96699a2dd95SBruce Richardson 967e74abd48SAkhil Goyal if (!rte_cryptodev_is_valid_dev(cdev_id)) { 96899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id); 96999a2dd95SBruce Richardson return -EINVAL; 97099a2dd95SBruce Richardson } 97199a2dd95SBruce Richardson 97299a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 97399a2dd95SBruce Richardson if (adapter == NULL) 97499a2dd95SBruce Richardson return -EINVAL; 97599a2dd95SBruce Richardson 97699a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 97799a2dd95SBruce Richardson ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id, 97899a2dd95SBruce Richardson cdev_id, 97999a2dd95SBruce Richardson &cap); 98099a2dd95SBruce Richardson if (ret) { 98199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8 98299a2dd95SBruce Richardson " cdev %" PRIu8, id, cdev_id); 98399a2dd95SBruce Richardson return ret; 98499a2dd95SBruce Richardson } 98599a2dd95SBruce Richardson 98699a2dd95SBruce Richardson if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) && 98799a2dd95SBruce Richardson (event == NULL)) { 98899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u", 98999a2dd95SBruce Richardson cdev_id); 99099a2dd95SBruce Richardson return -EINVAL; 99199a2dd95SBruce Richardson } 99299a2dd95SBruce Richardson 99399a2dd95SBruce Richardson dev_info = 
&adapter->cdevs[cdev_id]; 99499a2dd95SBruce Richardson 99599a2dd95SBruce Richardson if (queue_pair_id != -1 && 99699a2dd95SBruce Richardson (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) { 99799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16, 99899a2dd95SBruce Richardson (uint16_t)queue_pair_id); 99999a2dd95SBruce Richardson return -EINVAL; 100099a2dd95SBruce Richardson } 100199a2dd95SBruce Richardson 100299a2dd95SBruce Richardson /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, 100399a2dd95SBruce Richardson * no need of service core as HW supports event forward capability. 100499a2dd95SBruce Richardson */ 100599a2dd95SBruce Richardson if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || 100699a2dd95SBruce Richardson (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND && 100799a2dd95SBruce Richardson adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) || 100899a2dd95SBruce Richardson (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && 100999a2dd95SBruce Richardson adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) { 101099a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET( 101199a2dd95SBruce Richardson *dev->dev_ops->crypto_adapter_queue_pair_add, 101299a2dd95SBruce Richardson -ENOTSUP); 101399a2dd95SBruce Richardson if (dev_info->qpairs == NULL) { 101499a2dd95SBruce Richardson dev_info->qpairs = 101599a2dd95SBruce Richardson rte_zmalloc_socket(adapter->mem_name, 101699a2dd95SBruce Richardson dev_info->dev->data->nb_queue_pairs * 101799a2dd95SBruce Richardson sizeof(struct crypto_queue_pair_info), 101899a2dd95SBruce Richardson 0, adapter->socket_id); 101999a2dd95SBruce Richardson if (dev_info->qpairs == NULL) 102099a2dd95SBruce Richardson return -ENOMEM; 102199a2dd95SBruce Richardson } 102299a2dd95SBruce Richardson 102399a2dd95SBruce Richardson ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev, 102499a2dd95SBruce Richardson dev_info->dev, 102599a2dd95SBruce Richardson 
queue_pair_id, 102699a2dd95SBruce Richardson event); 102799a2dd95SBruce Richardson if (ret) 102899a2dd95SBruce Richardson return ret; 102999a2dd95SBruce Richardson 103099a2dd95SBruce Richardson else 103199a2dd95SBruce Richardson eca_update_qp_info(adapter, &adapter->cdevs[cdev_id], 103299a2dd95SBruce Richardson queue_pair_id, 1); 103399a2dd95SBruce Richardson } 103499a2dd95SBruce Richardson 103599a2dd95SBruce Richardson /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, 103699a2dd95SBruce Richardson * or SW adapter, initiate services so the application can choose 103799a2dd95SBruce Richardson * which ever way it wants to use the adapter. 103899a2dd95SBruce Richardson * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 103999a2dd95SBruce Richardson * Application may wants to use one of below two mode 104099a2dd95SBruce Richardson * a. OP_FORWARD mode -> HW Dequeue + SW enqueue 104199a2dd95SBruce Richardson * b. OP_NEW mode -> HW Dequeue 104299a2dd95SBruce Richardson * Case 2: No HW caps, use SW adapter 104399a2dd95SBruce Richardson * a. OP_FORWARD mode -> SW enqueue & dequeue 104499a2dd95SBruce Richardson * b. 
OP_NEW mode -> SW Dequeue 104599a2dd95SBruce Richardson */ 104699a2dd95SBruce Richardson if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && 104799a2dd95SBruce Richardson !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 104899a2dd95SBruce Richardson adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) || 104999a2dd95SBruce Richardson (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) && 105099a2dd95SBruce Richardson !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 105199a2dd95SBruce Richardson !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) && 105299a2dd95SBruce Richardson (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) { 105399a2dd95SBruce Richardson rte_spinlock_lock(&adapter->lock); 105499a2dd95SBruce Richardson ret = eca_init_service(adapter, id); 105599a2dd95SBruce Richardson if (ret == 0) 105699a2dd95SBruce Richardson ret = eca_add_queue_pair(adapter, cdev_id, 105799a2dd95SBruce Richardson queue_pair_id); 105899a2dd95SBruce Richardson rte_spinlock_unlock(&adapter->lock); 105999a2dd95SBruce Richardson 106099a2dd95SBruce Richardson if (ret) 106199a2dd95SBruce Richardson return ret; 106299a2dd95SBruce Richardson 106399a2dd95SBruce Richardson rte_service_component_runstate_set(adapter->service_id, 1); 106499a2dd95SBruce Richardson } 106599a2dd95SBruce Richardson 106699a2dd95SBruce Richardson rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event, 106799a2dd95SBruce Richardson queue_pair_id); 106899a2dd95SBruce Richardson return 0; 106999a2dd95SBruce Richardson } 107099a2dd95SBruce Richardson 107199a2dd95SBruce Richardson int 107299a2dd95SBruce Richardson rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id, 107399a2dd95SBruce Richardson int32_t queue_pair_id) 107499a2dd95SBruce Richardson { 1075a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 107699a2dd95SBruce Richardson struct crypto_device_info *dev_info; 107799a2dd95SBruce Richardson struct 
rte_eventdev *dev; 107899a2dd95SBruce Richardson int ret; 107999a2dd95SBruce Richardson uint32_t cap; 108099a2dd95SBruce Richardson uint16_t i; 108199a2dd95SBruce Richardson 108299a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 108399a2dd95SBruce Richardson 1084e74abd48SAkhil Goyal if (!rte_cryptodev_is_valid_dev(cdev_id)) { 108599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id); 108699a2dd95SBruce Richardson return -EINVAL; 108799a2dd95SBruce Richardson } 108899a2dd95SBruce Richardson 108999a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 109099a2dd95SBruce Richardson if (adapter == NULL) 109199a2dd95SBruce Richardson return -EINVAL; 109299a2dd95SBruce Richardson 109399a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 109499a2dd95SBruce Richardson ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id, 109599a2dd95SBruce Richardson cdev_id, 109699a2dd95SBruce Richardson &cap); 109799a2dd95SBruce Richardson if (ret) 109899a2dd95SBruce Richardson return ret; 109999a2dd95SBruce Richardson 110099a2dd95SBruce Richardson dev_info = &adapter->cdevs[cdev_id]; 110199a2dd95SBruce Richardson 110299a2dd95SBruce Richardson if (queue_pair_id != -1 && 110399a2dd95SBruce Richardson (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) { 110499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16, 110599a2dd95SBruce Richardson (uint16_t)queue_pair_id); 110699a2dd95SBruce Richardson return -EINVAL; 110799a2dd95SBruce Richardson } 110899a2dd95SBruce Richardson 110999a2dd95SBruce Richardson if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || 111099a2dd95SBruce Richardson (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW && 111199a2dd95SBruce Richardson adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) { 111299a2dd95SBruce Richardson RTE_FUNC_PTR_OR_ERR_RET( 111399a2dd95SBruce Richardson *dev->dev_ops->crypto_adapter_queue_pair_del, 
111499a2dd95SBruce Richardson -ENOTSUP); 111599a2dd95SBruce Richardson ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev, 111699a2dd95SBruce Richardson dev_info->dev, 111799a2dd95SBruce Richardson queue_pair_id); 111899a2dd95SBruce Richardson if (ret == 0) { 111999a2dd95SBruce Richardson eca_update_qp_info(adapter, 112099a2dd95SBruce Richardson &adapter->cdevs[cdev_id], 112199a2dd95SBruce Richardson queue_pair_id, 112299a2dd95SBruce Richardson 0); 112399a2dd95SBruce Richardson if (dev_info->num_qpairs == 0) { 112499a2dd95SBruce Richardson rte_free(dev_info->qpairs); 112599a2dd95SBruce Richardson dev_info->qpairs = NULL; 112699a2dd95SBruce Richardson } 112799a2dd95SBruce Richardson } 112899a2dd95SBruce Richardson } else { 112999a2dd95SBruce Richardson if (adapter->nb_qps == 0) 113099a2dd95SBruce Richardson return 0; 113199a2dd95SBruce Richardson 113299a2dd95SBruce Richardson rte_spinlock_lock(&adapter->lock); 113399a2dd95SBruce Richardson if (queue_pair_id == -1) { 113499a2dd95SBruce Richardson for (i = 0; i < dev_info->dev->data->nb_queue_pairs; 113599a2dd95SBruce Richardson i++) 113699a2dd95SBruce Richardson eca_update_qp_info(adapter, dev_info, 113799a2dd95SBruce Richardson queue_pair_id, 0); 113899a2dd95SBruce Richardson } else { 113999a2dd95SBruce Richardson eca_update_qp_info(adapter, dev_info, 114099a2dd95SBruce Richardson (uint16_t)queue_pair_id, 0); 114199a2dd95SBruce Richardson } 114299a2dd95SBruce Richardson 114399a2dd95SBruce Richardson if (dev_info->num_qpairs == 0) { 114499a2dd95SBruce Richardson rte_free(dev_info->qpairs); 114599a2dd95SBruce Richardson dev_info->qpairs = NULL; 114699a2dd95SBruce Richardson } 114799a2dd95SBruce Richardson 114899a2dd95SBruce Richardson rte_spinlock_unlock(&adapter->lock); 114999a2dd95SBruce Richardson rte_service_component_runstate_set(adapter->service_id, 115099a2dd95SBruce Richardson adapter->nb_qps); 115199a2dd95SBruce Richardson } 115299a2dd95SBruce Richardson 115399a2dd95SBruce Richardson 
rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id, 115499a2dd95SBruce Richardson queue_pair_id, ret); 115599a2dd95SBruce Richardson return ret; 115699a2dd95SBruce Richardson } 115799a2dd95SBruce Richardson 115899a2dd95SBruce Richardson static int 115999a2dd95SBruce Richardson eca_adapter_ctrl(uint8_t id, int start) 116099a2dd95SBruce Richardson { 1161a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 116299a2dd95SBruce Richardson struct crypto_device_info *dev_info; 116399a2dd95SBruce Richardson struct rte_eventdev *dev; 116499a2dd95SBruce Richardson uint32_t i; 116599a2dd95SBruce Richardson int use_service; 116699a2dd95SBruce Richardson int stop = !start; 116799a2dd95SBruce Richardson 116899a2dd95SBruce Richardson use_service = 0; 116999a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 117099a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 117199a2dd95SBruce Richardson if (adapter == NULL) 117299a2dd95SBruce Richardson return -EINVAL; 117399a2dd95SBruce Richardson 117499a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 117599a2dd95SBruce Richardson 117699a2dd95SBruce Richardson for (i = 0; i < rte_cryptodev_count(); i++) { 117799a2dd95SBruce Richardson dev_info = &adapter->cdevs[i]; 117899a2dd95SBruce Richardson /* if start check for num queue pairs */ 117999a2dd95SBruce Richardson if (start && !dev_info->num_qpairs) 118099a2dd95SBruce Richardson continue; 118199a2dd95SBruce Richardson /* if stop check if dev has been started */ 118299a2dd95SBruce Richardson if (stop && !dev_info->dev_started) 118399a2dd95SBruce Richardson continue; 118499a2dd95SBruce Richardson use_service |= !dev_info->internal_event_port; 118599a2dd95SBruce Richardson dev_info->dev_started = start; 118699a2dd95SBruce Richardson if (dev_info->internal_event_port == 0) 118799a2dd95SBruce Richardson continue; 118899a2dd95SBruce Richardson start ? 
(*dev->dev_ops->crypto_adapter_start)(dev, 118999a2dd95SBruce Richardson &dev_info->dev[i]) : 119099a2dd95SBruce Richardson (*dev->dev_ops->crypto_adapter_stop)(dev, 119199a2dd95SBruce Richardson &dev_info->dev[i]); 119299a2dd95SBruce Richardson } 119399a2dd95SBruce Richardson 119499a2dd95SBruce Richardson if (use_service) 119599a2dd95SBruce Richardson rte_service_runstate_set(adapter->service_id, start); 119699a2dd95SBruce Richardson 119799a2dd95SBruce Richardson return 0; 119899a2dd95SBruce Richardson } 119999a2dd95SBruce Richardson 120099a2dd95SBruce Richardson int 120199a2dd95SBruce Richardson rte_event_crypto_adapter_start(uint8_t id) 120299a2dd95SBruce Richardson { 1203a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 120499a2dd95SBruce Richardson 120599a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 120699a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 120799a2dd95SBruce Richardson if (adapter == NULL) 120899a2dd95SBruce Richardson return -EINVAL; 120999a2dd95SBruce Richardson 121099a2dd95SBruce Richardson rte_eventdev_trace_crypto_adapter_start(id, adapter); 121199a2dd95SBruce Richardson return eca_adapter_ctrl(id, 1); 121299a2dd95SBruce Richardson } 121399a2dd95SBruce Richardson 121499a2dd95SBruce Richardson int 121599a2dd95SBruce Richardson rte_event_crypto_adapter_stop(uint8_t id) 121699a2dd95SBruce Richardson { 121799a2dd95SBruce Richardson rte_eventdev_trace_crypto_adapter_stop(id); 121899a2dd95SBruce Richardson return eca_adapter_ctrl(id, 0); 121999a2dd95SBruce Richardson } 122099a2dd95SBruce Richardson 122199a2dd95SBruce Richardson int 122299a2dd95SBruce Richardson rte_event_crypto_adapter_stats_get(uint8_t id, 122399a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats *stats) 122499a2dd95SBruce Richardson { 1225a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 122699a2dd95SBruce Richardson struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 }; 122799a2dd95SBruce 
Richardson struct rte_event_crypto_adapter_stats dev_stats; 122899a2dd95SBruce Richardson struct rte_eventdev *dev; 122999a2dd95SBruce Richardson struct crypto_device_info *dev_info; 123099a2dd95SBruce Richardson uint32_t i; 123199a2dd95SBruce Richardson int ret; 123299a2dd95SBruce Richardson 123399a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 123499a2dd95SBruce Richardson 123599a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 123699a2dd95SBruce Richardson if (adapter == NULL || stats == NULL) 123799a2dd95SBruce Richardson return -EINVAL; 123899a2dd95SBruce Richardson 123999a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 124099a2dd95SBruce Richardson memset(stats, 0, sizeof(*stats)); 124199a2dd95SBruce Richardson for (i = 0; i < rte_cryptodev_count(); i++) { 124299a2dd95SBruce Richardson dev_info = &adapter->cdevs[i]; 124399a2dd95SBruce Richardson if (dev_info->internal_event_port == 0 || 124499a2dd95SBruce Richardson dev->dev_ops->crypto_adapter_stats_get == NULL) 124599a2dd95SBruce Richardson continue; 124699a2dd95SBruce Richardson ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev, 124799a2dd95SBruce Richardson dev_info->dev, 124899a2dd95SBruce Richardson &dev_stats); 124999a2dd95SBruce Richardson if (ret) 125099a2dd95SBruce Richardson continue; 125199a2dd95SBruce Richardson 125299a2dd95SBruce Richardson dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count; 125399a2dd95SBruce Richardson dev_stats_sum.event_enq_count += 125499a2dd95SBruce Richardson dev_stats.event_enq_count; 125599a2dd95SBruce Richardson } 125699a2dd95SBruce Richardson 125799a2dd95SBruce Richardson if (adapter->service_inited) 125899a2dd95SBruce Richardson *stats = adapter->crypto_stats; 125999a2dd95SBruce Richardson 126099a2dd95SBruce Richardson stats->crypto_deq_count += dev_stats_sum.crypto_deq_count; 126199a2dd95SBruce Richardson stats->event_enq_count += dev_stats_sum.event_enq_count; 126299a2dd95SBruce Richardson 
126399a2dd95SBruce Richardson return 0; 126499a2dd95SBruce Richardson } 126599a2dd95SBruce Richardson 126699a2dd95SBruce Richardson int 126799a2dd95SBruce Richardson rte_event_crypto_adapter_stats_reset(uint8_t id) 126899a2dd95SBruce Richardson { 1269a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 127099a2dd95SBruce Richardson struct crypto_device_info *dev_info; 127199a2dd95SBruce Richardson struct rte_eventdev *dev; 127299a2dd95SBruce Richardson uint32_t i; 127399a2dd95SBruce Richardson 127499a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 127599a2dd95SBruce Richardson 127699a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 127799a2dd95SBruce Richardson if (adapter == NULL) 127899a2dd95SBruce Richardson return -EINVAL; 127999a2dd95SBruce Richardson 128099a2dd95SBruce Richardson dev = &rte_eventdevs[adapter->eventdev_id]; 128199a2dd95SBruce Richardson for (i = 0; i < rte_cryptodev_count(); i++) { 128299a2dd95SBruce Richardson dev_info = &adapter->cdevs[i]; 128399a2dd95SBruce Richardson if (dev_info->internal_event_port == 0 || 128499a2dd95SBruce Richardson dev->dev_ops->crypto_adapter_stats_reset == NULL) 128599a2dd95SBruce Richardson continue; 128699a2dd95SBruce Richardson (*dev->dev_ops->crypto_adapter_stats_reset)(dev, 128799a2dd95SBruce Richardson dev_info->dev); 128899a2dd95SBruce Richardson } 128999a2dd95SBruce Richardson 129099a2dd95SBruce Richardson memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats)); 129199a2dd95SBruce Richardson return 0; 129299a2dd95SBruce Richardson } 129399a2dd95SBruce Richardson 129499a2dd95SBruce Richardson int 129599a2dd95SBruce Richardson rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id) 129699a2dd95SBruce Richardson { 1297a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 129899a2dd95SBruce Richardson 129999a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 130099a2dd95SBruce Richardson 
130199a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 130299a2dd95SBruce Richardson if (adapter == NULL || service_id == NULL) 130399a2dd95SBruce Richardson return -EINVAL; 130499a2dd95SBruce Richardson 130599a2dd95SBruce Richardson if (adapter->service_inited) 130699a2dd95SBruce Richardson *service_id = adapter->service_id; 130799a2dd95SBruce Richardson 130899a2dd95SBruce Richardson return adapter->service_inited ? 0 : -ESRCH; 130999a2dd95SBruce Richardson } 131099a2dd95SBruce Richardson 131199a2dd95SBruce Richardson int 131299a2dd95SBruce Richardson rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id) 131399a2dd95SBruce Richardson { 1314a256a743SPavan Nikhilesh struct event_crypto_adapter *adapter; 131599a2dd95SBruce Richardson 131699a2dd95SBruce Richardson EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 131799a2dd95SBruce Richardson 131899a2dd95SBruce Richardson adapter = eca_id_to_adapter(id); 131999a2dd95SBruce Richardson if (adapter == NULL || event_port_id == NULL) 132099a2dd95SBruce Richardson return -EINVAL; 132199a2dd95SBruce Richardson 132299a2dd95SBruce Richardson *event_port_id = adapter->event_port_id; 132399a2dd95SBruce Richardson 132499a2dd95SBruce Richardson return 0; 132599a2dd95SBruce Richardson } 1326