1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation.
3 * All rights reserved.
4 */
5
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <dev_driver.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27
28 /* MAX_OPS_IN_BUFFER is the size of a batch of dequeued events */
29 #define MAX_OPS_IN_BUFFER BATCH_SIZE
30
31 /* CRYPTO_ADAPTER_OPS_BUFFER_SZ accommodates MAX_OPS_IN_BUFFER plus
32  * additional space for one batch
33  */
34 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (MAX_OPS_IN_BUFFER + BATCH_SIZE)
35
36 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
37
38 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
39 * iterations of eca_crypto_adapter_enq_run()
40 */
41 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
42
43 #define ECA_ADAPTER_ARRAY "crypto_adapter_array"
44
45 struct __rte_cache_aligned crypto_ops_circular_buffer {
46 /* index of head element in circular buffer */
47 uint16_t head;
48 /* index of tail element in circular buffer */
49 uint16_t tail;
50 /* number of elements in buffer */
51 uint16_t count;
52 /* size of circular buffer */
53 uint16_t size;
54 /* Pointer to hold rte_crypto_ops for batching */
55 struct rte_crypto_op **op_buffer;
56 };
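/*
 * Usage sketch for the circular buffer above (illustrative only): with
 * size = 4, three eca_circular_buffer_add() calls on an empty buffer leave
 * head = 0, tail = 3, count = 3; a flush that enqueues two ops advances
 * head to 2 and drops count to 1. head == tail with count != 0 therefore
 * means the buffer is full, not empty.
 */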
57
58 struct __rte_cache_aligned event_crypto_adapter {
59 /* Event device identifier */
60 uint8_t eventdev_id;
61 /* Event port identifier */
62 uint8_t event_port_id;
63 /* Store event port's implicit release capability */
64 uint8_t implicit_release_disabled;
65 /* Flag to indicate backpressure at cryptodev
66 * Stop further dequeuing events from eventdev
67 */
68 bool stop_enq_to_cryptodev;
69 /* Max crypto ops processed in any service function invocation */
70 uint32_t max_nb;
71 /* Lock to serialize config updates with service function */
72 rte_spinlock_t lock;
73 /* Next crypto device to be processed */
74 uint16_t next_cdev_id;
75 /* Per crypto device structure */
76 struct crypto_device_info *cdevs;
77 /* Loop counter to flush crypto ops */
78 uint16_t transmit_loop_count;
79 /* Circular buffer for batching crypto ops to eventdev */
80 struct crypto_ops_circular_buffer ebuf;
81 /* Per instance stats structure */
82 struct rte_event_crypto_adapter_stats crypto_stats;
83 /* Configuration callback for rte_service configuration */
84 rte_event_crypto_adapter_conf_cb conf_cb;
85 /* Configuration callback argument */
86 void *conf_arg;
87 /* Set if default_cb is being used */
88 int default_cb_arg;
89 /* Service initialization state */
90 uint8_t service_inited;
91 /* Memory allocation name */
92 char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
93 /* Socket identifier cached from eventdev */
94 int socket_id;
95 /* Per adapter EAL service */
96 uint32_t service_id;
97 /* No. of queue pairs configured */
98 uint16_t nb_qps;
99 /* Adapter mode */
100 enum rte_event_crypto_adapter_mode mode;
101 };
102
103 /* Per crypto device information */
104 struct __rte_cache_aligned crypto_device_info {
105 /* Pointer to cryptodev */
106 struct rte_cryptodev *dev;
107 /* Pointer to queue pair info */
108 struct crypto_queue_pair_info *qpairs;
109 /* Next queue pair to be processed */
110 uint16_t next_queue_pair_id;
111 /* Set to indicate cryptodev->eventdev packet
112 * transfer uses a hardware mechanism
113 */
114 uint8_t internal_event_port;
115 /* Set to indicate processing has been started */
116 uint8_t dev_started;
117 /* If num_qpairs > 0, the start callback will
118 * be invoked if not already invoked
119 */
120 uint16_t num_qpairs;
121 };
122
123 /* Per queue pair information */
124 struct __rte_cache_aligned crypto_queue_pair_info {
125 /* Set to indicate queue pair is enabled */
126 bool qp_enabled;
127 /* Circular buffer for batching crypto ops to cdev */
128 struct crypto_ops_circular_buffer cbuf;
129 };
130
131 static struct event_crypto_adapter **event_crypto_adapter;
132
133 /* Macros to check for valid adapter */
134 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
135 if (!eca_valid_id(id)) { \
136 RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
137 return retval; \
138 } \
139 } while (0)
140
141 #define ECA_DYNFIELD_NAME "eca_ev_opaque_data"
142 /* Device-specific metadata field type */
143 typedef uint8_t eca_dynfield_t;
144
145 /* mbuf dynamic field offset for device-specific metadata */
146 int eca_dynfield_offset = -1;
147
148 static int
149 eca_dynfield_register(void)
150 {
151 static const struct rte_mbuf_dynfield eca_dynfield_desc = {
152 .name = ECA_DYNFIELD_NAME,
153 .size = sizeof(eca_dynfield_t),
154 .align = alignof(eca_dynfield_t),
155 .flags = 0,
156 };
157
158 eca_dynfield_offset =
159 rte_mbuf_dynfield_register(&eca_dynfield_desc);
160 return eca_dynfield_offset;
161 }
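/*
 * The dynamic field registered above stashes the 8-bit
 * "struct rte_event::impl_opaque" value in the op's source mbuf while a
 * session-based crypto op is in flight, so the value can be restored when
 * the completed op is turned back into an event in eca_ops_enqueue_burst().
 */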
162
163 static inline int
164 eca_valid_id(uint8_t id)
165 {
166 return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
167 }
168
169 static int
170 eca_init(void)
171 {
172 const struct rte_memzone *mz;
173 unsigned int sz;
174
175 sz = sizeof(*event_crypto_adapter) *
176 RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
177 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
178
179 mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
180 if (mz == NULL) {
181 mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz,
182 rte_socket_id(), 0,
183 RTE_CACHE_LINE_SIZE);
184 if (mz == NULL) {
185 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
186 PRId32, rte_errno);
187 return -rte_errno;
188 }
189 }
190
191 event_crypto_adapter = mz->addr;
192 return 0;
193 }
194
195 static int
196 eca_memzone_lookup(void)
197 {
198 const struct rte_memzone *mz;
199
200 if (event_crypto_adapter == NULL) {
201 mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY);
202 if (mz == NULL)
203 return -ENOMEM;
204
205 event_crypto_adapter = mz->addr;
206 }
207
208 return 0;
209 }
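/*
 * eca_init() reserves (or re-attaches to) the ECA_ADAPTER_ARRAY memzone
 * holding the adapter pointer array; eca_memzone_lookup() lets later API
 * calls (stats, runtime params) recover that array when this process has
 * not gone through eca_init(), e.g. in a secondary process.
 */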
210
211 static inline bool
212 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
213 {
214 return bufp->count >= BATCH_SIZE;
215 }
216
217 static inline bool
218 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
219 {
220 	/* circular buffer can have at most MAX_OPS_IN_BUFFER */
221 return (bufp->size - bufp->count) >= MAX_OPS_IN_BUFFER;
222 }
223
224 static inline void
225 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
226 {
227 rte_free(bufp->op_buffer);
228 }
229
230 static inline int
231 eca_circular_buffer_init(const char *name,
232 struct crypto_ops_circular_buffer *bufp,
233 uint16_t sz)
234 {
235 bufp->op_buffer = rte_zmalloc(name,
236 sizeof(struct rte_crypto_op *) * sz,
237 0);
238 if (bufp->op_buffer == NULL)
239 return -ENOMEM;
240
241 bufp->size = sz;
242 return 0;
243 }
244
245 static inline int
246 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
247 struct rte_crypto_op *op)
248 {
249 uint16_t *tailp = &bufp->tail;
250
251 bufp->op_buffer[*tailp] = op;
252 /* circular buffer, go round */
253 *tailp = (*tailp + 1) % bufp->size;
254 bufp->count++;
255
256 return 0;
257 }
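/*
 * Note: eca_circular_buffer_add() neither fails nor checks for free space;
 * the callers are responsible for leaving enough room (dequeue from the
 * eventdev is paused via stop_enq_to_cryptodev when a queue-pair buffer
 * cannot take another batch, and the eventdev-direction buffer is sized to
 * CRYPTO_ADAPTER_BUFFER_SZ).
 */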
258
259 static inline int
260 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
261 uint8_t cdev_id, uint16_t qp_id,
262 uint16_t *nb_ops_flushed)
263 {
264 uint16_t n = 0;
265 uint16_t *headp = &bufp->head;
266 uint16_t *tailp = &bufp->tail;
267 struct rte_crypto_op **ops = bufp->op_buffer;
268
269 if (*tailp > *headp)
270 /* Flush ops from head pointer to (tail - head) OPs */
271 n = *tailp - *headp;
272 else if (*tailp < *headp)
273 /* Circ buffer - Rollover.
274 * Flush OPs from head to max size of buffer.
275 * Rest of the OPs will be flushed in next iteration.
276 */
277 n = bufp->size - *headp;
278 else { /* head == tail case */
279 /* when head == tail,
280 * circ buff is either full(tail pointer roll over) or empty
281 */
282 if (bufp->count != 0) {
283 /* Circ buffer - FULL.
284 * Flush OPs from head to max size of buffer.
285 * Rest of the OPS will be flushed in next iteration.
286 */
287 n = bufp->size - *headp;
288 } else {
289 /* Circ buffer - Empty */
290 *nb_ops_flushed = 0;
291 return 0;
292 }
293 }
294
295 *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
296 &ops[*headp], n);
297 bufp->count -= *nb_ops_flushed;
298 if (!bufp->count) {
299 *headp = 0;
300 *tailp = 0;
301 } else
302 *headp = (*headp + *nb_ops_flushed) % bufp->size;
303
304 return *nb_ops_flushed == n ? 0 : -1;
305 }
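/*
 * Wrap-around example for the flush above (illustrative only): with
 * size = 8, head = 6, tail = 2 and count = 4, one call enqueues at most
 * ops[6..7] (n = size - head = 2); the remaining ops[0..1] are picked up
 * by the next invocation once head has wrapped around to 0.
 */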
306
307 static inline struct event_crypto_adapter *
308 eca_id_to_adapter(uint8_t id)
309 {
310 return event_crypto_adapter ?
311 event_crypto_adapter[id] : NULL;
312 }
313
314 static int
315 eca_default_config_cb(uint8_t id, uint8_t dev_id,
316 struct rte_event_crypto_adapter_conf *conf, void *arg)
317 {
318 struct rte_event_dev_config dev_conf;
319 struct rte_eventdev *dev;
320 uint8_t port_id;
321 int started;
322 int ret;
323 struct rte_event_port_conf *port_conf = arg;
324 struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
325
326 if (adapter == NULL)
327 return -EINVAL;
328
329 dev = &rte_eventdevs[adapter->eventdev_id];
330 dev_conf = dev->data->dev_conf;
331
332 started = dev->data->dev_started;
333 if (started)
334 rte_event_dev_stop(dev_id);
335 port_id = dev_conf.nb_event_ports;
336 dev_conf.nb_event_ports += 1;
337 if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
338 dev_conf.nb_single_link_event_port_queues += 1;
339
340 ret = rte_event_dev_configure(dev_id, &dev_conf);
341 if (ret) {
342 RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
343 if (started) {
344 if (rte_event_dev_start(dev_id))
345 return -EIO;
346 }
347 return ret;
348 }
349
350 ret = rte_event_port_setup(dev_id, port_id, port_conf);
351 if (ret) {
352 RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
353 return ret;
354 }
355
356 conf->event_port_id = port_id;
357 conf->max_nb = DEFAULT_MAX_NB;
358 if (started)
359 ret = rte_event_dev_start(dev_id);
360
361 adapter->default_cb_arg = 1;
362 return ret;
363 }
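/*
 * The default callback above grows the eventdev by one event port, which
 * requires reconfiguring (and hence stopping and restarting) a running
 * device. Applications that cannot tolerate this can pass their own
 * conf_cb to rte_event_crypto_adapter_create_ext() and hand out a
 * pre-configured port instead.
 */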
364
365 int
366 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
367 rte_event_crypto_adapter_conf_cb conf_cb,
368 enum rte_event_crypto_adapter_mode mode,
369 void *conf_arg)
370 {
371 struct event_crypto_adapter *adapter;
372 char mem_name[CRYPTO_ADAPTER_NAME_LEN];
373 int socket_id;
374 uint8_t i;
375 int ret;
376
377 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
378 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
379 if (conf_cb == NULL)
380 return -EINVAL;
381
382 if (event_crypto_adapter == NULL) {
383 ret = eca_init();
384 if (ret)
385 return ret;
386 }
387
388 adapter = eca_id_to_adapter(id);
389 if (adapter != NULL) {
390 RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
391 return -EEXIST;
392 }
393
394 socket_id = rte_event_dev_socket_id(dev_id);
395 snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
396 "rte_event_crypto_adapter_%d", id);
397
398 adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
399 RTE_CACHE_LINE_SIZE, socket_id);
400 if (adapter == NULL) {
401 RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
402 return -ENOMEM;
403 }
404
405 if (eca_circular_buffer_init("eca_edev_circular_buffer",
406 &adapter->ebuf,
407 CRYPTO_ADAPTER_BUFFER_SZ)) {
408 RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
409 rte_free(adapter);
410 return -ENOMEM;
411 }
412
413 adapter->eventdev_id = dev_id;
414 adapter->socket_id = socket_id;
415 adapter->conf_cb = conf_cb;
416 adapter->conf_arg = conf_arg;
417 adapter->mode = mode;
418 strcpy(adapter->mem_name, mem_name);
419 adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
420 rte_cryptodev_count() *
421 sizeof(struct crypto_device_info), 0,
422 socket_id);
423 if (adapter->cdevs == NULL) {
424 RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
425 eca_circular_buffer_free(&adapter->ebuf);
426 rte_free(adapter);
427 return -ENOMEM;
428 }
429
430 rte_spinlock_init(&adapter->lock);
431 for (i = 0; i < rte_cryptodev_count(); i++)
432 adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
433
434 event_crypto_adapter[id] = adapter;
435
436 return 0;
437 }
438
439
440 int
441 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
442 struct rte_event_port_conf *port_config,
443 enum rte_event_crypto_adapter_mode mode)
444 {
445 struct rte_event_port_conf *pc;
446 int ret;
447
448 if (port_config == NULL)
449 return -EINVAL;
450 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
451
452 pc = rte_malloc(NULL, sizeof(*pc), 0);
453 if (pc == NULL)
454 return -ENOMEM;
455 *pc = *port_config;
456 ret = rte_event_crypto_adapter_create_ext(id, dev_id,
457 eca_default_config_cb,
458 mode,
459 pc);
460 if (ret)
461 rte_free(pc);
462
463 rte_eventdev_trace_crypto_adapter_create(id, dev_id, port_config, mode, ret);
464
465 return ret;
466 }
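/*
 * Illustrative call sequence (not part of this file); "evdev_id" is an
 * application-defined eventdev identifier:
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	int ret = rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *			RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 */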
467
468 int
469 rte_event_crypto_adapter_free(uint8_t id)
470 {
471 struct event_crypto_adapter *adapter;
472
473 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
474
475 adapter = eca_id_to_adapter(id);
476 if (adapter == NULL)
477 return -EINVAL;
478
479 if (adapter->nb_qps) {
480 		RTE_EDEV_LOG_ERR("%" PRIu16 " Queue pairs not deleted",
481 adapter->nb_qps);
482 return -EBUSY;
483 }
484
485 rte_eventdev_trace_crypto_adapter_free(id, adapter);
486 if (adapter->default_cb_arg)
487 rte_free(adapter->conf_arg);
488 rte_free(adapter->cdevs);
489 rte_free(adapter);
490 event_crypto_adapter[id] = NULL;
491
492 return 0;
493 }
494
495 static inline unsigned int
496 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
497 unsigned int cnt)
498 {
499 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
500 union rte_event_crypto_metadata *m_data = NULL;
501 struct crypto_queue_pair_info *qp_info = NULL;
502 struct rte_crypto_op *crypto_op;
503 unsigned int i, n;
504 uint16_t qp_id, nb_enqueued = 0;
505 uint8_t cdev_id;
506 int ret;
507
508 ret = 0;
509 n = 0;
510 stats->event_deq_count += cnt;
511
512 for (i = 0; i < cnt; i++) {
513 crypto_op = ev[i].event_ptr;
514 if (crypto_op == NULL)
515 continue;
516
517 /** "struct rte_event::impl_opaque" field passed on from
518 * eventdev PMD could have different value per event.
519 	 * For session-based crypto operations, save
520 	 * "struct rte_event::impl_opaque" in the mbuf dynamic field and
521 	 * restore it after copying event information from the
522 	 * session event metadata.
523 	 * For session-less operations, each crypto op carries the event
524 	 * metadata and retains the "struct rte_event::impl_opaque"
525 	 * information to be passed back to the eventdev PMD.
526 */
527 if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
528 struct rte_mbuf *mbuf = crypto_op->sym->m_src;
529
530 *RTE_MBUF_DYNFIELD(mbuf,
531 eca_dynfield_offset,
532 eca_dynfield_t *) = ev[i].impl_opaque;
533 }
534
535 m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
536 if (m_data == NULL) {
537 rte_pktmbuf_free(crypto_op->sym->m_src);
538 rte_crypto_op_free(crypto_op);
539 continue;
540 }
541
542 cdev_id = m_data->request_info.cdev_id;
543 qp_id = m_data->request_info.queue_pair_id;
544 qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
545 if (!qp_info->qp_enabled) {
546 rte_pktmbuf_free(crypto_op->sym->m_src);
547 rte_crypto_op_free(crypto_op);
548 continue;
549 }
550 eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
551
552 if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
553 ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
554 cdev_id,
555 qp_id,
556 &nb_enqueued);
557 stats->crypto_enq_count += nb_enqueued;
558 n += nb_enqueued;
559
560 /**
561 * If some crypto ops failed to flush to cdev and
562 * space for another batch is not available, stop
563 * dequeue from eventdev momentarily
564 */
565 if (unlikely(ret < 0 &&
566 !eca_circular_buffer_space_for_batch(
567 &qp_info->cbuf)))
568 adapter->stop_enq_to_cryptodev = true;
569 }
570 }
571
572 return n;
573 }
574
575 static unsigned int
576 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
577 uint8_t cdev_id, uint16_t *nb_ops_flushed)
578 {
579 struct crypto_device_info *curr_dev;
580 struct crypto_queue_pair_info *curr_queue;
581 struct rte_cryptodev *dev;
582 uint16_t nb = 0, nb_enqueued = 0;
583 uint16_t qp;
584
585 curr_dev = &adapter->cdevs[cdev_id];
586 dev = rte_cryptodev_pmd_get_dev(cdev_id);
587
588 for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
589
590 curr_queue = &curr_dev->qpairs[qp];
591 if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
592 continue;
593
594 eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
595 cdev_id,
596 qp,
597 &nb_enqueued);
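		/* Note: despite its name, *nb_ops_flushed accumulates the ops
		 * still pending in this queue pair's buffer after the flush;
		 * eca_crypto_enq_flush() uses it to decide when dequeuing from
		 * the eventdev may resume.
		 */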
598 *nb_ops_flushed += curr_queue->cbuf.count;
599 nb += nb_enqueued;
600 }
601
602 return nb;
603 }
604
605 static unsigned int
606 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
607 {
608 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
609 uint8_t cdev_id;
610 uint16_t nb_enqueued = 0;
611 uint16_t nb_ops_flushed = 0;
612 uint16_t num_cdev = rte_cryptodev_count();
613
614 for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
615 nb_enqueued += eca_crypto_cdev_flush(adapter,
616 cdev_id,
617 &nb_ops_flushed);
618 /**
619 * Enable dequeue from eventdev if all ops from circular
620 * buffer flushed to cdev
621 */
622 if (!nb_ops_flushed)
623 adapter->stop_enq_to_cryptodev = false;
624
625 stats->crypto_enq_count += nb_enqueued;
626
627 return nb_enqueued;
628 }
629
630 static int
631 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
632 unsigned int max_enq)
633 {
634 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
635 struct rte_event ev[BATCH_SIZE];
636 unsigned int nb_enq, nb_enqueued;
637 uint16_t n;
638 uint8_t event_dev_id = adapter->eventdev_id;
639 uint8_t event_port_id = adapter->event_port_id;
640
641 nb_enqueued = 0;
642 if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
643 return 0;
644
645 for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
646
647 if (unlikely(adapter->stop_enq_to_cryptodev)) {
648 nb_enqueued += eca_crypto_enq_flush(adapter);
649
650 if (unlikely(adapter->stop_enq_to_cryptodev))
651 break;
652 }
653
654 stats->event_poll_count++;
655 n = rte_event_dequeue_burst(event_dev_id,
656 event_port_id, ev, BATCH_SIZE, 0);
657
658 if (!n)
659 break;
660
661 nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
662 }
663
664 if ((++adapter->transmit_loop_count &
665 (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
666 nb_enqueued += eca_crypto_enq_flush(adapter);
667 }
668
669 return nb_enqueued;
670 }
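/*
 * In RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode the application submits crypto
 * ops to the cryptodev itself, so the enqueue path above is a no-op; in
 * OP_FORWARD mode the service dequeues events whose event_ptr carries a
 * struct rte_crypto_op and buffers them per device/queue pair.
 */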
671
672 static inline uint16_t
673 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
674 struct rte_crypto_op **ops, uint16_t num)
675 {
676 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
677 union rte_event_crypto_metadata *m_data = NULL;
678 uint8_t event_dev_id = adapter->eventdev_id;
679 uint8_t event_port_id = adapter->event_port_id;
680 struct rte_event events[BATCH_SIZE];
681 uint16_t nb_enqueued, nb_ev;
682 uint8_t retry;
683 uint8_t i;
684
685 nb_ev = 0;
686 retry = 0;
687 nb_enqueued = 0;
688 num = RTE_MIN(num, BATCH_SIZE);
689 for (i = 0; i < num; i++) {
690 struct rte_event *ev = &events[nb_ev++];
691
692 m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
693 if (unlikely(m_data == NULL)) {
694 rte_pktmbuf_free(ops[i]->sym->m_src);
695 rte_crypto_op_free(ops[i]);
696 continue;
697 }
698
699 rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
700 ev->event_ptr = ops[i];
701
702 /** Restore "struct rte_event::impl_opaque" from mbuf
703 		 * dynamic field for session-based crypto operations.
704 		 * For session-less, each crypto operation carries event
705 * metadata and retains "struct rte_event::impl_opaque"
706 * information to be passed back to eventdev PMD.
707 */
708 if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
709 struct rte_mbuf *mbuf = ops[i]->sym->m_src;
710
711 ev->impl_opaque = *RTE_MBUF_DYNFIELD(mbuf,
712 eca_dynfield_offset,
713 eca_dynfield_t *);
714 }
715
716 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
717 if (adapter->implicit_release_disabled)
718 ev->op = RTE_EVENT_OP_FORWARD;
719 else
720 ev->op = RTE_EVENT_OP_NEW;
721 }
722
723 do {
724 nb_enqueued += rte_event_enqueue_burst(event_dev_id,
725 event_port_id,
726 &events[nb_enqueued],
727 nb_ev - nb_enqueued);
728
729 } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
730 nb_enqueued < nb_ev);
731
732 stats->event_enq_fail_count += nb_ev - nb_enqueued;
733 stats->event_enq_count += nb_enqueued;
734 stats->event_enq_retry_count += retry - 1;
735
736 return nb_enqueued;
737 }
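/*
 * Enqueue to the eventdev is retried up to
 * CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES times; events that still cannot be
 * enqueued are accounted in event_enq_fail_count, and the corresponding
 * ops stay in (or are re-buffered into) adapter->ebuf by the callers.
 */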
738
739 static int
740 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
741 struct crypto_ops_circular_buffer *bufp)
742 {
743 uint16_t n = 0, nb_ops_flushed;
744 uint16_t *headp = &bufp->head;
745 uint16_t *tailp = &bufp->tail;
746 struct rte_crypto_op **ops = bufp->op_buffer;
747
748 if (*tailp > *headp)
749 n = *tailp - *headp;
750 else if (*tailp < *headp)
751 n = bufp->size - *headp;
752 else
753 return 0; /* buffer empty */
754
755 nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
756 bufp->count -= nb_ops_flushed;
757 if (!bufp->count) {
758 *headp = 0;
759 *tailp = 0;
760 return 0; /* buffer empty */
761 }
762
763 *headp = (*headp + nb_ops_flushed) % bufp->size;
764 return 1;
765 }
766
767
768 static void
769 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
770 {
771 if (likely(adapter->ebuf.count == 0))
772 return;
773
774 while (eca_circular_buffer_flush_to_evdev(adapter,
775 &adapter->ebuf))
776 ;
777 }
778 static inline unsigned int
779 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
780 unsigned int max_deq)
781 {
782 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
783 struct crypto_device_info *curr_dev;
784 struct crypto_queue_pair_info *curr_queue;
785 struct rte_crypto_op *ops[BATCH_SIZE];
786 uint16_t n, nb_deq, nb_enqueued, i;
787 struct rte_cryptodev *dev;
788 uint8_t cdev_id;
789 uint16_t qp, dev_qps;
790 bool done;
791 uint16_t num_cdev = rte_cryptodev_count();
792
793 nb_deq = 0;
794 eca_ops_buffer_flush(adapter);
795
796 do {
797 done = true;
798
799 for (cdev_id = adapter->next_cdev_id;
800 cdev_id < num_cdev; cdev_id++) {
801 uint16_t queues = 0;
802
803 curr_dev = &adapter->cdevs[cdev_id];
804 dev = curr_dev->dev;
805 if (unlikely(dev == NULL))
806 continue;
807
808 dev_qps = dev->data->nb_queue_pairs;
809
810 for (qp = curr_dev->next_queue_pair_id;
811 queues < dev_qps; qp = (qp + 1) % dev_qps,
812 queues++) {
813
814 curr_queue = &curr_dev->qpairs[qp];
815 if (unlikely(curr_queue == NULL ||
816 !curr_queue->qp_enabled))
817 continue;
818
819 n = rte_cryptodev_dequeue_burst(cdev_id, qp,
820 ops, BATCH_SIZE);
821 if (!n)
822 continue;
823
824 done = false;
825 nb_enqueued = 0;
826
827 stats->crypto_deq_count += n;
828
829 if (unlikely(!adapter->ebuf.count))
830 nb_enqueued = eca_ops_enqueue_burst(
831 adapter, ops, n);
832
833 if (likely(nb_enqueued == n))
834 goto check;
835
836 /* Failed to enqueue events case */
837 for (i = nb_enqueued; i < n; i++)
838 eca_circular_buffer_add(
839 &adapter->ebuf,
840 ops[i]);
841
842 check:
843 nb_deq += n;
844
845 if (nb_deq >= max_deq) {
846 if ((qp + 1) == dev_qps) {
847 adapter->next_cdev_id =
848 (cdev_id + 1)
849 % num_cdev;
850 }
851 curr_dev->next_queue_pair_id = (qp + 1)
852 % dev->data->nb_queue_pairs;
853
854 return nb_deq;
855 }
856 }
857 }
858 adapter->next_cdev_id = 0;
859 } while (done == false);
860 return nb_deq;
861 }
862
863 static int
864 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
865 unsigned int max_ops)
866 {
867 unsigned int ops_left = max_ops;
868
869 while (ops_left > 0) {
870 unsigned int e_cnt, d_cnt;
871
872 e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
873 ops_left -= RTE_MIN(ops_left, e_cnt);
874
875 d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
876 ops_left -= RTE_MIN(ops_left, d_cnt);
877
878 if (e_cnt == 0 && d_cnt == 0)
879 break;
880
881 }
882
883 if (ops_left == max_ops) {
884 rte_event_maintain(adapter->eventdev_id,
885 adapter->event_port_id, 0);
886 return -EAGAIN;
887 } else
888 return 0;
889 }
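/*
 * Each service cycle drains completed ops from the cryptodevs before
 * pulling new events from the eventdev. When neither direction makes
 * progress, rte_event_maintain() is called so eventdevs that require
 * periodic port maintenance still get it, and -EAGAIN is returned from
 * the service callback.
 */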
890
891 static int
892 eca_service_func(void *args)
893 {
894 struct event_crypto_adapter *adapter = args;
895 int ret;
896
897 if (rte_spinlock_trylock(&adapter->lock) == 0)
898 return 0;
899 ret = eca_crypto_adapter_run(adapter, adapter->max_nb);
900 rte_spinlock_unlock(&adapter->lock);
901
902 return ret;
903 }
904
905 static int
906 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
907 {
908 struct rte_event_crypto_adapter_conf adapter_conf;
909 struct rte_service_spec service;
910 int ret;
911 uint32_t impl_rel;
912
913 if (adapter->service_inited)
914 return 0;
915
916 memset(&service, 0, sizeof(service));
917 snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
918 "rte_event_crypto_adapter_%d", id);
919 service.socket_id = adapter->socket_id;
920 service.callback = eca_service_func;
921 service.callback_userdata = adapter;
922 /* Service function handles locking for queue add/del updates */
923 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
924 ret = rte_service_component_register(&service, &adapter->service_id);
925 if (ret) {
926 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
927 service.name, ret);
928 return ret;
929 }
930
931 ret = adapter->conf_cb(id, adapter->eventdev_id,
932 &adapter_conf, adapter->conf_arg);
933 if (ret) {
934 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
935 ret);
936 return ret;
937 }
938
939 adapter->max_nb = adapter_conf.max_nb;
940 adapter->event_port_id = adapter_conf.event_port_id;
941
942 if (rte_event_port_attr_get(adapter->eventdev_id,
943 adapter->event_port_id,
944 RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
945 &impl_rel)) {
946 RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
947 adapter->eventdev_id);
948 eca_circular_buffer_free(&adapter->ebuf);
949 rte_free(adapter);
950 return -EINVAL;
951 }
952
953 adapter->implicit_release_disabled = (uint8_t)impl_rel;
954
955 /** Register for mbuf dyn field to store/restore
956 * "struct rte_event::impl_opaque"
957 */
958 eca_dynfield_offset = eca_dynfield_register();
959 if (eca_dynfield_offset < 0) {
960 RTE_EDEV_LOG_ERR("Failed to register eca mbuf dyn field");
961 eca_circular_buffer_free(&adapter->ebuf);
962 rte_free(adapter);
963 return -EINVAL;
964 }
965
966 adapter->service_inited = 1;
967
968 return ret;
969 }
970
971 static void
972 eca_update_qp_info(struct event_crypto_adapter *adapter,
973 struct crypto_device_info *dev_info, int32_t queue_pair_id,
974 uint8_t add)
975 {
976 struct crypto_queue_pair_info *qp_info;
977 int enabled;
978 uint16_t i;
979
980 if (dev_info->qpairs == NULL)
981 return;
982
983 if (queue_pair_id == -1) {
984 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
985 eca_update_qp_info(adapter, dev_info, i, add);
986 } else {
987 qp_info = &dev_info->qpairs[queue_pair_id];
988 enabled = qp_info->qp_enabled;
989 if (add) {
990 adapter->nb_qps += !enabled;
991 dev_info->num_qpairs += !enabled;
992 } else {
993 adapter->nb_qps -= enabled;
994 dev_info->num_qpairs -= enabled;
995 }
996 qp_info->qp_enabled = !!add;
997 }
998 }
999
1000 static int
1001 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
1002 int queue_pair_id)
1003 {
1004 struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
1005 struct crypto_queue_pair_info *qpairs;
1006 uint32_t i;
1007
1008 if (dev_info->qpairs == NULL) {
1009 dev_info->qpairs =
1010 rte_zmalloc_socket(adapter->mem_name,
1011 dev_info->dev->data->nb_queue_pairs *
1012 sizeof(struct crypto_queue_pair_info),
1013 0, adapter->socket_id);
1014 if (dev_info->qpairs == NULL)
1015 return -ENOMEM;
1016
1017 qpairs = dev_info->qpairs;
1018
1019 if (eca_circular_buffer_init("eca_cdev_circular_buffer",
1020 &qpairs->cbuf,
1021 CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
1022 RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
1023 "buffer");
1024 rte_free(qpairs);
1025 return -ENOMEM;
1026 }
1027 }
1028
1029 if (queue_pair_id == -1) {
1030 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
1031 eca_update_qp_info(adapter, dev_info, i, 1);
1032 } else
1033 eca_update_qp_info(adapter, dev_info,
1034 (uint16_t)queue_pair_id, 1);
1035
1036 return 0;
1037 }
1038
1039 int
1040 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
1041 uint8_t cdev_id,
1042 int32_t queue_pair_id,
1043 const struct rte_event_crypto_adapter_queue_conf *conf)
1044 {
1045 struct rte_event_crypto_adapter_vector_limits limits;
1046 struct event_crypto_adapter *adapter;
1047 struct crypto_device_info *dev_info;
1048 struct rte_eventdev *dev;
1049 uint32_t cap;
1050 int ret;
1051
1052 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1053
1054 if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1055 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1056 return -EINVAL;
1057 }
1058
1059 adapter = eca_id_to_adapter(id);
1060 if (adapter == NULL)
1061 return -EINVAL;
1062
1063 dev = &rte_eventdevs[adapter->eventdev_id];
1064 ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1065 cdev_id,
1066 &cap);
1067 if (ret) {
1068 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1069 " cdev %" PRIu8, id, cdev_id);
1070 return ret;
1071 }
1072
1073 if (conf == NULL) {
1074 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
1075 RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
1076 cdev_id);
1077 return -EINVAL;
1078 }
1079 } else {
1080 if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
1081 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
1082 RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1083 						 " dev %" PRIu8 " cdev %" PRIu8, id,
1084 cdev_id);
1085 return -ENOTSUP;
1086 }
1087
1088 ret = rte_event_crypto_adapter_vector_limits_get(
1089 adapter->eventdev_id, cdev_id, &limits);
1090 if (ret < 0) {
1091 RTE_EDEV_LOG_ERR("Failed to get event device vector "
1092 "limits, dev %" PRIu8 " cdev %" PRIu8,
1093 id, cdev_id);
1094 return -EINVAL;
1095 }
1096
1097 if (conf->vector_sz < limits.min_sz ||
1098 conf->vector_sz > limits.max_sz ||
1099 conf->vector_timeout_ns < limits.min_timeout_ns ||
1100 conf->vector_timeout_ns > limits.max_timeout_ns ||
1101 conf->vector_mp == NULL) {
1102 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1103 " dev %" PRIu8 " cdev %" PRIu8,
1104 id, cdev_id);
1105 return -EINVAL;
1106 }
1107
1108 if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
1109 (sizeof(uintptr_t) * conf->vector_sz))) {
1110 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
1111 " dev %" PRIu8 " cdev %" PRIu8,
1112 id, cdev_id);
1113 return -EINVAL;
1114 }
1115 }
1116 }
1117
1118 dev_info = &adapter->cdevs[cdev_id];
1119
1120 if (queue_pair_id != -1 &&
1121 (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1122 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1123 (uint16_t)queue_pair_id);
1124 return -EINVAL;
1125 }
1126
1127 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1128 	 * no service core is needed since the HW supports event forwarding.
1129 */
1130 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1131 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1132 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1133 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1134 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1135 if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
1136 return -ENOTSUP;
1137 if (dev_info->qpairs == NULL) {
1138 dev_info->qpairs =
1139 rte_zmalloc_socket(adapter->mem_name,
1140 dev_info->dev->data->nb_queue_pairs *
1141 sizeof(struct crypto_queue_pair_info),
1142 0, adapter->socket_id);
1143 if (dev_info->qpairs == NULL)
1144 return -ENOMEM;
1145 }
1146
1147 ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1148 dev_info->dev,
1149 queue_pair_id,
1150 conf);
1151 if (ret)
1152 return ret;
1153
1154 else
1155 eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1156 queue_pair_id, 1);
1157 }
1158
1159 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1160 * or SW adapter, initiate services so the application can choose
1161 	 * whichever way it wants to use the adapter.
1162 	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1163 	 * The application may want to use one of the two modes below:
1164 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1165 * b. OP_NEW mode -> HW Dequeue
1166 * Case 2: No HW caps, use SW adapter
1167 * a. OP_FORWARD mode -> SW enqueue & dequeue
1168 * b. OP_NEW mode -> SW Dequeue
1169 */
1170 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1171 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1172 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1173 (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1174 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1175 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1176 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1177 rte_spinlock_lock(&adapter->lock);
1178 ret = eca_init_service(adapter, id);
1179 if (ret == 0)
1180 ret = eca_add_queue_pair(adapter, cdev_id,
1181 queue_pair_id);
1182 rte_spinlock_unlock(&adapter->lock);
1183
1184 if (ret)
1185 return ret;
1186
1187 rte_service_component_runstate_set(adapter->service_id, 1);
1188 }
1189
1190 rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
1191 queue_pair_id, conf);
1192 return 0;
1193 }
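/*
 * Illustrative sketch (not part of this file): add every queue pair of
 * cryptodev "cdev_id" to adapter "id"; conf may be NULL when the
 * INTERNAL_PORT_QP_EV_BIND capability is not set:
 *
 *	int ret = rte_event_crypto_adapter_queue_pair_add(id, cdev_id, -1,
 *							  NULL);
 */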
1194
1195 int
1196 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1197 int32_t queue_pair_id)
1198 {
1199 struct event_crypto_adapter *adapter;
1200 struct crypto_device_info *dev_info;
1201 struct rte_eventdev *dev;
1202 int ret;
1203 uint32_t cap;
1204 uint16_t i;
1205
1206 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1207
1208 if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1209 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1210 return -EINVAL;
1211 }
1212
1213 adapter = eca_id_to_adapter(id);
1214 if (adapter == NULL)
1215 return -EINVAL;
1216
1217 dev = &rte_eventdevs[adapter->eventdev_id];
1218 ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1219 cdev_id,
1220 &cap);
1221 if (ret)
1222 return ret;
1223
1224 dev_info = &adapter->cdevs[cdev_id];
1225
1226 if (queue_pair_id != -1 &&
1227 (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1228 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1229 (uint16_t)queue_pair_id);
1230 return -EINVAL;
1231 }
1232
1233 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1234 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1235 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1236 if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
1237 return -ENOTSUP;
1238 ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1239 dev_info->dev,
1240 queue_pair_id);
1241 if (ret == 0) {
1242 eca_update_qp_info(adapter,
1243 &adapter->cdevs[cdev_id],
1244 queue_pair_id,
1245 0);
1246 if (dev_info->num_qpairs == 0) {
1247 rte_free(dev_info->qpairs);
1248 dev_info->qpairs = NULL;
1249 }
1250 }
1251 } else {
1252 if (adapter->nb_qps == 0)
1253 return 0;
1254
1255 rte_spinlock_lock(&adapter->lock);
1256 if (queue_pair_id == -1) {
1257 for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1258 i++)
1259 eca_update_qp_info(adapter, dev_info,
1260 queue_pair_id, 0);
1261 } else {
1262 eca_update_qp_info(adapter, dev_info,
1263 (uint16_t)queue_pair_id, 0);
1264 }
1265
1266 if (dev_info->num_qpairs == 0) {
1267 rte_free(dev_info->qpairs);
1268 dev_info->qpairs = NULL;
1269 }
1270
1271 rte_spinlock_unlock(&adapter->lock);
1272 rte_service_component_runstate_set(adapter->service_id,
1273 adapter->nb_qps);
1274 }
1275
1276 rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1277 queue_pair_id, ret);
1278 return ret;
1279 }
1280
1281 static int
1282 eca_adapter_ctrl(uint8_t id, int start)
1283 {
1284 struct event_crypto_adapter *adapter;
1285 struct crypto_device_info *dev_info;
1286 struct rte_eventdev *dev;
1287 uint32_t i;
1288 int use_service;
1289 int stop = !start;
1290
1291 use_service = 0;
1292 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1293 adapter = eca_id_to_adapter(id);
1294 if (adapter == NULL)
1295 return -EINVAL;
1296
1297 dev = &rte_eventdevs[adapter->eventdev_id];
1298
1299 for (i = 0; i < rte_cryptodev_count(); i++) {
1300 dev_info = &adapter->cdevs[i];
1301 /* if start check for num queue pairs */
1302 if (start && !dev_info->num_qpairs)
1303 continue;
1304 /* if stop check if dev has been started */
1305 if (stop && !dev_info->dev_started)
1306 continue;
1307 use_service |= !dev_info->internal_event_port;
1308 dev_info->dev_started = start;
1309 if (dev_info->internal_event_port == 0)
1310 continue;
1311 		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1312 					dev_info->dev) :
1313 			(*dev->dev_ops->crypto_adapter_stop)(dev,
1314 				dev_info->dev);
1315 }
1316
1317 if (use_service)
1318 rte_service_runstate_set(adapter->service_id, start);
1319
1320 return 0;
1321 }
1322
1323 int
1324 rte_event_crypto_adapter_start(uint8_t id)
1325 {
1326 struct event_crypto_adapter *adapter;
1327
1328 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1329 adapter = eca_id_to_adapter(id);
1330 if (adapter == NULL)
1331 return -EINVAL;
1332
1333 rte_eventdev_trace_crypto_adapter_start(id, adapter);
1334 return eca_adapter_ctrl(id, 1);
1335 }
1336
1337 int
1338 rte_event_crypto_adapter_stop(uint8_t id)
1339 {
1340 rte_eventdev_trace_crypto_adapter_stop(id);
1341 return eca_adapter_ctrl(id, 0);
1342 }
1343
1344 int
1345 rte_event_crypto_adapter_stats_get(uint8_t id,
1346 struct rte_event_crypto_adapter_stats *stats)
1347 {
1348 struct event_crypto_adapter *adapter;
1349 struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1350 struct rte_event_crypto_adapter_stats dev_stats;
1351 struct rte_eventdev *dev;
1352 struct crypto_device_info *dev_info;
1353 uint32_t i;
1354 int ret;
1355
1356 if (eca_memzone_lookup())
1357 return -ENOMEM;
1358
1359 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1360
1361 adapter = eca_id_to_adapter(id);
1362 if (adapter == NULL || stats == NULL)
1363 return -EINVAL;
1364
1365 dev = &rte_eventdevs[adapter->eventdev_id];
1366 memset(stats, 0, sizeof(*stats));
1367 for (i = 0; i < rte_cryptodev_count(); i++) {
1368 dev_info = &adapter->cdevs[i];
1369 if (dev_info->internal_event_port == 0 ||
1370 dev->dev_ops->crypto_adapter_stats_get == NULL)
1371 continue;
1372 ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1373 dev_info->dev,
1374 &dev_stats);
1375 if (ret)
1376 continue;
1377
1378 dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1379 dev_stats_sum.event_enq_count +=
1380 dev_stats.event_enq_count;
1381 }
1382
1383 if (adapter->service_inited)
1384 *stats = adapter->crypto_stats;
1385
1386 stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1387 stats->event_enq_count += dev_stats_sum.event_enq_count;
1388
1389 rte_eventdev_trace_crypto_adapter_stats_get(id, stats,
1390 stats->event_poll_count, stats->event_deq_count,
1391 stats->crypto_enq_count, stats->crypto_enq_fail,
1392 stats->crypto_deq_count, stats->event_enq_count,
1393 stats->event_enq_retry_count, stats->event_enq_fail_count);
1394
1395 return 0;
1396 }
1397
1398 int
1399 rte_event_crypto_adapter_stats_reset(uint8_t id)
1400 {
1401 struct event_crypto_adapter *adapter;
1402 struct crypto_device_info *dev_info;
1403 struct rte_eventdev *dev;
1404 uint32_t i;
1405
1406 rte_eventdev_trace_crypto_adapter_stats_reset(id);
1407
1408 if (eca_memzone_lookup())
1409 return -ENOMEM;
1410
1411 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1412
1413 adapter = eca_id_to_adapter(id);
1414 if (adapter == NULL)
1415 return -EINVAL;
1416
1417 dev = &rte_eventdevs[adapter->eventdev_id];
1418 for (i = 0; i < rte_cryptodev_count(); i++) {
1419 dev_info = &adapter->cdevs[i];
1420 if (dev_info->internal_event_port == 0 ||
1421 dev->dev_ops->crypto_adapter_stats_reset == NULL)
1422 continue;
1423 (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1424 dev_info->dev);
1425 }
1426
1427 memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1428 return 0;
1429 }
1430
1431 int
1432 rte_event_crypto_adapter_runtime_params_init(
1433 struct rte_event_crypto_adapter_runtime_params *params)
1434 {
1435 if (params == NULL)
1436 return -EINVAL;
1437
1438 memset(params, 0, sizeof(*params));
1439 params->max_nb = DEFAULT_MAX_NB;
1440
1441 return 0;
1442 }
1443
1444 static int
1445 crypto_adapter_cap_check(struct event_crypto_adapter *adapter)
1446 {
1447 int ret;
1448 uint32_t caps;
1449
1450 if (!adapter->nb_qps)
1451 return -EINVAL;
1452 ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1453 adapter->next_cdev_id,
1454 &caps);
1455 if (ret) {
1456 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
1457 " cdev %" PRIu8, adapter->eventdev_id,
1458 adapter->next_cdev_id);
1459 return ret;
1460 }
1461
1462 if ((caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1463 (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1464 return -ENOTSUP;
1465
1466 return 0;
1467 }
1468
1469 int
1470 rte_event_crypto_adapter_runtime_params_set(uint8_t id,
1471 struct rte_event_crypto_adapter_runtime_params *params)
1472 {
1473 struct event_crypto_adapter *adapter;
1474 int ret;
1475
1476 if (eca_memzone_lookup())
1477 return -ENOMEM;
1478
1479 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1480
1481 if (params == NULL) {
1482 RTE_EDEV_LOG_ERR("params pointer is NULL");
1483 return -EINVAL;
1484 }
1485
1486 adapter = eca_id_to_adapter(id);
1487 if (adapter == NULL)
1488 return -EINVAL;
1489
1490 ret = crypto_adapter_cap_check(adapter);
1491 if (ret)
1492 return ret;
1493
1494 rte_spinlock_lock(&adapter->lock);
1495 adapter->max_nb = params->max_nb;
1496 rte_spinlock_unlock(&adapter->lock);
1497
1498 return 0;
1499 }
1500
1501 int
1502 rte_event_crypto_adapter_runtime_params_get(uint8_t id,
1503 struct rte_event_crypto_adapter_runtime_params *params)
1504 {
1505 struct event_crypto_adapter *adapter;
1506 int ret;
1507
1508 if (eca_memzone_lookup())
1509 return -ENOMEM;
1510
1511
1512 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1513
1514 if (params == NULL) {
1515 RTE_EDEV_LOG_ERR("params pointer is NULL");
1516 return -EINVAL;
1517 }
1518
1519 adapter = eca_id_to_adapter(id);
1520 if (adapter == NULL)
1521 return -EINVAL;
1522
1523 ret = crypto_adapter_cap_check(adapter);
1524 if (ret)
1525 return ret;
1526
1527 params->max_nb = adapter->max_nb;
1528
1529 return 0;
1530 }
1531
1532 int
1533 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1534 {
1535 struct event_crypto_adapter *adapter;
1536
1537 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1538
1539 adapter = eca_id_to_adapter(id);
1540 if (adapter == NULL || service_id == NULL)
1541 return -EINVAL;
1542
1543 if (adapter->service_inited)
1544 *service_id = adapter->service_id;
1545
1546 rte_eventdev_trace_crypto_adapter_service_id_get(id, *service_id);
1547
1548 return adapter->service_inited ? 0 : -ESRCH;
1549 }
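/*
 * Minimal sketch (assumes "service_lcore" is an application-chosen service
 * lcore) for running the SW adapter: fetch the service id and map it to a
 * service core.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0)
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 */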
1550
1551 int
1552 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1553 {
1554 struct event_crypto_adapter *adapter;
1555
1556 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1557
1558 adapter = eca_id_to_adapter(id);
1559 if (adapter == NULL || event_port_id == NULL)
1560 return -EINVAL;
1561
1562 *event_port_id = adapter->event_port_id;
1563
1564 rte_eventdev_trace_crypto_adapter_event_port_get(id, *event_port_id);
1565
1566 return 0;
1567 }
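/*
 * Illustrative sketch for OP_FORWARD mode (identifiers "evdev_id" and
 * "crypto_q" are application-defined): link the adapter's event port to
 * the queue on which the application enqueues ops destined for the
 * cryptodev.
 *
 *	uint8_t cpt_port;
 *	int ret = rte_event_crypto_adapter_event_port_get(id, &cpt_port);
 *
 *	if (ret == 0)
 *		ret = rte_event_port_link(evdev_id, cpt_port, &crypto_q,
 *					  NULL, 1);
 */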
1568
1569 int
1570 rte_event_crypto_adapter_vector_limits_get(
1571 uint8_t dev_id, uint16_t cdev_id,
1572 struct rte_event_crypto_adapter_vector_limits *limits)
1573 {
1574 struct rte_cryptodev *cdev;
1575 struct rte_eventdev *dev;
1576 uint32_t cap;
1577 int ret;
1578
1579 rte_eventdev_trace_crypto_adapter_vector_limits_get(dev_id, cdev_id, limits);
1580
1581 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1582
1583 if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1584 		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu16, cdev_id);
1585 return -EINVAL;
1586 }
1587
1588 if (limits == NULL) {
1589 RTE_EDEV_LOG_ERR("Invalid limits storage provided");
1590 return -EINVAL;
1591 }
1592
1593 dev = &rte_eventdevs[dev_id];
1594 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
1595
1596 ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
1597 if (ret) {
1598 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
1599 				 " cdev %" PRIu16, dev_id, cdev_id);
1600 return ret;
1601 }
1602
1603 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
1604 RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
1605 				 " dev %" PRIu8 " cdev %" PRIu16, dev_id, cdev_id);
1606 return -ENOTSUP;
1607 }
1608
1609 if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
1610 return -ENOTSUP;
1611
1612 return dev->dev_ops->crypto_adapter_vector_limits_get(
1613 dev, cdev, limits);
1614 }
1615