xref: /dpdk/lib/eventdev/rte_event_crypto_adapter.c (revision b7fe612ac1de393f869c9818d5503633c8e96b36)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct rte_event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
	uint8_t len;
} __rte_cache_aligned;

static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

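/* Reserve or look up the memzone that backs the global array of adapter
 * pointers.
 */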
static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

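/* Return the adapter instance for the given id, or NULL if none exists */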
static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

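/* Default configuration callback: reconfigure the event device with one
 * extra port, set that port up with the caller-supplied port config and
 * report it back together with DEFAULT_MAX_NB.
 */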
static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct rte_event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}


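/* Illustrative usage sketch (not taken from the DPDK docs): evdev_id,
 * cdev_id and port_conf are placeholders the application would supply.
 * A NULL event is valid here because a non-NULL event is only required
 * when the PMD advertises the INTERNAL_PORT_QP_EV_BIND capability.
 *
 *	ret = rte_event_crypto_adapter_create(id, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_queue_pair_add(id, cdev_id,
 *							      -1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(id);
 */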
int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

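/* Move the crypto ops carried in the dequeued events into the per queue
 * pair op buffers, flushing a buffer to its cryptodev whenever it reaches
 * BATCH_SIZE. Ops without usable request metadata are dropped.
 */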
static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
		 struct rte_event *ev, unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, len, ret;
	uint8_t cdev_id;

	len = 0;
	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
					crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp_id,
							  op_buffer,
							  BATCH_SIZE);
			stats->crypto_enq_count += ret;
			/* count only the ops accepted by the cryptodev */
			n += ret;

			while (ret < len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}

			len = 0;
		}
		if (qp_info)
			qp_info->len = len;
	}

	return n;
}

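/* Flush the partially filled op buffers of all enabled queue pairs to
 * their cryptodevs.
 */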
static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp;
	uint16_t ret;
	uint16_t nb_enqueued = 0;
	uint16_t num_cdev = rte_cryptodev_count();

	ret = 0;
	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		dev = curr_dev->dev;
		if (dev == NULL)
			continue;
		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)
				continue;

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp,
							  op_buffer,
							  curr_queue->len);
			stats->crypto_enq_count += ret;
			nb_enqueued += ret;

			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}
			curr_queue->len = 0;
		}
	}

	return nb_enqueued;
}

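/* Enqueue side of the service: dequeue events from the adapter's event
 * port and hand the crypto ops they carry to the cryptodevs. Not used in
 * OP_NEW mode, where the application enqueues ops to the cryptodev itself.
 */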
static int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

	if ((++adapter->transmit_loop_count &
		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

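/* Turn completed crypto ops back into events using the response metadata
 * stored with each op and enqueue them to the event device.
 */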
static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
		  struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev++];

		m_data = NULL;
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		/* Drop ops without response metadata and release the
		 * event slot reserved for them.
		 */
		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			nb_ev--;
			continue;
		}

		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						  event_port_id,
						  &events[nb_enqueued],
						  nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);
	}

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
}

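/* Dequeue side of the service: poll the enabled queue pairs of all
 * cryptodevs round-robin and forward completed ops as events until the
 * max_deq budget is exhausted or no more completions are available.
 */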
static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	do {
		uint16_t queues = 0;
		done = true;

		for (cdev_id = adapter->next_cdev_id;
			cdev_id < num_cdev; cdev_id++) {
			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (dev == NULL)
				continue;
			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
				queues < dev_qps; qp = (qp + 1) % dev_qps,
				queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (!curr_queue->qp_enabled)
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				stats->crypto_deq_count += n;
				eca_ops_enqueue_burst(adapter, ops, n);
				nb_deq += n;

				if (nb_deq > max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
	} while (done == false);
	return nb_deq;
}

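/* Alternate between the dequeue and enqueue sides until the max_ops budget
 * for this service invocation is used up or no work remains.
 */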
static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_ops)
{
	while (max_ops) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;

	}
}

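/* EAL service callback; skip the run if another lcore holds the lock */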
static int
eca_service_func(void *args)
{
	struct rte_event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

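/* Register the adapter's service component and run the configuration
 * callback to obtain the event port and max_nb settings.
 */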
static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		"rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
		&adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

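/* Enable or disable one queue pair, or all of them when queue_pair_id is
 * -1, keeping the per device and per adapter queue pair counts in sync.
 */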
static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
			struct crypto_device_info *dev_info,
			int32_t queue_pair_id,
			uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

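/* Allocate the per queue pair state for a cryptodev on first use and mark
 * the requested queue pair(s) as enabled.
 */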
static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
		uint8_t cdev_id,
		int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;
		qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
					BATCH_SIZE *
					sizeof(struct rte_crypto_op *),
					0, adapter->socket_id);
		if (!qpairs->op_buffer) {
			rte_free(qpairs);
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
					(uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event *event)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
			" cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
				  cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* When the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no service core is needed as the HW supports event forward
	 * capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;

		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
				   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or this is a SW adapter, initiate the service so the application
	 * can choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 *         The application may want to use one of the two modes below:
	 *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 *          b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use SW adapter
	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
	 *          b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
		queue_pair_id);
	return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
					queue_pair_id,
					0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
				i++)
				eca_update_qp_info(adapter, dev_info, i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
						(uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
				adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}

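/* Start or stop processing: update the per device state, call the PMD
 * start/stop callbacks for devices with an internal event port and set the
 * service run state for the software data path.
 */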
static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* when starting, skip devices with no queue pairs configured */
		if (start && !dev_info->num_qpairs)
			continue;
		/* when stopping, skip devices that were never started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		/* pass the cryptodev associated with this adapter entry */
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
						dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
						dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}