xref: /dpdk/examples/ipsec-secgw/event_helper.c (revision 6cf329f9d8c2eb97c8f39becd514c14b25251ac1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) 2020 Marvell International Ltd.
3  */
4 #include <stdlib.h>
5 
6 #include <rte_bitmap.h>
7 #include <rte_cryptodev.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_event_crypto_adapter.h>
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_event_eth_tx_adapter.h>
13 #include <rte_malloc.h>
14 #include <stdbool.h>
15 
16 #include "event_helper.h"
17 #include "ipsec-secgw.h"
18 
19 #define DEFAULT_VECTOR_SIZE  16
20 #define DEFAULT_VECTOR_TMO   102400
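/*
 * Note: the default vector timeout is expressed in nanoseconds
 * (102400 ns = 102.4 us); it seeds em_conf->vector_tmo_ns in
 * eh_conf_init().
 */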
21 
22 #define INVALID_EV_QUEUE_ID -1
23 
24 static volatile bool eth_core_running;
25 
26 static int
27 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
28 {
29 	int i, count = 0;
30 
31 	RTE_LCORE_FOREACH(i) {
32 		/* Check if this core is enabled in the core mask */
33 		if (rte_bitmap_get(eth_core_mask, i)) {
34 			/* Found enabled core */
35 			count++;
36 		}
37 	}
38 	return count;
39 }
40 
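/*
 * Hand out the cores marked as eth cores in round-robin order. The
 * static prev_core keeps state across calls, so successive calls (e.g.
 * one for the Rx core and one for the Tx core) return different eth
 * cores while more than one is available.
 */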
41 static inline unsigned int
42 eh_get_next_eth_core(struct eventmode_conf *em_conf)
43 {
44 	static unsigned int prev_core = -1;
45 	unsigned int next_core;
46 
47 	/*
48 	 * Make sure we have at least one eth core running, else the following
49 	 * logic would lead to an infinite loop.
50 	 */
51 	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
52 		EH_LOG_ERR("No enabled eth core found");
53 		return RTE_MAX_LCORE;
54 	}
55 
56 	/* Only some cores are marked as eth cores, skip others */
57 	do {
58 		/* Get the next core */
59 		next_core = rte_get_next_lcore(prev_core, 0, 1);
60 
61 		/* Check if we have reached max lcores */
62 		if (next_core == RTE_MAX_LCORE)
63 			return next_core;
64 
65 		/* Update prev_core */
66 		prev_core = next_core;
67 	} while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));
68 
69 	return next_core;
70 }
71 
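/*
 * Note: the main lcore is not skipped here (skip_main is 0), so the
 * main core may also be selected as a worker core.
 */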
72 static inline unsigned int
73 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
74 {
75 	unsigned int next_core;
76 
77 	/* Get next active core skipping cores reserved as eth cores */
78 	do {
79 		/* Get the next core */
80 		next_core = rte_get_next_lcore(prev_core, 0, 0);
81 
82 		/* Check if we have reached max lcores */
83 		if (next_core == RTE_MAX_LCORE)
84 			return next_core;
85 
86 		prev_core = next_core;
87 	} while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
88 
89 	return next_core;
90 }
91 
92 static struct eventdev_params *
93 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
94 {
95 	int i;
96 
97 	for (i = 0; i < em_conf->nb_eventdev; i++) {
98 		if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
99 			break;
100 	}
101 
102 	/* No match */
103 	if (i == em_conf->nb_eventdev)
104 		return NULL;
105 
106 	return &(em_conf->eventdev_config[i]);
107 }
108 
109 static inline bool
110 eh_dev_has_rx_internal_port(uint8_t eventdev_id)
111 {
112 	bool flag = true;
113 	int j, ret;
114 
115 	RTE_ETH_FOREACH_DEV(j) {
116 		uint32_t caps = 0;
117 
118 		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
119 		if (ret < 0)
120 			return false;
121 
122 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
123 			flag = false;
124 	}
125 	return flag;
126 }
127 
128 static inline bool
129 eh_dev_has_tx_internal_port(uint8_t eventdev_id)
130 {
131 	bool flag = true;
132 	int j, ret;
133 
134 	RTE_ETH_FOREACH_DEV(j) {
135 		uint32_t caps = 0;
136 
137 		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
138 		if (ret < 0)
139 			return false;
140 
141 		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
142 			flag = false;
143 	}
144 	return flag;
145 }
146 
147 static inline bool
148 eh_dev_has_burst_mode(uint8_t dev_id)
149 {
150 	struct rte_event_dev_info dev_info;
151 
152 	rte_event_dev_info_get(dev_id, &dev_info);
153 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
154 			true : false;
155 }
156 
157 static int
158 eh_set_nb_eventdev(struct eventmode_conf *em_conf)
159 {
160 	struct eventdev_params *eventdev_config;
161 	int nb_eventdev;
162 
163 	/* Get the number of event devices */
164 	nb_eventdev = rte_event_dev_count();
165 	if (nb_eventdev == 0) {
166 		EH_LOG_ERR("No event devices detected");
167 		return -EINVAL;
168 	}
169 
170 	if (nb_eventdev != 1) {
171 		EH_LOG_ERR("Event mode does not support multiple event devices. "
172 			   "Please provide only one event device.");
173 		return -EINVAL;
174 	}
175 
176 	/* Set event dev ID */
177 	eventdev_config = &(em_conf->eventdev_config[0]);
178 	eventdev_config->eventdev_id = 0;
179 
180 	/* Update the number of event devices */
181 	em_conf->nb_eventdev = 1;
182 
183 	return 0;
184 }
185 
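/*
 * Illustration of the default queue accounting done below (values
 * assumed): with 2 eth ports, no internal event ports and the event
 * crypto adapter enabled, nb_eventqueue becomes 2 (Rx) + 1 (Tx) +
 * 1 (crypto) = 4.
 */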
186 static int
187 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
188 {
189 	int lcore_count, nb_eth_dev, ret;
190 	struct eventdev_params *eventdev_config;
191 	struct rte_event_dev_info dev_info;
192 
193 	/* Get the number of eth devs */
194 	nb_eth_dev = rte_eth_dev_count_avail();
195 	if (nb_eth_dev == 0) {
196 		EH_LOG_ERR("No eth devices detected");
197 		return -EINVAL;
198 	}
199 
200 	/* Get the number of lcores */
201 	lcore_count = rte_lcore_count();
202 
203 	/* Read event device info */
204 	ret = rte_event_dev_info_get(0, &dev_info);
205 	if (ret < 0) {
206 		EH_LOG_ERR("Failed to read event device info %d", ret);
207 		return ret;
208 	}
209 
210 	/* Check if enough ports are available */
211 	if (dev_info.max_event_ports < 2) {
212 		EH_LOG_ERR("Not enough event ports available");
213 		return -EINVAL;
214 	}
215 
216 	/* Get the first event dev conf */
217 	eventdev_config = &(em_conf->eventdev_config[0]);
218 
219 	/* Save number of queues & ports available */
220 	eventdev_config->nb_eventqueue = nb_eth_dev;
221 	eventdev_config->nb_eventport = dev_info.max_event_ports;
222 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
223 
224 	/* One queue is reserved for Tx */
225 	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
226 	if (!eventdev_config->all_internal_ports) {
227 		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
228 			EH_LOG_ERR("Not enough event queues available");
229 			return -EINVAL;
230 		}
231 		eventdev_config->tx_queue_id =
232 			eventdev_config->nb_eventqueue++;
233 	}
234 
235 	/* One queue is reserved for event crypto adapter */
236 	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
237 	if (em_conf->enable_event_crypto_adapter) {
238 		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
239 			EH_LOG_ERR("Not enough event queues available");
240 			return -EINVAL;
241 		}
242 		eventdev_config->ev_cpt_queue_id =
243 			eventdev_config->nb_eventqueue++;
244 	}
245 
246 	/* Check if there are more ports than required */
247 	if (eventdev_config->nb_eventport > lcore_count) {
248 		/* One port per lcore is enough */
249 		eventdev_config->nb_eventport = lcore_count;
250 	}
251 
252 	return 0;
253 }
254 
255 static void
256 eh_do_capability_check(struct eventmode_conf *em_conf)
257 {
258 	struct eventdev_params *eventdev_config;
259 	int all_internal_ports = 1;
260 	uint32_t eventdev_id;
261 	int i;
262 
263 	for (i = 0; i < em_conf->nb_eventdev; i++) {
264 
265 		/* Get the event dev conf */
266 		eventdev_config = &(em_conf->eventdev_config[i]);
267 		eventdev_id = eventdev_config->eventdev_id;
268 
269 		/* Check if event device has internal port for Rx & Tx */
270 		if (eh_dev_has_rx_internal_port(eventdev_id) &&
271 		    eh_dev_has_tx_internal_port(eventdev_id)) {
272 			eventdev_config->all_internal_ports = 1;
273 		} else {
274 			all_internal_ports = 0;
275 		}
276 	}
277 
278 	/*
279 	 * If Rx & Tx internal ports are supported by all event devices then
280 	 * eth cores won't be required. Override the eth core mask requested.
281 	 */
282 	if (all_internal_ports)
283 		rte_bitmap_reset(em_conf->eth_core_mask);
284 }
285 
286 static int
287 eh_set_default_conf_link(struct eventmode_conf *em_conf)
288 {
289 	struct eventdev_params *eventdev_config;
290 	struct eh_event_link_info *link;
291 	unsigned int lcore_id = -1;
292 	int i, link_index;
293 
294 	/*
295 	 * Create a 1:1 mapping from event ports to cores. If the number
296 	 * of event ports is less than the number of cores, some cores
297 	 * won't run a worker. If there are more event ports than cores,
298 	 * the extra ports won't be used.
299 	 *
300 	 */
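	/*
	 * Illustration (assumed EAL config): with lcores 0-4 enabled,
	 * lcore 0 as main and lcores 1 and 2 reserved as eth cores, the
	 * loop below produces port 0 -> lcore 0, port 1 -> lcore 3,
	 * port 2 -> lcore 4; any remaining event ports stay unused.
	 */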
301 
302 	/*
303 	 * The event queue-port mapping is done according to the link. Since
304 	 * The event queue-port mapping is derived from the links. Since
305 	 * we are falling back to the default link config, enable the
306 	 * "all_ev_queue_to_ev_port" mode flag, which maps all queues
307 	 * to each port.
308 	em_conf->ext_params.all_ev_queue_to_ev_port = 1;
309 
310 	/* Get first event dev conf */
311 	eventdev_config = &(em_conf->eventdev_config[0]);
312 
313 	/* Loop through the ports */
314 	for (i = 0; i < eventdev_config->nb_eventport; i++) {
315 
316 		/* Get next active core id */
317 		lcore_id = eh_get_next_active_core(em_conf,
318 				lcore_id);
319 
320 		if (lcore_id == RTE_MAX_LCORE) {
321 			/* Reached max cores */
322 			return 0;
323 		}
324 
325 		/* Save the current combination as one link */
326 
327 		/* Get the index */
328 		link_index = em_conf->nb_link;
329 
330 		/* Get the corresponding link */
331 		link = &(em_conf->link[link_index]);
332 
333 		/* Save link */
334 		link->eventdev_id = eventdev_config->eventdev_id;
335 		link->event_port_id = i;
336 		link->lcore_id = lcore_id;
337 
338 		/*
339 		 * Don't set eventq_id; by default all queues are
340 		 * mapped to the port, which is controlled by the
341 		 * operating mode.
342 		 */
343 
344 		/* Update number of links */
345 		em_conf->nb_link++;
346 	}
347 
348 	return 0;
349 }
350 
351 static int
352 eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
353 {
354 	struct rx_adapter_connection_info *conn;
355 	struct eventdev_params *eventdev_config;
356 	struct rx_adapter_conf *adapter;
357 	bool rx_internal_port = true;
358 	bool single_ev_queue = false;
359 	int nb_eventqueue;
360 	uint32_t caps = 0;
361 	int eventdev_id;
362 	int nb_eth_dev;
363 	int adapter_id;
364 	int conn_id;
365 	int ret;
366 	int i;
367 
368 	/* Create one adapter with eth queues mapped to event queue(s) */
369 
370 	if (em_conf->nb_eventdev == 0) {
371 		EH_LOG_ERR("No event devs registered");
372 		return -EINVAL;
373 	}
374 
375 	/* Get the number of eth devs */
376 	nb_eth_dev = rte_eth_dev_count_avail();
377 
378 	/* Use the first event dev */
379 	eventdev_config = &(em_conf->eventdev_config[0]);
380 
381 	/* Get eventdev ID */
382 	eventdev_id = eventdev_config->eventdev_id;
383 	adapter_id = 0;
384 
385 	/* Get adapter conf */
386 	adapter = &(em_conf->rx_adapter[adapter_id]);
387 
388 	/* Set adapter conf */
389 	adapter->eventdev_id = eventdev_id;
390 	adapter->adapter_id = adapter_id;
391 
392 	/*
393 	 * If the event device does not have internal ports for passing
394 	 * packets then reserve one queue for the Tx path
395 	 */
396 	nb_eventqueue = eventdev_config->all_internal_ports ?
397 			eventdev_config->nb_eventqueue :
398 			eventdev_config->nb_eventqueue - 1;
399 
400 	/* Reserve one queue for event crypto adapter */
401 	if (em_conf->enable_event_crypto_adapter)
402 		nb_eventqueue--;
403 
404 	/*
405 	 * Map all queues of eth device (port) to an event queue. If there
406 	 * are more event queues than eth ports then create 1:1 mapping.
407 	 * Otherwise map all eth ports to a single event queue.
408 	 */
409 	if (nb_eth_dev > nb_eventqueue)
410 		single_ev_queue = true;
411 
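	/*
	 * Illustration (assumed counts): with 4 eth ports but only 2
	 * usable event queues every port maps to event queue 0, while
	 * with 2 ports and 4 queues port 0 maps to queue 0 and port 1
	 * maps to queue 1.
	 */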
412 	for (i = 0; i < nb_eth_dev; i++) {
413 
414 		/* Use only the ports enabled */
415 		if ((em_conf->eth_portmask & (1 << i)) == 0)
416 			continue;
417 
418 		/* Get the connection id */
419 		conn_id = adapter->nb_connections;
420 
421 		/* Get the connection */
422 		conn = &(adapter->conn[conn_id]);
423 
424 		/* Set mapping between eth ports & event queues */
425 		conn->ethdev_id = i;
426 		conn->eventq_id = single_ev_queue ? 0 : i;
427 
428 		/* Add all eth queues of the eth port to the event queue */
429 		conn->ethdev_rx_qid = -1;
430 
431 		/* Get Rx adapter capabilities */
432 		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
433 		if (ret < 0) {
434 			EH_LOG_ERR("Failed to get event device %d eth rx adapter"
435 				   " capabilities for port %d", eventdev_id, i);
436 			return ret;
437 		}
438 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
439 			rx_internal_port = false;
440 
441 		/* Update the number of connections */
442 		adapter->nb_connections++;
443 
444 	}
445 
446 	if (rx_internal_port) {
447 		/* Rx core is not required */
448 		adapter->rx_core_id = -1;
449 	} else {
450 		/* Rx core is required */
451 		adapter->rx_core_id = eh_get_next_eth_core(em_conf);
452 	}
453 
454 	/* We have set up one adapter */
455 	em_conf->nb_rx_adapter = 1;
456 
457 	return 0;
458 }
459 
460 static int
461 eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
462 {
463 	struct tx_adapter_connection_info *conn;
464 	struct eventdev_params *eventdev_config;
465 	struct tx_adapter_conf *tx_adapter;
466 	bool tx_internal_port = true;
467 	uint32_t caps = 0;
468 	int eventdev_id;
469 	int adapter_id;
470 	int nb_eth_dev;
471 	int conn_id;
472 	int ret;
473 	int i;
474 
475 	/*
476 	 * Create one Tx adapter with all eth queues mapped to event queues
477 	 * 1:1.
478 	 */
479 
480 	if (em_conf->nb_eventdev == 0) {
481 		EH_LOG_ERR("No event devs registered");
482 		return -EINVAL;
483 	}
484 
485 	/* Get the number of eth devs */
486 	nb_eth_dev = rte_eth_dev_count_avail();
487 
488 	/* Use the first event dev */
489 	eventdev_config = &(em_conf->eventdev_config[0]);
490 
491 	/* Get eventdev ID */
492 	eventdev_id = eventdev_config->eventdev_id;
493 	adapter_id = 0;
494 
495 	/* Get adapter conf */
496 	tx_adapter = &(em_conf->tx_adapter[adapter_id]);
497 
498 	/* Set adapter conf */
499 	tx_adapter->eventdev_id = eventdev_id;
500 	tx_adapter->adapter_id = adapter_id;
501 
502 	/*
503 	 * Map all Tx queues of the eth device (port) to the event device.
504 	 */
505 
506 	/* Set defaults for connections */
507 
508 	/*
509 	 * One eth device (port) is one connection. Map all Tx queues
510 	 * of the device to the Tx adapter.
511 	 */
512 
513 	for (i = 0; i < nb_eth_dev; i++) {
514 
515 		/* Use only the ports enabled */
516 		if ((em_conf->eth_portmask & (1 << i)) == 0)
517 			continue;
518 
519 		/* Get the connection id */
520 		conn_id = tx_adapter->nb_connections;
521 
522 		/* Get the connection */
523 		conn = &(tx_adapter->conn[conn_id]);
524 
525 		/* Add ethdev to connections */
526 		conn->ethdev_id = i;
527 
528 		/* Add all eth tx queues to adapter */
529 		conn->ethdev_tx_qid = -1;
530 
531 		/* Get Tx adapter capabilities */
532 		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
533 		if (ret < 0) {
534 			EH_LOG_ERR("Failed to get event device %d eth tx adapter"
535 				   " capabilities for port %d", eventdev_id, i);
536 			return ret;
537 		}
538 		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
539 			tx_internal_port = false;
540 
541 		/* Update the number of connections */
542 		tx_adapter->nb_connections++;
543 	}
544 
545 	if (tx_internal_port) {
546 		/* Tx core is not required */
547 		tx_adapter->tx_core_id = -1;
548 	} else {
549 		/* Tx core is required */
550 		tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
551 
552 		/*
553 		 * Use one event queue per adapter for submitting packets
554 		 * for Tx. Reserve the last available queue.
555 		 */
556 		/* Queue numbers start at 0 */
557 		tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
558 	}
559 
560 	/* We have set up one adapter */
561 	em_conf->nb_tx_adapter = 1;
562 	return 0;
563 }
564 
565 static int
566 eh_validate_conf(struct eventmode_conf *em_conf)
567 {
568 	int ret;
569 
570 	/*
571 	 * Check if event devs are specified. Else probe the event devices
572 	 * and initialize the config with all ports & queues available
573 	 */
574 	if (em_conf->nb_eventdev == 0) {
575 		ret = eh_set_nb_eventdev(em_conf);
576 		if (ret != 0)
577 			return ret;
578 		eh_do_capability_check(em_conf);
579 		ret = eh_set_default_conf_eventdev(em_conf);
580 		if (ret != 0)
581 			return ret;
582 	} else {
583 		/* Perform capability check for the selected event devices */
584 		eh_do_capability_check(em_conf);
585 	}
586 
587 	/*
588 	 * Check if links are specified. Else generate a default config for
589 	 * the event ports used.
590 	 */
591 	if (em_conf->nb_link == 0) {
592 		ret = eh_set_default_conf_link(em_conf);
593 		if (ret != 0)
594 			return ret;
595 	}
596 
597 	/*
598 	 * Check if rx adapters are specified. Else generate a default config
599 	 * with one rx adapter and all eth queues - event queue mapped.
600 	 */
601 	if (em_conf->nb_rx_adapter == 0) {
602 		ret = eh_set_default_conf_rx_adapter(em_conf);
603 		if (ret != 0)
604 			return ret;
605 	}
606 
607 	/*
608 	 * Check if tx adapters are specified. Else generate a default config
609 	 * with one tx adapter.
610 	 */
611 	if (em_conf->nb_tx_adapter == 0) {
612 		ret = eh_set_default_conf_tx_adapter(em_conf);
613 		if (ret != 0)
614 			return ret;
615 	}
616 
617 	return 0;
618 }
619 
620 static int
621 eh_initialize_eventdev(struct eventmode_conf *em_conf)
622 {
623 	struct rte_event_queue_conf eventq_conf = {0};
624 	struct rte_event_dev_info evdev_default_conf;
625 	struct rte_event_dev_config eventdev_conf;
626 	struct eventdev_params *eventdev_config;
627 	int nb_eventdev = em_conf->nb_eventdev;
628 	struct eh_event_link_info *link;
629 	uint8_t *queue = NULL;
630 	uint8_t eventdev_id;
631 	int nb_eventqueue;
632 	int ret, j;
633 	uint8_t i;
634 
635 	for (i = 0; i < nb_eventdev; i++) {
636 
637 		/* Get eventdev config */
638 		eventdev_config = &(em_conf->eventdev_config[i]);
639 
640 		/* Get event dev ID */
641 		eventdev_id = eventdev_config->eventdev_id;
642 
643 		/* Get the number of queues */
644 		nb_eventqueue = eventdev_config->nb_eventqueue;
645 
646 		/* Reset the default conf */
647 		memset(&evdev_default_conf, 0,
648 			sizeof(struct rte_event_dev_info));
649 
650 		/* Get default conf of eventdev */
651 		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
652 		if (ret < 0) {
653 			EH_LOG_ERR(
654 				"Error in getting event device info[devID:%d]",
655 				eventdev_id);
656 			return ret;
657 		}
658 
659 		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
660 		eventdev_conf.nb_events_limit =
661 				evdev_default_conf.max_num_events;
662 		eventdev_conf.nb_event_queues = nb_eventqueue;
663 		eventdev_conf.nb_event_ports =
664 				eventdev_config->nb_eventport;
665 		eventdev_conf.nb_event_queue_flows =
666 				evdev_default_conf.max_event_queue_flows;
667 		eventdev_conf.nb_event_port_dequeue_depth =
668 				evdev_default_conf.max_event_port_dequeue_depth;
669 		eventdev_conf.nb_event_port_enqueue_depth =
670 				evdev_default_conf.max_event_port_enqueue_depth;
671 
672 		if (evdev_default_conf.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE)
673 			eventdev_conf.preschedule_type = RTE_EVENT_PRESCHEDULE;
674 
675 		if (evdev_default_conf.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)
676 			eventdev_conf.preschedule_type = RTE_EVENT_PRESCHEDULE_ADAPTIVE;
677 
678 		/* Configure event device */
679 		ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
680 		if (ret < 0) {
681 			EH_LOG_ERR("Error in configuring event device");
682 			return ret;
683 		}
684 
685 		/* Configure event queues */
686 		for (j = 0; j < nb_eventqueue; j++) {
687 
688 			memset(&eventq_conf, 0,
689 					sizeof(struct rte_event_queue_conf));
690 
691 			/* Per event dev, queues can be ATQ or SINGLE LINK */
692 			eventq_conf.event_queue_cfg =
693 					eventdev_config->ev_queue_mode;
694 			/*
695 			 * All queues need to be set with sched_type as
696 			 * schedule type for the application stage. One
697 			 * queue would be reserved for the final eth tx
698 			 * stage if event device does not have internal
699 			 * ports. This will be an atomic queue.
700 			 */
701 			if (j == eventdev_config->tx_queue_id) {
702 				eventq_conf.schedule_type =
703 					RTE_SCHED_TYPE_ATOMIC;
704 			} else {
705 				eventq_conf.schedule_type =
706 					em_conf->ext_params.sched_type;
707 			}
708 			/*
709 			 * Give the event crypto device's queue higher priority than
710 			 * the Rx queues. This allows crypto events to be processed
			 * with the highest priority.
711 			 */
712 			if (j == eventdev_config->ev_cpt_queue_id) {
713 				eventq_conf.priority =
714 					RTE_EVENT_DEV_PRIORITY_HIGHEST;
715 			} else {
716 				eventq_conf.priority =
717 					RTE_EVENT_DEV_PRIORITY_NORMAL;
718 			}
719 
720 			/* Set max atomic flows to 1024 */
721 			eventq_conf.nb_atomic_flows = 1024;
722 			eventq_conf.nb_atomic_order_sequences = 1024;
723 
724 			/* Setup the queue */
725 			ret = rte_event_queue_setup(eventdev_id, j,
726 					&eventq_conf);
727 			if (ret < 0) {
728 				EH_LOG_ERR("Failed to setup event queue %d",
729 					   ret);
730 				return ret;
731 			}
732 		}
733 
734 		/* Configure event ports */
735 		for (j = 0; j <  eventdev_config->nb_eventport; j++) {
736 			ret = rte_event_port_setup(eventdev_id, j, NULL);
737 			if (ret < 0) {
738 				EH_LOG_ERR("Failed to setup event port %d",
739 					   ret);
740 				return ret;
741 			}
742 		}
743 	}
744 
745 	/* Make event queue - event port link */
746 	for (j = 0; j <  em_conf->nb_link; j++) {
747 
748 		/* Get link info */
749 		link = &(em_conf->link[j]);
750 
751 		/* Get event dev ID */
752 		eventdev_id = link->eventdev_id;
753 
754 		/*
755 		 * If "all_ev_queue_to_ev_port" params flag is selected, all
756 		 * queues need to be mapped to the port.
757 		 */
758 		if (em_conf->ext_params.all_ev_queue_to_ev_port)
759 			queue = NULL;
760 		else
761 			queue = &(link->eventq_id);
762 
763 		/* Link queue to port */
764 		ret = rte_event_port_link(eventdev_id, link->event_port_id,
765 				queue, NULL, 1);
766 		if (ret < 0) {
767 			EH_LOG_ERR("Failed to link event port %d", ret);
768 			return ret;
769 		}
770 	}
771 
772 	return 0;
773 }
774 
775 static int
776 eh_start_eventdev(struct eventmode_conf *em_conf)
777 {
778 	struct eventdev_params *eventdev_config;
779 	int nb_eventdev = em_conf->nb_eventdev;
780 	int i, ret;
781 
782 	/* Start event devices */
783 	for (i = 0; i < nb_eventdev; i++) {
784 
785 		/* Get eventdev config */
786 		eventdev_config = &(em_conf->eventdev_config[i]);
787 
788 		ret = rte_event_dev_start(eventdev_config->eventdev_id);
789 		if (ret < 0) {
790 			EH_LOG_ERR("Failed to start event device %d, %d",
791 				   i, ret);
792 			return ret;
793 		}
794 	}
795 	return 0;
796 }
797 
798 static int
799 eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
800 {
801 	struct rte_event_crypto_adapter_queue_conf queue_conf;
802 	struct rte_event_dev_info evdev_default_conf = {0};
803 	struct rte_event_port_conf port_conf = {0};
804 	struct eventdev_params *eventdev_config;
805 	char mp_name[RTE_MEMPOOL_NAMESIZE];
806 	const uint8_t nb_qp_per_cdev = 1;
807 	uint8_t eventdev_id, cdev_id, n;
808 	uint32_t cap, nb_elem;
809 	int ret, socket_id;
810 
811 	if (!em_conf->enable_event_crypto_adapter)
812 		return 0;
813 
814 	/*
815 	 * More than one eventdev is not supported;
816 	 * all event crypto adapters will be assigned to the same eventdev
817 	 */
818 	RTE_ASSERT(em_conf->nb_eventdev == 1);
819 
820 	/* Get event device configuration */
821 	eventdev_config = &(em_conf->eventdev_config[0]);
822 	eventdev_id = eventdev_config->eventdev_id;
823 
824 	n = rte_cryptodev_count();
825 
826 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
827 		/* Check event's crypto capabilities */
828 		ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap);
829 		if (ret < 0) {
830 			EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret);
831 			return ret;
832 		}
833 
834 		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
835 			EH_LOG_ERR("Event crypto adapter does not support forward mode!");
836 			return -EINVAL;
837 		}
838 
839 		/* Create event crypto adapter */
840 
841 		/* Get default configuration of event dev */
842 		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
843 		if (ret < 0) {
844 			EH_LOG_ERR("Failed to get event dev info %d", ret);
845 			return ret;
846 		}
847 
848 		/* Setup port conf */
849 		port_conf.new_event_threshold =
850 				evdev_default_conf.max_num_events;
851 		port_conf.dequeue_depth =
852 				evdev_default_conf.max_event_port_dequeue_depth;
853 		port_conf.enqueue_depth =
854 				evdev_default_conf.max_event_port_enqueue_depth;
855 
856 		/* Create adapter */
857 		ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id,
858 				&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
859 		if (ret < 0) {
860 			EH_LOG_ERR("Failed to create event crypto adapter %d", ret);
861 			return ret;
862 		}
863 
864 		memset(&queue_conf, 0, sizeof(queue_conf));
865 		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) &&
866 		    (em_conf->ext_params.event_vector)) {
867 			queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
868 			queue_conf.vector_sz = em_conf->ext_params.vector_size;
869 			/*
870 			 * Currently all sessions are configured with the same
871 			 * response info fields, so packets will be aggregated
872 			 * into the same vector. This allows us to size the
873 			 * vector pool to hold just all queue pair descriptors.
874 			 */
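			/*
			 * Worked example (assumed values): with qp_desc_nb =
			 * 2048 descriptors per queue pair and a vector size
			 * of 16, nb_elem = (2048 / 16) + 1 = 129 vectors per
			 * queue pair.
			 */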
875 			nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1;
876 			nb_elem *= nb_qp_per_cdev;
877 			socket_id = rte_cryptodev_socket_id(cdev_id);
878 			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
879 					"QP_VEC_%u_%u", socket_id, cdev_id);
880 			queue_conf.vector_mp = rte_event_vector_pool_create(
881 					mp_name, nb_elem, 0,
882 					queue_conf.vector_sz, socket_id);
883 			if (queue_conf.vector_mp == NULL) {
884 				EH_LOG_ERR("failed to create event vector pool");
885 				return -ENOMEM;
886 			}
887 		}
888 
889 		/* Add crypto queue pairs to event crypto adapter */
890 		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
891 				-1, /* adds all the pre configured queue pairs to the instance */
892 				&queue_conf);
893 		if (ret < 0) {
894 			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
895 			return ret;
896 		}
897 	}
898 
899 	return 0;
900 }
901 
902 static int
903 eh_start_crypto_adapter(struct eventmode_conf *em_conf)
904 {
905 	uint8_t cdev_id, n;
906 	int ret;
907 
908 	if (!em_conf->enable_event_crypto_adapter)
909 		return 0;
910 
911 	n = rte_cryptodev_count();
912 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
913 		ret = rte_event_crypto_adapter_start(cdev_id);
914 		if (ret < 0) {
915 			EH_LOG_ERR("Failed to start event crypto device %d (%d)",
916 					cdev_id, ret);
917 			return ret;
918 		}
919 	}
920 
921 	return 0;
922 }
923 
924 static int
925 eh_stop_crypto_adapter(struct eventmode_conf *em_conf)
926 {
927 	uint8_t cdev_id, n;
928 	int ret;
929 
930 	if (!em_conf->enable_event_crypto_adapter)
931 		return 0;
932 
933 	n = rte_cryptodev_count();
934 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
935 		ret = rte_event_crypto_adapter_stop(cdev_id);
936 		if (ret < 0) {
937 			EH_LOG_ERR("Failed to stop event crypto device %d (%d)",
938 					cdev_id, ret);
939 			return ret;
940 		}
941 	}
942 
943 	return 0;
944 }
945 
946 static int
947 eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
948 				uint8_t ev_dev_id, uint8_t ethdev_id)
949 {
950 	struct rte_event_eth_rx_adapter_vector_limits limits = {0};
951 	uint16_t vector_size = em_conf->ext_params.vector_size;
952 	int ret;
953 
954 	ret = rte_event_eth_rx_adapter_vector_limits_get(ev_dev_id, ethdev_id,
955 							 &limits);
956 	if (ret) {
957 		EH_LOG_ERR("failed to get vector limits");
958 		return ret;
959 	}
960 
961 	if (vector_size < limits.min_sz || vector_size > limits.max_sz) {
962 		EH_LOG_ERR("Vector size [%d] not within limits min[%d] max[%d]",
963 			   vector_size, limits.min_sz, limits.max_sz);
964 		return -EINVAL;
965 	}
966 
967 	if (limits.log2_sz && !rte_is_power_of_2(vector_size)) {
968 		EH_LOG_ERR("Vector size [%d] not power of 2", vector_size);
969 		return -EINVAL;
970 	}
971 
972 	if (em_conf->vector_tmo_ns > limits.max_timeout_ns ||
973 	    em_conf->vector_tmo_ns < limits.min_timeout_ns) {
974 		EH_LOG_ERR("Vector timeout [%" PRIu64
975 			   "] not within limits max[%" PRIu64
976 			   "] min[%" PRIu64 "]",
977 			   em_conf->vector_tmo_ns,
978 			   limits.max_timeout_ns,
979 			   limits.min_timeout_ns);
980 		return -EINVAL;
981 	}
982 	return 0;
983 }
984 
985 static int
986 eh_rx_adapter_configure(struct eventmode_conf *em_conf,
987 		struct rx_adapter_conf *adapter)
988 {
989 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
990 	struct rte_event_dev_info evdev_default_conf = {0};
991 	struct rte_event_port_conf port_conf = {0};
992 	struct rx_adapter_connection_info *conn;
993 	uint32_t service_id, socket_id, nb_elem;
994 	struct rte_mempool *vector_pool = NULL;
995 	uint32_t lcore_id = rte_lcore_id();
996 	int ret, portid, nb_ports = 0;
997 	uint8_t eventdev_id;
998 	int j;
999 
1000 	/* Get event dev ID */
1001 	eventdev_id = adapter->eventdev_id;
1002 
1003 	/* Get default configuration of event dev */
1004 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1005 	if (ret < 0) {
1006 		EH_LOG_ERR("Failed to get event dev info %d", ret);
1007 		return ret;
1008 	}
1009 
1010 	RTE_ETH_FOREACH_DEV(portid)
1011 		if ((em_conf->eth_portmask & (1 << portid)))
1012 			nb_ports++;
1013 
1014 	if (em_conf->ext_params.event_vector) {
1015 		socket_id = rte_lcore_to_socket_id(lcore_id);
1016 
1017 		if (em_conf->vector_pool_sz) {
1018 			nb_elem = em_conf->vector_pool_sz;
1019 		} else {
1020 			nb_elem = (nb_bufs_in_pool /
1021 				   em_conf->ext_params.vector_size) + 1;
1022 			if (per_port_pool)
1023 				nb_elem = nb_ports * nb_elem;
1024 			nb_elem = RTE_MAX(512U, nb_elem);
1025 		}
1026 		nb_elem += rte_lcore_count() * 32;
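		/*
		 * Worked example for the sizing above (assumed values):
		 * with nb_bufs_in_pool = 8192 mbufs and a vector size of
		 * 16, nb_elem = (8192 / 16) + 1 = 513 vectors, plus 32
		 * spare vectors per lcore.
		 */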
1027 		vector_pool = rte_event_vector_pool_create(
1028 			"vector_pool", nb_elem, 32,
1029 			em_conf->ext_params.vector_size, socket_id);
1030 		if (vector_pool == NULL) {
1031 			EH_LOG_ERR("failed to create event vector pool");
1032 			return -ENOMEM;
1033 		}
1034 	}
1035 	/* Setup port conf */
1036 	port_conf.new_event_threshold = 1200;
1037 	port_conf.dequeue_depth =
1038 			evdev_default_conf.max_event_port_dequeue_depth;
1039 	port_conf.enqueue_depth =
1040 			evdev_default_conf.max_event_port_enqueue_depth;
1041 
1042 	/* Create Rx adapter */
1043 	ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
1044 			adapter->eventdev_id, &port_conf);
1045 	if (ret < 0) {
1046 		EH_LOG_ERR("Failed to create rx adapter %d", ret);
1047 		return ret;
1048 	}
1049 
1050 	/* Setup various connections in the adapter */
1051 	for (j = 0; j < adapter->nb_connections; j++) {
1052 		/* Get connection */
1053 		conn = &(adapter->conn[j]);
1054 
1055 		/* Setup queue conf */
1056 		queue_conf.ev.queue_id = conn->eventq_id;
1057 		queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
1058 		queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
1059 
1060 		if (em_conf->ext_params.event_vector) {
1061 			ret = eh_event_vector_limits_validate(em_conf,
1062 							      eventdev_id,
1063 							      conn->ethdev_id);
1064 			if (ret)
1065 				return ret;
1066 
1067 			queue_conf.vector_sz = em_conf->ext_params.vector_size;
1068 			queue_conf.vector_timeout_ns = em_conf->vector_tmo_ns;
1069 			queue_conf.vector_mp = vector_pool;
1070 			queue_conf.rx_queue_flags =
1071 				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
1072 		}
1073 
1074 		/* Add queue to the adapter */
1075 		ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
1076 				conn->ethdev_id, conn->ethdev_rx_qid,
1077 				&queue_conf);
1078 		if (ret < 0) {
1079 			EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
1080 				   ret);
1081 			return ret;
1082 		}
1083 	}
1084 
1085 	/* Get the service ID used by rx adapter */
1086 	ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
1087 						      &service_id);
1088 	if (ret != -ESRCH && ret < 0) {
1089 		EH_LOG_ERR("Failed to get service id used by rx adapter %d",
1090 			   ret);
1091 		return ret;
1092 	}
1093 
	/* With an internal Rx port there is no service (-ESRCH above) */
	if (ret == 0)
		rte_service_set_runstate_mapped_check(service_id, 0);
1095 
1096 	/* Start adapter */
1097 	ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
1098 	if (ret < 0) {
1099 		EH_LOG_ERR("Failed to start rx adapter %d", ret);
1100 		return ret;
1101 	}
1102 
1103 	return 0;
1104 }
1105 
1106 static int
1107 eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
1108 {
1109 	struct rx_adapter_conf *adapter;
1110 	int i, ret;
1111 
1112 	/* Configure rx adapters */
1113 	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
1114 		adapter = &(em_conf->rx_adapter[i]);
1115 		ret = eh_rx_adapter_configure(em_conf, adapter);
1116 		if (ret < 0) {
1117 			EH_LOG_ERR("Failed to configure rx adapter %d", ret);
1118 			return ret;
1119 		}
1120 	}
1121 	return 0;
1122 }
1123 
1124 static int32_t
1125 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
1126 {
1127 	uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
1128 	struct rx_adapter_conf *rx_adapter;
1129 	struct tx_adapter_conf *tx_adapter;
1130 	int service_count = 0;
1131 	int adapter_id;
1132 	int32_t ret;
1133 	int i;
1134 
1135 	EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);
1136 
1137 	/*
1138 	 * Parse adapter config to check which of the Rx adapters need
1139 	 * to be handled by this core.
1140 	 */
1141 	for (i = 0; i < conf->nb_rx_adapter; i++) {
1142 		/* Check if we have reached the max allowed */
1143 		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
1144 			EH_LOG_ERR(
1145 			      "Exceeded the max allowed adapters per rx core");
1146 			break;
1147 		}
1148 
1149 		rx_adapter = &(conf->rx_adapter[i]);
1150 		if (rx_adapter->rx_core_id != lcore_id)
1151 			continue;
1152 
1153 		/* Adapter is handled by this core */
1154 		adapter_id = rx_adapter->adapter_id;
1155 
1156 		/* Get the service ID for the adapters */
1157 		ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
1158 				&(service_id[service_count]));
1159 
1160 		if (ret != -ESRCH && ret < 0) {
1161 			EH_LOG_ERR(
1162 				"Failed to get service id used by rx adapter");
1163 			return ret;
1164 		}
1165 
1166 		/* Update service count */
1167 		service_count++;
1168 	}
1169 
1170 	/*
1171 	 * Parse adapter config to see which of the Tx adapters need
1172 	 * to be handled by this core.
1173 	 */
1174 	for (i = 0; i < conf->nb_tx_adapter; i++) {
1175 		/* Check if we have reached the max allowed */
1176 		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
1177 			EH_LOG_ERR(
1178 				"Exceeded the max allowed adapters per tx core");
1179 			break;
1180 		}
1181 
1182 		tx_adapter = &conf->tx_adapter[i];
1183 		if (tx_adapter->tx_core_id != lcore_id)
1184 			continue;
1185 
1186 		/* Adapter is handled by this core */
1187 		adapter_id = tx_adapter->adapter_id;
1188 
1189 		/* Get the service ID for the adapters */
1190 		ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
1191 				&(service_id[service_count]));
1192 
1193 		if (ret != -ESRCH && ret < 0) {
1194 			EH_LOG_ERR(
1195 				"Failed to get service id used by tx adapter");
1196 			return ret;
1197 		}
1198 
1199 		/* Update service count */
1200 		service_count++;
1201 	}
1202 
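	/*
	 * Service loop: keep running each mapped adapter service on this
	 * lcore until eh_stop_worker_eth_core() clears eth_core_running.
	 */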
1203 	eth_core_running = true;
1204 
1205 	while (eth_core_running) {
1206 		for (i = 0; i < service_count; i++) {
1207 			/* Initiate adapter service */
1208 			rte_service_run_iter_on_app_lcore(service_id[i], 0);
1209 		}
1210 	}
1211 
1212 	return 0;
1213 }
1214 
1215 static int32_t
1216 eh_stop_worker_eth_core(void)
1217 {
1218 	if (eth_core_running) {
1219 		EH_LOG_INFO("Stopping eth cores");
1220 		eth_core_running = false;
1221 	}
1222 	return 0;
1223 }
1224 
1225 static struct eh_app_worker_params *
1226 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
1227 		struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
1228 {
1229 	struct eh_app_worker_params curr_conf = { {{0} }, NULL};
1230 	struct eh_event_link_info *link = NULL;
1231 	struct eh_app_worker_params *tmp_wrkr;
1232 	struct eventmode_conf *em_conf;
1233 	uint8_t eventdev_id;
1234 	int i;
1235 
1236 	/* Get eventmode config */
1237 	em_conf = conf->mode_params;
1238 
1239 	/*
1240 	 * Use event device from the first lcore-event link.
1241 	 *
1242 	 * Assumption: All lcore-event links tied to a core are using the
1243 	 * same event device. In other words, one core would be polling on
1244 	 * queues of a single event device only.
1245 	 */
1246 
1247 	/* Get a link for this lcore */
1248 	for (i = 0; i < em_conf->nb_link; i++) {
1249 		link = &(em_conf->link[i]);
1250 		if (link->lcore_id == lcore_id)
1251 			break;
1252 	}
1253 
1254 	if (i == em_conf->nb_link) {
1255 		EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
1256 		return NULL;
1257 	}
1258 
1259 	/* Get event dev ID */
1260 	eventdev_id = link->eventdev_id;
1261 
1262 	/* Populate the curr_conf with the capabilities */
1263 
1264 	/* Check for Tx internal port */
1265 	if (eh_dev_has_tx_internal_port(eventdev_id))
1266 		curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1267 	else
1268 		curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
1269 
1270 	/* Check for burst mode */
1271 	if (eh_dev_has_burst_mode(eventdev_id))
1272 		curr_conf.cap.burst = EH_RX_TYPE_BURST;
1273 	else
1274 		curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;
1275 
1276 	curr_conf.cap.ipsec_mode = conf->ipsec_mode;
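	/*
	 * Note: the worker match below is an exact compare of the packed
	 * capability word (cap.u64), so a registered worker must specify
	 * burst mode, Tx port type and IPsec mode exactly as detected.
	 */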
1277 
1278 	/* Parse the passed list and see if we have matching capabilities */
1279 
1280 	/* Initialize the pointer used to traverse the list */
1281 	tmp_wrkr = app_wrkrs;
1282 
1283 	for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {
1284 
1285 		/* Skip this if capabilities are not matching */
1286 		if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
1287 			continue;
1288 
1289 		/* If the checks pass, we have a match */
1290 		return tmp_wrkr;
1291 	}
1292 
1293 	return NULL;
1294 }
1295 
1296 static int
1297 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
1298 {
1299 	/* Verify registered worker */
1300 	if (match_wrkr->worker_thread == NULL) {
1301 		EH_LOG_ERR("No worker registered");
1302 		return 0;
1303 	}
1304 
1305 	/* Success */
1306 	return 1;
1307 }
1308 
1309 static uint8_t
1310 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
1311 		struct eh_event_link_info **links)
1312 {
1313 	struct eh_event_link_info *link_cache;
1314 	struct eventmode_conf *em_conf = NULL;
1315 	struct eh_event_link_info *link;
1316 	uint8_t lcore_nb_link = 0;
1317 	size_t single_link_size;
1318 	size_t cache_size;
1319 	int index = 0;
1320 	int i;
1321 
1322 	if (conf == NULL || links == NULL) {
1323 		EH_LOG_ERR("Invalid args");
1324 		return -EINVAL;
1325 	}
1326 
1327 	/* Get eventmode conf */
1328 	em_conf = conf->mode_params;
1329 
1330 	if (em_conf == NULL) {
1331 		EH_LOG_ERR("Invalid event mode parameters");
1332 		return -EINVAL;
1333 	}
1334 
1335 	/* Get the number of links registered */
1336 	for (i = 0; i < em_conf->nb_link; i++) {
1337 
1338 		/* Get link */
1339 		link = &(em_conf->link[i]);
1340 
1341 		/* Check if we have link intended for this lcore */
1342 		if (link->lcore_id == lcore_id) {
1343 
1344 			/* Update the number of links for this core */
1345 			lcore_nb_link++;
1346 
1347 		}
1348 	}
1349 
1350 	/* Compute size of one entry to be copied */
1351 	single_link_size = sizeof(struct eh_event_link_info);
1352 
1353 	/* Compute size of the buffer required */
1354 	cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);
1355 
1356 	/* Allocate memory for the link cache */
	link_cache = calloc(1, cache_size);
	if (link_cache == NULL) {
		EH_LOG_ERR("Failed to allocate memory for links");
		return 0;
	}
1358 
1359 	/* Get the number of links registered */
1360 	for (i = 0; i < em_conf->nb_link; i++) {
1361 
1362 		/* Get link */
1363 		link = &(em_conf->link[i]);
1364 
1365 		/* Check if we have link intended for this lcore */
1366 		if (link->lcore_id == lcore_id) {
1367 
1368 			/* Cache the link */
1369 			memcpy(&link_cache[index], link, single_link_size);
1370 
1371 			/* Update index */
1372 			index++;
1373 		}
1374 	}
1375 
1376 	/* Update the links for application to use the cached links */
1377 	*links = link_cache;
1378 
1379 	/* Return the number of cached links */
1380 	return lcore_nb_link;
1381 }
1382 
1383 static int
1384 eh_tx_adapter_configure(struct eventmode_conf *em_conf,
1385 		struct tx_adapter_conf *adapter)
1386 {
1387 	struct rte_event_dev_info evdev_default_conf = {0};
1388 	struct rte_event_port_conf port_conf = {0};
1389 	struct tx_adapter_connection_info *conn;
1390 	struct eventdev_params *eventdev_config;
1391 	uint8_t tx_port_id = 0;
1392 	uint8_t eventdev_id;
1393 	uint32_t service_id;
1394 	int ret, j;
1395 
1396 	/* Get event dev ID */
1397 	eventdev_id = adapter->eventdev_id;
1398 
1399 	/* Get event device conf */
1400 	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
1401 
1402 	/* Create Tx adapter */
1403 
1404 	/* Get default configuration of event dev */
1405 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1406 	if (ret < 0) {
1407 		EH_LOG_ERR("Failed to get event dev info %d", ret);
1408 		return ret;
1409 	}
1410 
1411 	/* Setup port conf */
1412 	port_conf.new_event_threshold =
1413 			evdev_default_conf.max_num_events;
1414 	port_conf.dequeue_depth =
1415 			evdev_default_conf.max_event_port_dequeue_depth;
1416 	port_conf.enqueue_depth =
1417 			evdev_default_conf.max_event_port_enqueue_depth;
1418 
1419 	/* Create adapter */
1420 	ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
1421 			adapter->eventdev_id, &port_conf);
1422 	if (ret < 0) {
1423 		EH_LOG_ERR("Failed to create tx adapter %d", ret);
1424 		return ret;
1425 	}
1426 
1427 	/* Setup various connections in the adapter */
1428 	for (j = 0; j < adapter->nb_connections; j++) {
1429 
1430 		/* Get connection */
1431 		conn = &(adapter->conn[j]);
1432 
1433 		/* Add queue to the adapter */
1434 		ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
1435 				conn->ethdev_id, conn->ethdev_tx_qid);
1436 		if (ret < 0) {
1437 			EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
1438 				   ret);
1439 			return ret;
1440 		}
1441 	}
1442 
1443 	/*
1444 	 * Check if Tx core is assigned. If Tx core is not assigned then
1445 	 * the adapter has an internal port for submitting Tx packets and
1446 	 * Tx event queue & port setup is not required
1447 	 */
1448 	if (adapter->tx_core_id == (uint32_t) (-1)) {
1449 		/* Internal port is present */
1450 		goto skip_tx_queue_port_setup;
1451 	}
1452 
1453 	/* Setup Tx queue & port */
1454 
1455 	/* Get event port used by the adapter */
1456 	ret = rte_event_eth_tx_adapter_event_port_get(
1457 			adapter->adapter_id, &tx_port_id);
1458 	if (ret) {
1459 		EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
1460 		return ret;
1461 	}
1462 
1463 	/*
1464 	 * Tx event queue is reserved for Tx adapter. Unlink this queue
1465 	 * from all other ports
1466 	 *
1467 	 */
1468 	for (j = 0; j < eventdev_config->nb_eventport; j++) {
1469 		rte_event_port_unlink(eventdev_id, j,
1470 				      &(adapter->tx_ev_queue), 1);
1471 	}
1472 
1473 	/* Link Tx event queue to Tx port */
1474 	ret = rte_event_port_link(eventdev_id, tx_port_id,
1475 			&(adapter->tx_ev_queue), NULL, 1);
1476 	if (ret != 1) {
1477 		EH_LOG_ERR("Failed to link event queue to port");
1478 		return ret;
1479 	}
1480 
1481 	/* Get the service ID used by Tx adapter */
1482 	ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
1483 						      &service_id);
1484 	if (ret != -ESRCH && ret < 0) {
1485 		EH_LOG_ERR("Failed to get service id used by tx adapter %d",
1486 			   ret);
1487 		return ret;
1488 	}
1489 
	/* With an internal Tx port there is no service (-ESRCH above) */
	if (ret == 0)
		rte_service_set_runstate_mapped_check(service_id, 0);
1491 
1492 skip_tx_queue_port_setup:
1493 	/* Start adapter */
1494 	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
1495 	if (ret < 0) {
1496 		EH_LOG_ERR("Failed to start tx adapter %d", ret);
1497 		return ret;
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 static int
1504 eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
1505 {
1506 	struct tx_adapter_conf *adapter;
1507 	int i, ret;
1508 
1509 	/* Configure Tx adapters */
1510 	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
1511 		adapter = &(em_conf->tx_adapter[i]);
1512 		ret = eh_tx_adapter_configure(em_conf, adapter);
1513 		if (ret < 0) {
1514 			EH_LOG_ERR("Failed to configure tx adapter %d", ret);
1515 			return ret;
1516 		}
1517 	}
1518 	return 0;
1519 }
1520 
1521 static void
1522 eh_display_operating_mode(struct eventmode_conf *em_conf)
1523 {
1524 	char sched_types[][32] = {
1525 		"RTE_SCHED_TYPE_ORDERED",
1526 		"RTE_SCHED_TYPE_ATOMIC",
1527 		"RTE_SCHED_TYPE_PARALLEL",
1528 	};
1529 	EH_LOG_INFO("Operating mode:");
1530 
1531 	EH_LOG_INFO("\tScheduling type: \t%s",
1532 		sched_types[em_conf->ext_params.sched_type]);
1533 
1534 	EH_LOG_INFO("");
1535 }
1536 
1537 static void
1538 eh_display_event_dev_conf(struct eventmode_conf *em_conf)
1539 {
1540 	char queue_mode[][32] = {
1541 		"",
1542 		"ATQ (ALL TYPE QUEUE)",
1543 		"SINGLE LINK",
1544 	};
1545 	char print_buf[256] = { 0 };
1546 	int i;
1547 
1548 	EH_LOG_INFO("Event Device Configuration:");
1549 
1550 	for (i = 0; i < em_conf->nb_eventdev; i++) {
1551 		sprintf(print_buf,
1552 			"\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
1553 			em_conf->eventdev_config[i].eventdev_id,
1554 			em_conf->eventdev_config[i].nb_eventqueue,
1555 			em_conf->eventdev_config[i].nb_eventport);
1556 		sprintf(print_buf + strlen(print_buf),
1557 			"\tQueue mode: %s",
1558 			queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
1559 		EH_LOG_INFO("%s", print_buf);
1560 	}
1561 	EH_LOG_INFO("");
1562 }
1563 
1564 static void
1565 eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
1566 {
1567 	int nb_rx_adapter = em_conf->nb_rx_adapter;
1568 	struct rx_adapter_connection_info *conn;
1569 	struct rx_adapter_conf *adapter;
1570 	char print_buf[256] = { 0 };
1571 	int i, j;
1572 
1573 	EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);
1574 
1575 	for (i = 0; i < nb_rx_adapter; i++) {
1576 		adapter = &(em_conf->rx_adapter[i]);
1577 		sprintf(print_buf,
1578 			"\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1579 			adapter->adapter_id,
1580 			adapter->nb_connections,
1581 			adapter->eventdev_id);
1582 		if (adapter->rx_core_id == (uint32_t)-1)
1583 			sprintf(print_buf + strlen(print_buf),
1584 				"\tRx core: %-2s", "[INTERNAL PORT]");
1585 		else if (adapter->rx_core_id == RTE_MAX_LCORE)
1586 			sprintf(print_buf + strlen(print_buf),
1587 				"\tRx core: %-2s", "[NONE]");
1588 		else
1589 			sprintf(print_buf + strlen(print_buf),
1590 				"\tRx core: %-2d", adapter->rx_core_id);
1591 
1592 		EH_LOG_INFO("%s", print_buf);
1593 
1594 		for (j = 0; j < adapter->nb_connections; j++) {
1595 			conn = &(adapter->conn[j]);
1596 
1597 			sprintf(print_buf,
1598 				"\t\tEthdev ID: %-2d", conn->ethdev_id);
1599 
1600 			if (conn->ethdev_rx_qid == -1)
1601 				sprintf(print_buf + strlen(print_buf),
1602 					"\tEth rx queue: %-2s", "ALL");
1603 			else
1604 				sprintf(print_buf + strlen(print_buf),
1605 					"\tEth rx queue: %-2d",
1606 					conn->ethdev_rx_qid);
1607 
1608 			sprintf(print_buf + strlen(print_buf),
1609 				"\tEvent queue: %-2d", conn->eventq_id);
1610 			EH_LOG_INFO("%s", print_buf);
1611 		}
1612 	}
1613 	EH_LOG_INFO("");
1614 }
1615 
1616 static void
1617 eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
1618 {
1619 	int nb_tx_adapter = em_conf->nb_tx_adapter;
1620 	struct tx_adapter_connection_info *conn;
1621 	struct tx_adapter_conf *adapter;
1622 	char print_buf[256] = { 0 };
1623 	int i, j;
1624 
1625 	EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);
1626 
1627 	for (i = 0; i < nb_tx_adapter; i++) {
1628 		adapter = &(em_conf->tx_adapter[i]);
1629 		sprintf(print_buf,
1630 			"\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
1631 			adapter->adapter_id,
1632 			adapter->nb_connections,
1633 			adapter->eventdev_id);
1634 		if (adapter->tx_core_id == (uint32_t)-1)
1635 			sprintf(print_buf + strlen(print_buf),
1636 				"\tTx core: %-2s", "[INTERNAL PORT]");
1637 		else if (adapter->tx_core_id == RTE_MAX_LCORE)
1638 			sprintf(print_buf + strlen(print_buf),
1639 				"\tTx core: %-2s", "[NONE]");
1640 		else
1641 			sprintf(print_buf + strlen(print_buf),
1642 				"\tTx core: %-2d,\tInput event queue: %-2d",
1643 				adapter->tx_core_id, adapter->tx_ev_queue);
1644 
1645 		EH_LOG_INFO("%s", print_buf);
1646 
1647 		for (j = 0; j < adapter->nb_connections; j++) {
1648 			conn = &(adapter->conn[j]);
1649 
1650 			sprintf(print_buf,
1651 				"\t\tEthdev ID: %-2d", conn->ethdev_id);
1652 
1653 			if (conn->ethdev_tx_qid == -1)
1654 				sprintf(print_buf + strlen(print_buf),
1655 					"\tEth tx queue: %-2s", "ALL");
1656 			else
1657 				sprintf(print_buf + strlen(print_buf),
1658 					"\tEth tx queue: %-2d",
1659 					conn->ethdev_tx_qid);
1660 			EH_LOG_INFO("%s", print_buf);
1661 		}
1662 	}
1663 	EH_LOG_INFO("");
1664 }
1665 
1666 static void
1667 eh_display_link_conf(struct eventmode_conf *em_conf)
1668 {
1669 	struct eh_event_link_info *link;
1670 	char print_buf[256] = { 0 };
1671 	int i;
1672 
1673 	EH_LOG_INFO("Links configured: %d", em_conf->nb_link);
1674 
1675 	for (i = 0; i < em_conf->nb_link; i++) {
1676 		link = &(em_conf->link[i]);
1677 
1678 		sprintf(print_buf,
1679 			"\tEvent dev ID: %-2d\tEvent port: %-2d",
1680 			link->eventdev_id,
1681 			link->event_port_id);
1682 
1683 		if (em_conf->ext_params.all_ev_queue_to_ev_port)
1684 			sprintf(print_buf + strlen(print_buf),
1685 				"Event queue: %-2s\t", "ALL");
1686 		else
1687 			sprintf(print_buf + strlen(print_buf),
1688 				"Event queue: %-2d\t", link->eventq_id);
1689 
1690 		sprintf(print_buf + strlen(print_buf),
1691 			"Lcore: %-2d", link->lcore_id);
1692 		EH_LOG_INFO("%s", print_buf);
1693 	}
1694 	EH_LOG_INFO("");
1695 }
1696 
1697 struct eh_conf *
1698 eh_conf_init(void)
1699 {
1700 	struct eventmode_conf *em_conf = NULL;
1701 	struct eh_conf *conf = NULL;
1702 	unsigned int eth_core_id;
1703 	void *bitmap = NULL;
1704 	uint32_t nb_bytes;
1705 
1706 	/* Allocate memory for config */
1707 	conf = calloc(1, sizeof(struct eh_conf));
1708 	if (conf == NULL) {
1709 		EH_LOG_ERR("Failed to allocate memory for eventmode helper "
1710 			   "config");
1711 		return NULL;
1712 	}
1713 
1714 	/* Set default conf */
1715 
1716 	/* Packet transfer mode: poll */
1717 	conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1718 	conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1719 
1720 	/* Keep all ethernet ports enabled by default */
1721 	conf->eth_portmask = -1;
1722 
1723 	/* Allocate memory for event mode params */
1724 	conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
1725 	if (conf->mode_params == NULL) {
1726 		EH_LOG_ERR("Failed to allocate memory for event mode params");
1727 		goto free_conf;
1728 	}
1729 
1730 	/* Get eventmode conf */
1731 	em_conf = conf->mode_params;
1732 
1733 	/* Allocate and initialize bitmap for eth cores */
1734 	nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
1735 	if (!nb_bytes) {
1736 		EH_LOG_ERR("Failed to get bitmap footprint");
1737 		goto free_em_conf;
1738 	}
1739 
1740 	bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
1741 			     RTE_CACHE_LINE_SIZE);
1742 	if (!bitmap) {
1743 		EH_LOG_ERR("Failed to allocate memory for eth cores bitmap");
1744 		goto free_em_conf;
1745 	}
1746 
1747 	em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
1748 						 nb_bytes);
1749 	if (!em_conf->eth_core_mask) {
1750 		EH_LOG_ERR("Failed to initialize bitmap");
1751 		goto free_bitmap;
1752 	}
1753 
1754 	/* Set schedule type as not set */
1755 	em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;
1756 
1757 	/* Set two cores as eth cores for Rx & Tx */
1758 
1759 	/* Use first core other than main core as Rx core */
1760 	eth_core_id = rte_get_next_lcore(0,	/* curr core */
1761 					 1,	/* skip main core */
1762 					 0	/* wrap */);
1763 
1764 	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
1765 
1766 	/* Use next core as Tx core */
1767 	eth_core_id = rte_get_next_lcore(eth_core_id,	/* curr core */
1768 					 1,		/* skip main core */
1769 					 0		/* wrap */);
1770 
1771 	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
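	/*
	 * Illustration (assumed EAL config): with lcores 0-3 enabled and
	 * lcore 0 as the main core, lcores 1 and 2 get marked as the Rx
	 * and Tx eth cores respectively.
	 */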
1772 
1773 	em_conf->ext_params.vector_size = DEFAULT_VECTOR_SIZE;
1774 	em_conf->vector_tmo_ns = DEFAULT_VECTOR_TMO;
1775 
1776 	return conf;
1777 
1778 free_bitmap:
1779 	rte_free(bitmap);
1780 free_em_conf:
1781 	free(em_conf);
1782 free_conf:
1783 	free(conf);
1784 	return NULL;
1785 }
1786 
1787 void
1788 eh_conf_uninit(struct eh_conf *conf)
1789 {
1790 	struct eventmode_conf *em_conf = NULL;
1791 
1792 	if (!conf || !conf->mode_params)
1793 		return;
1794 
1795 	/* Get eventmode conf */
1796 	em_conf = conf->mode_params;
1797 
1798 	/* Free evenmode configuration memory */
1799 	/* Free eventmode configuration memory */
1800 	free(em_conf);
1801 	free(conf);
1802 }
1803 
1804 void
1805 eh_display_conf(struct eh_conf *conf)
1806 {
1807 	struct eventmode_conf *em_conf;
1808 
1809 	if (conf == NULL) {
1810 		EH_LOG_ERR("Invalid event helper configuration");
1811 		return;
1812 	}
1813 
1814 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1815 		return;
1816 
1817 	if (conf->mode_params == NULL) {
1818 		EH_LOG_ERR("Invalid event mode parameters");
1819 		return;
1820 	}
1821 
1822 	/* Get eventmode conf */
1823 	em_conf = (struct eventmode_conf *)(conf->mode_params);
1824 
1825 	/* Display user exposed operating modes */
1826 	eh_display_operating_mode(em_conf);
1827 
1828 	/* Display event device conf */
1829 	eh_display_event_dev_conf(em_conf);
1830 
1831 	/* Display Rx adapter conf */
1832 	eh_display_rx_adapter_conf(em_conf);
1833 
1834 	/* Display Tx adapter conf */
1835 	eh_display_tx_adapter_conf(em_conf);
1836 
1837 	/* Display event-lcore link */
1838 	eh_display_link_conf(em_conf);
1839 }
1840 
1841 int32_t
1842 eh_devs_init(struct eh_conf *conf)
1843 {
1844 	struct eventmode_conf *em_conf;
1845 	uint16_t port_id;
1846 	int ret;
1847 
1848 	if (conf == NULL) {
1849 		EH_LOG_ERR("Invalid event helper configuration");
1850 		return -EINVAL;
1851 	}
1852 
1853 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1854 		return 0;
1855 
1856 	if (conf->mode_params == NULL) {
1857 		EH_LOG_ERR("Invalid event mode parameters");
1858 		return -EINVAL;
1859 	}
1860 
1861 	/* Get eventmode conf */
1862 	em_conf = conf->mode_params;
1863 
1864 	/* Eventmode conf needs the eth portmask */
1865 	em_conf->eth_portmask = conf->eth_portmask;
1866 
1867 	/* Validate the requested config */
1868 	ret = eh_validate_conf(em_conf);
1869 	if (ret < 0) {
1870 		EH_LOG_ERR("Failed to validate the requested config %d", ret);
1871 		return ret;
1872 	}
1873 
1874 	/* Display the current configuration */
1875 	eh_display_conf(conf);
1876 
1877 	/* Stop eth devices before setting up adapter */
1878 	RTE_ETH_FOREACH_DEV(port_id) {
1879 
1880 		/* Use only the ports enabled */
1881 		if ((conf->eth_portmask & (1 << port_id)) == 0)
1882 			continue;
1883 
1884 		ret = rte_eth_dev_stop(port_id);
1885 		if (ret != 0) {
1886 			EH_LOG_ERR("Failed to stop port %u, err: %d",
1887 					port_id, ret);
1888 			return ret;
1889 		}
1890 	}
1891 
1892 	/* Setup eventdev */
1893 	ret = eh_initialize_eventdev(em_conf);
1894 	if (ret < 0) {
1895 		EH_LOG_ERR("Failed to initialize event dev %d", ret);
1896 		return ret;
1897 	}
1898 
1899 	/* Setup event crypto adapter */
1900 	ret = eh_initialize_crypto_adapter(em_conf);
1901 	if (ret < 0) {
1902 		EH_LOG_ERR("Failed to initialize event crypto adapter %d", ret);
1903 		return ret;
1904 	}
1905 
1906 	/* Setup Rx adapter */
1907 	ret = eh_initialize_rx_adapter(em_conf);
1908 	if (ret < 0) {
1909 		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
1910 		return ret;
1911 	}
1912 
1913 	/* Setup Tx adapter */
1914 	ret = eh_initialize_tx_adapter(em_conf);
1915 	if (ret < 0) {
1916 		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
1917 		return ret;
1918 	}
1919 
1920 	/* Start eventdev */
1921 	ret = eh_start_eventdev(em_conf);
1922 	if (ret < 0) {
1923 		EH_LOG_ERR("Failed to start event dev %d", ret);
1924 		return ret;
1925 	}
1926 
1927 	/* Start event crypto adapter */
1928 	ret = eh_start_crypto_adapter(em_conf);
1929 	if (ret < 0) {
1930 		EH_LOG_ERR("Failed to start event crypto adapter %d", ret);
1931 		return ret;
1932 	}
1933 
1934 
1935 	/* Start eth devices after setting up adapter */
1936 	RTE_ETH_FOREACH_DEV(port_id) {
1937 
1938 		/* Use only the ports enabled */
1939 		if ((conf->eth_portmask & (1 << port_id)) == 0)
1940 			continue;
1941 
1942 		ret = rte_eth_dev_start(port_id);
1943 		if (ret < 0) {
1944 			EH_LOG_ERR("Failed to start eth dev %d, %d",
1945 				   port_id, ret);
1946 			return ret;
1947 		}
1948 	}
1949 
1950 	return 0;
1951 }
1952 
1953 int32_t
1954 eh_devs_uninit(struct eh_conf *conf)
1955 {
1956 	struct eventmode_conf *em_conf;
1957 	int ret, i, j;
1958 	uint16_t id;
1959 
1960 	if (conf == NULL) {
1961 		EH_LOG_ERR("Invalid event helper configuration");
1962 		return -EINVAL;
1963 	}
1964 
1965 	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
1966 		return 0;
1967 
1968 	if (conf->mode_params == NULL) {
1969 		EH_LOG_ERR("Invalid event mode parameters");
1970 		return -EINVAL;
1971 	}
1972 
1973 	/* Get eventmode conf */
1974 	em_conf = conf->mode_params;
1975 
1976 	/* Stop and release rx adapters */
1977 	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
1978 
1979 		id = em_conf->rx_adapter[i].adapter_id;
1980 		ret = rte_event_eth_rx_adapter_stop(id);
1981 		if (ret < 0) {
1982 			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
1983 			return ret;
1984 		}
1985 
1986 		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {
1987 
1988 			ret = rte_event_eth_rx_adapter_queue_del(id,
1989 				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
1990 			if (ret < 0) {
1991 				EH_LOG_ERR(
1992 				       "Failed to remove rx adapter queues %d",
1993 				       ret);
1994 				return ret;
1995 			}
1996 		}
1997 
1998 		ret = rte_event_eth_rx_adapter_free(id);
1999 		if (ret < 0) {
2000 			EH_LOG_ERR("Failed to free rx adapter %d", ret);
2001 			return ret;
2002 		}
2003 	}
2004 
2005 	/* Stop event crypto adapter */
2006 	ret = eh_stop_crypto_adapter(em_conf);
2007 	if (ret < 0) {
2008 		EH_LOG_ERR("Failed to stop event crypto adapter %d", ret);
2009 		return ret;
2010 	}
2011 
2012 	/* Stop and release event devices */
2013 	for (i = 0; i < em_conf->nb_eventdev; i++) {
2014 
2015 		id = em_conf->eventdev_config[i].eventdev_id;
2016 		rte_event_dev_stop(id);
2017 
2018 		ret = rte_event_dev_close(id);
2019 		if (ret < 0) {
2020 			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
2021 			return ret;
2022 		}
2023 	}
2024 
2025 	/* Stop and release tx adapters */
2026 	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
2027 
2028 		id = em_conf->tx_adapter[i].adapter_id;
2029 		ret = rte_event_eth_tx_adapter_stop(id);
2030 		if (ret < 0) {
2031 			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
2032 			return ret;
2033 		}
2034 
2035 		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {
2036 
2037 			ret = rte_event_eth_tx_adapter_queue_del(id,
2038 				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
2039 			if (ret < 0) {
2040 				EH_LOG_ERR(
2041 					"Failed to remove tx adapter queues %d",
2042 					ret);
2043 				return ret;
2044 			}
2045 		}
2046 
2047 		ret = rte_event_eth_tx_adapter_free(id);
2048 		if (ret < 0) {
2049 			EH_LOG_ERR("Failed to free tx adapter %d", ret);
2050 			return ret;
2051 		}
2052 	}
2053 
2054 	return 0;
2055 }
2056 
2057 void
2058 eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
2059 		uint8_t nb_wrkr_param)
2060 {
2061 	struct eh_app_worker_params *match_wrkr;
2062 	struct eh_event_link_info *links = NULL;
2063 	struct eventmode_conf *em_conf;
2064 	uint32_t lcore_id;
2065 	uint8_t nb_links;
2066 
2067 	if (conf == NULL) {
2068 		EH_LOG_ERR("Invalid event helper configuration");
2069 		return;
2070 	}
2071 
2072 	if (conf->mode_params == NULL) {
2073 		EH_LOG_ERR("Invalid event mode parameters");
2074 		return;
2075 	}
2076 
2077 	/* Get eventmode conf */
2078 	em_conf = conf->mode_params;
2079 
2080 	/* Get core ID */
2081 	lcore_id = rte_lcore_id();
2082 
2083 	/* Check if this is eth core */
2084 	if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
2085 		eh_start_worker_eth_core(em_conf, lcore_id);
2086 		return;
2087 	}
2088 
2089 	if (app_wrkr == NULL || nb_wrkr_param == 0) {
2090 		EH_LOG_ERR("Invalid args");
2091 		return;
2092 	}
2093 
2094 	/*
2095 	 * This is a regular worker thread. The application registers
2096 	 * multiple workers with various capabilities. Run worker
2097 	 * based on the selected capabilities of the event
2098 	 * device configured.
2099 	 */
2100 
2101 	/* Get the first matching worker for the event device */
2102 	match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
2103 	if (match_wrkr == NULL) {
2104 		EH_LOG_ERR("Failed to match worker registered for lcore %d",
2105 			   lcore_id);
2106 		goto clean_and_exit;
2107 	}
2108 
2109 	/* Verify sanity of the matched worker */
2110 	if (eh_verify_match_worker(match_wrkr) != 1) {
2111 		EH_LOG_ERR("Failed to validate the matched worker");
2112 		goto clean_and_exit;
2113 	}
2114 
2115 	/* Get worker links */
2116 	nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);
2117 
2118 	/* Launch the worker thread */
2119 	match_wrkr->worker_thread(links, nb_links);
2120 
2121 	/* Free links info memory */
2122 	free(links);
2123 
2124 clean_and_exit:
2125 
2126 	/* Flag eth_cores to stop, if started */
2127 	eh_stop_worker_eth_core();
2128 }
2129 
2130 uint8_t
2131 eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
2132 {
2133 	struct eventdev_params *eventdev_config;
2134 	struct eventmode_conf *em_conf;
2135 
2136 	if (conf == NULL) {
2137 		EH_LOG_ERR("Invalid event helper configuration");
2138 		return -EINVAL;
2139 	}
2140 
2141 	if (conf->mode_params == NULL) {
2142 		EH_LOG_ERR("Invalid event mode parameters");
2143 		return -EINVAL;
2144 	}
2145 
2146 	/* Get eventmode conf */
2147 	em_conf = conf->mode_params;
2148 
2149 	/* Get event device conf */
2150 	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
2151 
2152 	if (eventdev_config == NULL) {
2153 		EH_LOG_ERR("Failed to read eventdev config");
2154 		return -EINVAL;
2155 	}
2156 
2157 	/*
2158 	 * The last queue is reserved to be used as an atomic queue for the
2159 	 * last stage (eth packet tx stage)
2160 	 */
2161 	return eventdev_config->nb_eventqueue - 1;
2162 }
2163