/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_bitmap.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_malloc.h>
#include <stdbool.h>

#include "event_helper.h"
#include "ipsec-secgw.h"

#define DEFAULT_VECTOR_SIZE  16
#define DEFAULT_VECTOR_TMO   102400
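
/*
 * Default event vector parameters: up to 16 packets per vector and a
 * 102400 ns (102.4 us) flush timeout for partial vectors. The timeout
 * is consumed as vector_tmo_ns, i.e. in nanoseconds.
 */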

static volatile bool eth_core_running;

static int
eh_get_enabled_cores(struct rte_bitmap *eth_core_mask)
{
	int i, count = 0;

	RTE_LCORE_FOREACH(i) {
		/* Check if this core is enabled in core mask */
		if (rte_bitmap_get(eth_core_mask, i)) {
			/* Found enabled core */
			count++;
		}
	}
	return count;
}

static inline unsigned int
eh_get_next_eth_core(struct eventmode_conf *em_conf)
{
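	/*
	 * Round-robin over the configured eth cores: the static cursor
	 * resumes the scan after the core returned by the previous call
	 * and wraps around at the end of the lcore list.
	 */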
	static unsigned int prev_core = -1;
	unsigned int next_core;

	/*
	 * Make sure we have at least one eth core running, else the following
	 * logic would lead to an infinite loop.
	 */
	if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) {
		EH_LOG_ERR("No enabled eth core found");
		return RTE_MAX_LCORE;
	}

	/* Only some cores are marked as eth cores, skip others */
	do {
		/* Get the next core */
		next_core = rte_get_next_lcore(prev_core, 0, 1);

		/* Check if we have reached max lcores */
		if (next_core == RTE_MAX_LCORE)
			return next_core;

		/* Update prev_core */
		prev_core = next_core;
	} while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core)));

	return next_core;
}

static inline unsigned int
eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
{
	unsigned int next_core;

	/* Get next active core skipping cores reserved as eth cores */
	do {
		/* Get the next core */
		next_core = rte_get_next_lcore(prev_core, 0, 0);

		/* Check if we have reached max lcores */
		if (next_core == RTE_MAX_LCORE)
			return next_core;

		prev_core = next_core;
	} while (rte_bitmap_get(em_conf->eth_core_mask, next_core));

	return next_core;
}

static struct eventdev_params *
eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
{
	int i;

	for (i = 0; i < em_conf->nb_eventdev; i++) {
		if (em_conf->eventdev_config[i].eventdev_id == eventdev_id)
			break;
	}

	/* No match */
	if (i == em_conf->nb_eventdev)
		return NULL;

	return &(em_conf->eventdev_config[i]);
}

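/*
 * "Internal port" capability means the adapter can move packets between
 * the eth device and the event device entirely in hardware, without a
 * service core. The helpers below return true only when every available
 * eth port has the capability for the given event device.
 */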
static inline bool
eh_dev_has_rx_internal_port(uint8_t eventdev_id)
{
	bool flag = true;
	int j, ret;

	RTE_ETH_FOREACH_DEV(j) {
		uint32_t caps = 0;

		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
		if (ret < 0)
			return false;

		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			flag = false;
	}
	return flag;
}

static inline bool
eh_dev_has_tx_internal_port(uint8_t eventdev_id)
{
	bool flag = true;
	int j, ret;

	RTE_ETH_FOREACH_DEV(j) {
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
		if (ret < 0)
			return false;

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			flag = false;
	}
	return flag;
}

static inline bool
eh_dev_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

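/*
 * Build a default eventdev config when the user has not supplied one:
 * probe the single event device and claim its queues and ports, trimmed
 * down to what the application can actually use.
 */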
static int
eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
{
	int lcore_count, nb_eventdev, nb_eth_dev, ret;
	struct eventdev_params *eventdev_config;
	struct rte_event_dev_info dev_info;

	/* Get the number of event devices */
	nb_eventdev = rte_event_dev_count();
	if (nb_eventdev == 0) {
		EH_LOG_ERR("No event devices detected");
		return -EINVAL;
	}

	if (nb_eventdev != 1) {
		EH_LOG_ERR("Event mode does not support multiple event devices. "
			   "Please provide only one event device.");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();
	if (nb_eth_dev == 0) {
		EH_LOG_ERR("No eth devices detected");
		return -EINVAL;
	}

	/* Get the number of lcores */
	lcore_count = rte_lcore_count();

	/* Read event device info */
	ret = rte_event_dev_info_get(0, &dev_info);
	if (ret < 0) {
		EH_LOG_ERR("Failed to read event device info %d", ret);
		return ret;
	}

	/* Check if enough ports are available */
	if (dev_info.max_event_ports < 2) {
		EH_LOG_ERR("Not enough event ports available");
		return -EINVAL;
	}

	/* Get the first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Save number of queues & ports available */
	eventdev_config->eventdev_id = 0;
	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
	eventdev_config->nb_eventport = dev_info.max_event_ports;
	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;

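	/*
	 * Example: with 2 eth ports and 6 lcores on a device advertising
	 * 8 queues and 8 ports, the trimming below yields 3 event queues
	 * (one per eth port plus one reserved for Tx) and 6 event ports
	 * (one per lcore).
	 */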
	/* Check if there are more queues than required */
	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
		/* One queue is reserved for Tx */
		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
	}

	/* Check if there are more ports than required */
	if (eventdev_config->nb_eventport > lcore_count) {
		/* One port per lcore is enough */
		eventdev_config->nb_eventport = lcore_count;
	}

	/* Update the number of event devices */
	em_conf->nb_eventdev++;

	return 0;
}

static void
eh_do_capability_check(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	int all_internal_ports = 1;
	uint32_t eventdev_id;
	int i;

	for (i = 0; i < em_conf->nb_eventdev; i++) {

		/* Get the event dev conf */
		eventdev_config = &(em_conf->eventdev_config[i]);
		eventdev_id = eventdev_config->eventdev_id;

		/* Check if event device has internal port for Rx & Tx */
		if (eh_dev_has_rx_internal_port(eventdev_id) &&
		    eh_dev_has_tx_internal_port(eventdev_id)) {
			eventdev_config->all_internal_ports = 1;
		} else {
			all_internal_ports = 0;
		}
	}

	/*
	 * If Rx & Tx internal ports are supported by all event devices then
	 * eth cores won't be required. Override the eth core mask requested
	 * and decrement number of event queues by one as it won't be needed
	 * for Tx.
	 */
	if (all_internal_ports) {
		rte_bitmap_reset(em_conf->eth_core_mask);
		for (i = 0; i < em_conf->nb_eventdev; i++)
			em_conf->eventdev_config[i].nb_eventqueue--;
	}
}

static int
eh_set_default_conf_link(struct eventmode_conf *em_conf)
{
	struct eventdev_params *eventdev_config;
	struct eh_event_link_info *link;
	unsigned int lcore_id = -1;
	int i, link_index;

	/*
	 * Create a 1:1 mapping from event ports to cores. If the number
	 * of event ports is less than the number of cores, some cores won't
	 * run any worker. If there are more event ports, the extra ports
	 * won't be used.
	 */

	/*
	 * The event queue-port mapping is done according to the link. Since
	 * we are falling back to the default link config, enable the
	 * "all_ev_queue_to_ev_port" mode flag, which maps all queues to
	 * the port.
	 */
	em_conf->ext_params.all_ev_queue_to_ev_port = 1;

	/* Get first event dev conf */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Loop through the ports */
	for (i = 0; i < eventdev_config->nb_eventport; i++) {

		/* Get next active core id */
		lcore_id = eh_get_next_active_core(em_conf,
				lcore_id);

		if (lcore_id == RTE_MAX_LCORE) {
			/* Reached max cores */
			return 0;
		}

		/* Save the current combination as one link */

		/* Get the index */
		link_index = em_conf->nb_link;

		/* Get the corresponding link */
		link = &(em_conf->link[link_index]);

		/* Save link */
		link->eventdev_id = eventdev_config->eventdev_id;
		link->event_port_id = i;
		link->lcore_id = lcore_id;

		/*
		 * Don't set eventq_id as by default all queues
		 * need to be mapped to the port, which is controlled
		 * by the operating mode.
		 */

		/* Update number of links */
		em_conf->nb_link++;
	}

	return 0;
}

static int
eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
{
	struct rx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct rx_adapter_conf *adapter;
	bool rx_internal_port = true;
	bool single_ev_queue = false;
	int nb_eventqueue;
	uint32_t caps = 0;
	int eventdev_id;
	int nb_eth_dev;
	int adapter_id;
	int conn_id;
	int ret;
	int i;

	/* Create one adapter with eth queues mapped to event queue(s) */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	adapter = &(em_conf->rx_adapter[adapter_id]);

	/* Set adapter conf */
	adapter->eventdev_id = eventdev_id;
	adapter->adapter_id = adapter_id;

	/*
	 * If the event device does not have internal ports for passing
	 * packets then reserve one queue for the Tx path
	 */
	nb_eventqueue = eventdev_config->all_internal_ports ?
			eventdev_config->nb_eventqueue :
			eventdev_config->nb_eventqueue - 1;

	/*
	 * Map all queues of one eth device (port) to one event queue. If
	 * there are more event queues than eth ports then create a 1:1
	 * mapping. Otherwise map all eth ports to a single event queue.
	 */
	if (nb_eth_dev > nb_eventqueue)
		single_ev_queue = true;

	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = adapter->nb_connections;

		/* Get the connection */
		conn = &(adapter->conn[conn_id]);

		/* Set mapping between eth ports & event queues */
		conn->ethdev_id = i;
		conn->eventq_id = single_ev_queue ? 0 : i;

		/* Add all eth rx queues of the eth port to the event queue */
		conn->ethdev_rx_qid = -1;

		/* Get Rx adapter capabilities */
		ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
		if (ret < 0) {
			EH_LOG_ERR("Failed to get event device %d eth rx adapter"
				   " capabilities for port %d", eventdev_id, i);
			return ret;
		}
		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			rx_internal_port = false;

		/* Update the number of connections */
		adapter->nb_connections++;

	}

	if (rx_internal_port) {
		/* Rx core is not required */
		adapter->rx_core_id = -1;
	} else {
		/* Rx core is required */
		adapter->rx_core_id = eh_get_next_eth_core(em_conf);
	}

	/* We have set up one adapter */
	em_conf->nb_rx_adapter = 1;

	return 0;
}

static int
eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
{
	struct tx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	struct tx_adapter_conf *tx_adapter;
	bool tx_internal_port = true;
	uint32_t caps = 0;
	int eventdev_id;
	int adapter_id;
	int nb_eth_dev;
	int conn_id;
	int ret;
	int i;

	/*
	 * Create one Tx adapter with all eth queues mapped to event queues
	 * 1:1.
	 */

	if (em_conf->nb_eventdev == 0) {
		EH_LOG_ERR("No event devs registered");
		return -EINVAL;
	}

	/* Get the number of eth devs */
	nb_eth_dev = rte_eth_dev_count_avail();

	/* Use the first event dev */
	eventdev_config = &(em_conf->eventdev_config[0]);

	/* Get eventdev ID */
	eventdev_id = eventdev_config->eventdev_id;
	adapter_id = 0;

	/* Get adapter conf */
	tx_adapter = &(em_conf->tx_adapter[adapter_id]);

	/* Set adapter conf */
	tx_adapter->eventdev_id = eventdev_id;
	tx_adapter->adapter_id = adapter_id;

	/*
	 * Map all Tx queues of the eth device (port) to the event device.
	 */

	/* Set defaults for connections */

	/*
	 * One eth device (port) is one connection. Map all Tx queues
	 * of the device to the Tx adapter.
	 */

	for (i = 0; i < nb_eth_dev; i++) {

		/* Use only the ports enabled */
		if ((em_conf->eth_portmask & (1 << i)) == 0)
			continue;

		/* Get the connection id */
		conn_id = tx_adapter->nb_connections;

		/* Get the connection */
		conn = &(tx_adapter->conn[conn_id]);

		/* Add ethdev to connections */
		conn->ethdev_id = i;

		/* Add all eth tx queues to adapter */
		conn->ethdev_tx_qid = -1;

		/* Get Tx adapter capabilities */
		ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
		if (ret < 0) {
			EH_LOG_ERR("Failed to get event device %d eth tx adapter"
				   " capabilities for port %d", eventdev_id, i);
			return ret;
		}
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			tx_internal_port = false;

		/* Update the number of connections */
		tx_adapter->nb_connections++;
	}

	if (tx_internal_port) {
		/* Tx core is not required */
		tx_adapter->tx_core_id = -1;
	} else {
		/* Tx core is required */
		tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);

		/*
		 * Use one event queue per adapter for submitting packets
		 * for Tx. Reserve the last available queue.
		 */
		/* Queue numbers start at 0 */
		tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
	}

	/* We have set up one adapter */
	em_conf->nb_tx_adapter = 1;
	return 0;
}

static int
eh_validate_conf(struct eventmode_conf *em_conf)
{
	int ret;

	/*
	 * Check if event devs are specified. Else probe the event devices
	 * and initialize the config with all ports & queues available
	 */
	if (em_conf->nb_eventdev == 0) {
		ret = eh_set_default_conf_eventdev(em_conf);
		if (ret != 0)
			return ret;
	}

	/* Perform capability check for the selected event devices */
	eh_do_capability_check(em_conf);

	/*
	 * Check if links are specified. Else generate a default config for
	 * the event ports used.
	 */
	if (em_conf->nb_link == 0) {
		ret = eh_set_default_conf_link(em_conf);
		if (ret != 0)
			return ret;
	}

	/*
	 * Check if rx adapters are specified. Else generate a default config
	 * with one rx adapter and all eth queues mapped to event queues.
	 */
	if (em_conf->nb_rx_adapter == 0) {
		ret = eh_set_default_conf_rx_adapter(em_conf);
		if (ret != 0)
			return ret;
	}

	/*
	 * Check if tx adapters are specified. Else generate a default config
	 * with one tx adapter.
	 */
	if (em_conf->nb_tx_adapter == 0) {
		ret = eh_set_default_conf_tx_adapter(em_conf);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int
eh_initialize_eventdev(struct eventmode_conf *em_conf)
{
	struct rte_event_queue_conf eventq_conf = {0};
	struct rte_event_dev_info evdev_default_conf;
	struct rte_event_dev_config eventdev_conf;
	struct eventdev_params *eventdev_config;
	int nb_eventdev = em_conf->nb_eventdev;
	struct eh_event_link_info *link;
	uint8_t *queue = NULL;
	uint8_t eventdev_id;
	int nb_eventqueue;
	uint8_t i, j;
	int ret;

	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		/* Get event dev ID */
		eventdev_id = eventdev_config->eventdev_id;

		/* Get the number of queues */
		nb_eventqueue = eventdev_config->nb_eventqueue;

		/* Reset the default conf */
		memset(&evdev_default_conf, 0,
			sizeof(struct rte_event_dev_info));

		/* Get default conf of eventdev */
		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
		if (ret < 0) {
			EH_LOG_ERR(
				"Error in getting event device info[devID:%d]",
				eventdev_id);
			return ret;
		}

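		/* Use the device's advertised limits as the default config */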
		memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config));
		eventdev_conf.nb_events_limit =
				evdev_default_conf.max_num_events;
		eventdev_conf.nb_event_queues = nb_eventqueue;
		eventdev_conf.nb_event_ports =
				eventdev_config->nb_eventport;
		eventdev_conf.nb_event_queue_flows =
				evdev_default_conf.max_event_queue_flows;
		eventdev_conf.nb_event_port_dequeue_depth =
				evdev_default_conf.max_event_port_dequeue_depth;
		eventdev_conf.nb_event_port_enqueue_depth =
				evdev_default_conf.max_event_port_enqueue_depth;

		/* Configure event device */
		ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
		if (ret < 0) {
			EH_LOG_ERR("Error in configuring event device");
			return ret;
		}

		/* Configure event queues */
		for (j = 0; j < nb_eventqueue; j++) {

			memset(&eventq_conf, 0,
					sizeof(struct rte_event_queue_conf));

			/* Per event dev queues can be ATQ or SINGLE LINK */
			eventq_conf.event_queue_cfg =
					eventdev_config->ev_queue_mode;
			/*
			 * All queues need to be set with sched_type as
			 * schedule type for the application stage. One
			 * queue would be reserved for the final eth tx
			 * stage if event device does not have internal
			 * ports. This will be an atomic queue.
			 */
			if (!eventdev_config->all_internal_ports &&
			    j == nb_eventqueue - 1) {
				eventq_conf.schedule_type =
					RTE_SCHED_TYPE_ATOMIC;
			} else {
				eventq_conf.schedule_type =
					em_conf->ext_params.sched_type;
			}

			/* Set max atomic flows to 1024 */
			eventq_conf.nb_atomic_flows = 1024;
			eventq_conf.nb_atomic_order_sequences = 1024;

			/* Setup the queue */
			ret = rte_event_queue_setup(eventdev_id, j,
					&eventq_conf);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event queue %d",
					   ret);
				return ret;
			}
		}

		/* Configure event ports */
		for (j = 0; j < eventdev_config->nb_eventport; j++) {
			ret = rte_event_port_setup(eventdev_id, j, NULL);
			if (ret < 0) {
				EH_LOG_ERR("Failed to setup event port %d",
					   ret);
				return ret;
			}
		}
	}

	/* Make event queue - event port link */
	for (j = 0; j < em_conf->nb_link; j++) {

		/* Get link info */
		link = &(em_conf->link[j]);

		/* Get event dev ID */
		eventdev_id = link->eventdev_id;

		/*
		 * If "all_ev_queue_to_ev_port" params flag is selected, all
		 * queues need to be mapped to the port.
		 */
		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			queue = NULL;
		else
			queue = &(link->eventq_id);

		/* Link queue to port */
		ret = rte_event_port_link(eventdev_id, link->event_port_id,
				queue, NULL, 1);
		if (ret < 0) {
			EH_LOG_ERR("Failed to link event port %d", ret);
			return ret;
		}
	}

	/* Start event devices */
	for (i = 0; i < nb_eventdev; i++) {

		/* Get eventdev config */
		eventdev_config = &(em_conf->eventdev_config[i]);

		ret = rte_event_dev_start(eventdev_config->eventdev_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start event device %d, %d",
				   i, ret);
			return ret;
		}
	}
	return 0;
}

static int
eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
				uint8_t ev_dev_id, uint8_t ethdev_id)
{
	struct rte_event_eth_rx_adapter_vector_limits limits = {0};
	uint16_t vector_size = em_conf->ext_params.vector_size;
	int ret;

	ret = rte_event_eth_rx_adapter_vector_limits_get(ev_dev_id, ethdev_id,
							 &limits);
	if (ret) {
		EH_LOG_ERR("failed to get vector limits");
		return ret;
	}

	if (vector_size < limits.min_sz || vector_size > limits.max_sz) {
		EH_LOG_ERR("Vector size [%d] not within limits min[%d] max[%d]",
			   vector_size, limits.min_sz, limits.max_sz);
		return -EINVAL;
	}

	if (limits.log2_sz && !rte_is_power_of_2(vector_size)) {
		EH_LOG_ERR("Vector size [%d] not power of 2", vector_size);
		return -EINVAL;
	}

	if (em_conf->vector_tmo_ns > limits.max_timeout_ns ||
	    em_conf->vector_tmo_ns < limits.min_timeout_ns) {
		EH_LOG_ERR("Vector timeout [%" PRIu64
			   "] not within limits max[%" PRIu64
			   "] min[%" PRIu64 "]",
			   em_conf->vector_tmo_ns,
			   limits.max_timeout_ns,
			   limits.min_timeout_ns);
		return -EINVAL;
	}
	return 0;
}

static int
eh_rx_adapter_configure(struct eventmode_conf *em_conf,
		struct rx_adapter_conf *adapter)
{
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_port_conf port_conf = {0};
	struct rx_adapter_connection_info *conn;
	uint32_t service_id, socket_id, nb_elem;
	struct rte_mempool *vector_pool = NULL;
	uint32_t lcore_id = rte_lcore_id();
	uint8_t eventdev_id;
	int ret;
	int j;

	/* Get event dev ID */
	eventdev_id = adapter->eventdev_id;

	/* Get default configuration of event dev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to get event dev info %d", ret);
		return ret;
	}

	if (em_conf->ext_params.event_vector) {
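		/*
		 * Size the vector pool so that every mbuf in the packet
		 * pool can sit in a vector event at once; the +1 rounds
		 * up the integer division.
		 */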
		socket_id = rte_lcore_to_socket_id(lcore_id);
		nb_elem = (nb_bufs_in_pool / em_conf->ext_params.vector_size)
			  + 1;

		vector_pool = rte_event_vector_pool_create(
			"vector_pool", nb_elem, 0,
			em_conf->ext_params.vector_size,
			socket_id);
		if (vector_pool == NULL) {
			EH_LOG_ERR("failed to create event vector pool");
			return -ENOMEM;
		}
	}
	/* Setup port conf */
	port_conf.new_event_threshold = 1200;
	port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create Rx adapter */
	ret = rte_event_eth_rx_adapter_create(adapter->adapter_id,
			adapter->eventdev_id, &port_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to create rx adapter %d", ret);
		return ret;
	}

	/* Setup various connections in the adapter */
	for (j = 0; j < adapter->nb_connections; j++) {
		/* Get connection */
		conn = &(adapter->conn[j]);

		/* Setup queue conf */
		queue_conf.ev.queue_id = conn->eventq_id;
		queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
		queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;

		if (em_conf->ext_params.event_vector) {
			ret = eh_event_vector_limits_validate(em_conf,
							      eventdev_id,
							      conn->ethdev_id);
			if (ret)
				return ret;

			queue_conf.vector_sz = em_conf->ext_params.vector_size;
			queue_conf.vector_timeout_ns = em_conf->vector_tmo_ns;
			queue_conf.vector_mp = vector_pool;
			queue_conf.rx_queue_flags =
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
		}

		/* Add queue to the adapter */
		ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id,
				conn->ethdev_id, conn->ethdev_rx_qid,
				&queue_conf);
		if (ret < 0) {
			EH_LOG_ERR("Failed to add eth queue to rx adapter %d",
				   ret);
			return ret;
		}
	}

	/* Get the service ID used by rx adapter */
	ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id,
						      &service_id);
	if (ret != -ESRCH && ret < 0) {
		EH_LOG_ERR("Failed to get service id used by rx adapter %d",
			   ret);
		return ret;
	}

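	/*
	 * The adapter service, if any, is run directly from the eth core
	 * (see eh_start_worker_eth_core()), so drop the check that the
	 * service has been mapped to a service core. Skip this when the
	 * adapter uses an internal port and registers no service (-ESRCH).
	 */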
	if (ret != -ESRCH)
		rte_service_set_runstate_mapped_check(service_id, 0);

	/* Start adapter */
	ret = rte_event_eth_rx_adapter_start(adapter->adapter_id);
	if (ret < 0) {
		EH_LOG_ERR("Failed to start rx adapter %d", ret);
		return ret;
	}

	return 0;
}

static int
eh_initialize_rx_adapter(struct eventmode_conf *em_conf)
{
	struct rx_adapter_conf *adapter;
	int i, ret;

	/* Configure rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {
		adapter = &(em_conf->rx_adapter[i]);
		ret = eh_rx_adapter_configure(em_conf, adapter);
		if (ret < 0) {
			EH_LOG_ERR("Failed to configure rx adapter %d", ret);
			return ret;
		}
	}
	return 0;
}

static int32_t
eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id)
{
	uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE];
	struct rx_adapter_conf *rx_adapter;
	struct tx_adapter_conf *tx_adapter;
	int service_count = 0;
	int adapter_id;
	int32_t ret;
	int i;

	EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id);

	/*
	 * Parse adapter config to check which of all Rx adapters need
	 * to be handled by this core.
	 */
	for (i = 0; i < conf->nb_rx_adapter; i++) {
		/* Check if we have exceeded the max allowed */
		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) {
			EH_LOG_ERR(
			      "Exceeded the max allowed adapters per rx core");
			break;
		}

		rx_adapter = &(conf->rx_adapter[i]);
		if (rx_adapter->rx_core_id != lcore_id)
			continue;

		/* Adapter is handled by this core */
		adapter_id = rx_adapter->adapter_id;

		/* Get the service ID for the adapters */
		ret = rte_event_eth_rx_adapter_service_id_get(adapter_id,
				&(service_id[service_count]));

		if (ret != -ESRCH && ret < 0) {
			EH_LOG_ERR(
				"Failed to get service id used by rx adapter");
			return ret;
		}

		/* Update service count */
		service_count++;
	}

	/*
	 * Parse adapter config to see which of all Tx adapters need
	 * to be handled by this core.
	 */
	for (i = 0; i < conf->nb_tx_adapter; i++) {
		/* Check if we have exceeded the max allowed */
		if (service_count >= EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) {
			EH_LOG_ERR(
				"Exceeded the max allowed adapters per tx core");
			break;
		}

		tx_adapter = &conf->tx_adapter[i];
		if (tx_adapter->tx_core_id != lcore_id)
			continue;

		/* Adapter is handled by this core */
		adapter_id = tx_adapter->adapter_id;

		/* Get the service ID for the adapters */
		ret = rte_event_eth_tx_adapter_service_id_get(adapter_id,
				&(service_id[service_count]));

		if (ret != -ESRCH && ret < 0) {
			EH_LOG_ERR(
				"Failed to get service id used by tx adapter");
			return ret;
		}

		/* Update service count */
		service_count++;
	}

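	/* Run the collected adapter services in a loop until stopped */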
	eth_core_running = true;

	while (eth_core_running) {
		for (i = 0; i < service_count; i++) {
			/* Initiate adapter service */
			rte_service_run_iter_on_app_lcore(service_id[i], 0);
		}
	}

	return 0;
}

static int32_t
eh_stop_worker_eth_core(void)
{
	if (eth_core_running) {
		EH_LOG_INFO("Stopping eth cores");
		eth_core_running = false;
	}
	return 0;
}

static struct eh_app_worker_params *
eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
		struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param)
{
	struct eh_app_worker_params curr_conf = { {{0} }, NULL};
	struct eh_event_link_info *link = NULL;
	struct eh_app_worker_params *tmp_wrkr;
	struct eventmode_conf *em_conf;
	uint8_t eventdev_id;
	int i;

	/* Get eventmode config */
	em_conf = conf->mode_params;

	/*
	 * Use event device from the first lcore-event link.
	 *
	 * Assumption: All lcore-event links tied to a core are using the
	 * same event device. In other words, one core would be polling on
	 * queues of a single event device only.
	 */

	/* Get a link for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {
		link = &(em_conf->link[i]);
		if (link->lcore_id == lcore_id)
			break;
	}

	/* Make sure the loop above actually found a link for this lcore */
	if (link == NULL || i == em_conf->nb_link) {
		EH_LOG_ERR("No valid link found for lcore %d", lcore_id);
		return NULL;
	}

	/* Get event dev ID */
	eventdev_id = link->eventdev_id;

	/* Populate the curr_conf with the capabilities */

	/* Check for Tx internal port */
	if (eh_dev_has_tx_internal_port(eventdev_id))
		curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	else
		curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;

	/* Check for burst mode */
	if (eh_dev_has_burst_mode(eventdev_id))
		curr_conf.cap.burst = EH_RX_TYPE_BURST;
	else
		curr_conf.cap.burst = EH_RX_TYPE_NON_BURST;

	curr_conf.cap.ipsec_mode = conf->ipsec_mode;

	/* Parse the passed list and see if we have matching capabilities */

	/* Initialize the pointer used to traverse the list */
	tmp_wrkr = app_wrkrs;

	for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) {

		/* Skip this if capabilities are not matching */
		if (tmp_wrkr->cap.u64 != curr_conf.cap.u64)
			continue;

		/* If the checks pass, we have a match */
		return tmp_wrkr;
	}

	return NULL;
}

static int
eh_verify_match_worker(struct eh_app_worker_params *match_wrkr)
{
	/* Verify registered worker */
	if (match_wrkr->worker_thread == NULL) {
		EH_LOG_ERR("No worker registered");
		return 0;
	}

	/* Success */
	return 1;
}

static uint8_t
eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf,
		struct eh_event_link_info **links)
{
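	/*
	 * Two passes over the link list: first count the links meant for
	 * this lcore, then allocate a cache and copy them. The caller owns
	 * the returned array and must free() it.
	 */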
	struct eh_event_link_info *link_cache;
	struct eventmode_conf *em_conf = NULL;
	struct eh_event_link_info *link;
	uint8_t lcore_nb_link = 0;
	size_t single_link_size;
	size_t cache_size;
	int index = 0;
	int i;

	if (conf == NULL || links == NULL) {
		EH_LOG_ERR("Invalid args");
		return 0;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	if (em_conf == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return 0;
	}

	/* Get the number of links registered for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {

		/* Get link */
		link = &(em_conf->link[i]);

		/* Check if we have link intended for this lcore */
		if (link->lcore_id == lcore_id) {

			/* Update the number of links for this core */
			lcore_nb_link++;

		}
	}

	/* Compute size of one entry to be copied */
	single_link_size = sizeof(struct eh_event_link_info);

	/* Compute size of the buffer required */
	cache_size = lcore_nb_link * sizeof(struct eh_event_link_info);

	/* Allocate memory for caching the links */
	link_cache = calloc(1, cache_size);
	if (link_cache == NULL) {
		EH_LOG_ERR("Failed to allocate memory for links");
		return 0;
	}

	/* Cache the links registered for this lcore */
	for (i = 0; i < em_conf->nb_link; i++) {

		/* Get link */
		link = &(em_conf->link[i]);

		/* Check if we have link intended for this lcore */
		if (link->lcore_id == lcore_id) {

			/* Cache the link */
			memcpy(&link_cache[index], link, single_link_size);

			/* Update index */
			index++;
		}
	}

	/* Update the links for the application to use the cached links */
	*links = link_cache;

	/* Return the number of cached links */
	return lcore_nb_link;
}

static int
eh_tx_adapter_configure(struct eventmode_conf *em_conf,
		struct tx_adapter_conf *adapter)
{
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_port_conf port_conf = {0};
	struct tx_adapter_connection_info *conn;
	struct eventdev_params *eventdev_config;
	uint8_t tx_port_id = 0;
	uint8_t eventdev_id;
	uint32_t service_id;
	int ret, j;

	/* Get event dev ID */
	eventdev_id = adapter->eventdev_id;

	/* Get event device conf */
	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);
	if (eventdev_config == NULL) {
		EH_LOG_ERR("Failed to read eventdev config");
		return -EINVAL;
	}

	/* Create Tx adapter */

	/* Get default configuration of event dev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to get event dev info %d", ret);
		return ret;
	}

	/* Setup port conf */
	port_conf.new_event_threshold =
			evdev_default_conf.max_num_events;
	port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create adapter */
	ret = rte_event_eth_tx_adapter_create(adapter->adapter_id,
			adapter->eventdev_id, &port_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to create tx adapter %d", ret);
		return ret;
	}

	/* Setup various connections in the adapter */
	for (j = 0; j < adapter->nb_connections; j++) {

		/* Get connection */
		conn = &(adapter->conn[j]);

		/* Add queue to the adapter */
		ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id,
				conn->ethdev_id, conn->ethdev_tx_qid);
		if (ret < 0) {
			EH_LOG_ERR("Failed to add eth queue to tx adapter %d",
				   ret);
			return ret;
		}
	}

	/*
	 * Check if a Tx core is assigned. If no Tx core is assigned, the
	 * adapter has an internal port for submitting Tx packets and the
	 * Tx event queue & port setup is not required.
	 */
	if (adapter->tx_core_id == (uint32_t) (-1)) {
		/* Internal port is present */
		goto skip_tx_queue_port_setup;
	}

	/* Setup Tx queue & port */

	/* Get event port used by the adapter */
	ret = rte_event_eth_tx_adapter_event_port_get(
			adapter->adapter_id, &tx_port_id);
	if (ret) {
		EH_LOG_ERR("Failed to get tx adapter port id %d", ret);
		return ret;
	}

	/*
	 * The Tx event queue is reserved for the Tx adapter. Unlink this
	 * queue from all other ports.
	 */
	for (j = 0; j < eventdev_config->nb_eventport; j++) {
		rte_event_port_unlink(eventdev_id, j,
				      &(adapter->tx_ev_queue), 1);
	}

	/* Link Tx event queue to Tx port */
	ret = rte_event_port_link(eventdev_id, tx_port_id,
			&(adapter->tx_ev_queue), NULL, 1);
	if (ret != 1) {
		EH_LOG_ERR("Failed to link event queue to port");
		return ret;
	}

	/* Get the service ID used by Tx adapter */
	ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id,
						      &service_id);
	if (ret != -ESRCH && ret < 0) {
		EH_LOG_ERR("Failed to get service id used by tx adapter %d",
			   ret);
		return ret;
	}

	if (ret != -ESRCH)
		rte_service_set_runstate_mapped_check(service_id, 0);

skip_tx_queue_port_setup:
	/* Start adapter */
	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
	if (ret < 0) {
		EH_LOG_ERR("Failed to start tx adapter %d", ret);
		return ret;
	}

	return 0;
}

static int
eh_initialize_tx_adapter(struct eventmode_conf *em_conf)
{
	struct tx_adapter_conf *adapter;
	int i, ret;

	/* Configure Tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		ret = eh_tx_adapter_configure(em_conf, adapter);
		if (ret < 0) {
			EH_LOG_ERR("Failed to configure tx adapter %d", ret);
			return ret;
		}
	}
	return 0;
}

static void
eh_display_operating_mode(struct eventmode_conf *em_conf)
{
	char sched_types[][32] = {
		"RTE_SCHED_TYPE_ORDERED",
		"RTE_SCHED_TYPE_ATOMIC",
		"RTE_SCHED_TYPE_PARALLEL",
	};
	EH_LOG_INFO("Operating mode:");

	EH_LOG_INFO("\tScheduling type: \t%s",
		sched_types[em_conf->ext_params.sched_type]);

	EH_LOG_INFO("");
}

static void
eh_display_event_dev_conf(struct eventmode_conf *em_conf)
{
	char queue_mode[][32] = {
		"",
		"ATQ (ALL TYPE QUEUE)",
		"SINGLE LINK",
	};
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Event Device Configuration:");

	for (i = 0; i < em_conf->nb_eventdev; i++) {
		sprintf(print_buf,
			"\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
			em_conf->eventdev_config[i].eventdev_id,
			em_conf->eventdev_config[i].nb_eventqueue,
			em_conf->eventdev_config[i].nb_eventport);
		sprintf(print_buf + strlen(print_buf),
			"\tQueue mode: %s",
			queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

static void
eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_rx_adapter = em_conf->nb_rx_adapter;
	struct rx_adapter_connection_info *conn;
	struct rx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);

	for (i = 0; i < nb_rx_adapter; i++) {
		adapter = &(em_conf->rx_adapter[i]);
		sprintf(print_buf,
			"\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->rx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->rx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2d", adapter->rx_core_id);

		EH_LOG_INFO("%s", print_buf);

		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_rx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2d",
					conn->ethdev_rx_qid);

			sprintf(print_buf + strlen(print_buf),
				"\tEvent queue: %-2d", conn->eventq_id);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

static void
eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_tx_adapter = em_conf->nb_tx_adapter;
	struct tx_adapter_connection_info *conn;
	struct tx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);

	for (i = 0; i < nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		sprintf(print_buf,
			"\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->tx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->tx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2d,\tInput event queue: %-2d",
				adapter->tx_core_id, adapter->tx_ev_queue);

		EH_LOG_INFO("%s", print_buf);

		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_tx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2d",
					conn->ethdev_tx_qid);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

static void
eh_display_link_conf(struct eventmode_conf *em_conf)
{
	struct eh_event_link_info *link;
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Links configured: %d", em_conf->nb_link);

	for (i = 0; i < em_conf->nb_link; i++) {
		link = &(em_conf->link[i]);

		sprintf(print_buf,
			"\tEvent dev ID: %-2d\tEvent port: %-2d",
			link->eventdev_id,
			link->event_port_id);

		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2s\t", "ALL");
		else
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2d\t", link->eventq_id);

		sprintf(print_buf + strlen(print_buf),
			"Lcore: %-2d", link->lcore_id);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

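/*
 * Typical usage of the helper from an application (a minimal sketch;
 * error handling and option parsing are omitted):
 *
 *	struct eh_conf *conf = eh_conf_init();
 *
 *	conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
 *	eh_devs_init(conf);
 *	... launch eh_launch_worker() on each worker lcore ...
 *	eh_devs_uninit(conf);
 *	eh_conf_uninit(conf);
 */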
struct eh_conf *
eh_conf_init(void)
{
	struct eventmode_conf *em_conf = NULL;
	struct eh_conf *conf = NULL;
	unsigned int eth_core_id;
	void *bitmap = NULL;
	uint32_t nb_bytes;

	/* Allocate memory for config */
	conf = calloc(1, sizeof(struct eh_conf));
	if (conf == NULL) {
		EH_LOG_ERR("Failed to allocate memory for eventmode helper "
			   "config");
		return NULL;
	}

	/* Set default conf */

	/* Packet transfer mode: poll */
	conf->mode = EH_PKT_TRANSFER_MODE_POLL;
	conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP;

	/* Keep all ethernet ports enabled by default */
	conf->eth_portmask = -1;

	/* Allocate memory for event mode params */
	conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Failed to allocate memory for event mode params");
		goto free_conf;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Allocate and initialize bitmap for eth cores */
	nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
	if (!nb_bytes) {
		EH_LOG_ERR("Failed to get bitmap footprint");
		goto free_em_conf;
	}

	bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
			     RTE_CACHE_LINE_SIZE);
	if (!bitmap) {
		EH_LOG_ERR("Failed to allocate memory for eth cores bitmap");
		goto free_em_conf;
	}

	em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
						 nb_bytes);
	if (!em_conf->eth_core_mask) {
		EH_LOG_ERR("Failed to initialize bitmap");
		goto free_bitmap;
	}

	/* Set schedule type as not set */
	em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;

	/* Set two cores as eth cores for Rx & Tx */

	/* Use first core other than main core as Rx core */
	eth_core_id = rte_get_next_lcore(0,	/* curr core */
					 1,	/* skip main core */
					 0	/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	/* Use next core as Tx core */
	eth_core_id = rte_get_next_lcore(eth_core_id,	/* curr core */
					 1,		/* skip main core */
					 0		/* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	em_conf->ext_params.vector_size = DEFAULT_VECTOR_SIZE;
	em_conf->vector_tmo_ns = DEFAULT_VECTOR_TMO;

	return conf;

free_bitmap:
	rte_free(bitmap);
free_em_conf:
	free(em_conf);
free_conf:
	free(conf);
	return NULL;
}

void
eh_conf_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf = NULL;

	if (!conf || !conf->mode_params)
		return;

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Free eventmode configuration memory */
	rte_free(em_conf->eth_core_mask);
	free(em_conf);
	free(conf);
}

void
eh_display_conf(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = (struct eventmode_conf *)(conf->mode_params);

	/* Display user exposed operating modes */
	eh_display_operating_mode(em_conf);

	/* Display event device conf */
	eh_display_event_dev_conf(em_conf);

	/* Display Rx adapter conf */
	eh_display_rx_adapter_conf(em_conf);

	/* Display Tx adapter conf */
	eh_display_tx_adapter_conf(em_conf);

	/* Display event-lcore link */
	eh_display_link_conf(em_conf);
}

int32_t
eh_devs_init(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	uint16_t port_id;
	int ret;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Eventmode conf needs the eth portmask */
	em_conf->eth_portmask = conf->eth_portmask;

	/* Validate the requested config */
	ret = eh_validate_conf(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to validate the requested config %d", ret);
		return ret;
	}

	/* Display the current configuration */
	eh_display_conf(conf);

	/* Stop eth devices before setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_stop(port_id);
		if (ret != 0) {
			EH_LOG_ERR("Failed to stop port %u, err: %d",
					port_id, ret);
			return ret;
		}
	}

	/* Setup eventdev */
	ret = eh_initialize_eventdev(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize event dev %d", ret);
		return ret;
	}

	/* Setup Rx adapter */
	ret = eh_initialize_rx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
		return ret;
	}

	/* Setup Tx adapter */
	ret = eh_initialize_tx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
		return ret;
	}

	/* Start eth devices after setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_start(port_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start eth dev %d, %d",
				   port_id, ret);
			return ret;
		}
	}

	return 0;
}

int32_t
eh_devs_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	int ret, i, j;
	uint16_t id;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Stop and release rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {

		id = em_conf->rx_adapter[i].adapter_id;
		ret = rte_event_eth_rx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_rx_adapter_queue_del(id,
				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
				       "Failed to remove rx adapter queues %d",
				       ret);
				return ret;
			}
		}

		ret = rte_event_eth_rx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free rx adapter %d", ret);
			return ret;
		}
	}

	/* Stop and release event devices */
	for (i = 0; i < em_conf->nb_eventdev; i++) {

		id = em_conf->eventdev_config[i].eventdev_id;
		rte_event_dev_stop(id);

		ret = rte_event_dev_close(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
			return ret;
		}
	}

	/* Stop and release tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {

		id = em_conf->tx_adapter[i].adapter_id;
		ret = rte_event_eth_tx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
			return ret;
		}

		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_tx_adapter_queue_del(id,
				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove tx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_tx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free tx adapter %d", ret);
			return ret;
		}
	}

	return 0;
}

void
eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
		uint8_t nb_wrkr_param)
{
	struct eh_app_worker_params *match_wrkr;
	struct eh_event_link_info *links = NULL;
	struct eventmode_conf *em_conf;
	uint32_t lcore_id;
	uint8_t nb_links;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Check if this is eth core */
	if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
		eh_start_worker_eth_core(em_conf, lcore_id);
		return;
	}

	if (app_wrkr == NULL || nb_wrkr_param == 0) {
		EH_LOG_ERR("Invalid args");
		return;
	}

	/*
	 * This is a regular worker thread. The application registers
	 * multiple workers with various capabilities. Run the worker
	 * that matches the capabilities of the event device configured.
	 */

	/* Get the first matching worker for the event device */
	match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
	if (match_wrkr == NULL) {
		EH_LOG_ERR("Failed to match worker registered for lcore %d",
			   lcore_id);
		goto clean_and_exit;
	}

	/* Verify sanity of the matched worker */
	if (eh_verify_match_worker(match_wrkr) != 1) {
		EH_LOG_ERR("Failed to validate the matched worker");
		goto clean_and_exit;
	}

	/* Get worker links */
	nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);

	/* Launch the worker thread */
	match_wrkr->worker_thread(links, nb_links);

	/* Free links info memory */
	free(links);

clean_and_exit:

	/* Flag eth_cores to stop, if started */
	eh_stop_worker_eth_core();
}

uint8_t
eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
{
	struct eventdev_params *eventdev_config;
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get event device conf */
	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);

	if (eventdev_config == NULL) {
		EH_LOG_ERR("Failed to read eventdev config");
		return -EINVAL;
	}

	/*
	 * The last queue is reserved to be used as an atomic queue for the
	 * last stage (eth packet tx stage)
	 */
	return eventdev_config->nb_eventqueue - 1;
}