/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "l2fwd_event.h"

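/*
 * Flag bits used to specialize the worker main loops: dequeue mode (single
 * event vs. burst), Tx mode (direct enqueue to the Tx adapter vs. forward
 * to a Tx queue) and optional destination MAC address rewriting.
 */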
#define L2FWD_EVENT_SINGLE	0x1
#define L2FWD_EVENT_BURST	0x2
#define L2FWD_EVENT_TX_DIRECT	0x4
#define L2FWD_EVENT_TX_ENQ	0x8
#define L2FWD_EVENT_UPDT_MAC	0x10

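/*
 * Map the given service to the service lcore currently running the fewest
 * services and start that lcore. Returns -ENOENT when no service lcore is
 * available or the mapping cannot be updated.
 */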
static inline int
l2fwd_event_service_enable(uint32_t service_id)
{
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	int32_t slcore_count;

	if (!rte_service_lcore_count())
		return -ENOENT;

	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	if (slcore_count < 0)
		return -ENOENT;
	/* Get the core with the fewest services running. */
	while (slcore_count--) {
		/* Reset default mapping */
		if (rte_service_map_lcore_set(service_id,
					slcore_array[slcore_count], 0) != 0)
			return -ENOENT;
		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
		}
	}
	if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
		return -ENOENT;
	rte_service_lcore_start(slcore);

	return 0;
}

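/*
 * Bind the eventdev scheduler service (when the device lacks distributed
 * scheduling) and any Rx/Tx adapter services to service lcores.
 */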
void
l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_info evdev_info;
	uint32_t service_id, caps;
	int ret, i;

	/* Running eventdev scheduler service on service core. 8< */
	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting eventdev service\n");
		l2fwd_event_service_enable(service_id);
	}
	/* >8 End of running eventdev scheduler service on service core. */

	/* Gets service ID for RX/TX adapters. 8< */
	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Rx adapter[%d] caps\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		ret = rte_event_eth_rx_adapter_service_id_get(
				evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting Rx adapter[%d] service\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		l2fwd_event_service_enable(service_id);
	}

	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Tx adapter[%d] caps\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		ret = rte_event_eth_tx_adapter_service_id_get(
				evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in starting Tx adapter[%d] service\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		l2fwd_event_service_enable(service_id);
	}
	/* >8 End of get service ID for RX/TX adapters. */
}

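/*
 * Pick the generic (service based) or internal-port event ops depending on
 * the Tx adapter capabilities reported for each ethdev port.
 */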
static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
	uint32_t caps = 0;
	uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_panic("Invalid capability for Tx adptr port %d\n",
				  i);

		evt_rsrc->tx_mode_q |= !(caps &
				   RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (evt_rsrc->tx_mode_q)
		l2fwd_event_set_generic_ops(&evt_rsrc->ops);
	else
		l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}

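/* Hand out the next unused event port; returns -1 when none are left. */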
static __rte_noinline int
l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
{
	static int index;
	int port_id;

	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}

	port_id = evt_rsrc->evp.event_p_id[index];
	index++;
	rte_spinlock_unlock(&evt_rsrc->evp.lock);

	return port_id;
}

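/*
 * Per-event forwarding work: look up the destination port, optionally
 * rewrite the destination MAC, and prepare the event either for forwarding
 * to the Tx adapter queue or for direct Tx adapter enqueue.
 */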
static __rte_always_inline void
l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
		const uint8_t tx_q_id, const uint64_t timer_period,
		const uint32_t flags)
{
	struct rte_mbuf *mbuf = ev->mbuf;
	uint16_t dst_port;

	rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
	dst_port = rsrc->dst_ports[mbuf->port];

	if (timer_period > 0)
		rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
				1, rte_memory_order_relaxed);
	mbuf->port = dst_port;

	if (flags & L2FWD_EVENT_UPDT_MAC)
		l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);

	if (flags & L2FWD_EVENT_TX_ENQ) {
		ev->queue_id = tx_q_id;
		ev->op = RTE_EVENT_OP_FORWARD;
	}

	if (flags & L2FWD_EVENT_TX_DIRECT)
		rte_event_eth_tx_adapter_txq_set(mbuf, 0);

	if (timer_period > 0)
		rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
				1, rte_memory_order_relaxed);
}

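/* Worker loop dequeuing and forwarding one event at a time. */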
static __rte_always_inline void
l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t enq = 0, deq = 0;
	struct rte_event ev;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev */
		deq = rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0);
		if (!deq)
			continue;

		l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);

		if (flags & L2FWD_EVENT_TX_ENQ) {
			do {
				enq = rte_event_enqueue_burst(event_d_id,
							      port_id, &ev, 1);
			} while (!enq && !rsrc->force_quit);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			do {
				enq = rte_event_eth_tx_adapter_enqueue(
					event_d_id, port_id, &ev, 1, 0);
			} while (!enq && !rsrc->force_quit);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, &ev, enq, deq, 0);
}

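/* Worker loop handling events in bursts of up to deq_depth. */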
static __rte_always_inline void
l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
		       const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev. 8< */
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
					flags);
		}
		/* >8 End of reading packets from eventdev. */

		if (flags & L2FWD_EVENT_TX_ENQ) {
			/* Forwarding to destination ports. 8< */
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
							ev, nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(event_d_id,
						port_id, ev + nb_tx,
						nb_rx - nb_tx);
			/* >8 End of forwarding to destination ports. */
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
								 port_id, ev,
								 nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx, 0);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_tx, nb_rx, 0);
}

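/* Dispatch to the single-event or burst worker loop based on the flags. */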
static __rte_always_inline void
l2fwd_event_loop(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	if (flags & L2FWD_EVENT_SINGLE)
		l2fwd_event_loop_single(rsrc, flags);
	if (flags & L2FWD_EVENT_BURST)
		l2fwd_event_loop_burst(rsrc, flags);
}

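/*
 * The __rte_noinline wrappers below bake each flag combination into its own
 * main loop symbol, letting the always-inlined l2fwd_event_loop() be
 * specialized at compile time for that combination.
 */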
static void __rte_noinline
l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc,
			 L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

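/*
 * Vector variant of the forwarding work: with valid vector attributes all
 * mbufs share one source port/queue and can be updated in bulk, otherwise
 * each mbuf is handled individually.
 */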
static __rte_always_inline void
l2fwd_event_vector_fwd(struct l2fwd_resources *rsrc,
		       struct rte_event_vector *vec,
		       const uint64_t timer_period, const uint32_t flags)
{
	struct rte_mbuf **mbufs = vec->mbufs;
	uint16_t i, j;

	rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], void *));

	/* With valid vector attributes, mbufs are from the same port/queue */
	if (vec->attr_valid) {
		vec->port = rsrc->dst_ports[mbufs[0]->port];
		if (flags & L2FWD_EVENT_TX_DIRECT)
			vec->queue = 0;

		if (timer_period > 0)
			rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
					   vec->nb_elem, rte_memory_order_relaxed);

		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(
					mbufs[i], vec->port,
					&rsrc->eth_addr[vec->port]);
		}

		if (timer_period > 0)
			rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
					   vec->nb_elem, rte_memory_order_relaxed);
	} else {
		for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
			if (timer_period > 0)
				rte_atomic_fetch_add_explicit(
					&rsrc->port_stats[mbufs[i]->port].rx, 1,
					rte_memory_order_relaxed);

			if (j < vec->nb_elem)
				rte_prefetch0(
					rte_pktmbuf_mtod(mbufs[j], void *));

			mbufs[i]->port = rsrc->dst_ports[mbufs[i]->port];

			if (flags & L2FWD_EVENT_UPDT_MAC)
				l2fwd_mac_updating(
					mbufs[i], mbufs[i]->port,
					&rsrc->eth_addr[mbufs[i]->port]);

			if (flags & L2FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);

			if (timer_period > 0)
				rte_atomic_fetch_add_explicit(
					&rsrc->port_stats[mbufs[i]->port].tx, 1,
					rte_memory_order_relaxed);
		}
	}
}

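/* Worker loop dequeuing event vectors instead of single-mbuf events. */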
static __rte_always_inline void
l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx = 0, nb_tx = 0;
	uint8_t i;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
	       rte_lcore_id());

	while (!rsrc->force_quit) {
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			if (flags & L2FWD_EVENT_TX_ENQ) {
				ev[i].queue_id = tx_q_id;
				ev[i].op = RTE_EVENT_OP_FORWARD;
			}

			l2fwd_event_vector_fwd(rsrc, ev[i].vec, timer_period,
					       flags);
		}

		if (flags & L2FWD_EVENT_TX_ENQ) {
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id, ev,
							nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(
					event_d_id, port_id, ev + nb_tx,
					nb_rx - nb_tx);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(
				event_d_id, port_id, ev, nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
					event_d_id, port_id, ev + nb_tx,
					nb_rx - nb_tx, 0);
		}
	}

	l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_tx, nb_rx, 1);
}

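/*
 * The vector loop always dequeues in bursts, so the single and burst
 * wrappers below use identical flag combinations.
 */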
static void __rte_noinline
l2fwd_event_main_loop_tx_d_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac_vec(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop_vector(rsrc,
				L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}

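/*
 * Top-level eventdev setup: select the capability-specific ops, configure
 * the event device, queues, ports and Rx/Tx adapters, start the device and
 * pick the worker main loop matching the configured mode.
 */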
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
	/* [VECTOR][MAC_UPDT][TX_MODE][BURST] */
	const event_loop_cb event_loop[2][2][2][2] = {
		[0][0][0][0] = l2fwd_event_main_loop_tx_d,
		[0][0][0][1] = l2fwd_event_main_loop_tx_d_brst,
		[0][0][1][0] = l2fwd_event_main_loop_tx_q,
		[0][0][1][1] = l2fwd_event_main_loop_tx_q_brst,
		[0][1][0][0] = l2fwd_event_main_loop_tx_d_mac,
		[0][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
		[0][1][1][0] = l2fwd_event_main_loop_tx_q_mac,
		[0][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
		[1][0][0][0] = l2fwd_event_main_loop_tx_d_vec,
		[1][0][0][1] = l2fwd_event_main_loop_tx_d_brst_vec,
		[1][0][1][0] = l2fwd_event_main_loop_tx_q_vec,
		[1][0][1][1] = l2fwd_event_main_loop_tx_q_brst_vec,
		[1][1][0][0] = l2fwd_event_main_loop_tx_d_mac_vec,
		[1][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac_vec,
		[1][1][1][0] = l2fwd_event_main_loop_tx_q_mac_vec,
		[1][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac_vec,
	};
	struct l2fwd_event_resources *evt_rsrc;
	uint32_t event_queue_cfg;
	int ret;

	if (!rte_event_dev_count())
		rte_panic("No Eventdev found\n");

	evt_rsrc = rte_zmalloc("l2fwd_event",
				 sizeof(struct l2fwd_event_resources), 0);
	if (evt_rsrc == NULL)
		rte_panic("Failed to allocate memory\n");

	rsrc->evt_rsrc = evt_rsrc;

	/* Setup eventdev capability callbacks */
	l2fwd_event_capability_setup(evt_rsrc);

	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);

	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);

	/* Event port configuration */
	evt_rsrc->ops.event_port_setup(rsrc);

	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup(rsrc);

	/* Start event device */
	ret = rte_event_dev_start(evt_rsrc->event_d_id);
	if (ret < 0)
		rte_panic("Error in starting eventdev\n");

	evt_rsrc->ops.l2fwd_event_loop =
		event_loop[rsrc->evt_vec.enabled][rsrc->mac_updating]
			  [evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
}
587