xref: /dpdk/examples/l3fwd/l3fwd_fib.c (revision d5c4897ecfb2540dc4990d9b367ddbe5013d0e66)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_fib.h>
#include <rte_fib6.h>

#include "l3fwd.h"
#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "l3fwd_altivec.h"
#else
#include "l3fwd_common.h"
#endif
#include "l3fwd_event.h"
#include "l3fwd_route.h"

/* Configure how many packets ahead to prefetch for fib. */
#define FIB_PREFETCH_OFFSET 4

/* A non-existent port id used to denote the fib default next hop. */
#define FIB_DEFAULT_HOP 999
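
/*
 * Lookups that miss in the FIB return FIB_DEFAULT_HOP; the forwarding
 * paths below treat it as "no route" and fall back to sending the
 * packet out of its input port.
 */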

/*
 * If the machine has SSE, NEON or PPC 64 support, multiple packets can
 * be sent at once; otherwise packets are sent one at a time.
 */
#if defined RTE_ARCH_X86 || defined __ARM_NEON \
		|| defined RTE_ARCH_PPC_64
#define FIB_SEND_MULTI
#endif

static struct rte_fib *ipv4_l3fwd_fib_lookup_struct[NB_SOCKETS];
static struct rte_fib6 *ipv6_l3fwd_fib_lookup_struct[NB_SOCKETS];

/* Parse packet type and ip address. */
static inline void
fib_parse_packet(struct rte_mbuf *mbuf,
		uint32_t *ipv4, uint32_t *ipv4_cnt,
		struct rte_ipv6_addr *ipv6,
		uint32_t *ipv6_cnt, uint8_t *ip_type)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	/* IPv4 */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
		*ipv4 = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		/* Store type of packet in type_arr (IPv4=1, IPv6=0). */
		*ip_type = 1;
		(*ipv4_cnt)++;
	}
	/* IPv6 */
	else {
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
		*ipv6 = ipv6_hdr->dst_addr;
		*ip_type = 0;
		(*ipv6_cnt)++;
	}
}
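
/*
 * For example, a burst of { IPv4, IPv6, IPv4 } packets leaves
 * type_arr = { 1, 0, 1 }, the two IPv4 destinations compacted into
 * ipv4_arr and the IPv6 destination in ipv6_arr. The compacted arrays
 * feed the bulk FIB lookups, and type_arr is then used to merge the
 * lookup results back into burst order.
 */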

/*
 * If the machine does not have SSE, NEON or PPC 64 support, the
 * packets are sent one at a time using send_single_packet().
 */
#if !defined FIB_SEND_MULTI
static inline void
process_packet(struct rte_mbuf *pkt, uint16_t *hop)
{
	struct rte_ether_hdr *eth_hdr;

	/* Run rfc1812 checks if the packet is IPv4 and checks are enabled. */
#if defined DO_RFC_1812_CHECKS
	rfc1812_process(
		(struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
						pkt, struct rte_ether_hdr *) +
					1),
		hop, pkt->packet_type);
#endif

	/* Set MAC addresses. */
	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
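	/*
	 * The 64-bit store below writes the 6-byte destination MAC plus
	 * the first 2 bytes of src_addr; the copy on the next line then
	 * rewrites src_addr in full, leaving both fields consistent.
	 */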
	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[*hop];
	rte_ether_addr_copy(&ports_eth_addr[*hop], &eth_hdr->src_addr);
}

static inline void
fib_send_single(int nb_tx, struct lcore_conf *qconf,
		struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
{
	int32_t j;

	for (j = 0; j < nb_tx; j++) {
		process_packet(pkts_burst[j], &hops[j]);
		if (hops[j] == BAD_PORT) {
			rte_pktmbuf_free(pkts_burst[j]);
			continue;
		}
		/* Send single packet. */
		send_single_packet(qconf, pkts_burst[j], hops[j]);
	}
}
#endif

/* Bulk parse, fib lookup and send. */
static inline void
fib_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	uint32_t ipv4_arr[nb_rx];
	struct rte_ipv6_addr ipv6_arr[nb_rx];
	uint16_t hops[SENDM_PORT_OVERHEAD(nb_rx)];
	uint64_t hopsv4[nb_rx], hopsv6[nb_rx];
	uint8_t type_arr[nb_rx];
	uint32_t ipv4_cnt = 0, ipv6_cnt = 0;
	uint32_t ipv4_arr_assem = 0, ipv6_arr_assem = 0;
	uint16_t nh;
	int32_t i;

	/* Prefetch first packets. */
	for (i = 0; i < FIB_PREFETCH_OFFSET && i < nb_rx; i++)
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i], void *));

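	/*
	 * The parse loop below is software-pipelined: while packet i is
	 * parsed, packet i + FIB_PREFETCH_OFFSET is prefetched, and the
	 * tail loop afterwards parses the last FIB_PREFETCH_OFFSET
	 * packets without prefetching past the end of the burst. When
	 * nb_rx <= FIB_PREFETCH_OFFSET the first loop is skipped and
	 * the tail loop parses everything.
	 */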
	/* Parse packet info and prefetch. */
	for (i = 0; i < (nb_rx - FIB_PREFETCH_OFFSET); i++) {
		/* Prefetch packet. */
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
				i + FIB_PREFETCH_OFFSET], void *));
		fib_parse_packet(pkts_burst[i],
				&ipv4_arr[ipv4_cnt], &ipv4_cnt,
				&ipv6_arr[ipv6_cnt], &ipv6_cnt,
				&type_arr[i]);
	}

	/* Parse remaining packet info. */
	for (; i < nb_rx; i++)
		fib_parse_packet(pkts_burst[i],
				&ipv4_arr[ipv4_cnt], &ipv4_cnt,
				&ipv6_arr[ipv6_cnt], &ipv6_cnt,
				&type_arr[i]);

	/* Lookup IPv4 hops if IPv4 packets are present. */
	if (likely(ipv4_cnt > 0))
		rte_fib_lookup_bulk(qconf->ipv4_lookup_struct,
				ipv4_arr, hopsv4, ipv4_cnt);

	/* Lookup IPv6 hops if IPv6 packets are present. */
	if (ipv6_cnt > 0)
		rte_fib6_lookup_bulk(qconf->ipv6_lookup_struct,
				ipv6_arr, hopsv6, ipv6_cnt);

	/* Add IPv4 and IPv6 hops to one array depending on type. */
	for (i = 0; i < nb_rx; i++) {
		if (type_arr[i])
			nh = (uint16_t)hopsv4[ipv4_arr_assem++];
		else
			nh = (uint16_t)hopsv6[ipv6_arr_assem++];
		hops[i] = nh != FIB_DEFAULT_HOP ? nh : portid;
	}

#if defined FIB_SEND_MULTI
	send_packets_multi(qconf, pkts_burst, hops, nb_rx);
#else
	fib_send_single(nb_rx, qconf, pkts_burst, hops);
#endif
}

/* Main fib processing loop. */
int
fib_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned int lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint16_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
			US_PER_S * BURST_TX_DRAIN_US;
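	/*
	 * The expression above rounds the TSC frequency up to whole
	 * cycles per microsecond and scales it by BURST_TX_DRAIN_US,
	 * giving the TX drain interval in TSC cycles. For example, at
	 * 2.5 GHz with a 100 us drain interval this comes to
	 * 2500 * 100 = 250000 cycles.
	 */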

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	const uint16_t n_rx_q = qconf->n_rx_queue;
	const uint16_t n_tx_p = qconf->n_tx_port;
	if (n_rx_q == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < n_rx_q; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
				" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
				lcore_id, portid, queueid);
	}

	cur_tsc = rte_rdtsc();
	prev_tsc = cur_tsc;

	while (!force_quit) {

		/* TX burst queue drain. */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < n_tx_p; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues. */
		for (i = 0; i < n_rx_q; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
					nb_pkt_per_burst);
			if (nb_rx == 0)
				continue;

			/* Use fib to look up the output ports and transmit. */
			fib_send_packets(nb_rx, pkts_burst, portid, qconf);
		}

		cur_tsc = rte_rdtsc();
	}

	return 0;
}

#ifdef RTE_LIB_EVENTDEV
/* One eventdev loop for single and burst using fib. */
static __rte_always_inline void
fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
			evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	int i, nb_enq = 0, nb_deq = 0;
	struct lcore_conf *lconf;
	unsigned int lcore_id;

	uint32_t ipv4_arr[MAX_PKT_BURST];
	struct rte_ipv6_addr ipv6_arr[MAX_PKT_BURST];
	uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
	uint16_t nh, hops[MAX_PKT_BURST];
	uint8_t type_arr[MAX_PKT_BURST];
	uint32_t ipv4_cnt, ipv6_cnt;
	uint32_t ipv4_arr_assem, ipv6_arr_assem;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();

	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
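
	/*
	 * Two TX modes are selected by the flags argument: with
	 * L3FWD_EVENT_TX_ENQ, processed events are forwarded to the
	 * last event queue (tx_q_id), which the eventdev setup is
	 * expected to service with the TX adapter; with
	 * L3FWD_EVENT_TX_DIRECT they are handed straight to
	 * rte_event_eth_tx_adapter_enqueue().
	 */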

	while (!force_quit) {
		/* Read events from RX queues. */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		/* Reset counters. */
		ipv4_cnt = 0;
		ipv6_cnt = 0;
		ipv4_arr_assem = 0;
		ipv6_arr_assem = 0;

		/* Prefetch first packets. */
		for (i = 0; i < FIB_PREFETCH_OFFSET && i < nb_deq; i++)
			rte_prefetch0(rte_pktmbuf_mtod(events[i].mbuf, void *));

		/* Parse packet info and prefetch. */
		for (i = 0; i < (nb_deq - FIB_PREFETCH_OFFSET); i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
						0);

			/* Prefetch packet. */
			rte_prefetch0(rte_pktmbuf_mtod(events[
					i + FIB_PREFETCH_OFFSET].mbuf,
					void *));

			fib_parse_packet(events[i].mbuf,
					&ipv4_arr[ipv4_cnt], &ipv4_cnt,
					&ipv6_arr[ipv6_cnt], &ipv6_cnt,
					&type_arr[i]);
		}

		/* Parse remaining packet info. */
		for (; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
						0);

			fib_parse_packet(events[i].mbuf,
					&ipv4_arr[ipv4_cnt], &ipv4_cnt,
					&ipv6_arr[ipv6_cnt], &ipv6_cnt,
					&type_arr[i]);
		}

		/* Lookup IPv4 hops if IPv4 packets are present. */
		if (likely(ipv4_cnt > 0))
			rte_fib_lookup_bulk(lconf->ipv4_lookup_struct,
					ipv4_arr, hopsv4, ipv4_cnt);

		/* Lookup IPv6 hops if IPv6 packets are present. */
		if (ipv6_cnt > 0)
			rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct,
					ipv6_arr, hopsv6, ipv6_cnt);

		/* Assign the ports looked up in the fib, depending on IPv4 or IPv6. */
		for (i = 0; i < nb_deq; i++) {
			if (type_arr[i])
				nh = (uint16_t)hopsv4[ipv4_arr_assem++];
			else
				nh = (uint16_t)hopsv6[ipv6_arr_assem++];

			hops[i] = nh != FIB_DEFAULT_HOP ?
				  nh :
				  events[i].mbuf->port;
			process_packet(events[i].mbuf, &hops[i]);
			events[i].mbuf->port = hops[i] != BAD_PORT ?
						       hops[i] :
						       events[i].mbuf->port;
		}
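
		/*
		 * Unlike fib_send_single(), packets marked BAD_PORT by
		 * the rfc1812 checks are not freed in the loop above;
		 * they keep their original input port and are enqueued
		 * for transmission like any other packet.
		 */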

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}

	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
				   nb_deq, 0);
}

int __rte_noinline
fib_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

static __rte_always_inline void
fib_process_event_vector(struct rte_event_vector *vec, uint8_t *type_arr,
			 struct rte_ipv6_addr *ipv6_arr, uint64_t *hopsv4, uint64_t *hopsv6,
			 uint32_t *ipv4_arr, uint16_t *hops)
{
	uint32_t ipv4_arr_assem, ipv6_arr_assem;
	struct rte_mbuf **mbufs = vec->mbufs;
	uint32_t ipv4_cnt, ipv6_cnt;
	struct lcore_conf *lconf;
	uint16_t nh;
	int i;

	lconf = &lcore_conf[rte_lcore_id()];

	/* Reset counters. */
	ipv4_cnt = 0;
	ipv6_cnt = 0;
	ipv4_arr_assem = 0;
	ipv6_arr_assem = 0;

	/* Prefetch first packets. */
	for (i = 0; i < FIB_PREFETCH_OFFSET && i < vec->nb_elem; i++)
		rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));

	/* Parse packet info and prefetch. */
	for (i = 0; i < (vec->nb_elem - FIB_PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(mbufs[i + FIB_PREFETCH_OFFSET],
					       void *));
		fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
				 &ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);
	}

	/* Parse remaining packet info. */
	for (; i < vec->nb_elem; i++)
		fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
				 &ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);

	/* Lookup IPv4 hops if IPv4 packets are present. */
	if (likely(ipv4_cnt > 0))
		rte_fib_lookup_bulk(lconf->ipv4_lookup_struct, ipv4_arr, hopsv4,
				    ipv4_cnt);

	/* Lookup IPv6 hops if IPv6 packets are present. */
	if (ipv6_cnt > 0)
		rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct, ipv6_arr,
				     hopsv6, ipv6_cnt);

	/* Assign the ports looked up in the fib, depending on IPv4 or IPv6. */
	for (i = 0; i < vec->nb_elem; i++) {
		if (type_arr[i])
			nh = (uint16_t)hopsv4[ipv4_arr_assem++];
		else
			nh = (uint16_t)hopsv6[ipv6_arr_assem++];
		if (nh != FIB_DEFAULT_HOP)
			hops[i] = nh;
		else
			hops[i] = vec->attr_valid ? vec->port :
						    vec->mbufs[i]->port;
	}

#if defined FIB_SEND_MULTI
	uint16_t k;
	k = RTE_ALIGN_FLOOR(vec->nb_elem, FWDSTEP);
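
	/*
	 * RTE_ALIGN_FLOOR rounds nb_elem down to a multiple of FWDSTEP
	 * so that processx4_step3() can rewrite the MAC headers of
	 * FWDSTEP packets per call on the vectorized path; the
	 * remainder is handled one packet at a time below.
	 */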

	for (i = 0; i != k; i += FWDSTEP)
		processx4_step3(&vec->mbufs[i], &hops[i]);
	for (; i < vec->nb_elem; i++)
		process_packet(vec->mbufs[i], &hops[i]);
#else
	for (i = 0; i < vec->nb_elem; i++)
		process_packet(vec->mbufs[i], &hops[i]);
#endif

	process_event_vector(vec, hops);
}

static __rte_always_inline void
fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
		      const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	uint8_t *type_arr;
	struct rte_ipv6_addr *ipv6_arr;
	int nb_enq = 0, nb_deq = 0, i;
	uint64_t *hopsv4, *hopsv6;
	uint32_t *ipv4_arr;
	uint16_t *hops;
	uintptr_t mem;

	mem = (uintptr_t)rte_zmalloc(
		"vector_fib",
		(sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint64_t) +
		 sizeof(uint64_t) + sizeof(uint16_t) + sizeof(uint8_t *) +
		 sizeof(struct rte_ipv6_addr)) *
			evt_rsrc->vector_size,
		RTE_CACHE_LINE_SIZE);
	if (mem == 0)
		return;
	ipv4_arr = (uint32_t *)mem;
	type_arr = (uint8_t *)&ipv4_arr[evt_rsrc->vector_size];
	hopsv4 = (uint64_t *)&type_arr[evt_rsrc->vector_size];
	hopsv6 = (uint64_t *)&hopsv4[evt_rsrc->vector_size];
	hops = (uint16_t *)&hopsv6[evt_rsrc->vector_size];
	ipv6_arr = (struct rte_ipv6_addr *)&hops[evt_rsrc->vector_size];
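
	/*
	 * The single slab allocated above is carved into the per-vector
	 * scratch arrays used by fib_process_event_vector(), laid out
	 * as ipv4_arr | type_arr | hopsv4 | hopsv6 | hops | ipv6_arr,
	 * each sized for vector_size elements. The per-element
	 * sizeof(uint8_t *) term in the allocation is not assigned to
	 * any array and appears to be slack.
	 */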

	if (event_p_id < 0) {
		rte_free((void *)mem);
		return;
	}

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__,
		rte_lcore_id());

	while (!force_quit) {
		/* Read events from RX queues. */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
						 deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			fib_process_event_vector(events[i].vec, type_arr,
						 ipv6_arr, hopsv4, hopsv6,
						 ipv4_arr, hops);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
							 events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(
				event_d_id, event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq, 0);
		}
	}

	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
				   nb_deq, 1);
	rte_free((void *)mem);
}

int __rte_noinline
fib_event_main_loop_tx_d_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}
#endif

/* Function to setup fib. 8< */
void
setup_fib(const int socketid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_fib6_conf config;
	struct rte_fib_conf config_ipv4 = { 0 };
	int i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* Create the fib IPv4 table. */
	config_ipv4.type = RTE_FIB_DIR24_8;
	config_ipv4.max_routes = (1 << 16);
	config_ipv4.rib_ext_sz = 0;
	config_ipv4.default_nh = FIB_DEFAULT_HOP;
	config_ipv4.dir24_8.nh_sz = RTE_FIB_DIR24_8_4B;
	config_ipv4.dir24_8.num_tbl8 = (1 << 15);
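
	/*
	 * DIR24_8 is a two-level lookup structure: a 2^24-entry table
	 * indexed by the top 24 address bits, extended by tbl8 groups
	 * for prefixes longer than /24. Next hops are stored in 4 bytes
	 * (RTE_FIB_DIR24_8_4B) and up to 2^15 tbl8 groups may be used.
	 */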
	snprintf(s, sizeof(s), "IPV4_L3FWD_FIB_%d", socketid);
	ipv4_l3fwd_fib_lookup_struct[socketid] =
			rte_fib_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_fib_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd FIB table on socket %d\n",
			socketid);

	/* Populate the fib IPv4 table. */
	for (i = 0; i < route_num_v4; i++) {
		struct in_addr in;

		/* Skip unused ports. */
		if (((1 << route_base_v4[i].if_out) &
				enabled_port_mask) == 0)
			continue;

		ret = rte_eth_dev_info_get(route_base_v4[i].if_out, &dev_info);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to get device info for port %u\n",
				 route_base_v4[i].if_out);

		ret = rte_fib_add(ipv4_l3fwd_fib_lookup_struct[socketid],
			route_base_v4[i].ip,
			route_base_v4[i].depth,
			route_base_v4[i].if_out);

		if (ret < 0) {
			free(route_base_v4);
			rte_exit(EXIT_FAILURE,
					"Unable to add entry %u to the l3fwd FIB table on socket %d\n",
					i, socketid);
		}

		in.s_addr = htonl(route_base_v4[i].ip);
		if (inet_ntop(AF_INET, &in, abuf, sizeof(abuf)) != NULL) {
			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
			       route_base_v4[i].depth,
			       route_base_v4[i].if_out,
			       rte_dev_name(dev_info.device));
		} else {
			printf("FIB: IPv4 route added to port %d [%s]\n",
			       route_base_v4[i].if_out,
			       rte_dev_name(dev_info.device));
		}
	}
	/* >8 End of setup fib. */

	/* Create the fib IPv6 table. */
	snprintf(s, sizeof(s), "IPV6_L3FWD_FIB_%d", socketid);

	config.type = RTE_FIB6_TRIE;
	config.max_routes = (1 << 16) - 1;
	config.rib_ext_sz = 0;
	config.default_nh = FIB_DEFAULT_HOP;
	config.trie.nh_sz = RTE_FIB6_TRIE_4B;
	config.trie.num_tbl8 = (1 << 15);
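
	/*
	 * RTE_FIB6_TRIE implements a multi-level trie over the 128-bit
	 * address, extended with tbl8 groups much like DIR24_8; next
	 * hops are again 4 bytes wide with at most 2^15 tbl8 groups.
	 */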
	ipv6_l3fwd_fib_lookup_struct[socketid] = rte_fib6_create(s, socketid,
			&config);
	if (ipv6_l3fwd_fib_lookup_struct[socketid] == NULL) {
		free(route_base_v4);
		rte_exit(EXIT_FAILURE,
				"Unable to create the l3fwd FIB table on socket %d\n",
				socketid);
	}

	/* Populate the fib IPv6 table. */
	for (i = 0; i < route_num_v6; i++) {

		/* Skip unused ports. */
		if (((1 << route_base_v6[i].if_out) &
				enabled_port_mask) == 0)
			continue;

		ret = rte_eth_dev_info_get(route_base_v6[i].if_out, &dev_info);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to get device info for port %u\n",
				 route_base_v6[i].if_out);

		ret = rte_fib6_add(ipv6_l3fwd_fib_lookup_struct[socketid],
			&route_base_v6[i].ip6,
			route_base_v6[i].depth,
			route_base_v6[i].if_out);

		if (ret < 0) {
			free(route_base_v4);
			free(route_base_v6);
			rte_exit(EXIT_FAILURE,
					"Unable to add entry %u to the l3fwd FIB table on socket %d\n",
					i, socketid);
		}

		if (inet_ntop(AF_INET6, &route_base_v6[i].ip6,
				abuf, sizeof(abuf)) != NULL) {
			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
			       route_base_v6[i].depth,
			       route_base_v6[i].if_out,
			       rte_dev_name(dev_info.device));
		} else {
			printf("FIB: IPv6 route added to port %d [%s]\n",
			       route_base_v6[i].if_out,
			       rte_dev_name(dev_info.device));
		}
	}
}

/* Return ipv4 fib lookup struct. */
void *
fib_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_fib_lookup_struct[socketid];
}

/* Return ipv6 fib lookup struct. */
void *
fib_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_fib_lookup_struct[socketid];
}
779