/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_service.h> /* rte_service_* lcore mapping APIs used below */
#include <rte_spinlock.h>

#include "l2fwd_event.h"

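/*
 * Flag bits composing the worker loop variants below: single vs. burst
 * dequeue, Tx via event queue enqueue vs. direct Tx adapter enqueue,
 * and optional destination MAC address rewriting.
 */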
#define L2FWD_EVENT_SINGLE	0x1
#define L2FWD_EVENT_BURST	0x2
#define L2FWD_EVENT_TX_DIRECT	0x4
#define L2FWD_EVENT_TX_ENQ	0x8
#define L2FWD_EVENT_UPDT_MAC	0x10

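/*
 * Map the given service to the service lcore currently running the
 * fewest services and start that lcore. Returns 0 on success, or
 * -ENOENT when no service lcore exists or the mapping fails.
 */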
static inline int
l2fwd_event_service_enable(uint32_t service_id)
{
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	int32_t slcore_count;

	if (!rte_service_lcore_count())
		return -ENOENT;

	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	if (slcore_count < 0)
		return -ENOENT;
	/* Pick the core which has the fewest services running. */
	while (slcore_count--) {
		/* Reset default mapping */
		if (rte_service_map_lcore_set(service_id,
					slcore_array[slcore_count], 0) != 0)
			return -ENOENT;
		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
		}
	}
	if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
		return -ENOENT;
	rte_service_lcore_start(slcore);

	return 0;
}

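/*
 * When the event device cannot schedule internally (it lacks
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED), its scheduling service, and any
 * service used by software Rx/Tx adapters, must run on service lcores.
 */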
void
l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_info evdev_info;
	uint32_t service_id, caps;
	int ret, i;

	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in getting eventdev service ID\n");
		/* -ESRCH: the device does not require a service core. */
		if (ret == 0)
			l2fwd_event_service_enable(service_id);
	}

	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Rx adapter[%d] caps\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		ret = rte_event_eth_rx_adapter_service_id_get(
				evt_rsrc->rx_adptr.rx_adptr[i],
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in getting Rx adapter[%d] service ID\n",
				  evt_rsrc->rx_adptr.rx_adptr[i]);
		if (ret == 0)
			l2fwd_event_service_enable(service_id);
	}

	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
		if (ret < 0)
			rte_panic("Failed to get Tx adapter[%d] caps\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		ret = rte_event_eth_tx_adapter_service_id_get(
				evt_rsrc->tx_adptr.tx_adptr[i],
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_panic("Error in getting Tx adapter[%d] service ID\n",
				  evt_rsrc->tx_adptr.tx_adptr[i]);
		if (ret == 0)
			l2fwd_event_service_enable(service_id);
	}
}

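/*
 * Probe the Tx adapter capability of every ethdev port. Any port that
 * lacks RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT forces the generic
 * (Tx queue based) mode for the whole application; otherwise events are
 * transmitted directly through the adapter's internal port.
 */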
static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
	uint32_t caps = 0;
	uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_panic("Invalid capability for Tx adptr port %d\n",
				  i);

		evt_rsrc->tx_mode_q |= !(caps &
				   RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (evt_rsrc->tx_mode_q)
		l2fwd_event_set_generic_ops(&evt_rsrc->ops);
	else
		l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}

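/*
 * Hand out the next unused event port ID. The spinlock serializes the
 * shared index across the worker lcores calling this at startup.
 */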
static __rte_noinline int
l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
{
	static int index;
	int port_id;

	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}

	port_id = evt_rsrc->evp.event_p_id[index];
	index++;
	rte_spinlock_unlock(&evt_rsrc->evp.lock);

	return port_id;
}

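/*
 * Forward one event: look up the destination port for the mbuf,
 * optionally rewrite the Ethernet addresses, and prepare the event for
 * either Tx queue enqueue (FORWARD op) or direct Tx adapter enqueue.
 * Per-port statistics are maintained only when the stats timer period
 * is non-zero.
 */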
static __rte_always_inline void
l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
		const uint8_t tx_q_id, const uint64_t timer_period,
		const uint32_t flags)
{
	struct rte_mbuf *mbuf = ev->mbuf;
	uint16_t dst_port;

	rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
	dst_port = rsrc->dst_ports[mbuf->port];

	if (timer_period > 0)
		__atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
				1, __ATOMIC_RELAXED);
	mbuf->port = dst_port;

	if (flags & L2FWD_EVENT_UPDT_MAC)
		l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);

	if (flags & L2FWD_EVENT_TX_ENQ) {
		ev->queue_id = tx_q_id;
		ev->op = RTE_EVENT_OP_FORWARD;
	}

	if (flags & L2FWD_EVENT_TX_DIRECT)
		rte_event_eth_tx_adapter_txq_set(mbuf, 0);

	if (timer_period > 0)
		__atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
				1, __ATOMIC_RELAXED);
}

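/*
 * Single-event worker loop: dequeue one event at a time and retry the
 * Tx enqueue until it is accepted or the application is told to quit.
 */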
static __rte_always_inline void
l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event ev;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev */
		if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
			continue;

		l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);

		if (flags & L2FWD_EVENT_TX_ENQ) {
			/* Retry until the event is enqueued. */
			while (!rte_event_enqueue_burst(event_d_id, port_id,
							&ev, 1) &&
					!rsrc->force_quit)
				;
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
								port_id,
								&ev, 1, 0) &&
					!rsrc->force_quit)
				;
		}
	}
}

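/*
 * Burst worker loop: dequeue up to deq_depth events at once and keep
 * enqueueing the remainder until the whole burst has been accepted.
 */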
static __rte_always_inline void
l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
		       const uint32_t flags)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	const int port_id = l2fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1];
	const uint64_t timer_period = rsrc->timer_period;
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint8_t deq_len = evt_rsrc->deq_depth;
	struct rte_event ev[MAX_PKT_BURST];
	uint16_t nb_rx, nb_tx;
	uint8_t i;

	if (port_id < 0)
		return;

	printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
		rte_lcore_id());

	while (!rsrc->force_quit) {
		/* Read packet from eventdev */
		nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
						deq_len, 0);
		if (nb_rx == 0)
			continue;

		for (i = 0; i < nb_rx; i++) {
			l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
					flags);
		}

		if (flags & L2FWD_EVENT_TX_ENQ) {
			nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
							ev, nb_rx);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_enqueue_burst(event_d_id,
						port_id, ev + nb_tx,
						nb_rx - nb_tx);
		}

		if (flags & L2FWD_EVENT_TX_DIRECT) {
			nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
								 port_id, ev,
								 nb_rx, 0);
			while (nb_tx < nb_rx && !rsrc->force_quit)
				nb_tx += rte_event_eth_tx_adapter_enqueue(
						event_d_id, port_id,
						ev + nb_tx, nb_rx - nb_tx, 0);
		}
	}
}

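/* Dispatch to the single-event or burst loop variant named in the flags. */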
static __rte_always_inline void
l2fwd_event_loop(struct l2fwd_resources *rsrc,
			const uint32_t flags)
{
	if (flags & L2FWD_EVENT_SINGLE)
		l2fwd_event_loop_single(rsrc, flags);
	if (flags & L2FWD_EVENT_BURST)
		l2fwd_event_loop_burst(rsrc, flags);
}

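/*
 * One noinline wrapper per flag combination so that the always-inline
 * loops above are specialized at compile time; the dispatch table in
 * l2fwd_event_resource_setup() picks one of them at runtime.
 */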
static void __rte_noinline
l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc,
			 L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
}

static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
{
	l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
			L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}

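/*
 * One-time event-mode setup: allocate the event resources, select the
 * generic or internal-port ops, configure the event device, queues,
 * ports and Rx/Tx adapters, start the device, and pick the worker loop
 * matching the runtime options.
 */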
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
	/* [MAC_UPDT][TX_MODE][BURST] */
	const event_loop_cb event_loop[2][2][2] = {
		[0][0][0] = l2fwd_event_main_loop_tx_d,
		[0][0][1] = l2fwd_event_main_loop_tx_d_brst,
		[0][1][0] = l2fwd_event_main_loop_tx_q,
		[0][1][1] = l2fwd_event_main_loop_tx_q_brst,
		[1][0][0] = l2fwd_event_main_loop_tx_d_mac,
		[1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
		[1][1][0] = l2fwd_event_main_loop_tx_q_mac,
		[1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
	};
	struct l2fwd_event_resources *evt_rsrc;
	uint32_t event_queue_cfg;
	int ret;

	if (!rte_event_dev_count())
		rte_panic("No Eventdev found\n");

	evt_rsrc = rte_zmalloc("l2fwd_event",
				 sizeof(struct l2fwd_event_resources), 0);
	if (evt_rsrc == NULL)
		rte_panic("Failed to allocate memory\n");

	rsrc->evt_rsrc = evt_rsrc;

	/* Setup eventdev capability callbacks */
	l2fwd_event_capability_setup(evt_rsrc);

	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);

	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);

	/* Event port configuration */
	evt_rsrc->ops.event_port_setup(rsrc);

	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup(rsrc);

	/* Start event device */
	ret = rte_event_dev_start(evt_rsrc->event_d_id);
	if (ret < 0)
		rte_panic("Error in starting eventdev\n");

	evt_rsrc->ops.l2fwd_event_loop = event_loop
					[rsrc->mac_updating]
					[evt_rsrc->tx_mode_q]
					[evt_rsrc->has_burst];
}