/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifdef RTE_LIB_EVENTDEV
#include <stdbool.h>
#include <getopt.h>

#include <rte_malloc.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

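/*
 * Return the singleton eventdev resource block, allocating and
 * zero-initializing it on first use (atomic schedule type, one ethdev
 * Rx queue by default).
 */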
struct l3fwd_event_resources *
l3fwd_get_eventdev_rsrc(void)
{
	static struct l3fwd_event_resources *rsrc;

	if (rsrc != NULL)
		return rsrc;

	rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
	if (rsrc != NULL) {
		rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
		rsrc->eth_rx_queues = 1;
		return rsrc;
	}

	rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");

	return NULL;
}

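/*
 * Configure every ethdev port enabled in the port mask: clamp the RSS
 * hash functions and offloads to what the device supports, size and
 * create the mbuf pool(s), and set up the Rx queues and the single Tx
 * queue of each port.
 */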
static void
l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint16_t nb_ports = rte_eth_dev_count_avail();
	unsigned int nb_lcores = rte_lcore_count();
	struct rte_eth_conf local_port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	struct rte_eth_rxconf rxconf;
	unsigned int nb_mbuf;
	uint16_t port_id;
	uint8_t eth_qid;
	int32_t ret;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		local_port_conf = *port_conf;
		/* skip ports that are not enabled */
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", port_id);
		fflush(stdout);
		printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
		       evt_rsrc->eth_rx_queues);

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			rte_panic("Error during getting device (port %u) info: "
				  "%s\n", port_id, strerror(-ret));

		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Invalid max packet length: %u (port %u)\n",
				max_pkt_len, port_id);

		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
						dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf->rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function "
			       "based on hardware support, "
			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
			       port_id,
			       port_conf->rx_adv_conf.rss_conf.rss_hf,
			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
					    1, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot configure device: err=%d, port=%d\n",
				 ret, port_id);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, "
				 "port=%d\n", ret, port_id);

		rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
		print_ethaddr(" Address:", &ports_eth_addr[port_id]);
		printf(", ");
		print_ethaddr("Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[port_id]);
		printf(", ");

		/* prepare source MAC for each port. */
		rte_ether_addr_copy(&ports_eth_addr[port_id],
			(struct rte_ether_addr *)(val_eth + port_id) + 1);

		/* init memory */
		if (!evt_rsrc->per_port_pool) {
			/* port_id = 0; this is *not* signifying the first port,
			 * rather, it signifies that port_id is ignored.
			 */
			nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
					  nb_ports * nb_txd +
					  nb_ports * nb_lcores *
							MAX_PKT_BURST +
					  nb_lcores * MEMPOOL_CACHE_SIZE,
					  8192u);
			ret = init_mem(0, nb_mbuf);
		} else {
			/* account for the Rx and Tx descriptor rings plus
			 * in-flight bursts and the per-lcore mempool cache
			 */
			nb_mbuf = RTE_MAX(nb_rxd + nb_txd +
					  nb_lcores * MAX_PKT_BURST +
					  nb_lcores * MEMPOOL_CACHE_SIZE,
					  8192u);
			ret = init_mem(port_id, nb_mbuf);
		}
		/* init Rx queues per port */
		rxconf = dev_info.default_rxconf;
		rxconf.offloads = local_port_conf.rxmode.offloads;

		for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
		     eth_qid++) {
			if (!evt_rsrc->per_port_pool)
				ret = rte_eth_rx_queue_setup(port_id, eth_qid,
					nb_rxd, 0, &rxconf,
					evt_rsrc->pkt_pool[0][0]);
			else
				ret = rte_eth_rx_queue_setup(port_id, eth_qid,
					nb_rxd, 0, &rxconf,
					evt_rsrc->pkt_pool[port_id][0]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					 "rte_eth_rx_queue_setup: err=%d, "
					 "port=%d, eth_qid: %d\n",
					 ret, port_id, eth_qid);
		}

		/* init one Tx queue per port */
		txconf = dev_info.default_txconf;
		txconf.offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_tx_queue_setup: err=%d, "
				 "port=%d\n", ret, port_id);
	}
}

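/*
 * Probe the Tx adapter capability of every port; if any port lacks an
 * internal event port (RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT),
 * fall back to the generic ops that transmit through a Tx adapter
 * event queue.
 */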
static void
l3fwd_event_capability_setup(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint32_t caps = 0;
	uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_exit(EXIT_FAILURE,
				 "Invalid capability for Tx adptr port %d\n",
				 i);

		evt_rsrc->tx_mode_q |= !(caps &
				   RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (evt_rsrc->tx_mode_q)
		l3fwd_event_set_generic_ops(&evt_rsrc->ops);
	else
		l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}

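/*
 * Hand out the next unused event port ID to a worker lcore; the static
 * index is guarded by the resource spinlock.
 */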
int
l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
{
	static int index;
	int port_id;

	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		/* release the lock on the error path too */
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}

	port_id = evt_rsrc->evp.event_p_id[index];
	index++;
	rte_spinlock_unlock(&evt_rsrc->evp.lock);

	return port_id;
}

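/*
 * Top-level event-mode setup: configure the ethdev ports, the event
 * device with its queues and ports, and the Rx/Tx adapters, start the
 * event device, then pick the worker main loop matching the runtime
 * configuration.
 */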
void
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	/* main-loop jump tables, indexed as
	 * [vector_enabled][tx_mode_q][has_burst]
	 */
	const event_loop_cb lpm_event_loop[2][2][2] = {
		[0][0][0] = lpm_event_main_loop_tx_d,
		[0][0][1] = lpm_event_main_loop_tx_d_burst,
		[0][1][0] = lpm_event_main_loop_tx_q,
		[0][1][1] = lpm_event_main_loop_tx_q_burst,
		[1][0][0] = lpm_event_main_loop_tx_d_vector,
		[1][0][1] = lpm_event_main_loop_tx_d_burst_vector,
		[1][1][0] = lpm_event_main_loop_tx_q_vector,
		[1][1][1] = lpm_event_main_loop_tx_q_burst_vector,
	};
	const event_loop_cb em_event_loop[2][2][2] = {
		[0][0][0] = em_event_main_loop_tx_d,
		[0][0][1] = em_event_main_loop_tx_d_burst,
		[0][1][0] = em_event_main_loop_tx_q,
		[0][1][1] = em_event_main_loop_tx_q_burst,
		[1][0][0] = em_event_main_loop_tx_d_vector,
		[1][0][1] = em_event_main_loop_tx_d_burst_vector,
		[1][1][0] = em_event_main_loop_tx_q_vector,
		[1][1][1] = em_event_main_loop_tx_q_burst_vector,
	};
	const event_loop_cb fib_event_loop[2][2][2] = {
		[0][0][0] = fib_event_main_loop_tx_d,
		[0][0][1] = fib_event_main_loop_tx_d_burst,
		[0][1][0] = fib_event_main_loop_tx_q,
		[0][1][1] = fib_event_main_loop_tx_q_burst,
		[1][0][0] = fib_event_main_loop_tx_d_vector,
		[1][0][1] = fib_event_main_loop_tx_d_burst_vector,
		[1][1][0] = fib_event_main_loop_tx_q_vector,
		[1][1][1] = fib_event_main_loop_tx_q_burst_vector,
	};
	uint32_t event_queue_cfg;
	int ret;

	if (!evt_rsrc->enabled)
		return;

	if (!rte_event_dev_count())
		rte_exit(EXIT_FAILURE, "No Eventdev found");

	/* Setup eventdev capability callbacks */
	l3fwd_event_capability_setup();

	/* Ethernet device configuration */
	l3fwd_eth_dev_port_setup(port_conf);

	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup();

	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(event_queue_cfg);

	/* Event port configuration */
	evt_rsrc->ops.event_port_setup();

	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup();

	/* Start event device */
	ret = rte_event_dev_start(evt_rsrc->event_d_id);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error in starting eventdev");

	evt_rsrc->ops.lpm_event_loop =
		lpm_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
			      [evt_rsrc->has_burst];

	evt_rsrc->ops.em_event_loop =
		em_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
			     [evt_rsrc->has_burst];

	evt_rsrc->ops.fib_event_loop =
		fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
			      [evt_rsrc->has_burst];
}

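/*
 * Free every mbuf carried by an array of vector events, then return
 * the vector objects themselves to their mempool.
 */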
static void
l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free_bulk(
			&events[i].vec->mbufs[events[i].vec->elem_offset],
			events[i].vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}

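/*
 * rte_event_port_quiesce() callback: drop an event still held by the
 * port, freeing either the vector or the single mbuf it carries.
 */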
static void
l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		l3fwd_event_vector_array_free(&ev, 1);
	else
		rte_pktmbuf_free(ev.mbuf);
}

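/*
 * Worker teardown: free the events that were dequeued but never
 * re-enqueued, mark the whole dequeued batch RTE_EVENT_OP_RELEASE so
 * the scheduler drops its contexts, then quiesce the event port.
 */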
void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
			   struct rte_event events[], uint16_t nb_enq,
			   uint16_t nb_deq, uint8_t is_vector)
{
	int i;

	if (nb_deq) {
		if (is_vector)
			l3fwd_event_vector_array_free(events + nb_enq,
						      nb_deq - nb_enq);
		else
			for (i = nb_enq; i < nb_deq; i++)
				rte_pktmbuf_free(events[i].mbuf);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
	}

	rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
			       NULL);
}
#endif /* RTE_LIB_EVENTDEV */