xref: /dpdk/examples/l3fwd/l3fwd_event_generic.c (revision 6cf329f9d8c2eb97c8f39becd514c14b25251ac1)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifdef RTE_LIB_EVENTDEV
#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

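/*
 * Configure event device 0: clamp the default queue/port/depth values to the
 * limits reported by rte_event_dev_info_get() and size one event queue per
 * enabled ethdev port plus one single-link queue for the Tx adapter.
 */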
static uint32_t
l3fwd_event_device_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit  = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

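	/* Count the ethdev ports enabled in the port mask. */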
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);
	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter Single link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

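	/*
	 * One event port per worker lcore (all lcores minus service lcores),
	 * capped at the number of ports the device supports.
	 */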
	num_workers = rte_lcore_count() - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;

	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				    RTE_EVENT_DEV_CAP_BURST_MODE);

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE)
		event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)
		event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE_ADAPTIVE;

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

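/*
 * Set up the worker event ports and link each of them to every event queue
 * except the last one, which is reserved for the Tx adapter.
 */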
static void
l3fwd_event_port_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("No space is available\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Failed to get default configuration of event port\n");

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
			def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

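	/*
	 * Record the default dequeue depth; the event loops use it as their
	 * dequeue burst size.
	 */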
	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}
	/* init spinlock */
	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}

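/*
 * Create nb_queues - 1 forwarding queues with the user requested scheduling
 * type, plus one high-priority single-link queue for the Tx adapter.
 */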
static void
l3fwd_event_queue_setup_generic(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (ret < 0)
		rte_panic("Failed to get default configuration of event queue\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

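	/* The last queue feeds the Tx adapter: single-link, highest priority. */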
	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}

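/*
 * Create one Rx and one Tx ethernet event adapter, add all enabled ethdev
 * ports to them, and link the Tx adapter's event port to the single-link Tx
 * queue. The adapter services are enabled so they can be run from
 * application lcores.
 */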
static void
l3fwd_rx_tx_adapter_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Rx adapter\n");

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}

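	/*
	 * If the Rx adapter uses a service function (software eventdev),
	 * enable the service and allow it to run outside of a service core.
	 */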
	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Rx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start Failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;

	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;

	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

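	/* Link the Tx adapter event port to the dedicated single-link Tx queue. */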
	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
			 ret);

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start Failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}

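/*
 * Register the generic-mode setup callbacks, i.e. the path used when the
 * Rx/Tx adapters are not backed by an internal event port.
 */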
void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_generic;
	ops->event_queue_setup = l3fwd_event_queue_setup_generic;
	ops->event_port_setup = l3fwd_event_port_setup_generic;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic;
}
#endif /* RTE_LIB_EVENTDEV */