/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"

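/*
 * Single-event worker loop. Each iteration optionally runs the scheduler
 * service on this lcore, dequeues one event, classifies it into a flow at
 * the first stage (from the mbuf RSS hash), retags it for the next stage
 * queue and forwards it back to the event device.
 */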
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;

		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss
						% cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

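/*
 * Burst variant of the worker loop: identical pipeline logic, but up to
 * BATCH_SIZE events are dequeued and enqueued per iteration, retrying the
 * enqueue until all dequeued events are forwarded or shutdown is requested.
 */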
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

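/*
 * Configure event device 0 for the generic pipeline: one load-balanced
 * queue per worker stage plus a SINGLE_LINK queue feeding the TX path, and
 * one event port per worker linked to every stage queue. Returns the event
 * device id on success or -1 on failure.
 */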
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* the +1 queue is for the SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	/* zero-init: the per-stage link priorities below must be defined */
	struct port_link worker_queues[MAX_NUM_STAGES] = {{0, 0}};
	uint8_t disable_implicit_release;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;

	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Queue creation - one load-balanced queue per pipeline stage */
	printf("  Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;

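	/* Clamp the worker port config to the limits the device reported. */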
	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
						&worker_queues[s].queue_id,
						&worker_queues[s].priority,
						1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}

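	/*
	 * A software eventdev exposes its scheduling as a service; -ESRCH
	 * simply means this device needs no such service. The mapped-core
	 * check is disabled so schedule_devices() can run the service from
	 * the application lcores instead of a dedicated service core.
	 */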
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}

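/*
 * Create one Rx and one Tx adapter shared by all ethernet ports, add every
 * port's queues to them, link the Tx adapter's event port to the
 * SINGLE_LINK TX queue, and start the adapters and the event device.
 */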
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);

	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
	};

	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	/* Create one Rx and one Tx adapter shared by all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);

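	/*
	 * All Rx queues inject their packets as events into the first stage
	 * queue; a queue id of -1 below adds every queue of the port at once.
	 */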
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];

	for (i = 0; i < nb_ports; i++) {
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}

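	/*
	 * The Tx adapter dequeues from its own event port, which therefore
	 * has to be linked to the SINGLE_LINK TX queue the workers forward
	 * their final-stage events into.
	 */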
	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");

	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
				&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);

	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
				&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);

	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}

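/*
 * Check the core masks against the device capabilities: a dedicated
 * scheduler core is only required without DISTRIBUTED_SCHED, and Rx cores
 * are only required for ports whose Rx adapter lacks an internal event
 * port.
 */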
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("Part of the pipeline was not assigned any cores. "
			"This will stall the pipeline; please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}

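/*
 * Plug the generic-worker implementation into the application's setup_data
 * callbacks; burst selects the BATCH_SIZE dequeue/enqueue worker variant.
 */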
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst) {
		caps->worker = worker_generic_burst;
	} else {
		caps->worker = worker_generic;
	}

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}