/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"

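/*
 * Generic worker: dequeues a single event at a time, runs the
 * classification stage on events from the first queue, then forwards
 * the event to the next stage in the pipeline.
 */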
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;

		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss
						% cdata.num_fids;

		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

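		/* Spin until the port accepts the event */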
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
		sent++;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

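/*
 * Burst-mode worker: same pipeline logic as worker_generic(), but
 * dequeues and enqueues up to BATCH_SIZE events at a time to amortize
 * the per-call overhead.
 */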
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}
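		/* On a partial enqueue, retry the remaining events until
		 * they are all accepted or shutdown is requested.
		 */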
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}

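/*
 * Configures event device 0 with one load-balanced queue per worker
 * stage plus a final SINGLE_LINK queue feeding the TX path, and one
 * event port per worker linked to every stage queue.
 */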
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* the extra queue is for the SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 1,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	unsigned int i;

	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		printf("%d: Error getting device info\n", __LINE__);
		return -1;
	}
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.event_port_cfg = disable_implicit_release ?
		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;

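	/* Clamp the requested device config to the advertised limits */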
	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}

	/* Queue creation - one load-balanced queue per pipeline stage */
	printf("  Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i + 1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;

	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

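		/* Any worker may run any stage: link this port to every
		 * stage queue.
		 */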
		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
						&worker_queues[s].queue_id,
						&worker_queues[s].priority,
						1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}

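	/* A software eventdev exposes a service that a service core must
	 * run; -ESRCH means the device needs no such service.
	 */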
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rx_conf;
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	rx_conf = dev_info.default_rxconf;
	rx_conf.offloads = port_conf.rxmode.offloads;

	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			port_conf_default.rx_adv_conf.rss_conf.rss_hf,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), &rx_conf,
				mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval != 0) {
		printf("Failed to get MAC address (port %u): %s\n",
				port, rte_strerror(-retval));
		return retval;
	}

	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

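/*
 * Creates the shared packet mbuf pool and initializes every probed
 * ethernet port with it.
 */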
static int
init_ports(uint16_t num_ports)
{
	uint16_t portid;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	return 0;
}

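/*
 * Creates the Rx and Tx adapters on the event device, hooks every
 * ethernet port's queues into them, links the Tx adapter's event port
 * to the TX queue, and starts the adapters and the event device.
 */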
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Failed to get event device info");

	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
	};

	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	init_ports(nb_ports);
	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);

	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];

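	/* -1 adds every Rx/Tx queue of the port to the adapter */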
	for (i = 0; i < nb_ports; i++) {
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}

	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");

	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
				&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);

	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
				&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);

	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}

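/*
 * Sanity-checks the core masks against the event device and Rx adapter
 * capabilities: dedicated Rx and scheduler cores are only required when
 * the hardware cannot do that work itself.
 */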
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

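	/* An Rx service core is only needed for ports whose adapter has
	 * no internal event port.
	 */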
	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("A required part of the pipeline was not assigned any "
			"cores. This will stall the pipeline, please check "
			"the core masks (use -h for details on setting "
			"core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}

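/*
 * Fills in the generic-pipeline callbacks: the burst or single-event
 * worker loop, adapter and eventdev setup, option checking, and the
 * software scheduler hook.
 */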
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst) {
		caps->worker = worker_generic_burst;
	} else {
		caps->worker = worker_generic;
	}

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}