/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

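/*
 * Number of event queues needed in "queue" mode: nb_stages worker queues per
 * ethdev port plus one Tx queue per port. For example, two ports with two
 * stages need (2 * 2) + 2 = 6 event queues.
 */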
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
	uint16_t eth_count = rte_eth_dev_count_avail();

	return (eth_count * opt->nb_stages) + eth_count;
}

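/*
 * Single-stage worker, Tx through the ethdev internal event port: atomic
 * events are transmitted directly; events of any other schedule type are
 * first forwarded to the next queue as atomic to preserve Tx ordering.
 */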
static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

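/*
 * Single-stage worker without an internal Tx port: every event is redirected
 * to the Tx adapter's single-link queue for its mbuf port and enqueued back
 * to the event device as atomic.
 */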
static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		ev.queue_id = tx_queue[ev.mbuf->port];
		rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
		w->processed_pkts++;
	}

	return 0;
}

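/* Burst variant of the single-stage Tx worker (internal port). */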
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

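/* Burst variant of the single-stage forwarding worker (Tx adapter). */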
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			ev[i].queue_id = tx_queue[ev[i].mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
			pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += nb_rx;
	}

	return 0;
}

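/*
 * Multi-stage worker with internal Tx port: an event that has reached its
 * port's Tx queue is transmitted; otherwise it advances to the next stage
 * queue using the schedule type configured for that stage, with the last hop
 * before Tx forced to atomic.
 */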
static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (ev.queue_id == tx_queue[ev.mbuf->port]) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
			continue;
		}

		ev.queue_id++;
		pipeline_fwd_event(&ev, cq_id != last_queue ?
				sched_type_list[cq_id] :
				RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

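/*
 * Multi-stage worker without an internal Tx port: after the last stage the
 * event is steered to the Tx adapter queue of its mbuf port; intermediate
 * stages use the configured per-stage schedule type.
 */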
static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (cq_id == last_queue) {
			ev.queue_id = tx_queue[ev.mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
		}

		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

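/* Burst variant of the multi-stage Tx worker (internal port). */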
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
				continue;
			}

			ev[i].queue_id++;
			pipeline_fwd_event(&ev[i], cq_id != last_queue ?
					sched_type_list[cq_id] :
					RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

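/* Burst variant of the multi-stage forwarding worker (Tx adapter). */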
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (cq_id == last_queue) {
				ev[i].queue_id = tx_queue[ev[i].mbuf->port];
				rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						sched_type_list[cq_id]);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

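/*
 * Pick the worker variant based on the number of stages, whether the event
 * device supports burst dequeue and whether the ethdev exposes an internal
 * event port for Tx.
 */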
static int
worker_wrapper(void *arg)
{
	struct worker_data *w  = arg;
	struct evt_options *opt = w->t->opt;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool internal_port = w->t->internal_port;
	const uint8_t nb_stages = opt->nb_stages;
	RTE_SET_USED(opt);

	if (nb_stages == 1) {
		if (!burst && internal_port)
			return pipeline_queue_worker_single_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_single_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_single_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_single_stage_burst_fwd(
					arg);
	} else {
		if (!burst && internal_port)
			return pipeline_queue_worker_multi_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_multi_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_multi_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_multi_stage_burst_fwd(arg);
	}
	rte_panic("invalid worker\n");
}

static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return pipeline_launch_lcores(test, opt, worker_wrapper);
}

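/*
 * Configure the event device for the "queue" pipeline: nb_stages worker
 * queues plus one Tx queue per ethdev port, one event port per worker lcore,
 * then hook up the Rx and Tx adapters and start everything.
 */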
static int
pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	int nb_ports;
	int nb_queues;
	int nb_stages = opt->nb_stages;
	uint8_t queue;
	uint8_t tx_evport_id = 0;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t nb_worker_queues = 0;
	uint16_t prod = 0;
	struct rte_event_dev_info info;
	struct test_pipeline *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_queues = rte_eth_dev_count_avail() * (nb_stages);

	/* One queue for Tx adapter per port */
	nb_queues += rte_eth_dev_count_avail();

	memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
	memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);

	rte_event_dev_info_get(opt->dev_id, &info);
	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit  = info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				info.max_event_port_enqueue_depth,
	};
	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

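	/*
	 * Every (nb_stages + 1)th queue slot is the per-port Tx queue: it is
	 * atomic and, when the Tx adapter path is used, single-link. The
	 * remaining slots are worker stage queues with the schedule type
	 * requested for that stage.
	 */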
	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		uint8_t slot;

		q_conf.event_queue_cfg = 0;
		slot = queue % (nb_stages + 1);
		if (slot == nb_stages) {
			q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
			if (!t->internal_port) {
				q_conf.event_queue_cfg =
					RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
			}
			tx_evqueue_id[prod++] = queue;
		} else {
			q_conf.schedule_type = opt->sched_type_list[slot];
			queue_arr[nb_worker_queues] = queue;
			nb_worker_queues++;
		}

		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = info.max_event_port_dequeue_depth,
			.new_event_threshold = info.max_num_events,
	};

	if (!t->internal_port)
		ret = pipeline_event_port_setup(test, opt, queue_arr,
				nb_worker_queues, p_conf);
	else
		ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
				p_conf);

	if (ret)
		return ret;

	/*
	 * The pipelines are set up in the following manner:
	 *
	 * eth_dev_count = 2, nb_stages = 2.
	 *
	 *	queues = 6
	 *	stride = 3
	 *
	 *	event queue pipelines:
	 *	eth0 -> q0 -> q1 -> (q2->tx)
	 *	eth1 -> q3 -> q4 -> (q5->tx)
	 *
	 *	q2, q5 configured as ATOMIC | SINGLE_LINK
	 *
	 */
	ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
	if (ret)
		return ret;

	ret = pipeline_event_tx_adapter_setup(opt, p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	/* Connect the tx_evqueue_id to the Tx adapter port */
	if (!t->internal_port) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_event_eth_tx_adapter_event_port_get(prod,
					&tx_evport_id);
			if (ret) {
				evt_err("Unable to get Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return ret;
			}

			if (rte_event_port_link(opt->dev_id, tx_evport_id,
						&tx_evqueue_id[prod],
						NULL, 1) != 1) {
				evt_err("Unable to link Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return -EINVAL;
			}
		}
	}

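	/* Start the ethdevs, the event device and the Rx/Tx adapters. */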
	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_eth_dev_start(prod);
		if (ret) {
			evt_err("Ethernet dev [%d] failed to start", prod);
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_event_eth_rx_adapter_start(prod);
		if (ret) {
			evt_err("Rx adapter[%d] start failed", prod);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_start(prod);
		if (ret) {
			evt_err("Tx adapter[%d] start failed", prod);
			return ret;
		}
	}

	memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
			RTE_MAX_ETHPORTS);

	return 0;
}

static void
pipeline_queue_opt_dump(struct evt_options *opt)
{
	pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
}

static int
pipeline_queue_opt_check(struct evt_options *opt)
{
	return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
}

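/* Verify the event device exposes enough queues and ports for this test. */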
static bool
pipeline_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports <
			evt_nr_active_lcores(opt->wlcores)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			pipeline_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			evt_nr_active_lcores(opt->wlcores),
			dev_info.max_event_ports);
		return false;
	}

	return true;
}

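/* Per-test callbacks registered with the test-eventdev framework. */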
static const struct evt_test_ops pipeline_queue =  {
	.cap_check          = pipeline_queue_capability_check,
	.opt_check          = pipeline_queue_opt_check,
	.opt_dump           = pipeline_queue_opt_dump,
	.test_setup         = pipeline_test_setup,
	.mempool_setup      = pipeline_mempool_setup,
	.ethdev_setup	    = pipeline_ethdev_setup,
	.eventdev_setup     = pipeline_queue_eventdev_setup,
	.launch_lcores      = pipeline_queue_launch_lcores,
	.eventdev_destroy   = pipeline_eventdev_destroy,
	.mempool_destroy    = pipeline_mempool_destroy,
	.ethdev_destroy	    = pipeline_ethdev_destroy,
	.test_result        = pipeline_test_result,
	.test_destroy       = pipeline_test_destroy,
};

EVT_TEST_REGISTER(pipeline_queue);