/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

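/*
 * Total number of event queues: one queue per pipeline stage for every
 * available ethdev, plus one Tx queue per ethdev (e.g. two ports with
 * three stages need 2 * 3 + 2 = 8 event queues).
 */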
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
	uint16_t eth_count = rte_eth_dev_count_avail();

	return (eth_count * opt->nb_stages) + eth_count;
}

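/*
 * Single stage, single event dequeue, Tx through the ethdev's internal
 * event port: ATOMIC events are transmitted directly, anything else is
 * forwarded to the next queue as ATOMIC so Tx stays ordered per flow.
 */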
static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

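/*
 * Single stage without an internal Tx port: steer the event to the
 * per-ethdev Tx event queue and hand it to the Tx adapter via txq 0.
 */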
static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		ev.queue_id = tx_queue[ev.mbuf->port];
		rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
		w->processed_pkts++;
	}

	return 0;
}

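/*
 * Burst variant of the single stage Tx worker. The ev[i + 1] prefetch
 * assumes the ev[] array declared by the BURST_INIT macro carries a
 * spare trailing slot beyond BURST_SIZE.
 */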
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				pipeline_event_tx(dev, port, &ev[i]);
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
				pipeline_event_enqueue_burst(dev, port, ev,
						nb_rx);
			}
		}
	}

	return 0;
}

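/*
 * Burst variant of the single stage fwd worker: retarget the whole
 * burst to the Tx event queues, then enqueue it back in one call.
 */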
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			ev[i].queue_id = tx_queue[ev[i].mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
			pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += nb_rx;
	}

	return 0;
}

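/*
 * Multi stage Tx worker: queue_id % nb_stages gives the current stage.
 * Events that reached the port's Tx queue are transmitted; the rest hop
 * to the next queue with that stage's scheduling type, with the final
 * hop forced ATOMIC.
 */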
static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (ev.queue_id == tx_queue[ev.mbuf->port]) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
			continue;
		}

		ev.queue_id++;
		pipeline_fwd_event(&ev, cq_id != last_queue ?
				sched_type_list[cq_id] :
				RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

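/*
 * Multi stage fwd worker: events leaving the last worker stage are
 * redirected to the Tx event queue for the Tx adapter; all other stages
 * forward to the next queue.
 */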
static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (cq_id == last_queue) {
			ev.queue_id = tx_queue[ev.mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

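/* Burst variant of the multi stage Tx worker. */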
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
				pipeline_event_tx(dev, port, &ev[i]);
				w->processed_pkts++;
				continue;
			}

			ev[i].queue_id++;
			pipeline_fwd_event(&ev[i], cq_id != last_queue ?
					sched_type_list[cq_id] :
					RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		}
	}

	return 0;
}

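/*
 * Burst variant of the multi stage fwd worker: only events retargeted
 * to a Tx queue in this pass count as processed.
 */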
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t processed_pkts = 0;
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (cq_id == last_queue) {
				ev[i].queue_id = tx_queue[ev[i].mbuf->port];
				rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
				processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						sched_type_list[cq_id]);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += processed_pkts;
	}

	return 0;
}

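/*
 * Pick the worker loop from three runtime properties: stage count,
 * burst capability of the event device, and whether the ethdev has an
 * internal event port (Tx variants) or needs the Tx adapter (fwd
 * variants).
 */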
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool internal_port = w->t->internal_port;
	const uint8_t nb_stages = opt->nb_stages;
	RTE_SET_USED(opt);

	if (nb_stages == 1) {
		if (!burst && internal_port)
			return pipeline_queue_worker_single_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_single_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_single_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_single_stage_burst_fwd(
					arg);
	} else {
		if (!burst && internal_port)
			return pipeline_queue_worker_multi_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_multi_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_multi_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_multi_stage_burst_fwd(arg);
	}
	rte_panic("invalid worker\n");
}

static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return pipeline_launch_lcores(test, opt, worker_wrapper);
}

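/*
 * Event device setup: one port per worker lcore; per ethdev, nb_stages
 * worker queues plus one Tx queue; then Rx/Tx adapters, an optional
 * scheduling service, and device/port start.
 */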
static int
pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	int nb_ports;
	int nb_queues;
	int nb_stages = opt->nb_stages;
	uint8_t queue;
	uint8_t tx_evport_id = 0;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t nb_worker_queues = 0;
	uint16_t prod = 0;
	struct rte_event_dev_info info;
	struct test_pipeline *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_queues = rte_eth_dev_count_avail() * (nb_stages);

	/* One queue for Tx adapter per port */
	nb_queues += rte_eth_dev_count_avail();

	memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
	memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);

	rte_event_dev_info_get(opt->dev_id, &info);
	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
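	/*
	 * Queues are laid out in blocks of (nb_stages + 1) per ethdev; the
	 * last slot in each block is that port's Tx queue, the rest are
	 * worker stages.
	 */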
	for (queue = 0; queue < nb_queues; queue++) {
		uint8_t slot;

		q_conf.event_queue_cfg = 0;
		slot = queue % (nb_stages + 1);
		if (slot == nb_stages) {
			q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
			if (!t->internal_port) {
				q_conf.event_queue_cfg =
					RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
			}
			tx_evqueue_id[prod++] = queue;
		} else {
			q_conf.schedule_type = opt->sched_type_list[slot];
			queue_arr[nb_worker_queues] = queue;
			nb_worker_queues++;
		}

		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = info.max_event_port_dequeue_depth,
			.new_event_threshold = info.max_num_events,
	};

	if (!t->internal_port) {
		ret = pipeline_event_port_setup(test, opt, queue_arr,
				nb_worker_queues, p_conf);
		if (ret)
			return ret;
	} else
		ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
				p_conf);

	if (ret)
		return ret;
	/*
	 * The pipelines are set up in the following manner:
	 *
	 * eth_dev_count = 2, nb_stages = 2.
	 *
	 *	queues = 6
	 *	stride = 3
	 *
	 *	event queue pipelines:
	 *	eth0 -> q0 -> q1 -> (q2->tx)
	 *	eth1 -> q3 -> q4 -> (q5->tx)
	 *
	 *	q2, q5 configured as ATOMIC | SINGLE_LINK
	 */
	ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
	if (ret)
		return ret;

	ret = pipeline_event_tx_adapter_setup(opt, p_conf);
	if (ret)
		return ret;

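	/*
	 * Software event devices schedule from a service core; map one
	 * before the test starts. Devices with distributed scheduling do
	 * not need it.
	 */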
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	/* Connect the tx_evqueue_id to the Tx adapter port */
	if (!t->internal_port) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_event_eth_tx_adapter_event_port_get(prod,
					&tx_evport_id);
			if (ret) {
				evt_err("Unable to get Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return ret;
			}

			if (rte_event_port_link(opt->dev_id, tx_evport_id,
						&tx_evqueue_id[prod],
						NULL, 1) != 1) {
				evt_err("Unable to link Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return -EINVAL;
			}
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_eth_dev_start(prod);
		if (ret) {
			evt_err("Ethernet dev [%d] failed to start", prod);
			return ret;
		}
	}

	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_event_eth_rx_adapter_start(prod);
		if (ret) {
			evt_err("Rx adapter[%d] start failed", prod);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_start(prod);
		if (ret) {
			evt_err("Tx adapter[%d] start failed", prod);
			return ret;
		}
	}

	memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
			RTE_MAX_ETHPORTS);

	return 0;
}

static void
pipeline_queue_opt_dump(struct evt_options *opt)
{
	pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
}

static int
pipeline_queue_opt_check(struct evt_options *opt)
{
	return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
}

static bool
pipeline_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports <
			evt_nr_active_lcores(opt->wlcores)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			pipeline_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			evt_nr_active_lcores(opt->wlcores),
			dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops pipeline_queue = {
	.cap_check          = pipeline_queue_capability_check,
	.opt_check          = pipeline_queue_opt_check,
	.opt_dump           = pipeline_queue_opt_dump,
	.test_setup         = pipeline_test_setup,
	.mempool_setup      = pipeline_mempool_setup,
	.ethdev_setup       = pipeline_ethdev_setup,
	.eventdev_setup     = pipeline_queue_eventdev_setup,
	.launch_lcores      = pipeline_queue_launch_lcores,
	.eventdev_destroy   = pipeline_eventdev_destroy,
	.mempool_destroy    = pipeline_mempool_destroy,
	.ethdev_destroy     = pipeline_ethdev_destroy,
	.test_result        = pipeline_test_result,
	.test_destroy       = pipeline_test_destroy,
};

EVT_TEST_REGISTER(pipeline_queue);