/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

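/* Each ethdev port needs nb_stages worker queues plus one Tx queue. */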
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
	uint16_t eth_count = rte_eth_dev_count_avail();

	return (eth_count * opt->nb_stages) + eth_count;
}

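/*
 * Single-stage worker for eventdevs with an internal Tx port: an event that
 * arrives with ATOMIC scheduling has reached the per-port Tx queue and is
 * transmitted directly; anything else advances to that queue as ATOMIC.
 */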
static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

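/*
 * Single-stage worker without an internal Tx port: every event is redirected
 * to the Tx adapter's event queue for its mbuf port; the Tx adapter does the
 * actual transmit.
 */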
static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		ev.queue_id = tx_queue[ev.mbuf->port];
		rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
		w->processed_pkts++;
	}

	return 0;
}

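/*
 * Burst variant of the single-stage Tx worker. The INIT macro is expected to
 * size ev[] one entry past BURST_SIZE so the look-ahead prefetch of
 * ev[i + 1].mbuf stays in bounds. Transmitted events are flagged
 * RTE_EVENT_OP_RELEASE so the trailing burst enqueue releases their
 * scheduling contexts.
 */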
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

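/*
 * Burst variant of the single-stage forward worker: the whole burst is
 * redirected to the per-port Tx adapter queues and re-enqueued in one call.
 */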
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			ev[i].queue_id = tx_queue[ev[i].mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
			pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += nb_rx;
	}

	return 0;
}

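/*
 * Multi-stage worker with an internal Tx port. The stage index (cq_id) is
 * recovered from queue_id modulo nb_stages; events that have reached the
 * per-port Tx queue are transmitted, the rest advance one queue, switching
 * to ATOMIC for the final hop so Tx ordering is preserved.
 */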
static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (ev.queue_id == tx_queue[ev.mbuf->port]) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
			continue;
		}

		ev.queue_id++;
		pipeline_fwd_event(&ev, cq_id != last_queue ?
				sched_type_list[cq_id] :
				RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

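/*
 * Multi-stage worker without an internal Tx port: events leaving the last
 * worker stage are redirected to the Tx adapter queue for their mbuf port;
 * earlier stages advance one queue with the configured scheduling type.
 */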
static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (cq_id == last_queue) {
			ev.queue_id = tx_queue[ev.mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

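/* Burst variant of the multi-stage Tx worker. */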
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
				continue;
			}

			ev[i].queue_id++;
			pipeline_fwd_event(&ev[i], cq_id != last_queue ?
					sched_type_list[cq_id] :
					RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

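/*
 * Burst variant of the multi-stage forward worker; Tx-bound events are
 * counted per burst and folded into w->processed_pkts after the enqueue.
 */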
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t processed_pkts = 0;
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (cq_id == last_queue) {
				ev[i].queue_id = tx_queue[ev[i].mbuf->port];
				rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
				processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						sched_type_list[cq_id]);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += processed_pkts;
	}

	return 0;
}

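/*
 * Select the worker variant from the stage count, the device's burst
 * capability and whether it has an internal (capable) Tx port.
 */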
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool internal_port = w->t->internal_port;
	const uint8_t nb_stages = opt->nb_stages;

	if (nb_stages == 1) {
		if (!burst && internal_port)
			return pipeline_queue_worker_single_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_single_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_single_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_single_stage_burst_fwd(
					arg);
	} else {
		if (!burst && internal_port)
			return pipeline_queue_worker_multi_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_multi_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_multi_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_multi_stage_burst_fwd(arg);
	}
	rte_panic("invalid worker\n");
}

static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return pipeline_launch_lcores(test, opt, worker_wrapper);
}

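/*
 * Allocate nb_stages worker queues plus one Tx queue per ethdev port, set up
 * worker ports and the Rx/Tx adapters, then start the eventdev, the ethdevs
 * and the adapters.
 */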
static int
pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	int nb_ports;
	int nb_queues;
	int nb_stages = opt->nb_stages;
	uint8_t queue;
	uint8_t tx_evport_id = 0;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t nb_worker_queues = 0;
	uint16_t prod = 0;
	struct rte_event_dev_info info;
	struct test_pipeline *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_queues = rte_eth_dev_count_avail() * nb_stages;

	/* One queue for Tx adapter per port */
	nb_queues += rte_eth_dev_count_avail();

	memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
	memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);

	rte_event_dev_info_get(opt->dev_id, &info);
	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
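	/*
	 * Queues are laid out in strides of nb_stages + 1 per ethdev port;
	 * the last slot of each stride is that port's Tx queue, which is
	 * ATOMIC and, without an internal Tx port, SINGLE_LINK as well.
	 */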
	for (queue = 0; queue < nb_queues; queue++) {
		uint8_t slot;

		q_conf.event_queue_cfg = 0;
		slot = queue % (nb_stages + 1);
		if (slot == nb_stages) {
			q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
			if (!t->internal_port) {
				q_conf.event_queue_cfg =
					RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
			}
			tx_evqueue_id[prod++] = queue;
		} else {
			q_conf.schedule_type = opt->sched_type_list[slot];
			queue_arr[nb_worker_queues] = queue;
			nb_worker_queues++;
		}

		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = info.max_event_port_dequeue_depth,
			.new_event_threshold = info.max_num_events,
	};

	if (!t->internal_port)
		ret = pipeline_event_port_setup(test, opt, queue_arr,
				nb_worker_queues, p_conf);
	else
		ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
				p_conf);

	if (ret)
		return ret;

	/*
	 * The pipelines are set up in the following manner:
	 *
	 * eth_dev_count = 2, nb_stages = 2.
	 *
	 *	queues = 6
	 *	stride = 3
	 *
	 *	event queue pipelines:
	 *	eth0 -> q0 -> q1 -> (q2->tx)
	 *	eth1 -> q3 -> q4 -> (q5->tx)
	 *
	 *	q2, q5 configured as ATOMIC | SINGLE_LINK
	 */
	ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
	if (ret)
		return ret;

	ret = pipeline_event_tx_adapter_setup(opt, p_conf);
	if (ret)
		return ret;

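	/*
	 * PMDs without distributed scheduling rely on a service core to run
	 * the software scheduler; map one here.
	 */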
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	/* Connect the tx_evqueue_id to the Tx adapter port */
	if (!t->internal_port) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_event_eth_tx_adapter_event_port_get(prod,
					&tx_evport_id);
			if (ret) {
				evt_err("Unable to get Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return ret;
			}

			if (rte_event_port_link(opt->dev_id, tx_evport_id,
						&tx_evqueue_id[prod],
						NULL, 1) != 1) {
				evt_err("Unable to link Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return -EINVAL;
			}
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_eth_dev_start(prod);
		if (ret) {
			evt_err("Ethernet dev [%d] failed to start", prod);
			return ret;
		}
	}

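	/* Start the Rx and Tx adapters of every ethdev port */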
	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_event_eth_rx_adapter_start(prod);
		if (ret) {
			evt_err("Rx adapter[%d] start failed", prod);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_start(prod);
		if (ret) {
			evt_err("Tx adapter[%d] start failed", prod);
			return ret;
		}
	}

	memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
			RTE_MAX_ETHPORTS);

	return 0;
}

static void
pipeline_queue_opt_dump(struct evt_options *opt)
{
	pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
}

static int
pipeline_queue_opt_check(struct evt_options *opt)
{
	return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
}

static bool
pipeline_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports <
			evt_nr_active_lcores(opt->wlcores)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			pipeline_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			evt_nr_active_lcores(opt->wlcores),
			dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops pipeline_queue = {
	.cap_check          = pipeline_queue_capability_check,
	.opt_check          = pipeline_queue_opt_check,
	.opt_dump           = pipeline_queue_opt_dump,
	.test_setup         = pipeline_test_setup,
	.mempool_setup      = pipeline_mempool_setup,
	.ethdev_setup       = pipeline_ethdev_setup,
	.eventdev_setup     = pipeline_queue_eventdev_setup,
	.launch_lcores      = pipeline_queue_launch_lcores,
	.eventdev_destroy   = pipeline_eventdev_destroy,
	.mempool_destroy    = pipeline_mempool_destroy,
	.ethdev_destroy     = pipeline_ethdev_destroy,
	.test_result        = pipeline_test_result,
	.test_destroy       = pipeline_test_destroy,
};

EVT_TEST_REGISTER(pipeline_queue);