/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>
#include <rte_fslmc.h>

#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"

#define MAX_PORTS 4
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  8
#define DPAA2_TEST_RUN(setup, teardown, test) \
	dpaa2_test_run(setup, teardown, test, #test)
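/*
 * Example expansion (see test_eventdev_dpaa2() below):
 *   DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown, test_simple_enqdeq_atomic)
 * expands to
 *   dpaa2_test_run(eventdev_setup, eventdev_teardown,
 *                  test_simple_enqdeq_atomic, "test_simple_enqdeq_atomic");
 */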

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

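/*
 * Event attributes stored in the mbuf data area at enqueue time and checked
 * against the dequeued event in validate_event().
 */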
struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
	uint8_t seq;
};

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

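/* Look up the event_dpaa2 device; create it as a vdev if it is not probed yet. */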
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_dpaa2";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			dpaa2_evdev_err("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			dpaa2_evdev_err("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

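/* Fill dev_conf with the device's advertised maximums and minimum dequeue timeout. */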
static void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

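/*
 * Common test setup: create the test mempool, configure the device, set up
 * all queues (with unique priorities in PRIORITY mode), set up and link all
 * ports, and start the device.
 */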
static int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_dpaa2_test_pool";

	/* Create and destroy the pool for each test case to keep it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /* MBUF_CACHE_SIZE */,
					0,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		dpaa2_evdev_err("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
				info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			dpaa2_evdev_err(
				"test expects a unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues (0 to n) with priorities from
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
						&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

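/* Record the event attributes in the mbuf and fill in the event to be enqueued. */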
static void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port, uint8_t seq)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;
	attr->seq = seq;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

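/* Allocate one mbuf per event and enqueue 'events' new events with the given attributes. */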
static int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port, i);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

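/* Poll the port a few more times and assert that no stray events remain. */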
static int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events; try a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d",
				*dpaa2_seqn(ev.mbuf));
	}
	return 0;
}

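/*
 * Inject 'total_events' events through port 0 with random flow, sub-event
 * type, schedule type and destination queue.
 */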
static int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

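/* Compare the dequeued event against the attributes stored in its mbuf. */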
static int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

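/*
 * Dequeue and validate 'total_events' events from the given port, invoking the
 * optional per-test callback for each one, then check for excess events.
 * Gives up if no event arrives within UINT16_MAX consecutive empty polls.
 */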
static int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			dpaa2_evdev_err("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);

	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
		"index=%d != seqn=%d", index, attr->seq);
	return 0;
}

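/*
 * Enqueue MAX_EVENTS events on queue 0 through port 0 and verify that they
 * are dequeued in the order they were enqueued.
 */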
static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /* flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across the available
 * queues. On dequeue through a single event port (port 0), verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

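/*
 * Worker loop: dequeue, validate and free events until the shared event
 * counter drops to zero.
 */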
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

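/*
 * Wait for the last launched worker lcore to finish, logging progress once a
 * second and treating 10 seconds without completion as a deadlock.
 */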
static int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			dpaa2_evdev_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			dpaa2_evdev_info(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}

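/*
 * Launch 'main_worker' on the first worker lcore and 'workers' on the
 * remaining ones, one event port per worker, then wait for them to drain
 * all injected events.
 */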
static int
launch_workers_and_wait(int (*main_worker)(void *),
			int (*workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip main */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(main_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across the available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
				__func__, nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}

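/* Device-stop flush callback: count the RTE_EVENT_TYPE_CPU events still in flight. */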
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}

static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
				"count mismatch total_events=%d count=%d",
				total_events, count);
	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on dequeue from that specific port x
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify that the events were received from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

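/* Run a single test case with its setup/teardown and update the result counters. */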
static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		RTE_LOG(INFO, PMD, "Error setting up test %s\n", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			RTE_LOG(INFO, PMD, "%s Failed\n", name);
		} else {
			passed++;
			RTE_LOG(INFO, PMD, "%s Passed\n", name);
		}
	}

	total++;
	tdown();
}

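/* Driver self-test entry point: run all test cases and log a summary. */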
int
test_eventdev_dpaa2(void)
{
	testsuite_setup();

	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);

	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}