xref: /dpdk/drivers/event/octeontx/ssovf_evdev_selftest.c (revision 4851ef2b40bc31accfffc3bb476930a73f50afac)
150fb749aSPavan Nikhilesh /* SPDX-License-Identifier: BSD-3-Clause
250fb749aSPavan Nikhilesh  * Copyright(c) 2017 Cavium, Inc
350fb749aSPavan Nikhilesh  */
450fb749aSPavan Nikhilesh 
572b452c5SDmitry Kozlyuk #include <stdlib.h>
672b452c5SDmitry Kozlyuk 
750fb749aSPavan Nikhilesh #include <rte_atomic.h>
850fb749aSPavan Nikhilesh #include <rte_common.h>
950fb749aSPavan Nikhilesh #include <rte_cycles.h>
1050fb749aSPavan Nikhilesh #include <rte_debug.h>
1150fb749aSPavan Nikhilesh #include <rte_eal.h>
1250fb749aSPavan Nikhilesh #include <rte_ethdev.h>
1350fb749aSPavan Nikhilesh #include <rte_eventdev.h>
1450fb749aSPavan Nikhilesh #include <rte_hexdump.h>
1550fb749aSPavan Nikhilesh #include <rte_mbuf.h>
1650fb749aSPavan Nikhilesh #include <rte_malloc.h>
1750fb749aSPavan Nikhilesh #include <rte_memcpy.h>
1850fb749aSPavan Nikhilesh #include <rte_launch.h>
1950fb749aSPavan Nikhilesh #include <rte_lcore.h>
2050fb749aSPavan Nikhilesh #include <rte_per_lcore.h>
2150fb749aSPavan Nikhilesh #include <rte_random.h>
22*4851ef2bSDavid Marchand #include <bus_vdev_driver.h>
23daeda14cSPavan Nikhilesh #include <rte_test.h>
2450fb749aSPavan Nikhilesh 
25daeda14cSPavan Nikhilesh #include "ssovf_evdev.h"
2650fb749aSPavan Nikhilesh 
#define NUM_PACKETS (1 << 18)	/* capacity of seqn_list; must be >= MAX_EVENTS */
#define MAX_EVENTS  (16 * 1024)	/* events injected per test case */

/* Run one test case bracketed by setup/teardown; stringizes the test name. */
#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)

/* Suite-wide result counters — presumably updated by octeontx_test_run()
 * (defined elsewhere in this file); verify against its implementation.
 */
static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;	/* id of the event device under test */
static struct rte_mempool *eventdev_test_mempool;	/* per-test mbuf pool */
4050fb749aSPavan Nikhilesh 
/* Event attributes stored in the mbuf data area at enqueue time so that
 * validate_event() can compare them against the dequeued event.
 */
struct event_attr {
	uint32_t flow_id;	/* flow id used at enqueue */
	uint8_t event_type;	/* RTE_EVENT_TYPE_* used at enqueue */
	uint8_t sub_event_type;	/* sub event type used at enqueue */
	uint8_t sched_type;	/* RTE_SCHED_TYPE_* used at enqueue */
	uint8_t queue;		/* destination queue id */
	uint8_t port;		/* port the event was enqueued on */
};
4950fb749aSPavan Nikhilesh 
/* Sequence numbers recorded in dequeue order; next free slot index. */
static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
5250fb749aSPavan Nikhilesh 
5350fb749aSPavan Nikhilesh static inline void
seqn_list_init(void)5450fb749aSPavan Nikhilesh seqn_list_init(void)
5550fb749aSPavan Nikhilesh {
5650fb749aSPavan Nikhilesh 	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
5750fb749aSPavan Nikhilesh 	memset(seqn_list, 0, sizeof(seqn_list));
5850fb749aSPavan Nikhilesh 	seqn_list_index = 0;
5950fb749aSPavan Nikhilesh }
6050fb749aSPavan Nikhilesh 
6150fb749aSPavan Nikhilesh static inline int
seqn_list_update(int val)6250fb749aSPavan Nikhilesh seqn_list_update(int val)
6350fb749aSPavan Nikhilesh {
6450fb749aSPavan Nikhilesh 	if (seqn_list_index >= NUM_PACKETS)
65daeda14cSPavan Nikhilesh 		return -1;
6650fb749aSPavan Nikhilesh 
6750fb749aSPavan Nikhilesh 	seqn_list[seqn_list_index++] = val;
6850fb749aSPavan Nikhilesh 	rte_smp_wmb();
69daeda14cSPavan Nikhilesh 	return 0;
7050fb749aSPavan Nikhilesh }
7150fb749aSPavan Nikhilesh 
7250fb749aSPavan Nikhilesh static inline int
seqn_list_check(int limit)7350fb749aSPavan Nikhilesh seqn_list_check(int limit)
7450fb749aSPavan Nikhilesh {
7550fb749aSPavan Nikhilesh 	int i;
7650fb749aSPavan Nikhilesh 
7750fb749aSPavan Nikhilesh 	for (i = 0; i < limit; i++) {
7850fb749aSPavan Nikhilesh 		if (seqn_list[i] != i) {
79daeda14cSPavan Nikhilesh 			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
80daeda14cSPavan Nikhilesh 			return -1;
8150fb749aSPavan Nikhilesh 		}
8250fb749aSPavan Nikhilesh 	}
83daeda14cSPavan Nikhilesh 	return 0;
8450fb749aSPavan Nikhilesh }
8550fb749aSPavan Nikhilesh 
/* Arguments handed to each worker lcore launched by the tests. */
struct test_core_param {
	rte_atomic32_t *total_events;	/* shared count of in-flight events */
	uint64_t dequeue_tmo_ticks;	/* dequeue timeout in device ticks */
	uint8_t port;			/* event port this worker dequeues from */
	uint8_t sched_type;		/* RTE_SCHED_TYPE_* under test */
};
9250fb749aSPavan Nikhilesh 
9350fb749aSPavan Nikhilesh static int
testsuite_setup(void)9450fb749aSPavan Nikhilesh testsuite_setup(void)
9550fb749aSPavan Nikhilesh {
9650fb749aSPavan Nikhilesh 	const char *eventdev_name = "event_octeontx";
9750fb749aSPavan Nikhilesh 
9850fb749aSPavan Nikhilesh 	evdev = rte_event_dev_get_dev_id(eventdev_name);
9950fb749aSPavan Nikhilesh 	if (evdev < 0) {
100daeda14cSPavan Nikhilesh 		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
10150fb749aSPavan Nikhilesh 				__LINE__, eventdev_name);
10250fb749aSPavan Nikhilesh 		if (rte_vdev_init(eventdev_name, NULL) < 0) {
103daeda14cSPavan Nikhilesh 			ssovf_log_dbg("Error creating eventdev %s",
104daeda14cSPavan Nikhilesh 					eventdev_name);
105daeda14cSPavan Nikhilesh 			return -1;
10650fb749aSPavan Nikhilesh 		}
10750fb749aSPavan Nikhilesh 		evdev = rte_event_dev_get_dev_id(eventdev_name);
10850fb749aSPavan Nikhilesh 		if (evdev < 0) {
109daeda14cSPavan Nikhilesh 			ssovf_log_dbg("Error finding newly created eventdev");
110daeda14cSPavan Nikhilesh 			return -1;
11150fb749aSPavan Nikhilesh 		}
11250fb749aSPavan Nikhilesh 	}
11350fb749aSPavan Nikhilesh 
114daeda14cSPavan Nikhilesh 	return 0;
11550fb749aSPavan Nikhilesh }
11650fb749aSPavan Nikhilesh 
/* Close the event device found/created in testsuite_setup(). */
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
12250fb749aSPavan Nikhilesh 
12350fb749aSPavan Nikhilesh static inline void
devconf_set_default_sane_values(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)12450fb749aSPavan Nikhilesh devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
12550fb749aSPavan Nikhilesh 			struct rte_event_dev_info *info)
12650fb749aSPavan Nikhilesh {
12750fb749aSPavan Nikhilesh 	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
12850fb749aSPavan Nikhilesh 	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
12950fb749aSPavan Nikhilesh 	dev_conf->nb_event_ports = info->max_event_ports;
13050fb749aSPavan Nikhilesh 	dev_conf->nb_event_queues = info->max_event_queues;
13150fb749aSPavan Nikhilesh 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
13250fb749aSPavan Nikhilesh 	dev_conf->nb_event_port_dequeue_depth =
13350fb749aSPavan Nikhilesh 			info->max_event_port_dequeue_depth;
13450fb749aSPavan Nikhilesh 	dev_conf->nb_event_port_enqueue_depth =
13550fb749aSPavan Nikhilesh 			info->max_event_port_enqueue_depth;
13650fb749aSPavan Nikhilesh 	dev_conf->nb_event_port_enqueue_depth =
13750fb749aSPavan Nikhilesh 			info->max_event_port_enqueue_depth;
13850fb749aSPavan Nikhilesh 	dev_conf->nb_events_limit =
13950fb749aSPavan Nikhilesh 			info->max_num_events;
14050fb749aSPavan Nikhilesh }
14150fb749aSPavan Nikhilesh 
/* Modes accepted by _eventdev_setup(). */
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,		/* default queue priorities */
	TEST_EVENTDEV_SETUP_PRIORITY,		/* unique priority per queue */
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,	/* per-dequeue timeout flag set */
};
14750fb749aSPavan Nikhilesh 
/*
 * Common device bring-up for every test case:
 *  - create the test mbuf pool (released again in eventdev_teardown())
 *  - configure the device with the maximum resources it advertises
 *  - set up all queues (default priority, unique priority per queue, or
 *    with per-dequeue timeout enabled, depending on @mode)
 *  - set up all ports and link each one to every queue
 *  - start the device
 * Returns 0 on success, -ENOTSUP when the priority mode cannot be
 * exercised, and a negative value on any other failure.
 */
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /*MBUF_CACHE_SIZE*/,
					0,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
				info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		/* One distinct priority level per queue is required; with
		 * more than 8 queues the step below would collapse levels.
		 */
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects the unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
						&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		/* NULL queue list links the port to all queues. */
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}
23850fb749aSPavan Nikhilesh 
/* Device setup with default queue priorities. */
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}
24450fb749aSPavan Nikhilesh 
/* Device setup with a unique priority assigned to each queue. */
static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}
25050fb749aSPavan Nikhilesh 
/* Device setup with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT enabled. */
static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}
25650fb749aSPavan Nikhilesh 
/* Stop the device and release the per-test mbuf pool. */
static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
26350fb749aSPavan Nikhilesh 
26450fb749aSPavan Nikhilesh static inline void
update_event_and_validation_attr(struct rte_mbuf * m,struct rte_event * ev,uint32_t flow_id,uint8_t event_type,uint8_t sub_event_type,uint8_t sched_type,uint8_t queue,uint8_t port)26550fb749aSPavan Nikhilesh update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
26650fb749aSPavan Nikhilesh 			uint32_t flow_id, uint8_t event_type,
26750fb749aSPavan Nikhilesh 			uint8_t sub_event_type, uint8_t sched_type,
26850fb749aSPavan Nikhilesh 			uint8_t queue, uint8_t port)
26950fb749aSPavan Nikhilesh {
27050fb749aSPavan Nikhilesh 	struct event_attr *attr;
27150fb749aSPavan Nikhilesh 
27250fb749aSPavan Nikhilesh 	/* Store the event attributes in mbuf for future reference */
27350fb749aSPavan Nikhilesh 	attr = rte_pktmbuf_mtod(m, struct event_attr *);
27450fb749aSPavan Nikhilesh 	attr->flow_id = flow_id;
27550fb749aSPavan Nikhilesh 	attr->event_type = event_type;
27650fb749aSPavan Nikhilesh 	attr->sub_event_type = sub_event_type;
27750fb749aSPavan Nikhilesh 	attr->sched_type = sched_type;
27850fb749aSPavan Nikhilesh 	attr->queue = queue;
27950fb749aSPavan Nikhilesh 	attr->port = port;
28050fb749aSPavan Nikhilesh 
28150fb749aSPavan Nikhilesh 	ev->flow_id = flow_id;
28250fb749aSPavan Nikhilesh 	ev->sub_event_type = sub_event_type;
28350fb749aSPavan Nikhilesh 	ev->event_type = event_type;
28450fb749aSPavan Nikhilesh 	/* Inject the new event */
28550fb749aSPavan Nikhilesh 	ev->op = RTE_EVENT_OP_NEW;
28650fb749aSPavan Nikhilesh 	ev->sched_type = sched_type;
28750fb749aSPavan Nikhilesh 	ev->queue_id = queue;
28850fb749aSPavan Nikhilesh 	ev->mbuf = m;
28950fb749aSPavan Nikhilesh }
29050fb749aSPavan Nikhilesh 
29150fb749aSPavan Nikhilesh static inline int
inject_events(uint32_t flow_id,uint8_t event_type,uint8_t sub_event_type,uint8_t sched_type,uint8_t queue,uint8_t port,unsigned int events)29250fb749aSPavan Nikhilesh inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
29350fb749aSPavan Nikhilesh 		uint8_t sched_type, uint8_t queue, uint8_t port,
29450fb749aSPavan Nikhilesh 		unsigned int events)
29550fb749aSPavan Nikhilesh {
29650fb749aSPavan Nikhilesh 	struct rte_mbuf *m;
29750fb749aSPavan Nikhilesh 	unsigned int i;
29850fb749aSPavan Nikhilesh 
29950fb749aSPavan Nikhilesh 	for (i = 0; i < events; i++) {
30050fb749aSPavan Nikhilesh 		struct rte_event ev = {.event = 0, .u64 = 0};
30150fb749aSPavan Nikhilesh 
30250fb749aSPavan Nikhilesh 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
303daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
30450fb749aSPavan Nikhilesh 
305ca4355e4SDavid Marchand 		*rte_event_pmd_selftest_seqn(m) = i;
30650fb749aSPavan Nikhilesh 		update_event_and_validation_attr(m, &ev, flow_id, event_type,
30750fb749aSPavan Nikhilesh 			sub_event_type, sched_type, queue, port);
30850fb749aSPavan Nikhilesh 		rte_event_enqueue_burst(evdev, port, &ev, 1);
30950fb749aSPavan Nikhilesh 	}
31050fb749aSPavan Nikhilesh 	return 0;
31150fb749aSPavan Nikhilesh }
31250fb749aSPavan Nikhilesh 
31350fb749aSPavan Nikhilesh static inline int
check_excess_events(uint8_t port)31450fb749aSPavan Nikhilesh check_excess_events(uint8_t port)
31550fb749aSPavan Nikhilesh {
31650fb749aSPavan Nikhilesh 	int i;
31750fb749aSPavan Nikhilesh 	uint16_t valid_event;
31850fb749aSPavan Nikhilesh 	struct rte_event ev;
31950fb749aSPavan Nikhilesh 
32050fb749aSPavan Nikhilesh 	/* Check for excess events, try for a few times and exit */
32150fb749aSPavan Nikhilesh 	for (i = 0; i < 32; i++) {
32250fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
32350fb749aSPavan Nikhilesh 
324daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT_SUCCESS(valid_event,
325ca4355e4SDavid Marchand 			"Unexpected valid event=%d",
326ca4355e4SDavid Marchand 			*rte_event_pmd_selftest_seqn(ev.mbuf));
32750fb749aSPavan Nikhilesh 	}
32850fb749aSPavan Nikhilesh 	return 0;
32950fb749aSPavan Nikhilesh }
33050fb749aSPavan Nikhilesh 
33150fb749aSPavan Nikhilesh static inline int
generate_random_events(const unsigned int total_events)33250fb749aSPavan Nikhilesh generate_random_events(const unsigned int total_events)
33350fb749aSPavan Nikhilesh {
33450fb749aSPavan Nikhilesh 	struct rte_event_dev_info info;
33550fb749aSPavan Nikhilesh 	unsigned int i;
33650fb749aSPavan Nikhilesh 	int ret;
33750fb749aSPavan Nikhilesh 
33850fb749aSPavan Nikhilesh 	uint32_t queue_count;
339daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
34050fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
34150fb749aSPavan Nikhilesh 			    &queue_count), "Queue count get failed");
34250fb749aSPavan Nikhilesh 
34350fb749aSPavan Nikhilesh 	ret = rte_event_dev_info_get(evdev, &info);
344daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
34550fb749aSPavan Nikhilesh 	for (i = 0; i < total_events; i++) {
34650fb749aSPavan Nikhilesh 		ret = inject_events(
34750fb749aSPavan Nikhilesh 			rte_rand() % info.max_event_queue_flows /*flow_id */,
34850fb749aSPavan Nikhilesh 			RTE_EVENT_TYPE_CPU /* event_type */,
34950fb749aSPavan Nikhilesh 			rte_rand() % 256 /* sub_event_type */,
35050fb749aSPavan Nikhilesh 			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
35150fb749aSPavan Nikhilesh 			rte_rand() % queue_count /* queue */,
35250fb749aSPavan Nikhilesh 			0 /* port */,
35350fb749aSPavan Nikhilesh 			1 /* events */);
35450fb749aSPavan Nikhilesh 		if (ret)
355daeda14cSPavan Nikhilesh 			return -1;
35650fb749aSPavan Nikhilesh 	}
35750fb749aSPavan Nikhilesh 	return ret;
35850fb749aSPavan Nikhilesh }
35950fb749aSPavan Nikhilesh 
36050fb749aSPavan Nikhilesh 
/*
 * Compare the attributes recorded in the mbuf at enqueue time (see
 * update_event_and_validation_attr()) against the event that was
 * actually dequeued.  Returns 0 on full match; the RTE_TEST_ASSERT_EQUAL
 * macros return early with a failure on the first mismatch.
 */
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq =%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq =%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq =%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq =%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			attr->queue, ev->queue_id);
	return 0;
}
38450fb749aSPavan Nikhilesh 
/* Per-event, test-specific validation hook invoked by consume_events(). */
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
38750fb749aSPavan Nikhilesh 
/*
 * Dequeue @total_events events from @port, checking each one's generic
 * attributes (validate_event()) and, when @fn is non-NULL, the
 * test-specific expectation as well.  Gives up with -1 after UINT16_MAX
 * consecutive empty polls (treated as a deadlock).  Finally verifies
 * that no excess events remain on the port.
 */
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		/* Deadlock watchdog: too many empty dequeues in a row. */
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		/* Reset the watchdog on every successful dequeue. */
		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
42650fb749aSPavan Nikhilesh 
42750fb749aSPavan Nikhilesh static int
validate_simple_enqdeq(uint32_t index,uint8_t port,struct rte_event * ev)42850fb749aSPavan Nikhilesh validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
42950fb749aSPavan Nikhilesh {
43050fb749aSPavan Nikhilesh 	RTE_SET_USED(port);
431ca4355e4SDavid Marchand 	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
432ca4355e4SDavid Marchand 		"index=%d != seqn=%d", index,
433ca4355e4SDavid Marchand 		*rte_event_pmd_selftest_seqn(ev->mbuf));
43450fb749aSPavan Nikhilesh 	return 0;
43550fb749aSPavan Nikhilesh }
43650fb749aSPavan Nikhilesh 
43750fb749aSPavan Nikhilesh static inline int
test_simple_enqdeq(uint8_t sched_type)43850fb749aSPavan Nikhilesh test_simple_enqdeq(uint8_t sched_type)
43950fb749aSPavan Nikhilesh {
44050fb749aSPavan Nikhilesh 	int ret;
44150fb749aSPavan Nikhilesh 
44250fb749aSPavan Nikhilesh 	ret = inject_events(0 /*flow_id */,
44350fb749aSPavan Nikhilesh 				RTE_EVENT_TYPE_CPU /* event_type */,
44450fb749aSPavan Nikhilesh 				0 /* sub_event_type */,
44550fb749aSPavan Nikhilesh 				sched_type,
44650fb749aSPavan Nikhilesh 				0 /* queue */,
44750fb749aSPavan Nikhilesh 				0 /* port */,
44850fb749aSPavan Nikhilesh 				MAX_EVENTS);
44950fb749aSPavan Nikhilesh 	if (ret)
450daeda14cSPavan Nikhilesh 		return -1;
45150fb749aSPavan Nikhilesh 
45250fb749aSPavan Nikhilesh 	return consume_events(0 /* port */, MAX_EVENTS,	validate_simple_enqdeq);
45350fb749aSPavan Nikhilesh }
45450fb749aSPavan Nikhilesh 
/* Simple enqueue/dequeue ordering test with ORDERED scheduling. */
static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}
46050fb749aSPavan Nikhilesh 
/* Simple enqueue/dequeue ordering test with ATOMIC scheduling. */
static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}
46650fb749aSPavan Nikhilesh 
/* Simple enqueue/dequeue ordering test with PARALLEL scheduling. */
static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
47250fb749aSPavan Nikhilesh 
47350fb749aSPavan Nikhilesh /*
47450fb749aSPavan Nikhilesh  * Generate a prescribed number of events and spread them across available
47550fb749aSPavan Nikhilesh  * queues. On dequeue, using single event port(port 0) verify the enqueued
47650fb749aSPavan Nikhilesh  * event attributes
47750fb749aSPavan Nikhilesh  */
47850fb749aSPavan Nikhilesh static int
test_multi_queue_enq_single_port_deq(void)47950fb749aSPavan Nikhilesh test_multi_queue_enq_single_port_deq(void)
48050fb749aSPavan Nikhilesh {
48150fb749aSPavan Nikhilesh 	int ret;
48250fb749aSPavan Nikhilesh 
48350fb749aSPavan Nikhilesh 	ret = generate_random_events(MAX_EVENTS);
48450fb749aSPavan Nikhilesh 	if (ret)
485daeda14cSPavan Nikhilesh 		return -1;
48650fb749aSPavan Nikhilesh 
48750fb749aSPavan Nikhilesh 	return consume_events(0 /* port */, MAX_EVENTS, NULL);
48850fb749aSPavan Nikhilesh }
48950fb749aSPavan Nikhilesh 
49050fb749aSPavan Nikhilesh /*
49150fb749aSPavan Nikhilesh  * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
49250fb749aSPavan Nikhilesh  * operation
49350fb749aSPavan Nikhilesh  *
49450fb749aSPavan Nikhilesh  * For example, Inject 32 events over 0..7 queues
49550fb749aSPavan Nikhilesh  * enqueue events 0, 8, 16, 24 in queue 0
49650fb749aSPavan Nikhilesh  * enqueue events 1, 9, 17, 25 in queue 1
49750fb749aSPavan Nikhilesh  * ..
49850fb749aSPavan Nikhilesh  * ..
49950fb749aSPavan Nikhilesh  * enqueue events 7, 15, 23, 31 in queue 7
50050fb749aSPavan Nikhilesh  *
50150fb749aSPavan Nikhilesh  * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31
50250fb749aSPavan Nikhilesh  * order from queue0(highest priority) to queue7(lowest_priority)
50350fb749aSPavan Nikhilesh  */
50450fb749aSPavan Nikhilesh static int
validate_queue_priority(uint32_t index,uint8_t port,struct rte_event * ev)50550fb749aSPavan Nikhilesh validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
50650fb749aSPavan Nikhilesh {
50750fb749aSPavan Nikhilesh 	uint32_t queue_count;
508daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
50950fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
51050fb749aSPavan Nikhilesh 			    &queue_count), "Queue count get failed");
51150fb749aSPavan Nikhilesh 	uint32_t range = MAX_EVENTS / queue_count;
51250fb749aSPavan Nikhilesh 	uint32_t expected_val = (index % range) * queue_count;
51350fb749aSPavan Nikhilesh 
51450fb749aSPavan Nikhilesh 	expected_val += ev->queue_id;
51550fb749aSPavan Nikhilesh 	RTE_SET_USED(port);
516ca4355e4SDavid Marchand 	RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
51750fb749aSPavan Nikhilesh 		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
518ca4355e4SDavid Marchand 		*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
51950fb749aSPavan Nikhilesh 		queue_count, MAX_EVENTS);
52050fb749aSPavan Nikhilesh 	return 0;
52150fb749aSPavan Nikhilesh }
52250fb749aSPavan Nikhilesh 
/*
 * Round-robin events across all queues (event i goes to queue
 * i % queue_count), then consume from port 0 and verify the arrival
 * order matches queue priority (see validate_queue_priority()).
 */
static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validate logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	/* Round the event count down to a multiple of the queue count. */
	max_evts_roundoff  = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
55350fb749aSPavan Nikhilesh 
55450fb749aSPavan Nikhilesh static int
worker_multi_port_fn(void * arg)55550fb749aSPavan Nikhilesh worker_multi_port_fn(void *arg)
55650fb749aSPavan Nikhilesh {
55750fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
55850fb749aSPavan Nikhilesh 	struct rte_event ev;
55950fb749aSPavan Nikhilesh 	uint16_t valid_event;
56050fb749aSPavan Nikhilesh 	uint8_t port = param->port;
56150fb749aSPavan Nikhilesh 	rte_atomic32_t *total_events = param->total_events;
56250fb749aSPavan Nikhilesh 	int ret;
56350fb749aSPavan Nikhilesh 
56450fb749aSPavan Nikhilesh 	while (rte_atomic32_read(total_events) > 0) {
56550fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
56650fb749aSPavan Nikhilesh 		if (!valid_event)
56750fb749aSPavan Nikhilesh 			continue;
56850fb749aSPavan Nikhilesh 
56950fb749aSPavan Nikhilesh 		ret = validate_event(&ev);
570daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
57150fb749aSPavan Nikhilesh 		rte_pktmbuf_free(ev.mbuf);
57250fb749aSPavan Nikhilesh 		rte_atomic32_sub(total_events, 1);
57350fb749aSPavan Nikhilesh 	}
57450fb749aSPavan Nikhilesh 	return 0;
57550fb749aSPavan Nikhilesh }
57650fb749aSPavan Nikhilesh 
/*
 * Busy-wait until the worker on @lcore returns to the WAIT state,
 * logging the outstanding event count roughly once per second.  If ten
 * seconds pass without the worker finishing, dump the device state and
 * report a deadlock (-1).  Returns 0 once all lcores have joined.
 */
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;
	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != WAIT) {
		uint64_t new_cycles = rte_get_timer_cycles();

		/* About once a second: progress report. */
		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		/* Ten seconds without completion: treat as deadlock. */
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}
60550fb749aSPavan Nikhilesh 
60650fb749aSPavan Nikhilesh 
/*
 * Launch @main_worker on the first available worker lcore (bound to port 0)
 * and @worker on the next nb_workers - 1 lcores (ports 1..nb_workers-1),
 * then wait for them to drain @total_events events.
 *
 * @sched_type is passed through to the workers as the stage-1 scheduling
 * type. Returns 0 on success (or if nb_workers is 0), -1 on failure.
 */
static inline int
launch_workers_and_wait(int (*main_worker)(void *),
			int (*worker)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	/* Shared countdown of outstanding events across all workers. */
	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	/* Randomize the dequeue timeout (up to ~10ms) to exercise the
	 * timed-dequeue path. */
	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	/* Publish param[0] before the remote lcore starts reading it. */
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip main */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(main_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		/* Same publish-before-launch ordering per worker. */
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(worker, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}
66250fb749aSPavan Nikhilesh 
66350fb749aSPavan Nikhilesh /*
66450fb749aSPavan Nikhilesh  * Generate a prescribed number of events and spread them across available
66550fb749aSPavan Nikhilesh  * queues. Dequeue the events through multiple ports and verify the enqueued
66650fb749aSPavan Nikhilesh  * event attributes
66750fb749aSPavan Nikhilesh  */
66850fb749aSPavan Nikhilesh static int
test_multi_queue_enq_multi_port_deq(void)66950fb749aSPavan Nikhilesh test_multi_queue_enq_multi_port_deq(void)
67050fb749aSPavan Nikhilesh {
67150fb749aSPavan Nikhilesh 	const unsigned int total_events = MAX_EVENTS;
67250fb749aSPavan Nikhilesh 	uint32_t nr_ports;
67350fb749aSPavan Nikhilesh 	int ret;
67450fb749aSPavan Nikhilesh 
67550fb749aSPavan Nikhilesh 	ret = generate_random_events(total_events);
67650fb749aSPavan Nikhilesh 	if (ret)
677daeda14cSPavan Nikhilesh 		return -1;
67850fb749aSPavan Nikhilesh 
679daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
68050fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
68150fb749aSPavan Nikhilesh 				&nr_ports), "Port count get failed");
68250fb749aSPavan Nikhilesh 	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
68350fb749aSPavan Nikhilesh 
68450fb749aSPavan Nikhilesh 	if (!nr_ports) {
685daeda14cSPavan Nikhilesh 		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
68650fb749aSPavan Nikhilesh 			nr_ports, rte_lcore_count() - 1);
687daeda14cSPavan Nikhilesh 		return 0;
68850fb749aSPavan Nikhilesh 	}
68950fb749aSPavan Nikhilesh 
69050fb749aSPavan Nikhilesh 	return launch_workers_and_wait(worker_multi_port_fn,
69150fb749aSPavan Nikhilesh 					worker_multi_port_fn, total_events,
69250fb749aSPavan Nikhilesh 					nr_ports, 0xff /* invalid */);
69350fb749aSPavan Nikhilesh }
69450fb749aSPavan Nikhilesh 
6958384f0e0SJerin Jacob static
flush(uint8_t dev_id,struct rte_event event,void * arg)6968384f0e0SJerin Jacob void flush(uint8_t dev_id, struct rte_event event, void *arg)
6978384f0e0SJerin Jacob {
6988384f0e0SJerin Jacob 	unsigned int *count = arg;
6998384f0e0SJerin Jacob 
7008384f0e0SJerin Jacob 	RTE_SET_USED(dev_id);
7018384f0e0SJerin Jacob 	if (event.event_type == RTE_EVENT_TYPE_CPU)
7028384f0e0SJerin Jacob 		*count = *count + 1;
7038384f0e0SJerin Jacob 
7048384f0e0SJerin Jacob }
7058384f0e0SJerin Jacob 
7068384f0e0SJerin Jacob static int
test_dev_stop_flush(void)7078384f0e0SJerin Jacob test_dev_stop_flush(void)
7088384f0e0SJerin Jacob {
7098384f0e0SJerin Jacob 	unsigned int total_events = MAX_EVENTS, count = 0;
7108384f0e0SJerin Jacob 	int ret;
7118384f0e0SJerin Jacob 
7128384f0e0SJerin Jacob 	ret = generate_random_events(total_events);
7138384f0e0SJerin Jacob 	if (ret)
7148384f0e0SJerin Jacob 		return -1;
7158384f0e0SJerin Jacob 
7168384f0e0SJerin Jacob 	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
7178384f0e0SJerin Jacob 	if (ret)
7188384f0e0SJerin Jacob 		return -2;
7198384f0e0SJerin Jacob 	rte_event_dev_stop(evdev);
7208384f0e0SJerin Jacob 	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
7218384f0e0SJerin Jacob 	if (ret)
7228384f0e0SJerin Jacob 		return -3;
7238384f0e0SJerin Jacob 	RTE_TEST_ASSERT_EQUAL(total_events, count,
7248384f0e0SJerin Jacob 				"count mismatch total_events=%d count=%d",
7258384f0e0SJerin Jacob 				total_events, count);
7268384f0e0SJerin Jacob 	return 0;
7278384f0e0SJerin Jacob }
7288384f0e0SJerin Jacob 
72950fb749aSPavan Nikhilesh static int
validate_queue_to_port_single_link(uint32_t index,uint8_t port,struct rte_event * ev)73050fb749aSPavan Nikhilesh validate_queue_to_port_single_link(uint32_t index, uint8_t port,
73150fb749aSPavan Nikhilesh 			struct rte_event *ev)
73250fb749aSPavan Nikhilesh {
73350fb749aSPavan Nikhilesh 	RTE_SET_USED(index);
734daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
73550fb749aSPavan Nikhilesh 				"queue mismatch enq=%d deq =%d",
73650fb749aSPavan Nikhilesh 				port, ev->queue_id);
73750fb749aSPavan Nikhilesh 	return 0;
73850fb749aSPavan Nikhilesh }
73950fb749aSPavan Nikhilesh 
74050fb749aSPavan Nikhilesh /*
74150fb749aSPavan Nikhilesh  * Link queue x to port x and check correctness of link by checking
74250fb749aSPavan Nikhilesh  * queue_id == x on dequeue on the specific port x
74350fb749aSPavan Nikhilesh  */
74450fb749aSPavan Nikhilesh static int
test_queue_to_port_single_link(void)74550fb749aSPavan Nikhilesh test_queue_to_port_single_link(void)
74650fb749aSPavan Nikhilesh {
74750fb749aSPavan Nikhilesh 	int i, nr_links, ret;
74850fb749aSPavan Nikhilesh 
74950fb749aSPavan Nikhilesh 	uint32_t port_count;
750daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
75150fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
75250fb749aSPavan Nikhilesh 				&port_count), "Port count get failed");
75350fb749aSPavan Nikhilesh 
75450fb749aSPavan Nikhilesh 	/* Unlink all connections that created in eventdev_setup */
75550fb749aSPavan Nikhilesh 	for (i = 0; i < (int)port_count; i++) {
75650fb749aSPavan Nikhilesh 		ret = rte_event_port_unlink(evdev, i, NULL, 0);
757daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT(ret >= 0,
758daeda14cSPavan Nikhilesh 				"Failed to unlink all queues port=%d", i);
75950fb749aSPavan Nikhilesh 	}
76050fb749aSPavan Nikhilesh 
76150fb749aSPavan Nikhilesh 	uint32_t queue_count;
762daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
76350fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
76450fb749aSPavan Nikhilesh 			    &queue_count), "Queue count get failed");
76550fb749aSPavan Nikhilesh 
76650fb749aSPavan Nikhilesh 	nr_links = RTE_MIN(port_count, queue_count);
76750fb749aSPavan Nikhilesh 	const unsigned int total_events = MAX_EVENTS / nr_links;
76850fb749aSPavan Nikhilesh 
76950fb749aSPavan Nikhilesh 	/* Link queue x to port x and inject events to queue x through port x */
77050fb749aSPavan Nikhilesh 	for (i = 0; i < nr_links; i++) {
77150fb749aSPavan Nikhilesh 		uint8_t queue = (uint8_t)i;
77250fb749aSPavan Nikhilesh 
77350fb749aSPavan Nikhilesh 		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
774daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
77550fb749aSPavan Nikhilesh 
77650fb749aSPavan Nikhilesh 		ret = inject_events(
77750fb749aSPavan Nikhilesh 			0x100 /*flow_id */,
77850fb749aSPavan Nikhilesh 			RTE_EVENT_TYPE_CPU /* event_type */,
77950fb749aSPavan Nikhilesh 			rte_rand() % 256 /* sub_event_type */,
78050fb749aSPavan Nikhilesh 			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
78150fb749aSPavan Nikhilesh 			queue /* queue */,
78250fb749aSPavan Nikhilesh 			i /* port */,
78350fb749aSPavan Nikhilesh 			total_events /* events */);
78450fb749aSPavan Nikhilesh 		if (ret)
785daeda14cSPavan Nikhilesh 			return -1;
78650fb749aSPavan Nikhilesh 	}
78750fb749aSPavan Nikhilesh 
78850fb749aSPavan Nikhilesh 	/* Verify the events generated from correct queue */
78950fb749aSPavan Nikhilesh 	for (i = 0; i < nr_links; i++) {
79050fb749aSPavan Nikhilesh 		ret = consume_events(i /* port */, total_events,
79150fb749aSPavan Nikhilesh 				validate_queue_to_port_single_link);
79250fb749aSPavan Nikhilesh 		if (ret)
793daeda14cSPavan Nikhilesh 			return -1;
79450fb749aSPavan Nikhilesh 	}
79550fb749aSPavan Nikhilesh 
796daeda14cSPavan Nikhilesh 	return 0;
79750fb749aSPavan Nikhilesh }
79850fb749aSPavan Nikhilesh 
79950fb749aSPavan Nikhilesh static int
validate_queue_to_port_multi_link(uint32_t index,uint8_t port,struct rte_event * ev)80050fb749aSPavan Nikhilesh validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
80150fb749aSPavan Nikhilesh 			struct rte_event *ev)
80250fb749aSPavan Nikhilesh {
80350fb749aSPavan Nikhilesh 	RTE_SET_USED(index);
804daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
80550fb749aSPavan Nikhilesh 				"queue mismatch enq=%d deq =%d",
80650fb749aSPavan Nikhilesh 				port, ev->queue_id);
80750fb749aSPavan Nikhilesh 	return 0;
80850fb749aSPavan Nikhilesh }
80950fb749aSPavan Nikhilesh 
81050fb749aSPavan Nikhilesh /*
81150fb749aSPavan Nikhilesh  * Link all even number of queues to port 0 and all odd number of queues to
81250fb749aSPavan Nikhilesh  * port 1 and verify the link connection on dequeue
81350fb749aSPavan Nikhilesh  */
81450fb749aSPavan Nikhilesh static int
test_queue_to_port_multi_link(void)81550fb749aSPavan Nikhilesh test_queue_to_port_multi_link(void)
81650fb749aSPavan Nikhilesh {
81750fb749aSPavan Nikhilesh 	int ret, port0_events = 0, port1_events = 0;
81850fb749aSPavan Nikhilesh 	uint8_t queue, port;
81950fb749aSPavan Nikhilesh 	uint32_t nr_queues = 0;
82050fb749aSPavan Nikhilesh 	uint32_t nr_ports = 0;
82150fb749aSPavan Nikhilesh 
822daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
82350fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
82450fb749aSPavan Nikhilesh 			    &nr_queues), "Queue count get failed");
82550fb749aSPavan Nikhilesh 
826daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
82750fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
82850fb749aSPavan Nikhilesh 				&nr_queues), "Queue count get failed");
829daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
83050fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
83150fb749aSPavan Nikhilesh 				&nr_ports), "Port count get failed");
83250fb749aSPavan Nikhilesh 
83350fb749aSPavan Nikhilesh 	if (nr_ports < 2) {
834daeda14cSPavan Nikhilesh 		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
83550fb749aSPavan Nikhilesh 				__func__, nr_ports);
836daeda14cSPavan Nikhilesh 		return 0;
83750fb749aSPavan Nikhilesh 	}
83850fb749aSPavan Nikhilesh 
83950fb749aSPavan Nikhilesh 	/* Unlink all connections that created in eventdev_setup */
84050fb749aSPavan Nikhilesh 	for (port = 0; port < nr_ports; port++) {
84150fb749aSPavan Nikhilesh 		ret = rte_event_port_unlink(evdev, port, NULL, 0);
842daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
84350fb749aSPavan Nikhilesh 					port);
84450fb749aSPavan Nikhilesh 	}
84550fb749aSPavan Nikhilesh 
84650fb749aSPavan Nikhilesh 	const unsigned int total_events = MAX_EVENTS / nr_queues;
84750fb749aSPavan Nikhilesh 
84850fb749aSPavan Nikhilesh 	/* Link all even number of queues to port0 and odd numbers to port 1*/
84950fb749aSPavan Nikhilesh 	for (queue = 0; queue < nr_queues; queue++) {
85050fb749aSPavan Nikhilesh 		port = queue & 0x1;
85150fb749aSPavan Nikhilesh 		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
852daeda14cSPavan Nikhilesh 		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
85350fb749aSPavan Nikhilesh 					queue, port);
85450fb749aSPavan Nikhilesh 
85550fb749aSPavan Nikhilesh 		ret = inject_events(
85650fb749aSPavan Nikhilesh 			0x100 /*flow_id */,
85750fb749aSPavan Nikhilesh 			RTE_EVENT_TYPE_CPU /* event_type */,
85850fb749aSPavan Nikhilesh 			rte_rand() % 256 /* sub_event_type */,
85950fb749aSPavan Nikhilesh 			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
86050fb749aSPavan Nikhilesh 			queue /* queue */,
86150fb749aSPavan Nikhilesh 			port /* port */,
86250fb749aSPavan Nikhilesh 			total_events /* events */);
86350fb749aSPavan Nikhilesh 		if (ret)
864daeda14cSPavan Nikhilesh 			return -1;
86550fb749aSPavan Nikhilesh 
86650fb749aSPavan Nikhilesh 		if (port == 0)
86750fb749aSPavan Nikhilesh 			port0_events += total_events;
86850fb749aSPavan Nikhilesh 		else
86950fb749aSPavan Nikhilesh 			port1_events += total_events;
87050fb749aSPavan Nikhilesh 	}
87150fb749aSPavan Nikhilesh 
87250fb749aSPavan Nikhilesh 	ret = consume_events(0 /* port */, port0_events,
87350fb749aSPavan Nikhilesh 				validate_queue_to_port_multi_link);
87450fb749aSPavan Nikhilesh 	if (ret)
875daeda14cSPavan Nikhilesh 		return -1;
87650fb749aSPavan Nikhilesh 	ret = consume_events(1 /* port */, port1_events,
87750fb749aSPavan Nikhilesh 				validate_queue_to_port_multi_link);
87850fb749aSPavan Nikhilesh 	if (ret)
879daeda14cSPavan Nikhilesh 		return -1;
88050fb749aSPavan Nikhilesh 
881daeda14cSPavan Nikhilesh 	return 0;
88250fb749aSPavan Nikhilesh }
88350fb749aSPavan Nikhilesh 
88450fb749aSPavan Nikhilesh static int
worker_flow_based_pipeline(void * arg)88550fb749aSPavan Nikhilesh worker_flow_based_pipeline(void *arg)
88650fb749aSPavan Nikhilesh {
88750fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
88850fb749aSPavan Nikhilesh 	struct rte_event ev;
88950fb749aSPavan Nikhilesh 	uint16_t valid_event;
89050fb749aSPavan Nikhilesh 	uint8_t port = param->port;
89150fb749aSPavan Nikhilesh 	uint8_t new_sched_type = param->sched_type;
89250fb749aSPavan Nikhilesh 	rte_atomic32_t *total_events = param->total_events;
89350fb749aSPavan Nikhilesh 	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
89450fb749aSPavan Nikhilesh 
89550fb749aSPavan Nikhilesh 	while (rte_atomic32_read(total_events) > 0) {
89650fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
89750fb749aSPavan Nikhilesh 					dequeue_tmo_ticks);
89850fb749aSPavan Nikhilesh 		if (!valid_event)
89950fb749aSPavan Nikhilesh 			continue;
90050fb749aSPavan Nikhilesh 
90150fb749aSPavan Nikhilesh 		/* Events from stage 0 */
90250fb749aSPavan Nikhilesh 		if (ev.sub_event_type == 0) {
90350fb749aSPavan Nikhilesh 			/* Move to atomic flow to maintain the ordering */
90450fb749aSPavan Nikhilesh 			ev.flow_id = 0x2;
90550fb749aSPavan Nikhilesh 			ev.event_type = RTE_EVENT_TYPE_CPU;
90650fb749aSPavan Nikhilesh 			ev.sub_event_type = 1; /* stage 1 */
90750fb749aSPavan Nikhilesh 			ev.sched_type = new_sched_type;
90850fb749aSPavan Nikhilesh 			ev.op = RTE_EVENT_OP_FORWARD;
90950fb749aSPavan Nikhilesh 			rte_event_enqueue_burst(evdev, port, &ev, 1);
91050fb749aSPavan Nikhilesh 		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
911ca4355e4SDavid Marchand 			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
91250fb749aSPavan Nikhilesh 				rte_pktmbuf_free(ev.mbuf);
91350fb749aSPavan Nikhilesh 				rte_atomic32_sub(total_events, 1);
91450fb749aSPavan Nikhilesh 			} else {
915daeda14cSPavan Nikhilesh 				ssovf_log_dbg("Failed to update seqn_list");
916daeda14cSPavan Nikhilesh 				return -1;
91750fb749aSPavan Nikhilesh 			}
91850fb749aSPavan Nikhilesh 		} else {
919daeda14cSPavan Nikhilesh 			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
92050fb749aSPavan Nikhilesh 					ev.sub_event_type);
921daeda14cSPavan Nikhilesh 			return -1;
92250fb749aSPavan Nikhilesh 		}
92350fb749aSPavan Nikhilesh 	}
92450fb749aSPavan Nikhilesh 	return 0;
92550fb749aSPavan Nikhilesh }
92650fb749aSPavan Nikhilesh 
92750fb749aSPavan Nikhilesh static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,uint8_t out_sched_type)92850fb749aSPavan Nikhilesh test_multiport_flow_sched_type_test(uint8_t in_sched_type,
92950fb749aSPavan Nikhilesh 			uint8_t out_sched_type)
93050fb749aSPavan Nikhilesh {
93150fb749aSPavan Nikhilesh 	const unsigned int total_events = MAX_EVENTS;
93250fb749aSPavan Nikhilesh 	uint32_t nr_ports;
93350fb749aSPavan Nikhilesh 	int ret;
93450fb749aSPavan Nikhilesh 
935daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
93650fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
93750fb749aSPavan Nikhilesh 				&nr_ports), "Port count get failed");
93850fb749aSPavan Nikhilesh 	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
93950fb749aSPavan Nikhilesh 
94050fb749aSPavan Nikhilesh 	if (!nr_ports) {
941daeda14cSPavan Nikhilesh 		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
94250fb749aSPavan Nikhilesh 			nr_ports, rte_lcore_count() - 1);
943daeda14cSPavan Nikhilesh 		return 0;
94450fb749aSPavan Nikhilesh 	}
94550fb749aSPavan Nikhilesh 
946ca4355e4SDavid Marchand 	/* Injects events with a 0 sequence number to total_events */
94750fb749aSPavan Nikhilesh 	ret = inject_events(
94850fb749aSPavan Nikhilesh 		0x1 /*flow_id */,
94950fb749aSPavan Nikhilesh 		RTE_EVENT_TYPE_CPU /* event_type */,
95050fb749aSPavan Nikhilesh 		0 /* sub_event_type (stage 0) */,
95150fb749aSPavan Nikhilesh 		in_sched_type,
95250fb749aSPavan Nikhilesh 		0 /* queue */,
95350fb749aSPavan Nikhilesh 		0 /* port */,
95450fb749aSPavan Nikhilesh 		total_events /* events */);
95550fb749aSPavan Nikhilesh 	if (ret)
956daeda14cSPavan Nikhilesh 		return -1;
95750fb749aSPavan Nikhilesh 
95850fb749aSPavan Nikhilesh 	ret = launch_workers_and_wait(worker_flow_based_pipeline,
95950fb749aSPavan Nikhilesh 					worker_flow_based_pipeline,
96050fb749aSPavan Nikhilesh 					total_events, nr_ports, out_sched_type);
96150fb749aSPavan Nikhilesh 	if (ret)
962daeda14cSPavan Nikhilesh 		return -1;
96350fb749aSPavan Nikhilesh 
96450fb749aSPavan Nikhilesh 	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
96550fb749aSPavan Nikhilesh 			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
96650fb749aSPavan Nikhilesh 		/* Check the events order maintained or not */
96750fb749aSPavan Nikhilesh 		return seqn_list_check(total_events);
96850fb749aSPavan Nikhilesh 	}
969daeda14cSPavan Nikhilesh 	return 0;
97050fb749aSPavan Nikhilesh }
97150fb749aSPavan Nikhilesh 
97250fb749aSPavan Nikhilesh 
/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

/* Ordered ingress, ordered stage-1 forwarding. */
static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

/* Ordered ingress, parallel stage-1 forwarding. */
static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

/* Atomic ingress, ordered stage-1 forwarding. */
static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

/* Atomic ingress, parallel stage-1 forwarding. */
static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

/* Parallel ingress, atomic stage-1 forwarding (no order check). */
static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

/* Parallel ingress, ordered stage-1 forwarding. */
static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

/* Parallel ingress, parallel stage-1 forwarding. */
static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
103850fb749aSPavan Nikhilesh 
/*
 * Two-stage pipeline worker where the stage is encoded in the event's
 * queue_id (queue 0 = stage 0, queue 1 = stage 1) rather than in
 * sub_event_type. Stage-0 events are forwarded to queue 1 with the
 * scheduling type requested by the test; stage-1 events are retired
 * after their sequence number is recorded in seqn_list.
 */
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1*/
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
			/* Record the sequence number, then retire the event. */
			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}


	return 0;
}
108250fb749aSPavan Nikhilesh 
/*
 * Queue-based counterpart of test_multiport_flow_sched_type_test():
 * inject MAX_EVENTS stage-0 events into queue 0 with @in_sched_type,
 * run the group-based pipeline (queue 0 -> queue 1 forwarded with
 * @out_sched_type) on all worker ports, and check event ordering where
 * the scheduling combination guarantees it. Requires at least two
 * queues; skips (returns 0) otherwise.
 */
static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	/* One worker lcore per port; the main lcore is excluded. */
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 ||  !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
			 __func__, queue_count, nr_ports,
			 rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with a 0 sequence number to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	/* Ordering is only guaranteed for non-parallel ingress feeding an
	 * atomic egress stage. */
	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check the events order maintained or not */
		return seqn_list_check(total_events);
	}
	return 0;
}
113350fb749aSPavan Nikhilesh 
/* Queue-pipeline variant: ordered ingress, atomic stage-1. */
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

/* Queue-pipeline variant: ordered ingress, ordered stage-1. */
static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

/* Queue-pipeline variant: ordered ingress, parallel stage-1. */
static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

/* Queue-pipeline variant: atomic ingress, atomic stage-1. */
static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

/* Queue-pipeline variant: atomic ingress, ordered stage-1. */
static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

/* Queue-pipeline variant: atomic ingress, parallel stage-1. */
static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

/* Queue-pipeline variant: parallel ingress, atomic stage-1. */
static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}
118450fb749aSPavan Nikhilesh 
118550fb749aSPavan Nikhilesh static int
test_multi_port_queue_parallel_to_ordered(void)118650fb749aSPavan Nikhilesh test_multi_port_queue_parallel_to_ordered(void)
118750fb749aSPavan Nikhilesh {
118850fb749aSPavan Nikhilesh 	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
118950fb749aSPavan Nikhilesh 				RTE_SCHED_TYPE_ORDERED);
119050fb749aSPavan Nikhilesh }
119150fb749aSPavan Nikhilesh 
119250fb749aSPavan Nikhilesh static int
test_multi_port_queue_parallel_to_parallel(void)119350fb749aSPavan Nikhilesh test_multi_port_queue_parallel_to_parallel(void)
119450fb749aSPavan Nikhilesh {
119550fb749aSPavan Nikhilesh 	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
119650fb749aSPavan Nikhilesh 				RTE_SCHED_TYPE_PARALLEL);
119750fb749aSPavan Nikhilesh }
119850fb749aSPavan Nikhilesh 
119950fb749aSPavan Nikhilesh static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void * arg)120050fb749aSPavan Nikhilesh worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
120150fb749aSPavan Nikhilesh {
120250fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
120350fb749aSPavan Nikhilesh 	struct rte_event ev;
120450fb749aSPavan Nikhilesh 	uint16_t valid_event;
120550fb749aSPavan Nikhilesh 	uint8_t port = param->port;
120650fb749aSPavan Nikhilesh 	rte_atomic32_t *total_events = param->total_events;
120750fb749aSPavan Nikhilesh 
120850fb749aSPavan Nikhilesh 	while (rte_atomic32_read(total_events) > 0) {
120950fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
121050fb749aSPavan Nikhilesh 		if (!valid_event)
121150fb749aSPavan Nikhilesh 			continue;
121250fb749aSPavan Nikhilesh 
121350fb749aSPavan Nikhilesh 		if (ev.sub_event_type == 255) { /* last stage */
121450fb749aSPavan Nikhilesh 			rte_pktmbuf_free(ev.mbuf);
121550fb749aSPavan Nikhilesh 			rte_atomic32_sub(total_events, 1);
121650fb749aSPavan Nikhilesh 		} else {
121750fb749aSPavan Nikhilesh 			ev.event_type = RTE_EVENT_TYPE_CPU;
121850fb749aSPavan Nikhilesh 			ev.sub_event_type++;
121950fb749aSPavan Nikhilesh 			ev.sched_type =
122050fb749aSPavan Nikhilesh 				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
122150fb749aSPavan Nikhilesh 			ev.op = RTE_EVENT_OP_FORWARD;
122250fb749aSPavan Nikhilesh 			rte_event_enqueue_burst(evdev, port, &ev, 1);
122350fb749aSPavan Nikhilesh 		}
122450fb749aSPavan Nikhilesh 	}
122550fb749aSPavan Nikhilesh 	return 0;
122650fb749aSPavan Nikhilesh }
122750fb749aSPavan Nikhilesh 
122850fb749aSPavan Nikhilesh static int
launch_multi_port_max_stages_random_sched_type(int (* fn)(void *))122950fb749aSPavan Nikhilesh launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
123050fb749aSPavan Nikhilesh {
123150fb749aSPavan Nikhilesh 	uint32_t nr_ports;
123250fb749aSPavan Nikhilesh 	int ret;
123350fb749aSPavan Nikhilesh 
1234daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
123550fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
123650fb749aSPavan Nikhilesh 				&nr_ports), "Port count get failed");
123750fb749aSPavan Nikhilesh 	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
123850fb749aSPavan Nikhilesh 
123950fb749aSPavan Nikhilesh 	if (!nr_ports) {
1240daeda14cSPavan Nikhilesh 		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
124150fb749aSPavan Nikhilesh 			nr_ports, rte_lcore_count() - 1);
1242daeda14cSPavan Nikhilesh 		return 0;
124350fb749aSPavan Nikhilesh 	}
124450fb749aSPavan Nikhilesh 
1245ca4355e4SDavid Marchand 	/* Injects events with a 0 sequence number to total_events */
124650fb749aSPavan Nikhilesh 	ret = inject_events(
124750fb749aSPavan Nikhilesh 		0x1 /*flow_id */,
124850fb749aSPavan Nikhilesh 		RTE_EVENT_TYPE_CPU /* event_type */,
124950fb749aSPavan Nikhilesh 		0 /* sub_event_type (stage 0) */,
125050fb749aSPavan Nikhilesh 		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
125150fb749aSPavan Nikhilesh 		0 /* queue */,
125250fb749aSPavan Nikhilesh 		0 /* port */,
125350fb749aSPavan Nikhilesh 		MAX_EVENTS /* events */);
125450fb749aSPavan Nikhilesh 	if (ret)
1255daeda14cSPavan Nikhilesh 		return -1;
125650fb749aSPavan Nikhilesh 
125750fb749aSPavan Nikhilesh 	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
125850fb749aSPavan Nikhilesh 					 0xff /* invalid */);
125950fb749aSPavan Nikhilesh }
126050fb749aSPavan Nikhilesh 
126150fb749aSPavan Nikhilesh /* Flow based pipeline with maximum stages with random sched type */
126250fb749aSPavan Nikhilesh static int
test_multi_port_flow_max_stages_random_sched_type(void)126350fb749aSPavan Nikhilesh test_multi_port_flow_max_stages_random_sched_type(void)
126450fb749aSPavan Nikhilesh {
126550fb749aSPavan Nikhilesh 	return launch_multi_port_max_stages_random_sched_type(
126650fb749aSPavan Nikhilesh 		worker_flow_based_pipeline_max_stages_rand_sched_type);
126750fb749aSPavan Nikhilesh }
126850fb749aSPavan Nikhilesh 
126950fb749aSPavan Nikhilesh static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void * arg)127050fb749aSPavan Nikhilesh worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
127150fb749aSPavan Nikhilesh {
127250fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
127350fb749aSPavan Nikhilesh 	struct rte_event ev;
127450fb749aSPavan Nikhilesh 	uint16_t valid_event;
127550fb749aSPavan Nikhilesh 	uint8_t port = param->port;
127650fb749aSPavan Nikhilesh 	uint32_t queue_count;
1277daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
127850fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
127950fb749aSPavan Nikhilesh 			    &queue_count), "Queue count get failed");
128050fb749aSPavan Nikhilesh 	uint8_t nr_queues = queue_count;
128150fb749aSPavan Nikhilesh 	rte_atomic32_t *total_events = param->total_events;
128250fb749aSPavan Nikhilesh 
128350fb749aSPavan Nikhilesh 	while (rte_atomic32_read(total_events) > 0) {
128450fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
128550fb749aSPavan Nikhilesh 		if (!valid_event)
128650fb749aSPavan Nikhilesh 			continue;
128750fb749aSPavan Nikhilesh 
128850fb749aSPavan Nikhilesh 		if (ev.queue_id == nr_queues - 1) { /* last stage */
128950fb749aSPavan Nikhilesh 			rte_pktmbuf_free(ev.mbuf);
129050fb749aSPavan Nikhilesh 			rte_atomic32_sub(total_events, 1);
129150fb749aSPavan Nikhilesh 		} else {
129250fb749aSPavan Nikhilesh 			ev.event_type = RTE_EVENT_TYPE_CPU;
129350fb749aSPavan Nikhilesh 			ev.queue_id++;
129450fb749aSPavan Nikhilesh 			ev.sched_type =
129550fb749aSPavan Nikhilesh 				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
129650fb749aSPavan Nikhilesh 			ev.op = RTE_EVENT_OP_FORWARD;
129750fb749aSPavan Nikhilesh 			rte_event_enqueue_burst(evdev, port, &ev, 1);
129850fb749aSPavan Nikhilesh 		}
129950fb749aSPavan Nikhilesh 	}
130050fb749aSPavan Nikhilesh 	return 0;
130150fb749aSPavan Nikhilesh }
130250fb749aSPavan Nikhilesh 
130350fb749aSPavan Nikhilesh /* Queue based pipeline with maximum stages with random sched type */
130450fb749aSPavan Nikhilesh static int
test_multi_port_queue_max_stages_random_sched_type(void)130550fb749aSPavan Nikhilesh test_multi_port_queue_max_stages_random_sched_type(void)
130650fb749aSPavan Nikhilesh {
130750fb749aSPavan Nikhilesh 	return launch_multi_port_max_stages_random_sched_type(
130850fb749aSPavan Nikhilesh 		worker_queue_based_pipeline_max_stages_rand_sched_type);
130950fb749aSPavan Nikhilesh }
131050fb749aSPavan Nikhilesh 
131150fb749aSPavan Nikhilesh static int
worker_mixed_pipeline_max_stages_rand_sched_type(void * arg)131250fb749aSPavan Nikhilesh worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
131350fb749aSPavan Nikhilesh {
131450fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
131550fb749aSPavan Nikhilesh 	struct rte_event ev;
131650fb749aSPavan Nikhilesh 	uint16_t valid_event;
131750fb749aSPavan Nikhilesh 	uint8_t port = param->port;
131850fb749aSPavan Nikhilesh 	uint32_t queue_count;
1319daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
132050fb749aSPavan Nikhilesh 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
132150fb749aSPavan Nikhilesh 			    &queue_count), "Queue count get failed");
132250fb749aSPavan Nikhilesh 	uint8_t nr_queues = queue_count;
132350fb749aSPavan Nikhilesh 	rte_atomic32_t *total_events = param->total_events;
132450fb749aSPavan Nikhilesh 
132550fb749aSPavan Nikhilesh 	while (rte_atomic32_read(total_events) > 0) {
132650fb749aSPavan Nikhilesh 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
132750fb749aSPavan Nikhilesh 		if (!valid_event)
132850fb749aSPavan Nikhilesh 			continue;
132950fb749aSPavan Nikhilesh 
133050fb749aSPavan Nikhilesh 		if (ev.queue_id == nr_queues - 1) { /* Last stage */
133150fb749aSPavan Nikhilesh 			rte_pktmbuf_free(ev.mbuf);
133250fb749aSPavan Nikhilesh 			rte_atomic32_sub(total_events, 1);
133350fb749aSPavan Nikhilesh 		} else {
133450fb749aSPavan Nikhilesh 			ev.event_type = RTE_EVENT_TYPE_CPU;
133550fb749aSPavan Nikhilesh 			ev.queue_id++;
133650fb749aSPavan Nikhilesh 			ev.sub_event_type = rte_rand() % 256;
133750fb749aSPavan Nikhilesh 			ev.sched_type =
133850fb749aSPavan Nikhilesh 				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
133950fb749aSPavan Nikhilesh 			ev.op = RTE_EVENT_OP_FORWARD;
134050fb749aSPavan Nikhilesh 			rte_event_enqueue_burst(evdev, port, &ev, 1);
134150fb749aSPavan Nikhilesh 		}
134250fb749aSPavan Nikhilesh 	}
134350fb749aSPavan Nikhilesh 	return 0;
134450fb749aSPavan Nikhilesh }
134550fb749aSPavan Nikhilesh 
134650fb749aSPavan Nikhilesh /* Queue and flow based pipeline with maximum stages with random sched type */
134750fb749aSPavan Nikhilesh static int
test_multi_port_mixed_max_stages_random_sched_type(void)134850fb749aSPavan Nikhilesh test_multi_port_mixed_max_stages_random_sched_type(void)
134950fb749aSPavan Nikhilesh {
135050fb749aSPavan Nikhilesh 	return launch_multi_port_max_stages_random_sched_type(
135150fb749aSPavan Nikhilesh 		worker_mixed_pipeline_max_stages_rand_sched_type);
135250fb749aSPavan Nikhilesh }
135350fb749aSPavan Nikhilesh 
135450fb749aSPavan Nikhilesh static int
worker_ordered_flow_producer(void * arg)135550fb749aSPavan Nikhilesh worker_ordered_flow_producer(void *arg)
135650fb749aSPavan Nikhilesh {
135750fb749aSPavan Nikhilesh 	struct test_core_param *param = arg;
135850fb749aSPavan Nikhilesh 	uint8_t port = param->port;
135950fb749aSPavan Nikhilesh 	struct rte_mbuf *m;
136050fb749aSPavan Nikhilesh 	int counter = 0;
136150fb749aSPavan Nikhilesh 
136250fb749aSPavan Nikhilesh 	while (counter < NUM_PACKETS) {
136350fb749aSPavan Nikhilesh 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
136450fb749aSPavan Nikhilesh 		if (m == NULL)
136550fb749aSPavan Nikhilesh 			continue;
136650fb749aSPavan Nikhilesh 
1367ca4355e4SDavid Marchand 		*rte_event_pmd_selftest_seqn(m) = counter++;
136850fb749aSPavan Nikhilesh 
136950fb749aSPavan Nikhilesh 		struct rte_event ev = {.event = 0, .u64 = 0};
137050fb749aSPavan Nikhilesh 
137150fb749aSPavan Nikhilesh 		ev.flow_id = 0x1; /* Generate a fat flow */
137250fb749aSPavan Nikhilesh 		ev.sub_event_type = 0;
137350fb749aSPavan Nikhilesh 		/* Inject the new event */
137450fb749aSPavan Nikhilesh 		ev.op = RTE_EVENT_OP_NEW;
137550fb749aSPavan Nikhilesh 		ev.event_type = RTE_EVENT_TYPE_CPU;
137650fb749aSPavan Nikhilesh 		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
137750fb749aSPavan Nikhilesh 		ev.queue_id = 0;
137850fb749aSPavan Nikhilesh 		ev.mbuf = m;
137950fb749aSPavan Nikhilesh 		rte_event_enqueue_burst(evdev, port, &ev, 1);
138050fb749aSPavan Nikhilesh 	}
138150fb749aSPavan Nikhilesh 
138250fb749aSPavan Nikhilesh 	return 0;
138350fb749aSPavan Nikhilesh }
138450fb749aSPavan Nikhilesh 
138550fb749aSPavan Nikhilesh static inline int
test_producer_consumer_ingress_order_test(int (* fn)(void *))138650fb749aSPavan Nikhilesh test_producer_consumer_ingress_order_test(int (*fn)(void *))
138750fb749aSPavan Nikhilesh {
138850fb749aSPavan Nikhilesh 	uint32_t nr_ports;
138950fb749aSPavan Nikhilesh 
1390daeda14cSPavan Nikhilesh 	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
139150fb749aSPavan Nikhilesh 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
139250fb749aSPavan Nikhilesh 				&nr_ports), "Port count get failed");
139350fb749aSPavan Nikhilesh 	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
139450fb749aSPavan Nikhilesh 
139550fb749aSPavan Nikhilesh 	if (rte_lcore_count() < 3 || nr_ports < 2) {
1396daeda14cSPavan Nikhilesh 		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
1397daeda14cSPavan Nikhilesh 		return 0;
139850fb749aSPavan Nikhilesh 	}
139950fb749aSPavan Nikhilesh 
140050fb749aSPavan Nikhilesh 	launch_workers_and_wait(worker_ordered_flow_producer, fn,
140150fb749aSPavan Nikhilesh 				NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
140250fb749aSPavan Nikhilesh 	/* Check the events order maintained or not */
140350fb749aSPavan Nikhilesh 	return seqn_list_check(NUM_PACKETS);
140450fb749aSPavan Nikhilesh }
140550fb749aSPavan Nikhilesh 
140650fb749aSPavan Nikhilesh /* Flow based producer consumer ingress order test */
140750fb749aSPavan Nikhilesh static int
test_flow_producer_consumer_ingress_order_test(void)140850fb749aSPavan Nikhilesh test_flow_producer_consumer_ingress_order_test(void)
140950fb749aSPavan Nikhilesh {
141050fb749aSPavan Nikhilesh 	return test_producer_consumer_ingress_order_test(
141150fb749aSPavan Nikhilesh 				worker_flow_based_pipeline);
141250fb749aSPavan Nikhilesh }
141350fb749aSPavan Nikhilesh 
141450fb749aSPavan Nikhilesh /* Queue based producer consumer ingress order test */
141550fb749aSPavan Nikhilesh static int
test_queue_producer_consumer_ingress_order_test(void)141650fb749aSPavan Nikhilesh test_queue_producer_consumer_ingress_order_test(void)
141750fb749aSPavan Nikhilesh {
141850fb749aSPavan Nikhilesh 	return test_producer_consumer_ingress_order_test(
141950fb749aSPavan Nikhilesh 				worker_group_based_pipeline);
142050fb749aSPavan Nikhilesh }
142150fb749aSPavan Nikhilesh 
octeontx_test_run(int (* setup)(void),void (* tdown)(void),int (* test)(void),const char * name)1422daeda14cSPavan Nikhilesh static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1423daeda14cSPavan Nikhilesh 		int (*test)(void), const char *name)
1424daeda14cSPavan Nikhilesh {
1425daeda14cSPavan Nikhilesh 	if (setup() < 0) {
1426daeda14cSPavan Nikhilesh 		ssovf_log_selftest("Error setting up test %s", name);
1427daeda14cSPavan Nikhilesh 		unsupported++;
1428daeda14cSPavan Nikhilesh 	} else {
1429daeda14cSPavan Nikhilesh 		if (test() < 0) {
1430daeda14cSPavan Nikhilesh 			failed++;
1431daeda14cSPavan Nikhilesh 			ssovf_log_selftest("%s Failed", name);
1432daeda14cSPavan Nikhilesh 		} else {
1433daeda14cSPavan Nikhilesh 			passed++;
1434daeda14cSPavan Nikhilesh 			ssovf_log_selftest("%s Passed", name);
143550fb749aSPavan Nikhilesh 		}
1436daeda14cSPavan Nikhilesh 	}
143750fb749aSPavan Nikhilesh 
1438daeda14cSPavan Nikhilesh 	total++;
1439daeda14cSPavan Nikhilesh 	tdown();
1440daeda14cSPavan Nikhilesh }
1441daeda14cSPavan Nikhilesh 
1442daeda14cSPavan Nikhilesh int
test_eventdev_octeontx(void)144350fb749aSPavan Nikhilesh test_eventdev_octeontx(void)
144450fb749aSPavan Nikhilesh {
1445daeda14cSPavan Nikhilesh 	testsuite_setup();
144650fb749aSPavan Nikhilesh 
1447daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1448daeda14cSPavan Nikhilesh 			test_simple_enqdeq_ordered);
1449daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1450daeda14cSPavan Nikhilesh 			test_simple_enqdeq_atomic);
1451daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1452daeda14cSPavan Nikhilesh 			test_simple_enqdeq_parallel);
1453daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1454daeda14cSPavan Nikhilesh 			test_multi_queue_enq_single_port_deq);
1455daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
14568384f0e0SJerin Jacob 			test_dev_stop_flush);
14578384f0e0SJerin Jacob 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1458daeda14cSPavan Nikhilesh 			test_multi_queue_enq_multi_port_deq);
1459daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1460daeda14cSPavan Nikhilesh 			test_queue_to_port_single_link);
1461daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1462daeda14cSPavan Nikhilesh 			test_queue_to_port_multi_link);
1463daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1464daeda14cSPavan Nikhilesh 			test_multi_port_flow_ordered_to_atomic);
1465daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1466daeda14cSPavan Nikhilesh 			test_multi_port_flow_ordered_to_ordered);
1467daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1468daeda14cSPavan Nikhilesh 			test_multi_port_flow_ordered_to_parallel);
1469daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1470daeda14cSPavan Nikhilesh 			test_multi_port_flow_atomic_to_atomic);
1471daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1472daeda14cSPavan Nikhilesh 			test_multi_port_flow_atomic_to_ordered);
1473daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1474daeda14cSPavan Nikhilesh 			test_multi_port_flow_atomic_to_parallel);
1475daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1476daeda14cSPavan Nikhilesh 			test_multi_port_flow_parallel_to_atomic);
1477daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1478daeda14cSPavan Nikhilesh 			test_multi_port_flow_parallel_to_ordered);
1479daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1480daeda14cSPavan Nikhilesh 			test_multi_port_flow_parallel_to_parallel);
1481daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1482daeda14cSPavan Nikhilesh 			test_multi_port_queue_ordered_to_atomic);
1483daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1484daeda14cSPavan Nikhilesh 			test_multi_port_queue_ordered_to_ordered);
1485daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1486daeda14cSPavan Nikhilesh 			test_multi_port_queue_ordered_to_parallel);
1487daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1488daeda14cSPavan Nikhilesh 			test_multi_port_queue_atomic_to_atomic);
1489daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1490daeda14cSPavan Nikhilesh 			test_multi_port_queue_atomic_to_ordered);
1491daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1492daeda14cSPavan Nikhilesh 			test_multi_port_queue_atomic_to_parallel);
1493daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1494daeda14cSPavan Nikhilesh 			test_multi_port_queue_parallel_to_atomic);
1495daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1496daeda14cSPavan Nikhilesh 			test_multi_port_queue_parallel_to_ordered);
1497daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1498daeda14cSPavan Nikhilesh 			test_multi_port_queue_parallel_to_parallel);
1499daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1500daeda14cSPavan Nikhilesh 			test_multi_port_flow_max_stages_random_sched_type);
1501daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1502daeda14cSPavan Nikhilesh 			test_multi_port_queue_max_stages_random_sched_type);
1503daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1504daeda14cSPavan Nikhilesh 			test_multi_port_mixed_max_stages_random_sched_type);
1505daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1506daeda14cSPavan Nikhilesh 			test_flow_producer_consumer_ingress_order_test);
1507daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1508daeda14cSPavan Nikhilesh 			test_queue_producer_consumer_ingress_order_test);
1509daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1510daeda14cSPavan Nikhilesh 			test_multi_queue_priority);
1511daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1512daeda14cSPavan Nikhilesh 			test_multi_port_flow_ordered_to_atomic);
1513daeda14cSPavan Nikhilesh 	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1514daeda14cSPavan Nikhilesh 			test_multi_port_queue_ordered_to_atomic);
1515daeda14cSPavan Nikhilesh 
1516daeda14cSPavan Nikhilesh 	ssovf_log_selftest("Total tests   : %d", total);
1517daeda14cSPavan Nikhilesh 	ssovf_log_selftest("Passed        : %d", passed);
1518daeda14cSPavan Nikhilesh 	ssovf_log_selftest("Failed        : %d", failed);
1519daeda14cSPavan Nikhilesh 	ssovf_log_selftest("Not supported : %d", unsupported);
1520daeda14cSPavan Nikhilesh 
1521daeda14cSPavan Nikhilesh 	testsuite_teardown();
1522daeda14cSPavan Nikhilesh 
1523daeda14cSPavan Nikhilesh 	if (failed)
1524daeda14cSPavan Nikhilesh 		return -1;
1525daeda14cSPavan Nikhilesh 
1526daeda14cSPavan Nikhilesh 	return 0;
1527daeda14cSPavan Nikhilesh }
1528