/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "ssovf_evdev.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)

#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

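/*
 * seqn_list records the order in which final-stage events are observed:
 * workers append the dequeued mbuf's seqn via seqn_list_update() (published
 * with rte_smp_wmb()) and seqn_list_check() later verifies that
 * seqn_list[i] == i, i.e. that ingress ordering survived the pipeline.
 */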
static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			ssovf_log_dbg("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			ssovf_log_dbg("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

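/*
 * Common device bring-up used by every test case: create a per-test mempool,
 * configure the device with the maxima advertised in rte_event_dev_info,
 * set up all queues and ports, link every queue to every port and start the
 * device. The mode selects default queue priorities, unique per-queue
 * priorities or per-dequeue timeout support.
 */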
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool for each test case to keep it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					MAX_EVENTS,
					0 /* MBUF_CACHE_SIZE */,
					0,
					512, /* Use very small mbufs */
					rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
				info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects a unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
						&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

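/*
 * Allocate 'events' mbufs, stamp each with a sequence number and the given
 * event attributes (recorded both in the event and in the mbuf data area by
 * update_event_and_validation_attr()) and enqueue them as NEW events on
 * 'port'. The stored copy lets the consumer side cross-check what it
 * dequeues against what was enqueued.
 */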
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events; poll a few times and then exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d", ev.mbuf->seqn);
	}
	return 0;
}

static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

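/*
 * Dequeue until 'total_events' valid events have been seen on 'port'.
 * Every event is checked against the attributes stashed in its mbuf and,
 * optionally, against a test-specific callback. A forward-progress counter
 * bounds the number of consecutive empty dequeues (UINT16_MAX) so a stalled
 * scheduler is reported as a deadlock instead of hanging the test. Finally,
 * check_excess_events() asserts that no stray events remain.
 */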
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
			index, ev->mbuf->seqn);
	return 0;
}

static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /* flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events, spread them across the available
 * queues and, on dequeue through a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject MAX_EVENTS events across queues 0..queue_count-1 using a modulo
 * operation.
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events arrive in
 * 0,8,16,24,1,9,17,25,...,7,15,23,31 order, from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			ev->mbuf->seqn, index, expected_val, range,
			queue_count, MAX_EVENTS);
	return 0;
}

static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the validation logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

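/*
 * Watchdog for the worker lcores: poll the last launched lcore's state,
 * print the outstanding event count about once a second and declare a
 * deadlock (dumping the device state) if it has not finished within
 * roughly ten timer seconds.
 */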
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;
	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}
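/*
 * Start 'nb_workers' worker lcores: the master_worker callback runs on the
 * first worker lcore with port 0, slave_workers on the rest with ports
 * 1..nb_workers-1. All workers share an atomic count of outstanding events
 * and a randomized dequeue timeout; the call returns once every worker has
 * joined or the watchdog trips.
 */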
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
			int (*slave_workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
			nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}

static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}

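/*
 * Fill the device with CPU events, register a stop-flush callback and stop
 * the device: the eventdev must hand every in-flight event to the callback,
 * so the flush count has to match the number of injected events.
 */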
static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
				"count mismatch total_events=%d count=%d",
				total_events, count);
	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by checking
 * queue_id == x on dequeue from that specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify that the events arrived from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the links on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

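/*
 * Two-stage pipeline keyed on sub_event_type: stage-0 events are forwarded
 * to stage 1 on an atomic flow (flow_id 0x2) with the requested output
 * sched type; stage-1 events are recorded in seqn_list and released. With a
 * non-parallel input and an atomic output stage, the recorded sequence must
 * match the ingress order.
 */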
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
					ev.sub_event_type);
			return -1;
		}
	}
	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
			nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with m->seqn = 0..total_events-1 */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
					worker_flow_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}

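/*
 * Same two-stage pipeline, but keyed on queue_id ("group") instead of
 * sub_event_type: queue 0 is stage 0 and forwards to queue 1, which records
 * the sequence number and releases the event.
 */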
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
			 __func__, queue_count, nr_ports,
			 rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with m->seqn = 0..total_events-1 */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}

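/*
 * 256-stage pipeline: sub_event_type counts the stage, each forward picks a
 * random sched type, and events are released once they reach stage 255.
 */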
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
			nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with m->seqn = 0..MAX_EVENTS-1 */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					 0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}

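/*
 * Producer half of the ingress-order test: enqueue NUM_PACKETS NEW events
 * on a single ordered flow from a dedicated port, stamping each mbuf with
 * its sequence number. A pipeline worker (flow or queue based) consumes
 * them and seqn_list_check() verifies that ingress order is preserved.
 */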
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
				NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the event order was maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}

static void
octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	total++;
	tdown();
}

int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);

	ssovf_log_selftest("Total tests   : %d", total);
	ssovf_log_selftest("Passed        : %d", passed);
	ssovf_log_selftest("Failed        : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}