1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 */
4
5 #include <stdlib.h>
6
7 #include <rte_atomic.h>
8 #include <rte_common.h>
9 #include <rte_cycles.h>
10 #include <rte_debug.h>
11 #include <rte_eal.h>
12 #include <rte_ethdev.h>
13 #include <rte_eventdev.h>
14 #include <rte_hexdump.h>
15 #include <rte_mbuf.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_launch.h>
19 #include <rte_lcore.h>
20 #include <rte_per_lcore.h>
21 #include <rte_random.h>
22 #include <bus_vdev_driver.h>
23 #include <rte_test.h>
24
25 #include "ssovf_evdev.h"
26
27 #define NUM_PACKETS (1 << 18)
28 #define MAX_EVENTS (16 * 1024)
29
30 #define OCTEONTX_TEST_RUN(setup, teardown, test) \
31 octeontx_test_run(setup, teardown, test, #test)
32
33 static int total;
34 static int passed;
35 static int failed;
36 static int unsupported;
37
38 static int evdev;
39 static struct rte_mempool *eventdev_test_mempool;
40
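/*
 * Attributes recorded for each injected event. They are written into the
 * mbuf data area at enqueue time and compared against the dequeued
 * rte_event fields in validate_event().
 */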
41 struct event_attr {
42 uint32_t flow_id;
43 uint8_t event_type;
44 uint8_t sub_event_type;
45 uint8_t sched_type;
46 uint8_t queue;
47 uint8_t port;
48 };
49
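/*
 * Sequence-number bookkeeping used by the ordering tests: workers append
 * the sequence number of each completed event via seqn_list_update() and
 * seqn_list_check() verifies that the numbers were recorded in 0..limit-1
 * order.
 */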
50 static uint32_t seqn_list_index;
51 static int seqn_list[NUM_PACKETS];
52
53 static inline void
54 seqn_list_init(void)
55 {
56 RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
57 memset(seqn_list, 0, sizeof(seqn_list));
58 seqn_list_index = 0;
59 }
60
61 static inline int
62 seqn_list_update(int val)
63 {
64 if (seqn_list_index >= NUM_PACKETS)
65 return -1;
66
67 seqn_list[seqn_list_index++] = val;
68 rte_smp_wmb();
69 return 0;
70 }
71
72 static inline int
73 seqn_list_check(int limit)
74 {
75 int i;
76
77 for (i = 0; i < limit; i++) {
78 if (seqn_list[i] != i) {
79 ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
80 return -1;
81 }
82 }
83 return 0;
84 }
85
86 struct test_core_param {
87 rte_atomic32_t *total_events;
88 uint64_t dequeue_tmo_ticks;
89 uint8_t port;
90 uint8_t sched_type;
91 };
92
93 static int
94 testsuite_setup(void)
95 {
96 const char *eventdev_name = "event_octeontx";
97
98 evdev = rte_event_dev_get_dev_id(eventdev_name);
99 if (evdev < 0) {
100 ssovf_log_dbg("%d: Eventdev %s not found - creating.",
101 __LINE__, eventdev_name);
102 if (rte_vdev_init(eventdev_name, NULL) < 0) {
103 ssovf_log_dbg("Error creating eventdev %s",
104 eventdev_name);
105 return -1;
106 }
107 evdev = rte_event_dev_get_dev_id(eventdev_name);
108 if (evdev < 0) {
109 ssovf_log_dbg("Error finding newly created eventdev");
110 return -1;
111 }
112 }
113
114 return 0;
115 }
116
117 static void
118 testsuite_teardown(void)
119 {
120 rte_event_dev_close(evdev);
121 }
122
123 static inline void
124 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
125 struct rte_event_dev_info *info)
126 {
127 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
128 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
129 dev_conf->nb_event_ports = info->max_event_ports;
130 dev_conf->nb_event_queues = info->max_event_queues;
131 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
132 dev_conf->nb_event_port_dequeue_depth =
133 info->max_event_port_dequeue_depth;
134 dev_conf->nb_event_port_enqueue_depth =
135 info->max_event_port_enqueue_depth;
138 dev_conf->nb_events_limit =
139 info->max_num_events;
140 }
141
142 enum {
143 TEST_EVENTDEV_SETUP_DEFAULT,
144 TEST_EVENTDEV_SETUP_PRIORITY,
145 TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
146 };
147
148 static inline int
149 _eventdev_setup(int mode)
150 {
151 int i, ret;
152 struct rte_event_dev_config dev_conf;
153 struct rte_event_dev_info info;
154 const char *pool_name = "evdev_octeontx_test_pool";
155
156 /* Create and destroy pool for each test case to make it standalone */
157 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
158 MAX_EVENTS,
159 0 /*MBUF_CACHE_SIZE*/,
160 0,
161 512, /* Use very small mbufs */
162 rte_socket_id());
163 if (!eventdev_test_mempool) {
164 ssovf_log_dbg("ERROR creating mempool");
165 return -1;
166 }
167
168 ret = rte_event_dev_info_get(evdev, &info);
169 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
170 RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
171 "ERROR max_num_events=%d < max_events=%d",
172 info.max_num_events, MAX_EVENTS);
173
174 devconf_set_default_sane_values(&dev_conf, &info);
175 if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
176 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
177
178 ret = rte_event_dev_configure(evdev, &dev_conf);
179 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
180
181 uint32_t queue_count;
182 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
183 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
184 &queue_count), "Queue count get failed");
185
186 if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
187 if (queue_count > 8) {
188 ssovf_log_dbg(
189 "test expects a unique priority per queue");
190 return -ENOTSUP;
191 }
192
193 /* Configure event queues (0 to n) with
194 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
195 * RTE_EVENT_DEV_PRIORITY_LOWEST
196 */
197 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
198 queue_count;
199 for (i = 0; i < (int)queue_count; i++) {
200 struct rte_event_queue_conf queue_conf;
201
202 ret = rte_event_queue_default_conf_get(evdev, i,
203 &queue_conf);
204 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
205 i);
206 queue_conf.priority = i * step;
207 ret = rte_event_queue_setup(evdev, i, &queue_conf);
208 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
209 i);
210 }
211
212 } else {
213 /* Configure event queues with default priority */
214 for (i = 0; i < (int)queue_count; i++) {
215 ret = rte_event_queue_setup(evdev, i, NULL);
216 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
217 i);
218 }
219 }
220 /* Configure event ports */
221 uint32_t port_count;
222 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
223 RTE_EVENT_DEV_ATTR_PORT_COUNT,
224 &port_count), "Port count get failed");
225 for (i = 0; i < (int)port_count; i++) {
226 ret = rte_event_port_setup(evdev, i, NULL);
227 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
228 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
229 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
230 i);
231 }
232
233 ret = rte_event_dev_start(evdev);
234 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
235
236 return 0;
237 }
238
239 static inline int
240 eventdev_setup(void)
241 {
242 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
243 }
244
245 static inline int
246 eventdev_setup_priority(void)
247 {
248 return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
249 }
250
251 static inline int
252 eventdev_setup_dequeue_timeout(void)
253 {
254 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
255 }
256
257 static inline void
258 eventdev_teardown(void)
259 {
260 rte_event_dev_stop(evdev);
261 rte_mempool_free(eventdev_test_mempool);
262 }
263
264 static inline void
265 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
266 uint32_t flow_id, uint8_t event_type,
267 uint8_t sub_event_type, uint8_t sched_type,
268 uint8_t queue, uint8_t port)
269 {
270 struct event_attr *attr;
271
272 /* Store the event attributes in mbuf for future reference */
273 attr = rte_pktmbuf_mtod(m, struct event_attr *);
274 attr->flow_id = flow_id;
275 attr->event_type = event_type;
276 attr->sub_event_type = sub_event_type;
277 attr->sched_type = sched_type;
278 attr->queue = queue;
279 attr->port = port;
280
281 ev->flow_id = flow_id;
282 ev->sub_event_type = sub_event_type;
283 ev->event_type = event_type;
284 /* Inject the new event */
285 ev->op = RTE_EVENT_OP_NEW;
286 ev->sched_type = sched_type;
287 ev->queue_id = queue;
288 ev->mbuf = m;
289 }
290
291 static inline int
292 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
293 uint8_t sched_type, uint8_t queue, uint8_t port,
294 unsigned int events)
295 {
296 struct rte_mbuf *m;
297 unsigned int i;
298
299 for (i = 0; i < events; i++) {
300 struct rte_event ev = {.event = 0, .u64 = 0};
301
302 m = rte_pktmbuf_alloc(eventdev_test_mempool);
303 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
304
305 *rte_event_pmd_selftest_seqn(m) = i;
306 update_event_and_validation_attr(m, &ev, flow_id, event_type,
307 sub_event_type, sched_type, queue, port);
308 rte_event_enqueue_burst(evdev, port, &ev, 1);
309 }
310 return 0;
311 }
312
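/*
 * After a test has consumed its expected number of events, poll the port a
 * bounded number of times and assert that no stray events are still being
 * scheduled to it.
 */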
313 static inline int
314 check_excess_events(uint8_t port)
315 {
316 int i;
317 uint16_t valid_event;
318 struct rte_event ev;
319
320 /* Check for excess events; try a few times and then exit */
321 for (i = 0; i < 32; i++) {
322 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
323
324 RTE_TEST_ASSERT_SUCCESS(valid_event,
325 "Unexpected valid event=%d",
326 *rte_event_pmd_selftest_seqn(ev.mbuf));
327 }
328 return 0;
329 }
330
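/*
 * Inject total_events single events through port 0, randomizing the flow id,
 * sub_event_type, sched type and destination queue for each event.
 */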
331 static inline int
332 generate_random_events(const unsigned int total_events)
333 {
334 struct rte_event_dev_info info;
335 unsigned int i;
336 int ret;
337
338 uint32_t queue_count;
339 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
340 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
341 &queue_count), "Queue count get failed");
342
343 ret = rte_event_dev_info_get(evdev, &info);
344 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
345 for (i = 0; i < total_events; i++) {
346 ret = inject_events(
347 rte_rand() % info.max_event_queue_flows /*flow_id */,
348 RTE_EVENT_TYPE_CPU /* event_type */,
349 rte_rand() % 256 /* sub_event_type */,
350 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
351 rte_rand() % queue_count /* queue */,
352 0 /* port */,
353 1 /* events */);
354 if (ret)
355 return -1;
356 }
357 return ret;
358 }
359
360
361 static inline int
362 validate_event(struct rte_event *ev)
363 {
364 struct event_attr *attr;
365
366 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
367 RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
368 "flow_id mismatch enq=%d deq =%d",
369 attr->flow_id, ev->flow_id);
370 RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
371 "event_type mismatch enq=%d deq =%d",
372 attr->event_type, ev->event_type);
373 RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
374 "sub_event_type mismatch enq=%d deq =%d",
375 attr->sub_event_type, ev->sub_event_type);
376 RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
377 "sched_type mismatch enq=%d deq =%d",
378 attr->sched_type, ev->sched_type);
379 RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
380 "queue mismatch enq=%d deq =%d",
381 attr->queue, ev->queue_id);
382 return 0;
383 }
384
385 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
386 struct rte_event *ev);
387
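/*
 * Dequeue total_events events from the given port, validating the generic
 * attributes stored in each mbuf and, when supplied, a test specific callback
 * per event. Declares a deadlock if no event arrives for UINT16_MAX
 * consecutive polls, and finally checks that no excess events remain.
 */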
388 static inline int
389 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
390 {
391 int ret;
392 uint16_t valid_event;
393 uint32_t events = 0, forward_progress_cnt = 0, index = 0;
394 struct rte_event ev;
395
396 while (1) {
397 if (++forward_progress_cnt > UINT16_MAX) {
398 ssovf_log_dbg("Detected deadlock");
399 return -1;
400 }
401
402 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
403 if (!valid_event)
404 continue;
405
406 forward_progress_cnt = 0;
407 ret = validate_event(&ev);
408 if (ret)
409 return -1;
410
411 if (fn != NULL) {
412 ret = fn(index, port, &ev);
413 RTE_TEST_ASSERT_SUCCESS(ret,
414 "Failed to validate test specific event");
415 }
416
417 ++index;
418
419 rte_pktmbuf_free(ev.mbuf);
420 if (++events >= total_events)
421 break;
422 }
423
424 return check_excess_events(port);
425 }
426
427 static int
428 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
429 {
430 RTE_SET_USED(port);
431 RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
432 "index=%d != seqn=%d", index,
433 *rte_event_pmd_selftest_seqn(ev->mbuf));
434 return 0;
435 }
436
437 static inline int
438 test_simple_enqdeq(uint8_t sched_type)
439 {
440 int ret;
441
442 ret = inject_events(0 /*flow_id */,
443 RTE_EVENT_TYPE_CPU /* event_type */,
444 0 /* sub_event_type */,
445 sched_type,
446 0 /* queue */,
447 0 /* port */,
448 MAX_EVENTS);
449 if (ret)
450 return -1;
451
452 return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
453 }
454
455 static int
456 test_simple_enqdeq_ordered(void)
457 {
458 return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
459 }
460
461 static int
462 test_simple_enqdeq_atomic(void)
463 {
464 return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
465 }
466
467 static int
468 test_simple_enqdeq_parallel(void)
469 {
470 return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
471 }
472
473 /*
474 * Generate a prescribed number of events and spread them across available
475 * queues. On dequeue, verify the enqueued event attributes using a single
476 * event port (port 0).
477 */
478 static int
479 test_multi_queue_enq_single_port_deq(void)
480 {
481 int ret;
482
483 ret = generate_random_events(MAX_EVENTS);
484 if (ret)
485 return -1;
486
487 return consume_events(0 /* port */, MAX_EVENTS, NULL);
488 }
489
490 /*
491 * Inject MAX_EVENTS events over queues 0..queue_count-1 using a modulo
492 * operation
493 *
494 * For example, Inject 32 events over 0..7 queues
495 * enqueue events 0, 8, 16, 24 in queue 0
496 * enqueue events 1, 9, 17, 25 in queue 1
497 * ..
498 * ..
499 * enqueue events 7, 15, 23, 31 in queue 7
500 *
501 * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
502 * order from queue 0 (highest priority) to queue 7 (lowest priority)
503 */
504 static int
505 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
506 {
507 uint32_t queue_count;
508 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
509 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
510 &queue_count), "Queue count get failed");
511 uint32_t range = MAX_EVENTS / queue_count;
512 uint32_t expected_val = (index % range) * queue_count;
513
514 expected_val += ev->queue_id;
515 RTE_SET_USED(port);
516 RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
517 "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
518 *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
519 queue_count, MAX_EVENTS);
520 return 0;
521 }
522
523 static int
524 test_multi_queue_priority(void)
525 {
526 uint8_t queue;
527 struct rte_mbuf *m;
528 int i, max_evts_roundoff;
529
530 /* See validate_queue_priority() comments for the priority validation logic */
531 uint32_t queue_count;
532 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
533 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
534 &queue_count), "Queue count get failed");
535 max_evts_roundoff = MAX_EVENTS / queue_count;
536 max_evts_roundoff *= queue_count;
537
538 for (i = 0; i < max_evts_roundoff; i++) {
539 struct rte_event ev = {.event = 0, .u64 = 0};
540
541 m = rte_pktmbuf_alloc(eventdev_test_mempool);
542 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
543
544 *rte_event_pmd_selftest_seqn(m) = i;
545 queue = i % queue_count;
546 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
547 0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
548 rte_event_enqueue_burst(evdev, 0, &ev, 1);
549 }
550
551 return consume_events(0, max_evts_roundoff, validate_queue_priority);
552 }
553
554 static int
555 worker_multi_port_fn(void *arg)
556 {
557 struct test_core_param *param = arg;
558 struct rte_event ev;
559 uint16_t valid_event;
560 uint8_t port = param->port;
561 rte_atomic32_t *total_events = param->total_events;
562 int ret;
563
564 while (rte_atomic32_read(total_events) > 0) {
565 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
566 if (!valid_event)
567 continue;
568
569 ret = validate_event(&ev);
570 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
571 rte_pktmbuf_free(ev.mbuf);
572 rte_atomic32_sub(total_events, 1);
573 }
574 return 0;
575 }
576
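/*
 * Wait for the last launched worker lcore to return to WAIT state, printing
 * the outstanding event count once per second and flagging a deadlock (with
 * an eventdev dump) if nothing completes for about ten seconds.
 */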
577 static inline int
578 wait_workers_to_join(int lcore, const rte_atomic32_t *count)
579 {
580 uint64_t cycles, print_cycles;
581 RTE_SET_USED(count);
582
583 print_cycles = cycles = rte_get_timer_cycles();
584 while (rte_eal_get_lcore_state(lcore) != WAIT) {
585 uint64_t new_cycles = rte_get_timer_cycles();
586
587 if (new_cycles - print_cycles > rte_get_timer_hz()) {
588 ssovf_log_dbg("\r%s: events %d", __func__,
589 rte_atomic32_read(count));
590 print_cycles = new_cycles;
591 }
592 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
593 ssovf_log_dbg(
594 "%s: No schedules for 10 seconds, deadlock (%d)",
595 __func__,
596 rte_atomic32_read(count));
597 rte_event_dev_dump(evdev, stdout);
598 cycles = new_cycles;
599 return -1;
600 }
601 }
602 rte_eal_mp_wait_lcore();
603 return 0;
604 }
605
606
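/*
 * Launch main_worker on the first available worker lcore (event port 0) and
 * worker on the remaining lcores (ports 1..nb_workers-1), sharing an atomic
 * counter of outstanding events, then wait for all of them to finish.
 */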
607 static inline int
608 launch_workers_and_wait(int (*main_worker)(void *),
609 int (*worker)(void *), uint32_t total_events,
610 uint8_t nb_workers, uint8_t sched_type)
611 {
612 uint8_t port = 0;
613 int w_lcore;
614 int ret;
615 struct test_core_param *param;
616 rte_atomic32_t atomic_total_events;
617 uint64_t dequeue_tmo_ticks;
618
619 if (!nb_workers)
620 return 0;
621
622 rte_atomic32_set(&atomic_total_events, total_events);
623 seqn_list_init();
624
625 param = malloc(sizeof(struct test_core_param) * nb_workers);
626 if (!param)
627 return -1;
628
629 ret = rte_event_dequeue_timeout_ticks(evdev,
630 rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
631 if (ret) {
632 free(param);
633 return -1;
634 }
635
636 param[0].total_events = &atomic_total_events;
637 param[0].sched_type = sched_type;
638 param[0].port = 0;
639 param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
640 rte_smp_wmb();
641
642 w_lcore = rte_get_next_lcore(
643 /* start core */ -1,
644 /* skip main */ 1,
645 /* wrap */ 0);
646 rte_eal_remote_launch(main_worker, ¶m[0], w_lcore);
647
648 for (port = 1; port < nb_workers; port++) {
649 param[port].total_events = &atomic_total_events;
650 param[port].sched_type = sched_type;
651 param[port].port = port;
652 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
653 rte_smp_wmb();
654 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
655 rte_eal_remote_launch(worker, ¶m[port], w_lcore);
656 }
657
658 ret = wait_workers_to_join(w_lcore, &atomic_total_events);
659 free(param);
660 return ret;
661 }
662
663 /*
664 * Generate a prescribed number of events and spread them across available
665 * queues. Dequeue the events through multiple ports and verify the enqueued
666 * event attributes
667 */
668 static int
669 test_multi_queue_enq_multi_port_deq(void)
670 {
671 const unsigned int total_events = MAX_EVENTS;
672 uint32_t nr_ports;
673 int ret;
674
675 ret = generate_random_events(total_events);
676 if (ret)
677 return -1;
678
679 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
680 RTE_EVENT_DEV_ATTR_PORT_COUNT,
681 &nr_ports), "Port count get failed");
682 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
683
684 if (!nr_ports) {
685 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
686 nr_ports, rte_lcore_count() - 1);
687 return 0;
688 }
689
690 return launch_workers_and_wait(worker_multi_port_fn,
691 worker_multi_port_fn, total_events,
692 nr_ports, 0xff /* invalid */);
693 }
694
695 static void
696 flush(uint8_t dev_id, struct rte_event event, void *arg)
697 {
698 unsigned int *count = arg;
699
700 RTE_SET_USED(dev_id);
701 if (event.event_type == RTE_EVENT_TYPE_CPU)
702 *count = *count + 1;
703
704 }
705
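/*
 * Register flush() as the device stop flush callback, stop the device with
 * events still queued and verify that every injected CPU event was reported
 * to the callback.
 */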
706 static int
707 test_dev_stop_flush(void)
708 {
709 unsigned int total_events = MAX_EVENTS, count = 0;
710 int ret;
711
712 ret = generate_random_events(total_events);
713 if (ret)
714 return -1;
715
716 ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
717 if (ret)
718 return -2;
719 rte_event_dev_stop(evdev);
720 ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
721 if (ret)
722 return -3;
723 RTE_TEST_ASSERT_EQUAL(total_events, count,
724 "count mismatch total_events=%d count=%d",
725 total_events, count);
726 return 0;
727 }
728
729 static int
730 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
731 struct rte_event *ev)
732 {
733 RTE_SET_USED(index);
734 RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
735 "queue mismatch enq=%d deq =%d",
736 port, ev->queue_id);
737 return 0;
738 }
739
740 /*
741 * Link queue x to port x and verify the link by checking that events
742 * dequeued on port x carry queue_id == x
743 */
744 static int
745 test_queue_to_port_single_link(void)
746 {
747 int i, nr_links, ret;
748
749 uint32_t port_count;
750 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
751 RTE_EVENT_DEV_ATTR_PORT_COUNT,
752 &port_count), "Port count get failed");
753
754 /* Unlink all connections that were created in eventdev_setup() */
755 for (i = 0; i < (int)port_count; i++) {
756 ret = rte_event_port_unlink(evdev, i, NULL, 0);
757 RTE_TEST_ASSERT(ret >= 0,
758 "Failed to unlink all queues port=%d", i);
759 }
760
761 uint32_t queue_count;
762 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
763 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
764 &queue_count), "Queue count get failed");
765
766 nr_links = RTE_MIN(port_count, queue_count);
767 const unsigned int total_events = MAX_EVENTS / nr_links;
768
769 /* Link queue x to port x and inject events to queue x through port x */
770 for (i = 0; i < nr_links; i++) {
771 uint8_t queue = (uint8_t)i;
772
773 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
774 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
775
776 ret = inject_events(
777 0x100 /*flow_id */,
778 RTE_EVENT_TYPE_CPU /* event_type */,
779 rte_rand() % 256 /* sub_event_type */,
780 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
781 queue /* queue */,
782 i /* port */,
783 total_events /* events */);
784 if (ret)
785 return -1;
786 }
787
788 /* Verify that the events were received from the correct queue */
789 for (i = 0; i < nr_links; i++) {
790 ret = consume_events(i /* port */, total_events,
791 validate_queue_to_port_single_link);
792 if (ret)
793 return -1;
794 }
795
796 return 0;
797 }
798
799 static int
800 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
801 struct rte_event *ev)
802 {
803 RTE_SET_USED(index);
804 RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
805 "queue mismatch enq=%d deq =%d",
806 port, ev->queue_id);
807 return 0;
808 }
809
810 /*
811 * Link all even-numbered queues to port 0 and all odd-numbered queues to
812 * port 1, then verify the link connections on dequeue
813 */
814 static int
815 test_queue_to_port_multi_link(void)
816 {
817 int ret, port0_events = 0, port1_events = 0;
818 uint8_t queue, port;
819 uint32_t nr_queues = 0;
820 uint32_t nr_ports = 0;
821
822 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
823 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
824 &nr_queues), "Queue count get failed");
825
829 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
830 RTE_EVENT_DEV_ATTR_PORT_COUNT,
831 &nr_ports), "Port count get failed");
832
833 if (nr_ports < 2) {
834 ssovf_log_dbg("%s: Not enough ports to test ports=%d",
835 __func__, nr_ports);
836 return 0;
837 }
838
839 /* Unlink all connections that were created in eventdev_setup() */
840 for (port = 0; port < nr_ports; port++) {
841 ret = rte_event_port_unlink(evdev, port, NULL, 0);
842 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
843 port);
844 }
845
846 const unsigned int total_events = MAX_EVENTS / nr_queues;
847
848 /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
849 for (queue = 0; queue < nr_queues; queue++) {
850 port = queue & 0x1;
851 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
852 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
853 queue, port);
854
855 ret = inject_events(
856 0x100 /*flow_id */,
857 RTE_EVENT_TYPE_CPU /* event_type */,
858 rte_rand() % 256 /* sub_event_type */,
859 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
860 queue /* queue */,
861 port /* port */,
862 total_events /* events */);
863 if (ret)
864 return -1;
865
866 if (port == 0)
867 port0_events += total_events;
868 else
869 port1_events += total_events;
870 }
871
872 ret = consume_events(0 /* port */, port0_events,
873 validate_queue_to_port_multi_link);
874 if (ret)
875 return -1;
876 ret = consume_events(1 /* port */, port1_events,
877 validate_queue_to_port_multi_link);
878 if (ret)
879 return -1;
880
881 return 0;
882 }
883
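/*
 * Two-stage pipeline keyed on sub_event_type: stage 0 events are forwarded
 * as stage 1 events (flow_id 0x2) with the sched type requested by the test,
 * and stage 1 events record their sequence number in seqn_list before being
 * released.
 */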
884 static int
885 worker_flow_based_pipeline(void *arg)
886 {
887 struct test_core_param *param = arg;
888 struct rte_event ev;
889 uint16_t valid_event;
890 uint8_t port = param->port;
891 uint8_t new_sched_type = param->sched_type;
892 rte_atomic32_t *total_events = param->total_events;
893 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
894
895 while (rte_atomic32_read(total_events) > 0) {
896 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
897 dequeue_tmo_ticks);
898 if (!valid_event)
899 continue;
900
901 /* Events from stage 0 */
902 if (ev.sub_event_type == 0) {
903 /* Move to atomic flow to maintain the ordering */
904 ev.flow_id = 0x2;
905 ev.event_type = RTE_EVENT_TYPE_CPU;
906 ev.sub_event_type = 1; /* stage 1 */
907 ev.sched_type = new_sched_type;
908 ev.op = RTE_EVENT_OP_FORWARD;
909 rte_event_enqueue_burst(evdev, port, &ev, 1);
910 } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
911 if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
912 rte_pktmbuf_free(ev.mbuf);
913 rte_atomic32_sub(total_events, 1);
914 } else {
915 ssovf_log_dbg("Failed to update seqn_list");
916 return -1;
917 }
918 } else {
919 ssovf_log_dbg("Invalid ev.sub_event_type = %d",
920 ev.sub_event_type);
921 return -1;
922 }
923 }
924 return 0;
925 }
926
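/*
 * Inject MAX_EVENTS stage 0 events with in_sched_type, run the flow based
 * two-stage pipeline on all worker ports with out_sched_type, and when the
 * combination guarantees ordering (non-parallel ingress forwarded to atomic)
 * verify the ingress order via seqn_list_check().
 */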
927 static int
928 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
929 uint8_t out_sched_type)
930 {
931 const unsigned int total_events = MAX_EVENTS;
932 uint32_t nr_ports;
933 int ret;
934
935 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
936 RTE_EVENT_DEV_ATTR_PORT_COUNT,
937 &nr_ports), "Port count get failed");
938 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
939
940 if (!nr_ports) {
941 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
942 nr_ports, rte_lcore_count() - 1);
943 return 0;
944 }
945
946 /* Inject total_events events with sequence numbers 0..total_events-1 */
947 ret = inject_events(
948 0x1 /*flow_id */,
949 RTE_EVENT_TYPE_CPU /* event_type */,
950 0 /* sub_event_type (stage 0) */,
951 in_sched_type,
952 0 /* queue */,
953 0 /* port */,
954 total_events /* events */);
955 if (ret)
956 return -1;
957
958 ret = launch_workers_and_wait(worker_flow_based_pipeline,
959 worker_flow_based_pipeline,
960 total_events, nr_ports, out_sched_type);
961 if (ret)
962 return -1;
963
964 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
965 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
966 /* Check whether the event order was maintained */
967 return seqn_list_check(total_events);
968 }
969 return 0;
970 }
971
972
973 /* Multi port ordered to atomic transaction */
974 static int
975 test_multi_port_flow_ordered_to_atomic(void)
976 {
977 /* Ingress event order test */
978 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
979 RTE_SCHED_TYPE_ATOMIC);
980 }
981
982 static int
983 test_multi_port_flow_ordered_to_ordered(void)
984 {
985 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
986 RTE_SCHED_TYPE_ORDERED);
987 }
988
989 static int
990 test_multi_port_flow_ordered_to_parallel(void)
991 {
992 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
993 RTE_SCHED_TYPE_PARALLEL);
994 }
995
996 static int
997 test_multi_port_flow_atomic_to_atomic(void)
998 {
999 /* Ingress event order test */
1000 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1001 RTE_SCHED_TYPE_ATOMIC);
1002 }
1003
1004 static int
1005 test_multi_port_flow_atomic_to_ordered(void)
1006 {
1007 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1008 RTE_SCHED_TYPE_ORDERED);
1009 }
1010
1011 static int
1012 test_multi_port_flow_atomic_to_parallel(void)
1013 {
1014 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1015 RTE_SCHED_TYPE_PARALLEL);
1016 }
1017
1018 static int
1019 test_multi_port_flow_parallel_to_atomic(void)
1020 {
1021 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1022 RTE_SCHED_TYPE_ATOMIC);
1023 }
1024
1025 static int
1026 test_multi_port_flow_parallel_to_ordered(void)
1027 {
1028 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1029 RTE_SCHED_TYPE_ORDERED);
1030 }
1031
1032 static int
1033 test_multi_port_flow_parallel_to_parallel(void)
1034 {
1035 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1036 RTE_SCHED_TYPE_PARALLEL);
1037 }
1038
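/*
 * Same two-stage pipeline as worker_flow_based_pipeline(), but the stage is
 * identified by the event queue (queue 0 -> queue 1) instead of the
 * sub_event_type.
 */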
1039 static int
1040 worker_group_based_pipeline(void *arg)
1041 {
1042 struct test_core_param *param = arg;
1043 struct rte_event ev;
1044 uint16_t valid_event;
1045 uint8_t port = param->port;
1046 uint8_t new_sched_type = param->sched_type;
1047 rte_atomic32_t *total_events = param->total_events;
1048 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1049
1050 while (rte_atomic32_read(total_events) > 0) {
1051 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1052 dequeue_tmo_ticks);
1053 if (!valid_event)
1054 continue;
1055
1056 /* Events from stage 0(group 0) */
1057 if (ev.queue_id == 0) {
1058 /* Move to atomic flow to maintain the ordering */
1059 ev.flow_id = 0x2;
1060 ev.event_type = RTE_EVENT_TYPE_CPU;
1061 ev.sched_type = new_sched_type;
1062 ev.queue_id = 1; /* Stage 1*/
1063 ev.op = RTE_EVENT_OP_FORWARD;
1064 rte_event_enqueue_burst(evdev, port, &ev, 1);
1065 } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
1066 if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
1067 rte_pktmbuf_free(ev.mbuf);
1068 rte_atomic32_sub(total_events, 1);
1069 } else {
1070 ssovf_log_dbg("Failed to update seqn_list");
1071 return -1;
1072 }
1073 } else {
1074 ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
1075 return -1;
1076 }
1077 }
1078
1079
1080 return 0;
1081 }
1082
1083 static int
1084 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1085 uint8_t out_sched_type)
1086 {
1087 const unsigned int total_events = MAX_EVENTS;
1088 uint32_t nr_ports;
1089 int ret;
1090
1091 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1092 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1093 &nr_ports), "Port count get failed");
1094
1095 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1096
1097 uint32_t queue_count;
1098 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1099 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1100 &queue_count), "Queue count get failed");
1101 if (queue_count < 2 || !nr_ports) {
1102 ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
1103 __func__, queue_count, nr_ports,
1104 rte_lcore_count() - 1);
1105 return 0;
1106 }
1107
1108 /* Inject total_events events with sequence numbers 0..total_events-1 */
1109 ret = inject_events(
1110 0x1 /*flow_id */,
1111 RTE_EVENT_TYPE_CPU /* event_type */,
1112 0 /* sub_event_type (stage 0) */,
1113 in_sched_type,
1114 0 /* queue */,
1115 0 /* port */,
1116 total_events /* events */);
1117 if (ret)
1118 return -1;
1119
1120 ret = launch_workers_and_wait(worker_group_based_pipeline,
1121 worker_group_based_pipeline,
1122 total_events, nr_ports, out_sched_type);
1123 if (ret)
1124 return -1;
1125
1126 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1127 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1128 /* Check whether the event order was maintained */
1129 return seqn_list_check(total_events);
1130 }
1131 return 0;
1132 }
1133
1134 static int
1135 test_multi_port_queue_ordered_to_atomic(void)
1136 {
1137 /* Ingress event order test */
1138 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1139 RTE_SCHED_TYPE_ATOMIC);
1140 }
1141
1142 static int
1143 test_multi_port_queue_ordered_to_ordered(void)
1144 {
1145 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1146 RTE_SCHED_TYPE_ORDERED);
1147 }
1148
1149 static int
1150 test_multi_port_queue_ordered_to_parallel(void)
1151 {
1152 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1153 RTE_SCHED_TYPE_PARALLEL);
1154 }
1155
1156 static int
1157 test_multi_port_queue_atomic_to_atomic(void)
1158 {
1159 /* Ingress event order test */
1160 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1161 RTE_SCHED_TYPE_ATOMIC);
1162 }
1163
1164 static int
1165 test_multi_port_queue_atomic_to_ordered(void)
1166 {
1167 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1168 RTE_SCHED_TYPE_ORDERED);
1169 }
1170
1171 static int
1172 test_multi_port_queue_atomic_to_parallel(void)
1173 {
1174 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1175 RTE_SCHED_TYPE_PARALLEL);
1176 }
1177
1178 static int
1179 test_multi_port_queue_parallel_to_atomic(void)
1180 {
1181 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1182 RTE_SCHED_TYPE_ATOMIC);
1183 }
1184
1185 static int
1186 test_multi_port_queue_parallel_to_ordered(void)
1187 {
1188 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1189 RTE_SCHED_TYPE_ORDERED);
1190 }
1191
1192 static int
1193 test_multi_port_queue_parallel_to_parallel(void)
1194 {
1195 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1196 RTE_SCHED_TYPE_PARALLEL);
1197 }
1198
1199 static int
1200 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1201 {
1202 struct test_core_param *param = arg;
1203 struct rte_event ev;
1204 uint16_t valid_event;
1205 uint8_t port = param->port;
1206 rte_atomic32_t *total_events = param->total_events;
1207
1208 while (rte_atomic32_read(total_events) > 0) {
1209 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1210 if (!valid_event)
1211 continue;
1212
1213 if (ev.sub_event_type == 255) { /* last stage */
1214 rte_pktmbuf_free(ev.mbuf);
1215 rte_atomic32_sub(total_events, 1);
1216 } else {
1217 ev.event_type = RTE_EVENT_TYPE_CPU;
1218 ev.sub_event_type++;
1219 ev.sched_type =
1220 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1221 ev.op = RTE_EVENT_OP_FORWARD;
1222 rte_event_enqueue_burst(evdev, port, &ev, 1);
1223 }
1224 }
1225 return 0;
1226 }
1227
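/*
 * Common launcher for the maximum-stages tests: inject MAX_EVENTS stage 0
 * events with a random sched type and run the supplied worker on all
 * available worker ports until every event has passed through the final
 * stage.
 */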
1228 static int
1229 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1230 {
1231 uint32_t nr_ports;
1232 int ret;
1233
1234 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1235 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1236 &nr_ports), "Port count get failed");
1237 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1238
1239 if (!nr_ports) {
1240 ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
1241 nr_ports, rte_lcore_count() - 1);
1242 return 0;
1243 }
1244
1245 /* Inject MAX_EVENTS events with sequence numbers 0..MAX_EVENTS-1 */
1246 ret = inject_events(
1247 0x1 /*flow_id */,
1248 RTE_EVENT_TYPE_CPU /* event_type */,
1249 0 /* sub_event_type (stage 0) */,
1250 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1251 0 /* queue */,
1252 0 /* port */,
1253 MAX_EVENTS /* events */);
1254 if (ret)
1255 return -1;
1256
1257 return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1258 0xff /* invalid */);
1259 }
1260
1261 /* Flow based pipeline with the maximum number of stages and random sched types */
1262 static int
1263 test_multi_port_flow_max_stages_random_sched_type(void)
1264 {
1265 return launch_multi_port_max_stages_random_sched_type(
1266 worker_flow_based_pipeline_max_stages_rand_sched_type);
1267 }
1268
1269 static int
1270 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1271 {
1272 struct test_core_param *param = arg;
1273 struct rte_event ev;
1274 uint16_t valid_event;
1275 uint8_t port = param->port;
1276 uint32_t queue_count;
1277 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1278 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1279 &queue_count), "Queue count get failed");
1280 uint8_t nr_queues = queue_count;
1281 rte_atomic32_t *total_events = param->total_events;
1282
1283 while (rte_atomic32_read(total_events) > 0) {
1284 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1285 if (!valid_event)
1286 continue;
1287
1288 if (ev.queue_id == nr_queues - 1) { /* last stage */
1289 rte_pktmbuf_free(ev.mbuf);
1290 rte_atomic32_sub(total_events, 1);
1291 } else {
1292 ev.event_type = RTE_EVENT_TYPE_CPU;
1293 ev.queue_id++;
1294 ev.sched_type =
1295 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1296 ev.op = RTE_EVENT_OP_FORWARD;
1297 rte_event_enqueue_burst(evdev, port, &ev, 1);
1298 }
1299 }
1300 return 0;
1301 }
1302
1303 /* Queue based pipeline with the maximum number of stages and random sched types */
1304 static int
1305 test_multi_port_queue_max_stages_random_sched_type(void)
1306 {
1307 return launch_multi_port_max_stages_random_sched_type(
1308 worker_queue_based_pipeline_max_stages_rand_sched_type);
1309 }
1310
1311 static int
1312 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1313 {
1314 struct test_core_param *param = arg;
1315 struct rte_event ev;
1316 uint16_t valid_event;
1317 uint8_t port = param->port;
1318 uint32_t queue_count;
1319 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1320 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1321 &queue_count), "Queue count get failed");
1322 uint8_t nr_queues = queue_count;
1323 rte_atomic32_t *total_events = param->total_events;
1324
1325 while (rte_atomic32_read(total_events) > 0) {
1326 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1327 if (!valid_event)
1328 continue;
1329
1330 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1331 rte_pktmbuf_free(ev.mbuf);
1332 rte_atomic32_sub(total_events, 1);
1333 } else {
1334 ev.event_type = RTE_EVENT_TYPE_CPU;
1335 ev.queue_id++;
1336 ev.sub_event_type = rte_rand() % 256;
1337 ev.sched_type =
1338 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1339 ev.op = RTE_EVENT_OP_FORWARD;
1340 rte_event_enqueue_burst(evdev, port, &ev, 1);
1341 }
1342 }
1343 return 0;
1344 }
1345
1346 /* Queue and flow based pipeline with the maximum number of stages and random sched types */
1347 static int
1348 test_multi_port_mixed_max_stages_random_sched_type(void)
1349 {
1350 return launch_multi_port_max_stages_random_sched_type(
1351 worker_mixed_pipeline_max_stages_rand_sched_type);
1352 }
1353
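/*
 * Producer used by the ingress order tests: enqueues NUM_PACKETS new
 * ORDERED events on a single flow and queue 0, tagging each mbuf with an
 * increasing sequence number for later verification.
 */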
1354 static int
1355 worker_ordered_flow_producer(void *arg)
1356 {
1357 struct test_core_param *param = arg;
1358 uint8_t port = param->port;
1359 struct rte_mbuf *m;
1360 int counter = 0;
1361
1362 while (counter < NUM_PACKETS) {
1363 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1364 if (m == NULL)
1365 continue;
1366
1367 *rte_event_pmd_selftest_seqn(m) = counter++;
1368
1369 struct rte_event ev = {.event = 0, .u64 = 0};
1370
1371 ev.flow_id = 0x1; /* Generate a fat flow */
1372 ev.sub_event_type = 0;
1373 /* Inject the new event */
1374 ev.op = RTE_EVENT_OP_NEW;
1375 ev.event_type = RTE_EVENT_TYPE_CPU;
1376 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1377 ev.queue_id = 0;
1378 ev.mbuf = m;
1379 rte_event_enqueue_burst(evdev, port, &ev, 1);
1380 }
1381
1382 return 0;
1383 }
1384
1385 static inline int
1386 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1387 {
1388 uint32_t nr_ports;
1389
1390 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1391 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1392 &nr_ports), "Port count get failed");
1393 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1394
1395 if (rte_lcore_count() < 3 || nr_ports < 2) {
1396 ssovf_log_dbg("### Not enough cores for %s test.", __func__);
1397 return 0;
1398 }
1399
1400 launch_workers_and_wait(worker_ordered_flow_producer, fn,
1401 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
1402 /* Check whether the event order was maintained */
1403 return seqn_list_check(NUM_PACKETS);
1404 }
1405
1406 /* Flow based producer/consumer ingress order test */
1407 static int
1408 test_flow_producer_consumer_ingress_order_test(void)
1409 {
1410 return test_producer_consumer_ingress_order_test(
1411 worker_flow_based_pipeline);
1412 }
1413
1414 /* Queue based producer/consumer ingress order test */
1415 static int
1416 test_queue_producer_consumer_ingress_order_test(void)
1417 {
1418 return test_producer_consumer_ingress_order_test(
1419 worker_group_based_pipeline);
1420 }
1421
1422 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1423 int (*test)(void), const char *name)
1424 {
1425 if (setup() < 0) {
1426 ssovf_log_selftest("Error setting up test %s", name);
1427 unsupported++;
1428 } else {
1429 if (test() < 0) {
1430 failed++;
1431 ssovf_log_selftest("%s Failed", name);
1432 } else {
1433 passed++;
1434 ssovf_log_selftest("%s Passed", name);
1435 }
1436 }
1437
1438 total++;
1439 tdown();
1440 }
1441
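/*
 * Self test entry point: runs every test case through OCTEONTX_TEST_RUN(),
 * prints a summary and returns -1 if any case failed.
 *
 * Note (assumption, not shown in this file): in DPDK this function is
 * normally registered as the PMD's dev_selftest operation and reached
 * through the public API, roughly:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_octeontx");
 *	if (dev_id >= 0)
 *		rte_event_dev_selftest(dev_id);
 */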
1442 int
1443 test_eventdev_octeontx(void)
1444 {
1445 testsuite_setup();
1446
1447 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1448 test_simple_enqdeq_ordered);
1449 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1450 test_simple_enqdeq_atomic);
1451 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1452 test_simple_enqdeq_parallel);
1453 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1454 test_multi_queue_enq_single_port_deq);
1455 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1456 test_dev_stop_flush);
1457 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1458 test_multi_queue_enq_multi_port_deq);
1459 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1460 test_queue_to_port_single_link);
1461 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1462 test_queue_to_port_multi_link);
1463 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1464 test_multi_port_flow_ordered_to_atomic);
1465 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1466 test_multi_port_flow_ordered_to_ordered);
1467 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1468 test_multi_port_flow_ordered_to_parallel);
1469 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1470 test_multi_port_flow_atomic_to_atomic);
1471 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1472 test_multi_port_flow_atomic_to_ordered);
1473 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1474 test_multi_port_flow_atomic_to_parallel);
1475 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1476 test_multi_port_flow_parallel_to_atomic);
1477 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1478 test_multi_port_flow_parallel_to_ordered);
1479 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1480 test_multi_port_flow_parallel_to_parallel);
1481 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1482 test_multi_port_queue_ordered_to_atomic);
1483 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1484 test_multi_port_queue_ordered_to_ordered);
1485 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1486 test_multi_port_queue_ordered_to_parallel);
1487 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1488 test_multi_port_queue_atomic_to_atomic);
1489 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1490 test_multi_port_queue_atomic_to_ordered);
1491 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1492 test_multi_port_queue_atomic_to_parallel);
1493 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1494 test_multi_port_queue_parallel_to_atomic);
1495 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1496 test_multi_port_queue_parallel_to_ordered);
1497 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1498 test_multi_port_queue_parallel_to_parallel);
1499 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1500 test_multi_port_flow_max_stages_random_sched_type);
1501 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1502 test_multi_port_queue_max_stages_random_sched_type);
1503 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1504 test_multi_port_mixed_max_stages_random_sched_type);
1505 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1506 test_flow_producer_consumer_ingress_order_test);
1507 OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
1508 test_queue_producer_consumer_ingress_order_test);
1509 OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1510 test_multi_queue_priority);
1511 OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1512 test_multi_port_flow_ordered_to_atomic);
1513 OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1514 test_multi_port_queue_ordered_to_atomic);
1515
1516 ssovf_log_selftest("Total tests : %d", total);
1517 ssovf_log_selftest("Passed : %d", passed);
1518 ssovf_log_selftest("Failed : %d", failed);
1519 ssovf_log_selftest("Not supported : %d", unsupported);
1520
1521 testsuite_teardown();
1522
1523 if (failed)
1524 return -1;
1525
1526 return 0;
1527 }
1528