/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Ericsson AB
 */

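/*
 * Unit tests for the dispatcher library, exercised on top of the DSW
 * ("distributed software") eventdev.
 */
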
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_dispatcher.h>
#include <rte_eventdev.h>
#include <rte_random.h>
#include <rte_service.h>
#include <rte_stdatomic.h>

#include "test.h"

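/*
 * Each worker lcore is served by a dedicated event port. The last
 * port is reserved for the driver, i.e., the lcore enqueuing new
 * events into the event device.
 */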
#define NUM_WORKERS 3
#define NUM_PORTS (NUM_WORKERS + 1)
#define WORKER_PORT_ID(worker_idx) (worker_idx)
#define DRIVER_PORT_ID (NUM_PORTS - 1)

#define NUM_SERVICE_CORES NUM_WORKERS
#define MIN_LCORES (NUM_SERVICE_CORES + 1)

/* Eventdev */
#define NUM_QUEUES 8
#define LAST_QUEUE_ID (NUM_QUEUES - 1)
#define MAX_EVENTS 4096
#define NEW_EVENT_THRESHOLD (MAX_EVENTS / 2)
#define DEQUEUE_BURST_SIZE 32
#define ENQUEUE_BURST_SIZE 32

#define NUM_EVENTS 10000000
#define NUM_FLOWS 16

#define DSW_VDEV "event_dsw0"

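/*
 * Per-queue test state, tracking the next expected sequence number
 * for each flow.
 */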
struct app_queue {
	uint8_t queue_id;
	uint64_t sn[NUM_FLOWS];
	int dispatcher_reg_id;
};

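/*
 * Counts callback invocations, but only those arriving on the
 * expected event device and (per-lcore) event port.
 */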
struct cb_count {
	uint8_t expected_event_dev_id;
	uint8_t expected_event_port_id[RTE_MAX_LCORE];
	RTE_ATOMIC(int) count;
};

struct test_app {
	uint8_t event_dev_id;
	struct rte_dispatcher *dispatcher;
	uint32_t dispatcher_service_id;

	unsigned int service_lcores[NUM_SERVICE_CORES];

	int never_match_reg_id;
	uint64_t never_match_count;
	struct cb_count never_process_count;

	struct app_queue queues[NUM_QUEUES];

	int finalize_reg_id;
	struct cb_count finalize_count;

	bool running;

	RTE_ATOMIC(int) completed_events;
	RTE_ATOMIC(int) errors;
};

static struct test_app *
test_app_create(void)
{
	int i;
	struct test_app *app;

	app = calloc(1, sizeof(struct test_app));

	if (app == NULL)
		return NULL;

	for (i = 0; i < NUM_QUEUES; i++)
		app->queues[i].queue_id = i;

	return app;
}

static void
test_app_free(struct test_app *app)
{
	free(app);
}

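/*
 * Instantiate the DSW eventdev. If the driver is not available, the
 * test is skipped.
 */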
static int
test_app_create_vdev(struct test_app *app)
{
	int rc;

	rc = rte_vdev_init(DSW_VDEV, NULL);
	if (rc < 0)
		return TEST_SKIPPED;

	rc = rte_event_dev_get_dev_id(DSW_VDEV);

	app->event_dev_id = (uint8_t)rc;

	return TEST_SUCCESS;
}

static int
test_app_destroy_vdev(struct test_app *app)
{
	int rc;

	rc = rte_event_dev_close(app->event_dev_id);
	TEST_ASSERT_SUCCESS(rc, "Error while closing event device");

	rc = rte_vdev_uninit(DSW_VDEV);
	TEST_ASSERT_SUCCESS(rc, "Error while uninitializing virtual device");

	return TEST_SUCCESS;
}

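/*
 * Configure the event device with NUM_QUEUES atomic queues and one
 * port per worker (plus the driver port), and link each worker port
 * to all queues (a NULL queue list links them all).
 */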
static int
test_app_setup_event_dev(struct test_app *app)
{
	int rc;
	int i;

	rc = test_app_create_vdev(app);
	if (rc != TEST_SUCCESS)
		return rc;

	struct rte_event_dev_config config = {
		.nb_event_queues = NUM_QUEUES,
		.nb_event_ports = NUM_PORTS,
		.nb_events_limit = MAX_EVENTS,
		.nb_event_queue_flows = 64,
		.nb_event_port_dequeue_depth = DEQUEUE_BURST_SIZE,
		.nb_event_port_enqueue_depth = ENQUEUE_BURST_SIZE
	};

	rc = rte_event_dev_configure(app->event_dev_id, &config);

	TEST_ASSERT_SUCCESS(rc, "Unable to configure event device");

	struct rte_event_queue_conf queue_config = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.nb_atomic_flows = 64
	};

	for (i = 0; i < NUM_QUEUES; i++) {
		uint8_t queue_id = i;

		rc = rte_event_queue_setup(app->event_dev_id, queue_id,
					   &queue_config);

		TEST_ASSERT_SUCCESS(rc, "Unable to setup queue %d", queue_id);
	}

	struct rte_event_port_conf port_config = {
		.new_event_threshold = NEW_EVENT_THRESHOLD,
		.dequeue_depth = DEQUEUE_BURST_SIZE,
		.enqueue_depth = ENQUEUE_BURST_SIZE
	};

	for (i = 0; i < NUM_PORTS; i++) {
		uint8_t event_port_id = i;

		rc = rte_event_port_setup(app->event_dev_id, event_port_id,
					  &port_config);
		TEST_ASSERT_SUCCESS(rc, "Failed to create event port %d",
				    event_port_id);

		if (event_port_id == DRIVER_PORT_ID)
			continue;

		rc = rte_event_port_link(app->event_dev_id, event_port_id,
					 NULL, NULL, 0);

		TEST_ASSERT_EQUAL(rc, NUM_QUEUES, "Failed to link port %d",
				  event_port_id);
	}

	return TEST_SUCCESS;
}

static int
test_app_teardown_event_dev(struct test_app *app)
{
	return test_app_destroy_vdev(app);
}

static int
test_app_start_event_dev(struct test_app *app)
{
	int rc;

	rc = rte_event_dev_start(app->event_dev_id);
	TEST_ASSERT_SUCCESS(rc, "Unable to start event device");

	return TEST_SUCCESS;
}

static void
test_app_stop_event_dev(struct test_app *app)
{
	rte_event_dev_stop(app->event_dev_id);
}

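/*
 * Create the dispatcher, enable statistics on its service, and mark
 * the service as runnable. The mapping to service lcores happens
 * later, in the service core setup.
 */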
static int
test_app_create_dispatcher(struct test_app *app)
{
	int rc;

	app->dispatcher = rte_dispatcher_create(app->event_dev_id);

	TEST_ASSERT(app->dispatcher != NULL, "Unable to create event "
		    "dispatcher");

	app->dispatcher_service_id =
		rte_dispatcher_service_id_get(app->dispatcher);

	rc = rte_service_set_stats_enable(app->dispatcher_service_id, 1);

	TEST_ASSERT_SUCCESS(rc, "Unable to enable event dispatcher service "
			    "stats");

	rc = rte_service_runstate_set(app->dispatcher_service_id, 1);

	TEST_ASSERT_SUCCESS(rc, "Unable to set dispatcher service runstate");

	return TEST_SUCCESS;
}

static int
test_app_free_dispatcher(struct test_app *app)
{
	int rc;

	rc = rte_service_runstate_set(app->dispatcher_service_id, 0);
	TEST_ASSERT_SUCCESS(rc, "Error disabling dispatcher service");

	rc = rte_dispatcher_free(app->dispatcher);
	TEST_ASSERT_SUCCESS(rc, "Error freeing dispatcher");

	return TEST_SUCCESS;
}

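/*
 * Bind each worker's event port to its service lcore, and record
 * which port each lcore is expected to receive events on.
 */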
static int
test_app_bind_ports(struct test_app *app)
{
	int i;

	app->never_process_count.expected_event_dev_id =
		app->event_dev_id;
	app->finalize_count.expected_event_dev_id =
		app->event_dev_id;

	for (i = 0; i < NUM_WORKERS; i++) {
		unsigned int lcore_id = app->service_lcores[i];
		uint8_t port_id = WORKER_PORT_ID(i);

		int rc = rte_dispatcher_bind_port_to_lcore(
			app->dispatcher, port_id, DEQUEUE_BURST_SIZE, 0,
			lcore_id
		);

		TEST_ASSERT_SUCCESS(rc, "Unable to bind event device port %d "
				    "to lcore %d", port_id, lcore_id);

		app->never_process_count.expected_event_port_id[lcore_id] =
			port_id;
		app->finalize_count.expected_event_port_id[lcore_id] = port_id;
	}

	return TEST_SUCCESS;
}

static int
test_app_unbind_ports(struct test_app *app)
{
	int i;

	for (i = 0; i < NUM_WORKERS; i++) {
		unsigned int lcore_id = app->service_lcores[i];

		int rc = rte_dispatcher_unbind_port_from_lcore(
			app->dispatcher,
			WORKER_PORT_ID(i),
			lcore_id
		);

		TEST_ASSERT_SUCCESS(rc, "Unable to unbind event device port %d "
				    "from lcore %d", WORKER_PORT_ID(i),
				    lcore_id);
	}

	return TEST_SUCCESS;
}

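/* Dispatcher match callback, selecting events destined for one particular
 * queue.
 */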
static bool
match_queue(const struct rte_event *event, void *cb_data)
{
	uintptr_t queue_id = (uintptr_t)cb_data;

	return event->queue_id == queue_id;
}

static int
test_app_get_worker_index(struct test_app *app, unsigned int lcore_id)
{
	int i;

	for (i = 0; i < NUM_SERVICE_CORES; i++)
		if (app->service_lcores[i] == lcore_id)
			return i;

	return -1;
}

static int
test_app_get_worker_port(struct test_app *app, unsigned int lcore_id)
{
	int worker;

	worker = test_app_get_worker_index(app, lcore_id);

	if (worker < 0)
		return -1;

	return WORKER_PORT_ID(worker);
}

static void
test_app_queue_note_error(struct test_app *app)
{
	rte_atomic_fetch_add_explicit(&app->errors, 1, rte_memory_order_relaxed);
}

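/*
 * Per-queue process callback. Verifies that events arrive on the
 * expected device and port, and in per-flow sequence number order,
 * and then forwards them to the next queue. Events reaching the last
 * queue are counted as completed.
 */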
static void
test_app_process_queue(uint8_t p_event_dev_id, uint8_t p_event_port_id,
		       struct rte_event *in_events, uint16_t num,
		       void *cb_data)
{
	struct app_queue *app_queue = cb_data;
	struct test_app *app = container_of(app_queue, struct test_app,
					    queues[app_queue->queue_id]);
	unsigned int lcore_id = rte_lcore_id();
	bool intermediate_queue = app_queue->queue_id != LAST_QUEUE_ID;
	int event_port_id;
	uint16_t i;
	struct rte_event out_events[num];

	event_port_id = test_app_get_worker_port(app, lcore_id);

	if (event_port_id < 0 || p_event_dev_id != app->event_dev_id ||
	    p_event_port_id != event_port_id) {
		test_app_queue_note_error(app);
		return;
	}

	for (i = 0; i < num; i++) {
		const struct rte_event *in_event = &in_events[i];
		struct rte_event *out_event = &out_events[i];
		uint64_t sn = in_event->u64;
		uint64_t expected_sn;

		if (in_event->queue_id != app_queue->queue_id) {
			test_app_queue_note_error(app);
			return;
		}

		expected_sn = app_queue->sn[in_event->flow_id]++;

		if (expected_sn != sn) {
			test_app_queue_note_error(app);
			return;
		}

		if (intermediate_queue)
			*out_event = (struct rte_event) {
				.queue_id = in_event->queue_id + 1,
				.flow_id = in_event->flow_id,
				.sched_type = RTE_SCHED_TYPE_ATOMIC,
				.op = RTE_EVENT_OP_FORWARD,
				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
				.u64 = sn
			};
	}

	if (intermediate_queue) {
		uint16_t n = 0;

		do {
			n += rte_event_enqueue_forward_burst(p_event_dev_id,
							     p_event_port_id,
							     out_events + n,
							     num - n);
		} while (n != num);
	} else
		rte_atomic_fetch_add_explicit(&app->completed_events, num,
					      rte_memory_order_relaxed);
}

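/*
 * A match callback which never matches, paired with a process
 * callback which thus never should be dispatched to. Both count
 * their invocations.
 */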
static bool
never_match(const struct rte_event *event __rte_unused, void *cb_data)
{
	uint64_t *count = cb_data;

	(*count)++;

	return false;
}

static void
test_app_never_process(uint8_t event_dev_id, uint8_t event_port_id,
		       struct rte_event *in_events __rte_unused, uint16_t num,
		       void *cb_data)
{
	struct cb_count *count = cb_data;
	unsigned int lcore_id = rte_lcore_id();

	if (event_dev_id == count->expected_event_dev_id &&
	    event_port_id == count->expected_event_port_id[lcore_id])
		rte_atomic_fetch_add_explicit(&count->count, num,
					      rte_memory_order_relaxed);
}

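/*
 * Finalize callback, invoked by the dispatcher once per dequeued
 * batch of events.
 */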
static void
finalize(uint8_t event_dev_id, uint8_t event_port_id, void *cb_data)
{
	struct cb_count *count = cb_data;
	unsigned int lcore_id = rte_lcore_id();

	if (event_dev_id == count->expected_event_dev_id &&
	    event_port_id == count->expected_event_port_id[lcore_id])
		rte_atomic_fetch_add_explicit(&count->count, 1,
					      rte_memory_order_relaxed);
}

static int
test_app_register_callbacks(struct test_app *app)
{
	int i;

	app->never_match_reg_id =
		rte_dispatcher_register(app->dispatcher, never_match,
					&app->never_match_count,
					test_app_never_process,
					&app->never_process_count);

	TEST_ASSERT(app->never_match_reg_id >= 0, "Unable to register "
		    "never-match handler");

	for (i = 0; i < NUM_QUEUES; i++) {
		struct app_queue *app_queue = &app->queues[i];
		uintptr_t queue_id = app_queue->queue_id;
		int reg_id;

		reg_id = rte_dispatcher_register(app->dispatcher,
						 match_queue, (void *)queue_id,
						 test_app_process_queue,
						 app_queue);

		TEST_ASSERT(reg_id >= 0, "Unable to register consumer "
			    "callback for queue %d", i);

		app_queue->dispatcher_reg_id = reg_id;
	}

	app->finalize_reg_id =
		rte_dispatcher_finalize_register(app->dispatcher,
						 finalize,
						 &app->finalize_count);
	TEST_ASSERT_SUCCESS(app->finalize_reg_id, "Error registering "
			    "finalize callback");

	return TEST_SUCCESS;
}

static int
test_app_unregister_callback(struct test_app *app, uint8_t queue_id)
{
	int reg_id = app->queues[queue_id].dispatcher_reg_id;
	int rc;

	if (reg_id < 0) /* unregistered already */
		return 0;

	rc = rte_dispatcher_unregister(app->dispatcher, reg_id);

	TEST_ASSERT_SUCCESS(rc, "Unable to unregister consumer "
			    "callback for queue %d", queue_id);

	app->queues[queue_id].dispatcher_reg_id = -1;

	return TEST_SUCCESS;
}

static int
test_app_unregister_callbacks(struct test_app *app)
{
	int i;
	int rc;

	if (app->never_match_reg_id >= 0) {
		rc = rte_dispatcher_unregister(app->dispatcher,
					       app->never_match_reg_id);

		TEST_ASSERT_SUCCESS(rc, "Unable to unregister never-match "
				    "handler");
		app->never_match_reg_id = -1;
	}

	for (i = 0; i < NUM_QUEUES; i++) {
		rc = test_app_unregister_callback(app, i);
		if (rc != TEST_SUCCESS)
			return rc;
	}

	if (app->finalize_reg_id >= 0) {
		rc = rte_dispatcher_finalize_unregister(
			app->dispatcher, app->finalize_reg_id
		);
		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalize "
				    "callback");
		app->finalize_reg_id = -1;
	}

	return TEST_SUCCESS;
}

static void
test_app_start_dispatcher(struct test_app *app)
{
	rte_dispatcher_start(app->dispatcher);
}

static void
test_app_stop_dispatcher(struct test_app *app)
{
	rte_dispatcher_stop(app->dispatcher);
}

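/*
 * Reset the dispatcher statistics and verify that all counters read
 * back as zero. The stats struct is poisoned beforehand, to make
 * sure the get operation actually writes every field.
 */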
static int
test_app_reset_dispatcher_stats(struct test_app *app)
{
	struct rte_dispatcher_stats stats;

	rte_dispatcher_stats_reset(app->dispatcher);

	memset(&stats, 0xff, sizeof(stats));

	rte_dispatcher_stats_get(app->dispatcher, &stats);

	TEST_ASSERT_EQUAL(stats.poll_count, 0, "Poll count not zero");
	TEST_ASSERT_EQUAL(stats.ev_batch_count, 0, "Batch count not zero");
	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0, "Dispatch count "
			  "not zero");
	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count not zero");

	return TEST_SUCCESS;
}

static int
test_app_setup_service_core(struct test_app *app, unsigned int lcore_id)
{
	int rc;

	rc = rte_service_lcore_add(lcore_id);
	TEST_ASSERT_SUCCESS(rc, "Unable to make lcore %d an event dispatcher "
			    "service core", lcore_id);

	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 1);
	TEST_ASSERT_SUCCESS(rc, "Unable to map event dispatcher service");

	return TEST_SUCCESS;
}

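/*
 * Turn the first NUM_SERVICE_CORES lcores (the main lcore excluded)
 * into service cores, each mapped to the dispatcher service.
 */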
static int
test_app_setup_service_cores(struct test_app *app)
{
	int i;
	int lcore_id = -1;

	for (i = 0; i < NUM_SERVICE_CORES; i++) {
		lcore_id = rte_get_next_lcore(lcore_id, 1, 0);

		app->service_lcores[i] = lcore_id;
	}

	for (i = 0; i < NUM_SERVICE_CORES; i++) {
		int rc;

		rc = test_app_setup_service_core(app, app->service_lcores[i]);
		if (rc != TEST_SUCCESS)
			return rc;
	}

	return TEST_SUCCESS;
}

static int
test_app_teardown_service_core(struct test_app *app, unsigned int lcore_id)
{
	int rc;

	rc = rte_service_map_lcore_set(app->dispatcher_service_id, lcore_id, 0);
	TEST_ASSERT_SUCCESS(rc, "Unable to unmap event dispatcher service");

	rc = rte_service_lcore_del(lcore_id);
	TEST_ASSERT_SUCCESS(rc, "Unable to change role of service lcore %d",
			    lcore_id);

	return TEST_SUCCESS;
}

static int
test_app_teardown_service_cores(struct test_app *app)
{
	int i;

	for (i = 0; i < NUM_SERVICE_CORES; i++) {
		unsigned int lcore_id = app->service_lcores[i];
		int rc;

		rc = test_app_teardown_service_core(app, lcore_id);
		if (rc != TEST_SUCCESS)
			return rc;
	}

	return TEST_SUCCESS;
}

static int
test_app_start_service_cores(struct test_app *app)
{
	int i;

	for (i = 0; i < NUM_SERVICE_CORES; i++) {
		unsigned int lcore_id = app->service_lcores[i];
		int rc;

		rc = rte_service_lcore_start(lcore_id);
		TEST_ASSERT_SUCCESS(rc, "Unable to start service lcore %d",
				    lcore_id);
	}

	return TEST_SUCCESS;
}

static int
test_app_stop_service_cores(struct test_app *app)
{
	int i;

	for (i = 0; i < NUM_SERVICE_CORES; i++) {
		unsigned int lcore_id = app->service_lcores[i];
		int rc;

		rc = rte_service_lcore_stop(lcore_id);
		TEST_ASSERT_SUCCESS(rc, "Unable to stop service lcore %d",
				    lcore_id);
	}

	return TEST_SUCCESS;
}

static int
test_app_start(struct test_app *app)
{
	int rc;

	rc = test_app_start_event_dev(app);
	if (rc != TEST_SUCCESS)
		return rc;

	rc = test_app_start_service_cores(app);
	if (rc != TEST_SUCCESS)
		return rc;

	test_app_start_dispatcher(app);

	app->running = true;

	return TEST_SUCCESS;
}

static int
test_app_stop(struct test_app *app)
{
	int rc;

	test_app_stop_dispatcher(app);

	rc = test_app_stop_service_cores(app);
	if (rc != TEST_SUCCESS)
		return rc;

	test_app_stop_event_dev(app);

	app->running = false;

	return TEST_SUCCESS;
}

static struct test_app *test_app;

static int
test_setup(void)
{
	int rc;

	if (rte_lcore_count() < MIN_LCORES) {
		printf("Not enough cores for dispatcher_autotest; expecting at "
		       "least %d.\n", MIN_LCORES);
		return TEST_SKIPPED;
	}

	test_app = test_app_create();
	TEST_ASSERT(test_app != NULL, "Unable to allocate memory");

	rc = test_app_setup_event_dev(test_app);
	if (rc != TEST_SUCCESS)
		goto err_free_app;

	rc = test_app_create_dispatcher(test_app);
	if (rc != TEST_SUCCESS)
		goto err_teardown_event_dev;

	rc = test_app_setup_service_cores(test_app);
	if (rc != TEST_SUCCESS)
		goto err_free_dispatcher;

	rc = test_app_register_callbacks(test_app);
	if (rc != TEST_SUCCESS)
		goto err_teardown_service_cores;

	rc = test_app_bind_ports(test_app);
	if (rc != TEST_SUCCESS)
		goto err_unregister_callbacks;

	return TEST_SUCCESS;

err_unregister_callbacks:
	test_app_unregister_callbacks(test_app);
err_teardown_service_cores:
	test_app_teardown_service_cores(test_app);
err_free_dispatcher:
	test_app_free_dispatcher(test_app);
err_teardown_event_dev:
	test_app_teardown_event_dev(test_app);
err_free_app:
	test_app_free(test_app);

	test_app = NULL;

	return rc;
}

static void
test_teardown(void)
{
	if (test_app == NULL)
		return;

	if (test_app->running)
		test_app_stop(test_app);

	test_app_teardown_service_cores(test_app);

	test_app_unregister_callbacks(test_app);

	test_app_unbind_ports(test_app);

	test_app_free_dispatcher(test_app);

	test_app_teardown_event_dev(test_app);

	test_app_free(test_app);

	test_app = NULL;
}

static int
test_app_get_completed_events(struct test_app *app)
{
	return rte_atomic_load_explicit(&app->completed_events,
					rte_memory_order_relaxed);
}

static int
test_app_get_errors(struct test_app *app)
{
	return rte_atomic_load_explicit(&app->errors, rte_memory_order_relaxed);
}

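/*
 * Basic test: enqueue NUM_EVENTS new events, spread across NUM_FLOWS
 * flows, onto the first queue, and have them traverse all queues in
 * order. Then verify the dispatcher statistics and the callback
 * counts.
 */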
static int
test_basic(void)
{
	int rc;
	int i;

	rc = test_app_start(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	uint64_t sns[NUM_FLOWS] = { 0 };

	for (i = 0; i < NUM_EVENTS;) {
		struct rte_event events[ENQUEUE_BURST_SIZE];
		int left;
		int batch_size;
		int j;
		uint16_t n = 0;

		batch_size = 1 + rte_rand_max(ENQUEUE_BURST_SIZE);
		left = NUM_EVENTS - i;

		batch_size = RTE_MIN(left, batch_size);

		for (j = 0; j < batch_size; j++) {
			struct rte_event *event = &events[j];
			uint64_t sn;
			uint32_t flow_id;

			flow_id = rte_rand_max(NUM_FLOWS);

			sn = sns[flow_id]++;

			*event = (struct rte_event) {
				.queue_id = 0,
				.flow_id = flow_id,
				.sched_type = RTE_SCHED_TYPE_ATOMIC,
				.op = RTE_EVENT_OP_NEW,
				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
				.u64 = sn
			};
		}

		while (n < batch_size)
			n += rte_event_enqueue_new_burst(test_app->event_dev_id,
							 DRIVER_PORT_ID,
							 events + n,
							 batch_size - n);

		i += batch_size;
	}

	while (test_app_get_completed_events(test_app) != NUM_EVENTS)
		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);

	rc = test_app_get_errors(test_app);
	TEST_ASSERT(rc == 0, "%d errors occurred", rc);

	rc = test_app_stop(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	struct rte_dispatcher_stats stats;
	rte_dispatcher_stats_get(test_app->dispatcher, &stats);

	TEST_ASSERT_EQUAL(stats.ev_drop_count, 0, "Drop count is not zero");
	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, NUM_EVENTS * NUM_QUEUES,
			  "Invalid dispatch count");
	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");

	TEST_ASSERT_EQUAL(test_app->never_process_count.count, 0,
			  "Never-match handler's process function has "
			  "been called");

	int finalize_count =
		rte_atomic_load_explicit(&test_app->finalize_count.count,
					 rte_memory_order_relaxed);

	TEST_ASSERT(finalize_count > 0, "Finalize count is zero");
	TEST_ASSERT(finalize_count <= (int)stats.ev_dispatch_count,
		    "Finalize count larger than event count");

	TEST_ASSERT_EQUAL(finalize_count, (int)stats.ev_batch_count,
			  "%"PRIu64" batches dequeued, but finalize called %d "
			  "times", stats.ev_batch_count, finalize_count);

	/*
	 * The event dispatcher should call often-matching match functions
	 * more often, and thus this never-matching match function should
	 * be called relatively infrequently.
	 */
	TEST_ASSERT(test_app->never_match_count <
		    (stats.ev_dispatch_count / 4),
		    "Never-matching match function called suspiciously often");

	rc = test_app_reset_dispatcher_stats(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	return TEST_SUCCESS;
}

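/*
 * Verify that an event targeting a queue for which no handler is
 * registered is dropped, and accounted for in the drop counter.
 */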
static int
test_drop(void)
{
	int rc;
	uint8_t unhandled_queue;
	struct rte_dispatcher_stats stats;

	unhandled_queue = (uint8_t)rte_rand_max(NUM_QUEUES);

	rc = test_app_start(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	rc = test_app_unregister_callback(test_app, unhandled_queue);
	if (rc != TEST_SUCCESS)
		return rc;

	struct rte_event event = {
		.queue_id = unhandled_queue,
		.flow_id = 0,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.op = RTE_EVENT_OP_NEW,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.u64 = 0
	};

	do {
		rc = rte_event_enqueue_burst(test_app->event_dev_id,
					     DRIVER_PORT_ID, &event, 1);
	} while (rc == 0);

	do {
		rte_dispatcher_stats_get(test_app->dispatcher, &stats);

		rte_event_maintain(test_app->event_dev_id, DRIVER_PORT_ID, 0);
	} while (stats.ev_drop_count == 0 && stats.ev_dispatch_count == 0);

	rc = test_app_stop(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	TEST_ASSERT_EQUAL(stats.ev_drop_count, 1, "Drop count is not one");
	TEST_ASSERT_EQUAL(stats.ev_dispatch_count, 0,
			  "Dispatch count is not zero");
	TEST_ASSERT(stats.poll_count > 0, "Poll count is zero");

	return TEST_SUCCESS;
}

#define MORE_THAN_MAX_HANDLERS 1000
#define MIN_HANDLERS 32

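/*
 * Register handlers until the dispatcher runs out of slots, and
 * verify that registration fails with -ENOMEM, but only after a
 * reasonable number of handlers have been accepted.
 */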
static int
test_many_handler_registrations(void)
{
	int rc;
	int num_regs = 0;
	int reg_ids[MORE_THAN_MAX_HANDLERS];
	int reg_id;
	int i;

	rc = test_app_unregister_callbacks(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	for (i = 0; i < MORE_THAN_MAX_HANDLERS; i++) {
		reg_id = rte_dispatcher_register(test_app->dispatcher,
						 never_match, NULL,
						 test_app_never_process, NULL);
		if (reg_id < 0)
			break;

		reg_ids[num_regs++] = reg_id;
	}

	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
			  "%d but was %d", -ENOMEM, reg_id);
	TEST_ASSERT(num_regs >= MIN_HANDLERS, "Registration failed already "
		    "after %d handler registrations.", num_regs);

	for (i = 0; i < num_regs; i++) {
		rc = rte_dispatcher_unregister(test_app->dispatcher,
					       reg_ids[i]);
		TEST_ASSERT_SUCCESS(rc, "Unable to unregister handler %d",
				    reg_ids[i]);
	}

	return TEST_SUCCESS;
}

static void
dummy_finalize(uint8_t event_dev_id __rte_unused,
	       uint8_t event_port_id __rte_unused,
	       void *cb_data __rte_unused)
{
}

#define MORE_THAN_MAX_FINALIZERS 1000
#define MIN_FINALIZERS 16

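/*
 * Same as test_many_handler_registrations(), but for finalize
 * callback registrations.
 */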
static int
test_many_finalize_registrations(void)
{
	int rc;
	int num_regs = 0;
	int reg_ids[MORE_THAN_MAX_FINALIZERS];
	int reg_id;
	int i;

	rc = test_app_unregister_callbacks(test_app);
	if (rc != TEST_SUCCESS)
		return rc;

	for (i = 0; i < MORE_THAN_MAX_FINALIZERS; i++) {
		reg_id = rte_dispatcher_finalize_register(
			test_app->dispatcher, dummy_finalize, NULL
		);

		if (reg_id < 0)
			break;

		reg_ids[num_regs++] = reg_id;
	}

	TEST_ASSERT_EQUAL(reg_id, -ENOMEM, "Incorrect return code. Expected "
			  "%d but was %d", -ENOMEM, reg_id);
	TEST_ASSERT(num_regs >= MIN_FINALIZERS, "Finalize registration failed "
		    "already after %d registrations.", num_regs);

	for (i = 0; i < num_regs; i++) {
		rc = rte_dispatcher_finalize_unregister(
			test_app->dispatcher, reg_ids[i]
		);
		TEST_ASSERT_SUCCESS(rc, "Unable to unregister finalizer %d",
				    reg_ids[i]);
	}

	return TEST_SUCCESS;
}

static struct unit_test_suite test_suite = {
	.suite_name = "Event dispatcher test suite",
	.unit_test_cases = {
		TEST_CASE_ST(test_setup, test_teardown, test_basic),
		TEST_CASE_ST(test_setup, test_teardown, test_drop),
		TEST_CASE_ST(test_setup, test_teardown,
			     test_many_handler_registrations),
		TEST_CASE_ST(test_setup, test_teardown,
			     test_many_finalize_registrations),
		TEST_CASES_END()
	}
};

static int
test_dispatcher(void)
{
	return unit_test_suite_runner(&test_suite);
}

REGISTER_FAST_TEST(dispatcher_autotest, false, true, test_dispatcher);