1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 * Copyright(c) 2017-2018 Intel Corporation. 4 */ 5 6 #include <rte_atomic.h> 7 #include <rte_common.h> 8 #include <rte_cycles.h> 9 #include <rte_debug.h> 10 #include <rte_eal.h> 11 #include <rte_ethdev.h> 12 #include <rte_eventdev.h> 13 #include <rte_event_timer_adapter.h> 14 #include <rte_mempool.h> 15 #include <rte_launch.h> 16 #include <rte_lcore.h> 17 #include <rte_per_lcore.h> 18 #include <rte_random.h> 19 #include <rte_bus_vdev.h> 20 #include <rte_service.h> 21 #include <stdbool.h> 22 23 #include "test.h" 24 25 /* 4K timers corresponds to sw evdev max inflight events */ 26 #define MAX_TIMERS (4 * 1024) 27 #define BKT_TCK_NSEC 28 29 #define NSECPERSEC 1E9 30 #define BATCH_SIZE 16 31 /* Both the app lcore and adapter ports are linked to this queue */ 32 #define TEST_QUEUE_ID 0 33 /* Port the application dequeues from */ 34 #define TEST_PORT_ID 0 35 #define TEST_ADAPTER_ID 0 36 37 /* Handle log statements in same manner as test macros */ 38 #define LOG_DBG(...) 
RTE_LOG(DEBUG, EAL, __VA_ARGS__) 39 40 static int evdev; 41 static struct rte_event_timer_adapter *timdev; 42 static struct rte_mempool *eventdev_test_mempool; 43 static struct rte_ring *timer_producer_ring; 44 static uint64_t global_bkt_tck_ns; 45 static uint64_t global_info_bkt_tck_ns; 46 static volatile uint8_t arm_done; 47 48 #define CALC_TICKS(tks) \ 49 ((tks * global_bkt_tck_ns) / global_info_bkt_tck_ns) 50 51 52 static bool using_services; 53 static uint32_t test_lcore1; 54 static uint32_t test_lcore2; 55 static uint32_t test_lcore3; 56 static uint32_t sw_evdev_slcore; 57 static uint32_t sw_adptr_slcore; 58 59 static inline void 60 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, 61 struct rte_event_dev_info *info) 62 { 63 memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); 64 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; 65 dev_conf->nb_event_ports = 1; 66 dev_conf->nb_event_queues = 1; 67 dev_conf->nb_event_queue_flows = info->max_event_queue_flows; 68 dev_conf->nb_event_port_dequeue_depth = 69 info->max_event_port_dequeue_depth; 70 dev_conf->nb_event_port_enqueue_depth = 71 info->max_event_port_enqueue_depth; 72 dev_conf->nb_event_port_enqueue_depth = 73 info->max_event_port_enqueue_depth; 74 dev_conf->nb_events_limit = 75 info->max_num_events; 76 } 77 78 static inline int 79 eventdev_setup(void) 80 { 81 int ret; 82 struct rte_event_dev_config dev_conf; 83 struct rte_event_dev_info info; 84 uint32_t service_id; 85 86 ret = rte_event_dev_info_get(evdev, &info); 87 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info"); 88 TEST_ASSERT(info.max_num_events < 0 || 89 info.max_num_events >= (int32_t)MAX_TIMERS, 90 "ERROR max_num_events=%d < max_events=%d", 91 info.max_num_events, MAX_TIMERS); 92 93 devconf_set_default_sane_values(&dev_conf, &info); 94 ret = rte_event_dev_configure(evdev, &dev_conf); 95 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev"); 96 97 ret = rte_event_queue_setup(evdev, 0, NULL); 98 
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0); 99 100 /* Configure event port */ 101 ret = rte_event_port_setup(evdev, 0, NULL); 102 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0); 103 ret = rte_event_port_link(evdev, 0, NULL, NULL, 0); 104 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0); 105 106 /* If this is a software event device, map and start its service */ 107 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 108 TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore), 109 "Failed to add service core"); 110 TEST_ASSERT_SUCCESS(rte_service_lcore_start( 111 sw_evdev_slcore), 112 "Failed to start service core"); 113 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( 114 service_id, sw_evdev_slcore, 1), 115 "Failed to map evdev service"); 116 TEST_ASSERT_SUCCESS(rte_service_runstate_set( 117 service_id, 1), 118 "Failed to start evdev service"); 119 } 120 121 ret = rte_event_dev_start(evdev); 122 TEST_ASSERT_SUCCESS(ret, "Failed to start device"); 123 124 return TEST_SUCCESS; 125 } 126 127 static int 128 testsuite_setup(void) 129 { 130 /* Some of the multithreaded tests require 3 other lcores to run */ 131 unsigned int required_lcore_count = 4; 132 uint32_t service_id; 133 134 /* To make it easier to map services later if needed, just reset 135 * service core state. 136 */ 137 (void) rte_service_lcore_reset_all(); 138 139 if (!rte_event_dev_count()) { 140 /* If there is no hardware eventdev, or no software vdev was 141 * specified on the command line, create an instance of 142 * event_sw. 143 */ 144 LOG_DBG("Failed to find a valid event device... testing with" 145 " event_sw device\n"); 146 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), 147 "Error creating eventdev"); 148 evdev = rte_event_dev_get_dev_id("event_sw0"); 149 } 150 151 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 152 /* A software event device will use a software event timer 153 * adapter as well. 
2 more cores required to convert to 154 * service cores. 155 */ 156 required_lcore_count += 2; 157 using_services = true; 158 } 159 160 if (rte_lcore_count() < required_lcore_count) { 161 printf("%d lcores needed to run tests", required_lcore_count); 162 return TEST_FAILED; 163 } 164 165 /* Assign lcores for various tasks */ 166 test_lcore1 = rte_get_next_lcore(-1, 1, 0); 167 test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0); 168 test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0); 169 if (using_services) { 170 sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0); 171 sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0); 172 } 173 174 return eventdev_setup(); 175 } 176 177 static void 178 testsuite_teardown(void) 179 { 180 rte_event_dev_stop(evdev); 181 rte_event_dev_close(evdev); 182 } 183 184 static int 185 setup_adapter_service(struct rte_event_timer_adapter *adptr) 186 { 187 uint32_t adapter_service_id; 188 int ret; 189 190 /* retrieve service ids */ 191 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr, 192 &adapter_service_id), "Failed to get event timer " 193 "adapter service id"); 194 /* add a service core and start it */ 195 ret = rte_service_lcore_add(sw_adptr_slcore); 196 TEST_ASSERT(ret == 0 || ret == -EALREADY, 197 "Failed to add service core"); 198 ret = rte_service_lcore_start(sw_adptr_slcore); 199 TEST_ASSERT(ret == 0 || ret == -EALREADY, 200 "Failed to start service core"); 201 202 /* map services to it */ 203 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id, 204 sw_adptr_slcore, 1), 205 "Failed to map adapter service"); 206 207 /* set services to running */ 208 TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1), 209 "Failed to start event timer adapter service"); 210 211 return TEST_SUCCESS; 212 } 213 214 static int 215 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, 216 void *conf_arg) 217 { 218 struct rte_event_dev_config dev_conf; 219 struct 
rte_event_dev_info info; 220 struct rte_event_port_conf *port_conf, def_port_conf = {0}; 221 uint32_t started; 222 static int port_allocated; 223 static uint8_t port_id; 224 int ret; 225 226 if (port_allocated) { 227 *event_port_id = port_id; 228 return 0; 229 } 230 231 RTE_SET_USED(id); 232 233 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED, 234 &started); 235 if (ret < 0) 236 return ret; 237 238 if (started) 239 rte_event_dev_stop(event_dev_id); 240 241 ret = rte_event_dev_info_get(evdev, &info); 242 if (ret < 0) 243 return ret; 244 245 devconf_set_default_sane_values(&dev_conf, &info); 246 247 port_id = dev_conf.nb_event_ports; 248 dev_conf.nb_event_ports++; 249 250 ret = rte_event_dev_configure(event_dev_id, &dev_conf); 251 if (ret < 0) { 252 if (started) 253 rte_event_dev_start(event_dev_id); 254 return ret; 255 } 256 257 if (conf_arg != NULL) 258 port_conf = conf_arg; 259 else { 260 port_conf = &def_port_conf; 261 ret = rte_event_port_default_conf_get(event_dev_id, port_id, 262 port_conf); 263 if (ret < 0) 264 return ret; 265 } 266 267 ret = rte_event_port_setup(event_dev_id, port_id, port_conf); 268 if (ret < 0) 269 return ret; 270 271 *event_port_id = port_id; 272 273 if (started) 274 rte_event_dev_start(event_dev_id); 275 276 /* Reuse this port number next time this is called */ 277 port_allocated = 1; 278 279 return 0; 280 } 281 282 static int 283 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns) 284 { 285 struct rte_event_timer_adapter_info info; 286 struct rte_event_timer_adapter_conf config = { 287 .event_dev_id = evdev, 288 .timer_adapter_id = TEST_ADAPTER_ID, 289 .timer_tick_ns = bkt_tck_ns, 290 .max_tmo_ns = max_tmo_ns, 291 .nb_timers = MAX_TIMERS * 10, 292 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 293 }; 294 uint32_t caps = 0; 295 const char *pool_name = "timdev_test_pool"; 296 297 global_bkt_tck_ns = bkt_tck_ns; 298 299 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 300 "failed to get adapter 
capabilities"); 301 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { 302 timdev = rte_event_timer_adapter_create_ext(&config, 303 test_port_conf_cb, 304 NULL); 305 setup_adapter_service(timdev); 306 using_services = true; 307 } else 308 timdev = rte_event_timer_adapter_create(&config); 309 310 TEST_ASSERT_NOT_NULL(timdev, 311 "failed to create event timer ring"); 312 313 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0, 314 "failed to Start event timer adapter"); 315 316 /* Create event timer mempool */ 317 eventdev_test_mempool = rte_mempool_create(pool_name, 318 MAX_TIMERS * 2, 319 sizeof(struct rte_event_timer), /* element size*/ 320 0, /* cache size*/ 321 0, NULL, NULL, NULL, NULL, 322 rte_socket_id(), 0); 323 if (!eventdev_test_mempool) { 324 printf("ERROR creating mempool\n"); 325 return TEST_FAILED; 326 } 327 328 rte_event_timer_adapter_get_info(timdev, &info); 329 330 global_info_bkt_tck_ns = info.min_resolution_ns; 331 332 return TEST_SUCCESS; 333 } 334 335 static int 336 timdev_setup_usec(void) 337 { 338 return using_services ? 339 /* Max timeout is 10,000us and bucket interval is 100us */ 340 _timdev_setup(1E7, 1E5) : 341 /* Max timeout is 100us and bucket interval is 1us */ 342 _timdev_setup(1E5, 1E3); 343 } 344 345 static int 346 timdev_setup_usec_multicore(void) 347 { 348 return using_services ? 
349 /* Max timeout is 10,000us and bucket interval is 100us */ 350 _timdev_setup(1E7, 1E5) : 351 /* Max timeout is 100us and bucket interval is 1us */ 352 _timdev_setup(1E5, 1E3); 353 } 354 355 static int 356 timdev_setup_msec(void) 357 { 358 /* Max timeout is 2 mins, and bucket interval is 100 ms */ 359 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10); 360 } 361 362 static int 363 timdev_setup_sec(void) 364 { 365 /* Max timeout is 100sec and bucket interval is 1sec */ 366 return _timdev_setup(1E11, 1E9); 367 } 368 369 static int 370 timdev_setup_sec_multicore(void) 371 { 372 /* Max timeout is 100sec and bucket interval is 1sec */ 373 return _timdev_setup(1E11, 1E9); 374 } 375 376 static void 377 timdev_teardown(void) 378 { 379 rte_event_timer_adapter_stop(timdev); 380 rte_event_timer_adapter_free(timdev); 381 382 rte_mempool_free(eventdev_test_mempool); 383 } 384 385 static inline int 386 test_timer_state(void) 387 { 388 struct rte_event_timer *ev_tim; 389 struct rte_event ev; 390 const struct rte_event_timer tim = { 391 .ev.op = RTE_EVENT_OP_NEW, 392 .ev.queue_id = 0, 393 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 394 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 395 .ev.event_type = RTE_EVENT_TYPE_TIMER, 396 .state = RTE_EVENT_TIMER_NOT_ARMED, 397 }; 398 399 400 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim); 401 *ev_tim = tim; 402 ev_tim->ev.event_ptr = ev_tim; 403 ev_tim->timeout_ticks = CALC_TICKS(120); 404 405 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0, 406 "Armed timer exceeding max_timeout."); 407 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 408 "Improper timer state set expected %d returned %d", 409 RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state); 410 411 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 412 ev_tim->timeout_ticks = CALC_TICKS(10); 413 414 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 415 "Failed to arm timer with proper timeout."); 416 TEST_ASSERT_EQUAL(ev_tim->state, 
RTE_EVENT_TIMER_ARMED, 417 "Improper timer state set expected %d returned %d", 418 RTE_EVENT_TIMER_ARMED, ev_tim->state); 419 420 if (!using_services) 421 rte_delay_us(20); 422 else 423 rte_delay_us(1000 + 200); 424 TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, 425 "Armed timer failed to trigger."); 426 427 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 428 ev_tim->timeout_ticks = CALC_TICKS(90); 429 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 430 "Failed to arm timer with proper timeout."); 431 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), 432 1, "Failed to cancel armed timer"); 433 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED, 434 "Improper timer state set expected %d returned %d", 435 RTE_EVENT_TIMER_CANCELED, ev_tim->state); 436 437 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 438 439 return TEST_SUCCESS; 440 } 441 442 static inline int 443 _arm_timers(uint64_t timeout_tcks, uint64_t timers) 444 { 445 uint64_t i; 446 struct rte_event_timer *ev_tim; 447 const struct rte_event_timer tim = { 448 .ev.op = RTE_EVENT_OP_NEW, 449 .ev.queue_id = 0, 450 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 451 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 452 .ev.event_type = RTE_EVENT_TYPE_TIMER, 453 .state = RTE_EVENT_TIMER_NOT_ARMED, 454 .timeout_ticks = CALC_TICKS(timeout_tcks), 455 }; 456 457 for (i = 0; i < timers; i++) { 458 459 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 460 (void **)&ev_tim), 461 "mempool alloc failed"); 462 *ev_tim = tim; 463 ev_tim->ev.event_ptr = ev_tim; 464 465 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 466 1), 1, "Failed to arm timer %d", 467 rte_errno); 468 } 469 470 return TEST_SUCCESS; 471 } 472 473 static inline int 474 _wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count, 475 uint64_t cancel_count) 476 { 477 uint8_t valid_event; 478 uint64_t events = 0; 479 uint64_t wait_start, max_wait; 480 struct rte_event ev; 481 482 
max_wait = rte_get_timer_hz() * wait_sec; 483 wait_start = rte_get_timer_cycles(); 484 while (1) { 485 if (rte_get_timer_cycles() - wait_start > max_wait) { 486 if (events + cancel_count != arm_count) 487 TEST_ASSERT_SUCCESS(max_wait, 488 "Max time limit for timers exceeded."); 489 break; 490 } 491 492 valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); 493 if (!valid_event) 494 continue; 495 496 rte_mempool_put(eventdev_test_mempool, ev.event_ptr); 497 events++; 498 } 499 500 return TEST_SUCCESS; 501 } 502 503 static inline int 504 test_timer_arm(void) 505 { 506 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 507 "Failed to arm timers"); 508 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 509 "Timer triggered count doesn't match arm count"); 510 return TEST_SUCCESS; 511 } 512 513 static int 514 _arm_wrapper(void *arg) 515 { 516 RTE_SET_USED(arg); 517 518 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 519 "Failed to arm timers"); 520 521 return TEST_SUCCESS; 522 } 523 524 static inline int 525 test_timer_arm_multicore(void) 526 { 527 528 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0); 529 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0); 530 531 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1); 532 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2); 533 534 rte_eal_mp_wait_lcore(); 535 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 536 "Timer triggered count doesn't match arm count"); 537 538 return TEST_SUCCESS; 539 } 540 541 #define MAX_BURST 16 542 static inline int 543 _arm_timers_burst(uint64_t timeout_tcks, uint64_t timers) 544 { 545 uint64_t i; 546 int j; 547 struct rte_event_timer *ev_tim[MAX_BURST]; 548 const struct rte_event_timer tim = { 549 .ev.op = RTE_EVENT_OP_NEW, 550 .ev.queue_id = 0, 551 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 552 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 553 .ev.event_type = RTE_EVENT_TYPE_TIMER, 554 .state = RTE_EVENT_TIMER_NOT_ARMED, 555 .timeout_ticks = 
CALC_TICKS(timeout_tcks), 556 }; 557 558 for (i = 0; i < timers / MAX_BURST; i++) { 559 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 560 eventdev_test_mempool, 561 (void **)ev_tim, MAX_BURST), 562 "mempool alloc failed"); 563 564 for (j = 0; j < MAX_BURST; j++) { 565 *ev_tim[j] = tim; 566 ev_tim[j]->ev.event_ptr = ev_tim[j]; 567 } 568 569 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 570 ev_tim, tim.timeout_ticks, MAX_BURST), 571 MAX_BURST, "Failed to arm timer %d", rte_errno); 572 } 573 574 return TEST_SUCCESS; 575 } 576 577 static inline int 578 test_timer_arm_burst(void) 579 { 580 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 581 "Failed to arm timers"); 582 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 583 "Timer triggered count doesn't match arm count"); 584 585 return TEST_SUCCESS; 586 } 587 588 static int 589 _arm_wrapper_burst(void *arg) 590 { 591 RTE_SET_USED(arg); 592 593 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 594 "Failed to arm timers"); 595 596 return TEST_SUCCESS; 597 } 598 599 static inline int 600 test_timer_arm_burst_multicore(void) 601 { 602 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1); 603 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2); 604 605 rte_eal_mp_wait_lcore(); 606 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 607 "Timer triggered count doesn't match arm count"); 608 609 return TEST_SUCCESS; 610 } 611 612 static inline int 613 test_timer_cancel(void) 614 { 615 uint64_t i; 616 struct rte_event_timer *ev_tim; 617 const struct rte_event_timer tim = { 618 .ev.op = RTE_EVENT_OP_NEW, 619 .ev.queue_id = 0, 620 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 621 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 622 .ev.event_type = RTE_EVENT_TYPE_TIMER, 623 .state = RTE_EVENT_TIMER_NOT_ARMED, 624 .timeout_ticks = CALC_TICKS(20), 625 }; 626 627 for (i = 0; i < MAX_TIMERS; i++) { 628 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 629 (void 
**)&ev_tim), 630 "mempool alloc failed"); 631 *ev_tim = tim; 632 ev_tim->ev.event_ptr = ev_tim; 633 634 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 635 1), 1, "Failed to arm timer %d", 636 rte_errno); 637 638 rte_delay_us(100 + (i % 5000)); 639 640 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, 641 &ev_tim, 1), 1, 642 "Failed to cancel event timer %d", rte_errno); 643 rte_mempool_put(eventdev_test_mempool, ev_tim); 644 } 645 646 647 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 648 MAX_TIMERS), 649 "Timer triggered count doesn't match arm, cancel count"); 650 651 return TEST_SUCCESS; 652 } 653 654 static int 655 _cancel_producer(uint64_t timeout_tcks, uint64_t timers) 656 { 657 uint64_t i; 658 struct rte_event_timer *ev_tim; 659 const struct rte_event_timer tim = { 660 .ev.op = RTE_EVENT_OP_NEW, 661 .ev.queue_id = 0, 662 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 663 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 664 .ev.event_type = RTE_EVENT_TYPE_TIMER, 665 .state = RTE_EVENT_TIMER_NOT_ARMED, 666 .timeout_ticks = CALC_TICKS(timeout_tcks), 667 }; 668 669 for (i = 0; i < timers; i++) { 670 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 671 (void **)&ev_tim), 672 "mempool alloc failed"); 673 674 *ev_tim = tim; 675 ev_tim->ev.event_ptr = ev_tim; 676 677 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 678 1), 1, "Failed to arm timer %d", 679 rte_errno); 680 681 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, 682 "Failed to arm event timer"); 683 684 while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0) 685 ; 686 } 687 688 return TEST_SUCCESS; 689 } 690 691 static int 692 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers) 693 { 694 695 uint64_t i; 696 int j, ret; 697 struct rte_event_timer *ev_tim[MAX_BURST]; 698 const struct rte_event_timer tim = { 699 .ev.op = RTE_EVENT_OP_NEW, 700 .ev.queue_id = 0, 701 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 702 .ev.priority = 
RTE_EVENT_DEV_PRIORITY_NORMAL, 703 .ev.event_type = RTE_EVENT_TYPE_TIMER, 704 .state = RTE_EVENT_TIMER_NOT_ARMED, 705 .timeout_ticks = CALC_TICKS(timeout_tcks), 706 }; 707 int arm_count = 0; 708 709 for (i = 0; i < timers / MAX_BURST; i++) { 710 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 711 eventdev_test_mempool, 712 (void **)ev_tim, MAX_BURST), 713 "mempool alloc failed"); 714 715 for (j = 0; j < MAX_BURST; j++) { 716 *ev_tim[j] = tim; 717 ev_tim[j]->ev.event_ptr = ev_tim[j]; 718 } 719 720 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 721 ev_tim, tim.timeout_ticks, MAX_BURST), 722 MAX_BURST, "Failed to arm timer %d", rte_errno); 723 724 for (j = 0; j < MAX_BURST; j++) 725 TEST_ASSERT_EQUAL(ev_tim[j]->state, 726 RTE_EVENT_TIMER_ARMED, 727 "Event timer not armed, state = %d", 728 ev_tim[j]->state); 729 730 ret = rte_ring_enqueue_bulk(timer_producer_ring, 731 (void **)ev_tim, MAX_BURST, NULL); 732 TEST_ASSERT_EQUAL(ret, MAX_BURST, 733 "Failed to enqueue event timers to ring"); 734 arm_count += ret; 735 } 736 737 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS, 738 "Failed to arm expected number of event timers"); 739 740 return TEST_SUCCESS; 741 } 742 743 static int 744 _cancel_producer_wrapper(void *args) 745 { 746 RTE_SET_USED(args); 747 748 return _cancel_producer(20, MAX_TIMERS); 749 } 750 751 static int 752 _cancel_producer_burst_wrapper(void *args) 753 { 754 RTE_SET_USED(args); 755 756 return _cancel_producer_burst(100, MAX_TIMERS); 757 } 758 759 static int 760 _cancel_thread(void *args) 761 { 762 RTE_SET_USED(args); 763 struct rte_event_timer *ev_tim = NULL; 764 uint64_t cancel_count = 0; 765 uint16_t ret; 766 767 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 768 if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim)) 769 continue; 770 771 ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); 772 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); 773 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 774 cancel_count++; 
775 } 776 777 return TEST_SUCCESS; 778 } 779 780 static int 781 _cancel_burst_thread(void *args) 782 { 783 RTE_SET_USED(args); 784 785 int ret, i, n; 786 struct rte_event_timer *ev_tim[MAX_BURST]; 787 uint64_t cancel_count = 0; 788 uint64_t dequeue_count = 0; 789 790 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 791 n = rte_ring_dequeue_burst(timer_producer_ring, 792 (void **)ev_tim, MAX_BURST, NULL); 793 if (!n) 794 continue; 795 796 dequeue_count += n; 797 798 for (i = 0; i < n; i++) 799 TEST_ASSERT_EQUAL(ev_tim[i]->state, 800 RTE_EVENT_TIMER_ARMED, 801 "Event timer not armed, state = %d", 802 ev_tim[i]->state); 803 804 ret = rte_event_timer_cancel_burst(timdev, ev_tim, n); 805 TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of " 806 "event timers"); 807 rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim, 808 RTE_MIN(ret, MAX_BURST)); 809 810 cancel_count += ret; 811 } 812 813 TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS, 814 "Failed to cancel expected number of timers: " 815 "expected = %d, cancel_count = %"PRIu64", " 816 "dequeue_count = %"PRIu64"\n", MAX_TIMERS, 817 cancel_count, dequeue_count); 818 819 return TEST_SUCCESS; 820 } 821 822 static inline int 823 test_timer_cancel_multicore(void) 824 { 825 arm_done = 0; 826 timer_producer_ring = rte_ring_create("timer_cancel_queue", 827 MAX_TIMERS * 2, rte_socket_id(), 0); 828 TEST_ASSERT_NOT_NULL(timer_producer_ring, 829 "Unable to reserve memory for ring"); 830 831 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3); 832 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1); 833 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2); 834 835 rte_eal_wait_lcore(test_lcore1); 836 rte_eal_wait_lcore(test_lcore2); 837 arm_done = 1; 838 rte_eal_wait_lcore(test_lcore3); 839 rte_ring_free(timer_producer_ring); 840 841 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2, 842 MAX_TIMERS * 2), 843 "Timer triggered count doesn't match arm count"); 844 
845 return TEST_SUCCESS; 846 } 847 848 static inline int 849 test_timer_cancel_burst_multicore(void) 850 { 851 arm_done = 0; 852 timer_producer_ring = rte_ring_create("timer_cancel_queue", 853 MAX_TIMERS * 2, rte_socket_id(), 0); 854 TEST_ASSERT_NOT_NULL(timer_producer_ring, 855 "Unable to reserve memory for ring"); 856 857 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2); 858 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL, 859 test_lcore1); 860 861 rte_eal_wait_lcore(test_lcore1); 862 arm_done = 1; 863 rte_eal_wait_lcore(test_lcore2); 864 rte_ring_free(timer_producer_ring); 865 866 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 867 MAX_TIMERS), 868 "Timer triggered count doesn't match arm count"); 869 870 return TEST_SUCCESS; 871 } 872 873 static inline int 874 test_timer_cancel_random(void) 875 { 876 uint64_t i; 877 uint64_t events_canceled = 0; 878 struct rte_event_timer *ev_tim; 879 const struct rte_event_timer tim = { 880 .ev.op = RTE_EVENT_OP_NEW, 881 .ev.queue_id = 0, 882 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 883 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 884 .ev.event_type = RTE_EVENT_TYPE_TIMER, 885 .state = RTE_EVENT_TIMER_NOT_ARMED, 886 .timeout_ticks = CALC_TICKS(20), 887 }; 888 889 for (i = 0; i < MAX_TIMERS; i++) { 890 891 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 892 (void **)&ev_tim), 893 "mempool alloc failed"); 894 *ev_tim = tim; 895 ev_tim->ev.event_ptr = ev_tim; 896 897 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 898 1), 1, "Failed to arm timer %d", 899 rte_errno); 900 901 if (rte_rand() & 1) { 902 rte_delay_us(100 + (i % 5000)); 903 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst( 904 timdev, 905 &ev_tim, 1), 1, 906 "Failed to cancel event timer %d", rte_errno); 907 rte_mempool_put(eventdev_test_mempool, ev_tim); 908 events_canceled++; 909 } 910 } 911 912 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 913 events_canceled), 914 "Timer triggered count doesn't 
match arm, cancel count"); 915 916 return TEST_SUCCESS; 917 } 918 919 /* Check that the adapter can be created correctly */ 920 static int 921 adapter_create(void) 922 { 923 int adapter_id = 0; 924 struct rte_event_timer_adapter *adapter, *adapter2; 925 926 struct rte_event_timer_adapter_conf conf = { 927 .event_dev_id = evdev + 1, // invalid event dev id 928 .timer_adapter_id = adapter_id, 929 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 930 .timer_tick_ns = NSECPERSEC / 10, 931 .max_tmo_ns = 180 * NSECPERSEC, 932 .nb_timers = MAX_TIMERS, 933 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 934 }; 935 uint32_t caps = 0; 936 937 /* Test invalid conf */ 938 adapter = rte_event_timer_adapter_create(&conf); 939 TEST_ASSERT_NULL(adapter, "Created adapter with invalid " 940 "event device id"); 941 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for " 942 "invalid event device id"); 943 944 /* Test valid conf */ 945 conf.event_dev_id = evdev; 946 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 947 "failed to get adapter capabilities"); 948 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) 949 adapter = rte_event_timer_adapter_create_ext(&conf, 950 test_port_conf_cb, 951 NULL); 952 else 953 adapter = rte_event_timer_adapter_create(&conf); 954 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid " 955 "configuration"); 956 957 /* Test existing id */ 958 adapter2 = rte_event_timer_adapter_create(&conf); 959 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id"); 960 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing " 961 "id"); 962 963 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter), 964 "Failed to free adapter"); 965 966 rte_mempool_free(eventdev_test_mempool); 967 968 return TEST_SUCCESS; 969 } 970 971 972 /* Test that adapter can be freed correctly. 
 */
static int
adapter_free(void)
{
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test that adapter info can be retrieved and is correct. */
static int
adapter_get_info(void)
{
	struct rte_event_timer_adapter_info info;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
			"Failed to get adapter info");

	/* With a software adapter, test_port_conf_cb() allocated port 1 for
	 * the adapter (port 0 belongs to the application).
	 */
	if (using_services)
		TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
				"Expected port id = 1, got port id = %d",
				info.event_dev_port_id);

	return TEST_SUCCESS;
}

/* Test adapter lookup via adapter ID. */
static int
adapter_lookup(void)
{
	struct rte_event_timer_adapter *adapter;

	adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");

	return TEST_SUCCESS;
}

/* Test that a started adapter refuses to start again (-EALREADY).
 * _timdev_setup() already starts the adapter it creates.
 */
static int
adapter_start(void)
{
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC,
			NSECPERSEC / 10),
			"Failed to start adapter");
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}

/* Test that adapter stops correctly.
*/ 1038 static int 1039 adapter_stop(void) 1040 { 1041 struct rte_event_timer_adapter *l_adapter = NULL; 1042 1043 /* Test adapter stop */ 1044 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 1045 "Failed to stop event adapter"); 1046 1047 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter), 1048 "Erroneously stopped null event adapter"); 1049 1050 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 1051 "Failed to free adapter"); 1052 1053 rte_mempool_free(eventdev_test_mempool); 1054 1055 return TEST_SUCCESS; 1056 } 1057 1058 /* Test increment and reset of ev_enq_count stat */ 1059 static int 1060 stat_inc_reset_ev_enq(void) 1061 { 1062 int ret, i, n; 1063 int num_evtims = MAX_TIMERS; 1064 struct rte_event_timer *evtims[num_evtims]; 1065 struct rte_event evs[BATCH_SIZE]; 1066 struct rte_event_timer_adapter_stats stats; 1067 const struct rte_event_timer init_tim = { 1068 .ev.op = RTE_EVENT_OP_NEW, 1069 .ev.queue_id = TEST_QUEUE_ID, 1070 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1071 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1072 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1073 .state = RTE_EVENT_TIMER_NOT_ARMED, 1074 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1075 }; 1076 1077 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1078 num_evtims); 1079 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1080 ret); 1081 1082 for (i = 0; i < num_evtims; i++) { 1083 *evtims[i] = init_tim; 1084 evtims[i]->ev.event_ptr = evtims[i]; 1085 } 1086 1087 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1088 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1089 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at " 1090 "startup"); 1091 1092 /* Test with the max value for the adapter */ 1093 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1094 TEST_ASSERT_EQUAL(ret, num_evtims, 1095 "Failed to arm all event timers: attempted = %d, " 1096 "succeeded = %d, rte_errno = %s", 1097 
			  num_evtims, ret, rte_strerror(rte_errno));

	/* Give all armed timers a chance to expire before draining */
	rte_delay_ms(1000);

	/* Upper bound on dequeue attempts; scales with the timer count */
#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	/* Drain expiry events until all are seen or we run out of tries */
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
					       RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			  "got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
				    10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			  "events from event device");

	/* Check stats again */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
			  "Expected enqueue stat = %d; got %d", num_evtims,
			  (int)stats.ev_enq_count);

	/* Reset and check again */
	ret = rte_event_timer_adapter_stats_reset(timdev);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
			  "Expected enqueue stat = %d; got %d", 0,
			  (int)stats.ev_enq_count);

	/* Return all timer objects to the pool */
	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			     num_evtims);

	return TEST_SUCCESS;
}

/* Test various cases in arming timers: a single arm succeeds, arming an
 * already-armed timer fails with EALREADY, and exactly one expiry event is
 * generated.
 */
static int
event_timer_arm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer; the timer itself is carried as the event payload */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Test single timer arm succeeds */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			  rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
			  "in incorrect state");

	/* Test arm of armed timer fails */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
			  "rte_event_timer_arm_burst: 0, got: %d", ret);
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			  "after arming already armed timer");

	/* Let timer expire */
	rte_delay_ms(1000);

	/* Exactly one expiry event should be waiting */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			  "events from event device");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* This test checks that repeated references to the same event timer in the
 * arm request work as expected; only the first one through should succeed.
1208 */ 1209 static int 1210 event_timer_arm_double(void) 1211 { 1212 uint16_t n; 1213 int ret; 1214 struct rte_event_timer_adapter *adapter = timdev; 1215 struct rte_event_timer *evtim = NULL; 1216 struct rte_event evs[BATCH_SIZE]; 1217 const struct rte_event_timer init_tim = { 1218 .ev.op = RTE_EVENT_OP_NEW, 1219 .ev.queue_id = TEST_QUEUE_ID, 1220 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1221 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1222 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1223 .state = RTE_EVENT_TIMER_NOT_ARMED, 1224 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1225 }; 1226 1227 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1228 if (evtim == NULL) { 1229 /* Failed to get an event timer object */ 1230 return TEST_FAILED; 1231 } 1232 1233 /* Set up a timer */ 1234 *evtim = init_tim; 1235 evtim->ev.event_ptr = evtim; 1236 1237 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1238 ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr)); 1239 TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from " 1240 "rte_event_timer_arm_burst"); 1241 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1242 "after double-arm"); 1243 1244 /* Let timer expire */ 1245 rte_delay_ms(600); 1246 1247 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1248 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " 1249 "expected: 1, actual: %d", n); 1250 1251 rte_mempool_put(eventdev_test_mempool, evtim); 1252 1253 return TEST_SUCCESS; 1254 } 1255 1256 /* Test the timer expiry event is generated at the expected time. 
*/ 1257 static int 1258 event_timer_arm_expiry(void) 1259 { 1260 uint16_t n; 1261 int ret; 1262 struct rte_event_timer_adapter *adapter = timdev; 1263 struct rte_event_timer *evtim = NULL; 1264 struct rte_event_timer *evtim2 = NULL; 1265 struct rte_event evs[BATCH_SIZE]; 1266 const struct rte_event_timer init_tim = { 1267 .ev.op = RTE_EVENT_OP_NEW, 1268 .ev.queue_id = TEST_QUEUE_ID, 1269 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1270 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1271 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1272 .state = RTE_EVENT_TIMER_NOT_ARMED, 1273 }; 1274 1275 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1276 if (evtim == NULL) { 1277 /* Failed to get an event timer object */ 1278 return TEST_FAILED; 1279 } 1280 1281 /* Set up an event timer */ 1282 *evtim = init_tim; 1283 evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs 1284 evtim->ev.event_ptr = evtim; 1285 1286 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1287 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s", 1288 rte_strerror(rte_errno)); 1289 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " 1290 "timer in incorrect state"); 1291 1292 rte_delay_ms(2999); 1293 1294 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1295 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); 1296 1297 /* Delay 100 ms to account for the adapter tick window - should let us 1298 * dequeue one event 1299 */ 1300 rte_delay_ms(100); 1301 1302 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1303 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " 1304 "expiry events", n); 1305 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, 1306 "Dequeued unexpected type of event"); 1307 1308 /* Check that we recover the original event timer and then free it */ 1309 evtim2 = evs[0].event_ptr; 1310 TEST_ASSERT_EQUAL(evtim, evtim2, 1311 "Failed to recover pointer to original event timer"); 1312 
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}

/* Check that rearming a timer works as expected: a timer recovered from its
 * expiry event can be reset and armed again, producing a second expiry.
 */
static int
event_timer_arm_rearm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec
	evtim->ev.event_ptr = evtim;

	/* Arm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			  rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			  "events from event device");

	/* Recover the timer through the event that was dequeued.
	 */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			  "Failed to recover pointer to original event timer");

	/* Need to reset state in case implementation can't do it */
	evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;

	/* Rearm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			  rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			  "events from event device");

	/* Free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			  "Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}

/* Check that the adapter handles the max specified number of timers as
 * expected.
1389 */ 1390 static int 1391 event_timer_arm_max(void) 1392 { 1393 int ret, i, n; 1394 int num_evtims = MAX_TIMERS; 1395 struct rte_event_timer *evtims[num_evtims]; 1396 struct rte_event evs[BATCH_SIZE]; 1397 const struct rte_event_timer init_tim = { 1398 .ev.op = RTE_EVENT_OP_NEW, 1399 .ev.queue_id = TEST_QUEUE_ID, 1400 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1401 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1402 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1403 .state = RTE_EVENT_TIMER_NOT_ARMED, 1404 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1405 }; 1406 1407 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1408 num_evtims); 1409 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1410 ret); 1411 1412 for (i = 0; i < num_evtims; i++) { 1413 *evtims[i] = init_tim; 1414 evtims[i]->ev.event_ptr = evtims[i]; 1415 } 1416 1417 /* Test with the max value for the adapter */ 1418 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1419 TEST_ASSERT_EQUAL(ret, num_evtims, 1420 "Failed to arm all event timers: attempted = %d, " 1421 "succeeded = %d, rte_errno = %s", 1422 num_evtims, ret, rte_strerror(rte_errno)); 1423 1424 rte_delay_ms(1000); 1425 1426 #define MAX_TRIES num_evtims 1427 int sum = 0; 1428 int tries = 0; 1429 bool done = false; 1430 while (!done) { 1431 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1432 RTE_DIM(evs), 10); 1433 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1434 done = true; 1435 1436 rte_delay_ms(10); 1437 } 1438 1439 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1440 "got %d", num_evtims, sum); 1441 1442 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1443 1444 rte_delay_ms(100); 1445 1446 /* Make sure the eventdev is still empty */ 1447 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1448 10); 1449 1450 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1451 "events from event device"); 1452 1453 
rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1454 num_evtims); 1455 1456 return TEST_SUCCESS; 1457 } 1458 1459 /* Check that creating an event timer with incorrect event sched type fails. */ 1460 static int 1461 event_timer_arm_invalid_sched_type(void) 1462 { 1463 int ret; 1464 struct rte_event_timer *evtim = NULL; 1465 const struct rte_event_timer init_tim = { 1466 .ev.op = RTE_EVENT_OP_NEW, 1467 .ev.queue_id = TEST_QUEUE_ID, 1468 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1469 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1470 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1471 .state = RTE_EVENT_TIMER_NOT_ARMED, 1472 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1473 }; 1474 1475 if (!using_services) 1476 return -ENOTSUP; 1477 1478 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1479 if (evtim == NULL) { 1480 /* Failed to get an event timer object */ 1481 return TEST_FAILED; 1482 } 1483 1484 *evtim = init_tim; 1485 evtim->ev.event_ptr = evtim; 1486 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type 1487 1488 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1489 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1490 "sched type, but didn't"); 1491 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1492 " arm fail with invalid queue"); 1493 1494 rte_mempool_put(eventdev_test_mempool, &evtim); 1495 1496 return TEST_SUCCESS; 1497 } 1498 1499 /* Check that creating an event timer with a timeout value that is too small or 1500 * too big fails. 
1501 */ 1502 static int 1503 event_timer_arm_invalid_timeout(void) 1504 { 1505 int ret; 1506 struct rte_event_timer *evtim = NULL; 1507 const struct rte_event_timer init_tim = { 1508 .ev.op = RTE_EVENT_OP_NEW, 1509 .ev.queue_id = TEST_QUEUE_ID, 1510 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1511 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1512 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1513 .state = RTE_EVENT_TIMER_NOT_ARMED, 1514 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1515 }; 1516 1517 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1518 if (evtim == NULL) { 1519 /* Failed to get an event timer object */ 1520 return TEST_FAILED; 1521 } 1522 1523 *evtim = init_tim; 1524 evtim->ev.event_ptr = evtim; 1525 evtim->timeout_ticks = 0; // timeout too small 1526 1527 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1528 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1529 "timeout, but didn't"); 1530 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1531 " arm fail with invalid timeout"); 1532 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY, 1533 "Unexpected event timer state"); 1534 1535 *evtim = init_tim; 1536 evtim->ev.event_ptr = evtim; 1537 evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big 1538 1539 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1540 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1541 "timeout, but didn't"); 1542 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1543 " arm fail with invalid timeout"); 1544 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 1545 "Unexpected event timer state"); 1546 1547 rte_mempool_put(eventdev_test_mempool, evtim); 1548 1549 return TEST_SUCCESS; 1550 } 1551 1552 static int 1553 event_timer_cancel(void) 1554 { 1555 uint16_t n; 1556 int ret; 1557 struct rte_event_timer_adapter *adapter = timdev; 1558 struct rte_event_timer *evtim = NULL; 1559 struct rte_event 
evs[BATCH_SIZE]; 1560 const struct rte_event_timer init_tim = { 1561 .ev.op = RTE_EVENT_OP_NEW, 1562 .ev.queue_id = TEST_QUEUE_ID, 1563 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1564 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1565 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1566 .state = RTE_EVENT_TIMER_NOT_ARMED, 1567 }; 1568 1569 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1570 if (evtim == NULL) { 1571 /* Failed to get an event timer object */ 1572 return TEST_FAILED; 1573 } 1574 1575 /* Check that cancelling an uninited timer fails */ 1576 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1577 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1578 "uninited timer"); 1579 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1580 "cancelling uninited timer"); 1581 1582 /* Set up a timer */ 1583 *evtim = init_tim; 1584 evtim->ev.event_ptr = evtim; 1585 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec 1586 1587 /* Check that cancelling an inited but unarmed timer fails */ 1588 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1589 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1590 "unarmed timer"); 1591 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1592 "cancelling unarmed timer"); 1593 1594 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1595 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1596 rte_strerror(rte_errno)); 1597 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1598 "evtim in incorrect state"); 1599 1600 /* Delay 1 sec */ 1601 rte_delay_ms(1000); 1602 1603 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1604 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n", 1605 rte_strerror(rte_errno)); 1606 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, 1607 "evtim in incorrect state"); 1608 1609 rte_delay_ms(3000); 1610 1611 /* Make sure that no expiry event was generated */ 1612 n = 
rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1613 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1614 1615 rte_mempool_put(eventdev_test_mempool, evtim); 1616 1617 return TEST_SUCCESS; 1618 } 1619 1620 static int 1621 event_timer_cancel_double(void) 1622 { 1623 uint16_t n; 1624 int ret; 1625 struct rte_event_timer_adapter *adapter = timdev; 1626 struct rte_event_timer *evtim = NULL; 1627 struct rte_event evs[BATCH_SIZE]; 1628 const struct rte_event_timer init_tim = { 1629 .ev.op = RTE_EVENT_OP_NEW, 1630 .ev.queue_id = TEST_QUEUE_ID, 1631 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1632 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1633 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1634 .state = RTE_EVENT_TIMER_NOT_ARMED, 1635 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1636 }; 1637 1638 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1639 if (evtim == NULL) { 1640 /* Failed to get an event timer object */ 1641 return TEST_FAILED; 1642 } 1643 1644 /* Set up a timer */ 1645 *evtim = init_tim; 1646 evtim->ev.event_ptr = evtim; 1647 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec 1648 1649 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1650 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1651 rte_strerror(rte_errno)); 1652 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1653 "timer in unexpected state"); 1654 1655 /* Now, test that referencing the same timer twice in the same call 1656 * fails 1657 */ 1658 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1659 ret = rte_event_timer_cancel_burst(adapter, evtim_arr, 1660 RTE_DIM(evtim_arr)); 1661 1662 /* Two requests to cancel same timer, only one should succeed */ 1663 TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer " 1664 "twice"); 1665 1666 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1667 "after double-cancel: rte_errno = %d", rte_errno); 1668 1669 rte_delay_ms(3000); 1670 1671 /* 
Still make sure that no expiry event was generated */ 1672 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1673 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1674 1675 rte_mempool_put(eventdev_test_mempool, evtim); 1676 1677 return TEST_SUCCESS; 1678 } 1679 1680 /* Check that event timer adapter tick resolution works as expected by testing 1681 * the number of adapter ticks that occur within a particular time interval. 1682 */ 1683 static int 1684 adapter_tick_resolution(void) 1685 { 1686 struct rte_event_timer_adapter_stats stats; 1687 uint64_t adapter_tick_count; 1688 1689 /* Only run this test in the software driver case */ 1690 if (!using_services) 1691 return -ENOTSUP; 1692 1693 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev), 1694 "Failed to reset stats"); 1695 1696 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1697 &stats), "Failed to get adapter stats"); 1698 TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count " 1699 "not zeroed out"); 1700 1701 /* Delay 1 second; should let at least 10 ticks occur with the default 1702 * adapter configuration used by this test. 
1703 */ 1704 rte_delay_ms(1000); 1705 1706 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1707 &stats), "Failed to get adapter stats"); 1708 1709 adapter_tick_count = stats.adapter_tick_count; 1710 TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12, 1711 "Expected 10-12 adapter ticks, got %"PRIu64"\n", 1712 adapter_tick_count); 1713 1714 return TEST_SUCCESS; 1715 } 1716 1717 static int 1718 adapter_create_max(void) 1719 { 1720 int i; 1721 uint32_t svc_start_count, svc_end_count; 1722 struct rte_event_timer_adapter *adapters[ 1723 RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1]; 1724 1725 struct rte_event_timer_adapter_conf conf = { 1726 .event_dev_id = evdev, 1727 // timer_adapter_id set in loop 1728 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 1729 .timer_tick_ns = NSECPERSEC / 10, 1730 .max_tmo_ns = 180 * NSECPERSEC, 1731 .nb_timers = MAX_TIMERS, 1732 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 1733 }; 1734 1735 if (!using_services) 1736 return -ENOTSUP; 1737 1738 svc_start_count = rte_service_get_count(); 1739 1740 /* This test expects that there are sufficient service IDs available 1741 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to 1742 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service 1743 * (the SW event device, for example). 
1744 */ 1745 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) { 1746 conf.timer_adapter_id = i; 1747 adapters[i] = rte_event_timer_adapter_create_ext(&conf, 1748 test_port_conf_cb, NULL); 1749 TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter " 1750 "%d", i); 1751 } 1752 1753 conf.timer_adapter_id = i; 1754 adapters[i] = rte_event_timer_adapter_create(&conf); 1755 TEST_ASSERT_NULL(adapters[i], "Created too many adapters"); 1756 1757 /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services 1758 * have been created 1759 */ 1760 svc_end_count = rte_service_get_count(); 1761 TEST_ASSERT_EQUAL(svc_end_count - svc_start_count, 1762 RTE_EVENT_TIMER_ADAPTER_NUM_MAX, 1763 "Failed to create expected number of services"); 1764 1765 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) 1766 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]), 1767 "Failed to free adapter %d", i); 1768 1769 /* Check that service count is back to where it was at start */ 1770 svc_end_count = rte_service_get_count(); 1771 TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release " 1772 "correct number of services"); 1773 1774 return TEST_SUCCESS; 1775 } 1776 1777 static struct unit_test_suite event_timer_adptr_functional_testsuite = { 1778 .suite_name = "event timer functional test suite", 1779 .setup = testsuite_setup, 1780 .teardown = testsuite_teardown, 1781 .unit_test_cases = { 1782 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1783 test_timer_state), 1784 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1785 test_timer_arm), 1786 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1787 test_timer_arm_burst), 1788 TEST_CASE_ST(timdev_setup_sec, timdev_teardown, 1789 test_timer_cancel), 1790 TEST_CASE_ST(timdev_setup_sec, timdev_teardown, 1791 test_timer_cancel_random), 1792 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, 1793 test_timer_arm_multicore), 1794 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, 1795 
test_timer_arm_burst_multicore), 1796 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, 1797 test_timer_cancel_multicore), 1798 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, 1799 test_timer_cancel_burst_multicore), 1800 TEST_CASE(adapter_create), 1801 TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free), 1802 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1803 adapter_get_info), 1804 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1805 adapter_lookup), 1806 TEST_CASE_ST(NULL, timdev_teardown, 1807 adapter_start), 1808 TEST_CASE_ST(timdev_setup_msec, NULL, 1809 adapter_stop), 1810 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1811 stat_inc_reset_ev_enq), 1812 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1813 event_timer_arm), 1814 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1815 event_timer_arm_double), 1816 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1817 event_timer_arm_expiry), 1818 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1819 event_timer_arm_rearm), 1820 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1821 event_timer_arm_max), 1822 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1823 event_timer_arm_invalid_sched_type), 1824 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1825 event_timer_arm_invalid_timeout), 1826 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1827 event_timer_cancel), 1828 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1829 event_timer_cancel_double), 1830 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1831 adapter_tick_resolution), 1832 TEST_CASE(adapter_create_max), 1833 TEST_CASES_END() /**< NULL terminate unit test array */ 1834 } 1835 }; 1836 1837 static int 1838 test_event_timer_adapter_func(void) 1839 { 1840 return unit_test_suite_runner(&event_timer_adptr_functional_testsuite); 1841 } 1842 1843 REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func); 1844