/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 * Copyright(c) 2017-2018 Intel Corporation.
 */

#include "test.h"

#include <math.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_event_timer_adapter_func(void)
{
	printf("event_timer_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>
#include <rte_mempool.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_service.h>
#include <stdbool.h>

/* 4K timers corresponds to sw evdev max inflight events */
#define MAX_TIMERS (4 * 1024)
#define BKT_TCK_NSEC

#define NSECPERSEC 1E9
#define BATCH_SIZE 16
/* Both the app lcore and adapter ports are linked to this queue */
#define TEST_QUEUE_ID 0
/* Port the application dequeues from */
#define TEST_PORT_ID 0
#define TEST_ADAPTER_ID 0

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)

static int evdev;
static struct rte_event_timer_adapter *timdev;
static struct rte_mempool *eventdev_test_mempool;
static struct rte_ring *timer_producer_ring;
static uint64_t global_bkt_tck_ns;
static uint64_t global_info_bkt_tck_ns;
static volatile uint8_t arm_done;

/* Convert a tick count expressed in the test's configured resolution into
 * ticks of the resolution the adapter actually reports.
 */
#define CALC_TICKS(tks)					\
	ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)

static bool using_services;
static uint32_t test_lcore1;
static uint32_t test_lcore2;
static uint32_t test_lcore3;
static uint32_t sw_evdev_slcore;
static uint32_t sw_adptr_slcore;

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = 1;
	dev_conf->nb_event_queues = 1;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
		info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
		info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
		info->max_num_events;
}

static inline int
eventdev_setup(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	uint32_t service_id;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events < 0 ||
			info.max_num_events >= (int32_t)MAX_TIMERS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_TIMERS);

	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	ret = rte_event_queue_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);

	/* Configure event port */
	ret = rte_event_port_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
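	/* Passing NULL for the queue list links the port to all configured
	 * queues; on success the return value is the number of queues linked,
	 * which is why the assertion below only requires a non-negative value.
	 */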
TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0); 117 118 /* If this is a software event device, map and start its service */ 119 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 120 TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore), 121 "Failed to add service core"); 122 TEST_ASSERT_SUCCESS(rte_service_lcore_start( 123 sw_evdev_slcore), 124 "Failed to start service core"); 125 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( 126 service_id, sw_evdev_slcore, 1), 127 "Failed to map evdev service"); 128 TEST_ASSERT_SUCCESS(rte_service_runstate_set( 129 service_id, 1), 130 "Failed to start evdev service"); 131 } 132 133 ret = rte_event_dev_start(evdev); 134 TEST_ASSERT_SUCCESS(ret, "Failed to start device"); 135 136 return TEST_SUCCESS; 137 } 138 139 static int 140 testsuite_setup(void) 141 { 142 /* Some of the multithreaded tests require 3 other lcores to run */ 143 unsigned int required_lcore_count = 4; 144 uint32_t service_id; 145 146 /* To make it easier to map services later if needed, just reset 147 * service core state. 148 */ 149 (void) rte_service_lcore_reset_all(); 150 151 if (!rte_event_dev_count()) { 152 /* If there is no hardware eventdev, or no software vdev was 153 * specified on the command line, create an instance of 154 * event_sw. 155 */ 156 LOG_DBG("Failed to find a valid event device... testing with" 157 " event_sw device\n"); 158 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), 159 "Error creating eventdev"); 160 evdev = rte_event_dev_get_dev_id("event_sw0"); 161 } 162 163 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 164 /* A software event device will use a software event timer 165 * adapter as well. 2 more cores required to convert to 166 * service cores. 167 */ 168 required_lcore_count += 2; 169 using_services = true; 170 } 171 172 if (rte_lcore_count() < required_lcore_count) { 173 printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n", 174 required_lcore_count); 175 return TEST_SKIPPED; 176 } 177 178 /* Assign lcores for various tasks */ 179 test_lcore1 = rte_get_next_lcore(-1, 1, 0); 180 test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0); 181 test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0); 182 if (using_services) { 183 sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0); 184 sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0); 185 } 186 187 return eventdev_setup(); 188 } 189 190 static void 191 testsuite_teardown(void) 192 { 193 rte_event_dev_stop(evdev); 194 rte_event_dev_close(evdev); 195 } 196 197 static int 198 setup_adapter_service(struct rte_event_timer_adapter *adptr) 199 { 200 uint32_t adapter_service_id; 201 int ret; 202 203 /* retrieve service ids */ 204 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr, 205 &adapter_service_id), "Failed to get event timer " 206 "adapter service id"); 207 /* add a service core and start it */ 208 ret = rte_service_lcore_add(sw_adptr_slcore); 209 TEST_ASSERT(ret == 0 || ret == -EALREADY, 210 "Failed to add service core"); 211 ret = rte_service_lcore_start(sw_adptr_slcore); 212 TEST_ASSERT(ret == 0 || ret == -EALREADY, 213 "Failed to start service core"); 214 215 /* map services to it */ 216 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id, 217 sw_adptr_slcore, 1), 218 "Failed to map adapter service"); 219 220 /* set services to running */ 221 TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1), 222 "Failed to start event timer adapter service"); 223 224 return TEST_SUCCESS; 225 } 
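/* Port configuration callback passed to rte_event_timer_adapter_create_ext()
 * when the adapter lacks the INTERNAL_PORT capability. It reconfigures the
 * event device with one additional port for the adapter's event producer and
 * returns that port id, reusing the same port on subsequent calls.
 * Illustrative usage (a sketch; _timdev_setup() below does this for real):
 *
 *	adapter = rte_event_timer_adapter_create_ext(&config,
 *						     test_port_conf_cb, NULL);
 */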
226 227 static int 228 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, 229 void *conf_arg) 230 { 231 struct rte_event_dev_config dev_conf; 232 struct rte_event_dev_info info; 233 struct rte_event_port_conf *port_conf, def_port_conf = {0}; 234 uint32_t started; 235 static int port_allocated; 236 static uint8_t port_id; 237 int ret; 238 239 if (port_allocated) { 240 *event_port_id = port_id; 241 return 0; 242 } 243 244 RTE_SET_USED(id); 245 246 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED, 247 &started); 248 if (ret < 0) 249 return ret; 250 251 if (started) 252 rte_event_dev_stop(event_dev_id); 253 254 ret = rte_event_dev_info_get(evdev, &info); 255 if (ret < 0) 256 return ret; 257 258 devconf_set_default_sane_values(&dev_conf, &info); 259 260 port_id = dev_conf.nb_event_ports; 261 dev_conf.nb_event_ports++; 262 263 ret = rte_event_dev_configure(event_dev_id, &dev_conf); 264 if (ret < 0) { 265 if (started) 266 rte_event_dev_start(event_dev_id); 267 return ret; 268 } 269 270 if (conf_arg != NULL) 271 port_conf = conf_arg; 272 else { 273 port_conf = &def_port_conf; 274 ret = rte_event_port_default_conf_get(event_dev_id, port_id, 275 port_conf); 276 if (ret < 0) 277 return ret; 278 } 279 280 ret = rte_event_port_setup(event_dev_id, port_id, port_conf); 281 if (ret < 0) 282 return ret; 283 284 *event_port_id = port_id; 285 286 if (started) 287 rte_event_dev_start(event_dev_id); 288 289 /* Reuse this port number next time this is called */ 290 port_allocated = 1; 291 292 return 0; 293 } 294 295 static int 296 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags) 297 { 298 struct rte_event_timer_adapter_info info; 299 struct rte_event_timer_adapter_conf config = { 300 .event_dev_id = evdev, 301 .timer_adapter_id = TEST_ADAPTER_ID, 302 .timer_tick_ns = bkt_tck_ns, 303 .max_tmo_ns = max_tmo_ns, 304 .nb_timers = MAX_TIMERS * 10, 305 .flags = flags, 306 }; 307 uint32_t caps = 0; 308 const char *pool_name = "timdev_test_pool"; 309 310 global_bkt_tck_ns = bkt_tck_ns; 311 312 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 313 "failed to get adapter capabilities"); 314 315 if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC && 316 !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC)) 317 return -ENOTSUP; 318 319 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { 320 timdev = rte_event_timer_adapter_create_ext(&config, 321 test_port_conf_cb, 322 NULL); 323 setup_adapter_service(timdev); 324 using_services = true; 325 } else 326 timdev = rte_event_timer_adapter_create(&config); 327 328 TEST_ASSERT_NOT_NULL(timdev, 329 "failed to create event timer ring"); 330 331 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0, 332 "failed to Start event timer adapter"); 333 334 /* Create event timer mempool */ 335 eventdev_test_mempool = rte_mempool_create(pool_name, 336 MAX_TIMERS * 2, 337 sizeof(struct rte_event_timer), /* element size*/ 338 0, /* cache size*/ 339 0, NULL, NULL, NULL, NULL, 340 rte_socket_id(), 0); 341 if (!eventdev_test_mempool) { 342 printf("ERROR creating mempool\n"); 343 return TEST_FAILED; 344 } 345 346 rte_event_timer_adapter_get_info(timdev, &info); 347 348 global_info_bkt_tck_ns = info.min_resolution_ns; 349 350 return TEST_SUCCESS; 351 } 352 353 static int 354 timdev_setup_usec(void) 355 { 356 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 357 358 return using_services ? 
359 /* Max timeout is 10,000us and bucket interval is 100us */ 360 _timdev_setup(1E7, 1E5, flags) : 361 /* Max timeout is 100us and bucket interval is 1us */ 362 _timdev_setup(1E5, 1E3, flags); 363 } 364 365 static int 366 timdev_setup_usec_multicore(void) 367 { 368 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 369 370 return using_services ? 371 /* Max timeout is 10,000us and bucket interval is 100us */ 372 _timdev_setup(1E7, 1E5, flags) : 373 /* Max timeout is 100us and bucket interval is 1us */ 374 _timdev_setup(1E5, 1E3, flags); 375 } 376 377 static int 378 timdev_setup_msec(void) 379 { 380 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 381 382 /* Max timeout is 3 mins, and bucket interval is 100 ms */ 383 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags); 384 } 385 386 static int 387 timdev_setup_msec_periodic(void) 388 { 389 uint32_t caps = 0; 390 uint64_t max_tmo_ns; 391 392 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES | 393 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC; 394 395 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 396 "failed to get adapter capabilities"); 397 398 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT) 399 max_tmo_ns = 0; 400 else 401 max_tmo_ns = 180 * NSECPERSEC; 402 403 /* Periodic mode with 100 ms resolution */ 404 return _timdev_setup(max_tmo_ns, NSECPERSEC / 10, flags); 405 } 406 407 static int 408 timdev_setup_sec(void) 409 { 410 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 411 412 /* Max timeout is 100sec and bucket interval is 1sec */ 413 return _timdev_setup(1E11, 1E9, flags); 414 } 415 416 static int 417 timdev_setup_sec_periodic(void) 418 { 419 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES | 420 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC; 421 422 /* Periodic mode with 1 sec resolution */ 423 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC, flags); 424 } 425 426 static int 427 timdev_setup_sec_multicore(void) 428 { 429 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 430 431 /* Max timeout is 100sec and bucket interval is 1sec */ 432 return _timdev_setup(1E11, 1E9, flags); 433 } 434 435 static void 436 timdev_teardown(void) 437 { 438 rte_event_timer_adapter_stop(timdev); 439 rte_event_timer_adapter_free(timdev); 440 441 rte_mempool_free(eventdev_test_mempool); 442 } 443 444 static inline int 445 test_timer_state(void) 446 { 447 struct rte_event_timer *ev_tim; 448 struct rte_event ev; 449 const struct rte_event_timer tim = { 450 .ev.op = RTE_EVENT_OP_NEW, 451 .ev.queue_id = 0, 452 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 453 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 454 .ev.event_type = RTE_EVENT_TYPE_TIMER, 455 .state = RTE_EVENT_TIMER_NOT_ARMED, 456 }; 457 458 459 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim); 460 *ev_tim = tim; 461 ev_tim->ev.event_ptr = ev_tim; 462 ev_tim->timeout_ticks = CALC_TICKS(120); 463 464 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0, 465 "Armed timer exceeding max_timeout."); 466 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 467 "Improper timer state set expected %d returned %d", 468 RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state); 469 470 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 471 ev_tim->timeout_ticks = CALC_TICKS(10); 472 473 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 474 "Failed to arm timer with proper timeout."); 475 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, 476 "Improper timer state set expected %d returned %d", 477 RTE_EVENT_TIMER_ARMED, 
ev_tim->state); 478 479 if (!using_services) 480 rte_delay_us(20); 481 else 482 rte_delay_us(1000 + 200); 483 TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, 484 "Armed timer failed to trigger."); 485 486 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 487 ev_tim->timeout_ticks = CALC_TICKS(90); 488 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 489 "Failed to arm timer with proper timeout."); 490 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), 491 1, "Failed to cancel armed timer"); 492 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED, 493 "Improper timer state set expected %d returned %d", 494 RTE_EVENT_TIMER_CANCELED, ev_tim->state); 495 496 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 497 498 return TEST_SUCCESS; 499 } 500 501 static inline int 502 _arm_timers(uint64_t timeout_tcks, uint64_t timers) 503 { 504 uint64_t i; 505 struct rte_event_timer *ev_tim; 506 const struct rte_event_timer tim = { 507 .ev.op = RTE_EVENT_OP_NEW, 508 .ev.queue_id = 0, 509 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 510 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 511 .ev.event_type = RTE_EVENT_TYPE_TIMER, 512 .state = RTE_EVENT_TIMER_NOT_ARMED, 513 .timeout_ticks = CALC_TICKS(timeout_tcks), 514 }; 515 516 for (i = 0; i < timers; i++) { 517 518 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 519 (void **)&ev_tim), 520 "mempool alloc failed"); 521 *ev_tim = tim; 522 ev_tim->ev.event_ptr = ev_tim; 523 524 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 525 1), 1, "Failed to arm timer %d", 526 rte_errno); 527 } 528 529 return TEST_SUCCESS; 530 } 531 532 static inline int 533 _wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count, 534 uint64_t cancel_count) 535 { 536 uint8_t valid_event; 537 uint64_t events = 0; 538 uint64_t wait_start, max_wait; 539 struct rte_event ev; 540 541 max_wait = rte_get_timer_hz() * wait_sec; 542 wait_start = rte_get_timer_cycles(); 543 while (1) { 544 if (rte_get_timer_cycles() - wait_start > max_wait) { 545 if (events + cancel_count != arm_count) 546 TEST_ASSERT_SUCCESS(max_wait, 547 "Max time limit for timers exceeded."); 548 break; 549 } 550 551 valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); 552 if (!valid_event) 553 continue; 554 555 rte_mempool_put(eventdev_test_mempool, ev.event_ptr); 556 events++; 557 } 558 559 return TEST_SUCCESS; 560 } 561 562 static inline int 563 test_timer_arm(void) 564 { 565 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 566 "Failed to arm timers"); 567 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 568 "Timer triggered count doesn't match arm count"); 569 return TEST_SUCCESS; 570 } 571 572 static inline int 573 test_timer_arm_periodic(void) 574 { 575 uint32_t caps = 0; 576 uint32_t timeout_count = 0; 577 578 TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS), 579 "Failed to arm timers"); 580 /* With a resolution of 100ms and wait time of 1sec, 581 * there will be 10 * MAX_TIMERS periodic timer triggers. 
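 * A service-core based adapter may not deliver the final expiry within the
 * one-second dequeue window, so one fewer trigger per timer is accepted in
 * that case (see the capability check below).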
582 */ 583 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 584 "failed to get adapter capabilities"); 585 586 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT) 587 timeout_count = 10; 588 else 589 timeout_count = 9; 590 591 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, timeout_count * MAX_TIMERS, 0), 592 "Timer triggered count doesn't match arm count"); 593 return TEST_SUCCESS; 594 } 595 596 static int 597 _arm_wrapper(void *arg) 598 { 599 RTE_SET_USED(arg); 600 601 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 602 "Failed to arm timers"); 603 604 return TEST_SUCCESS; 605 } 606 607 static inline int 608 test_timer_arm_multicore(void) 609 { 610 611 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0); 612 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0); 613 614 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1); 615 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2); 616 617 rte_eal_mp_wait_lcore(); 618 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 619 "Timer triggered count doesn't match arm count"); 620 621 return TEST_SUCCESS; 622 } 623 624 #define MAX_BURST 16 625 static inline int 626 _arm_timers_burst(uint64_t timeout_tcks, uint64_t timers) 627 { 628 uint64_t i; 629 int j; 630 struct rte_event_timer *ev_tim[MAX_BURST]; 631 const struct rte_event_timer tim = { 632 .ev.op = RTE_EVENT_OP_NEW, 633 .ev.queue_id = 0, 634 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 635 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 636 .ev.event_type = RTE_EVENT_TYPE_TIMER, 637 .state = RTE_EVENT_TIMER_NOT_ARMED, 638 .timeout_ticks = CALC_TICKS(timeout_tcks), 639 }; 640 641 for (i = 0; i < timers / MAX_BURST; i++) { 642 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 643 eventdev_test_mempool, 644 (void **)ev_tim, MAX_BURST), 645 "mempool alloc failed"); 646 647 for (j = 0; j < MAX_BURST; j++) { 648 *ev_tim[j] = tim; 649 ev_tim[j]->ev.event_ptr = ev_tim[j]; 650 } 651 652 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 653 ev_tim, tim.timeout_ticks, MAX_BURST), 654 MAX_BURST, "Failed to arm timer %d", rte_errno); 655 } 656 657 return TEST_SUCCESS; 658 } 659 660 static inline int 661 test_timer_arm_burst(void) 662 { 663 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 664 "Failed to arm timers"); 665 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 666 "Timer triggered count doesn't match arm count"); 667 668 return TEST_SUCCESS; 669 } 670 671 static inline int 672 test_timer_arm_burst_periodic(void) 673 { 674 uint32_t caps = 0; 675 uint32_t timeout_count = 0; 676 677 TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS), 678 "Failed to arm timers"); 679 /* With a resolution of 100ms and wait time of 1sec, 680 * there will be 10 * MAX_TIMERS periodic timer triggers. 
681 */ 682 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 683 "failed to get adapter capabilities"); 684 685 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT) 686 timeout_count = 10; 687 else 688 timeout_count = 9; 689 690 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, timeout_count * MAX_TIMERS, 0), 691 "Timer triggered count doesn't match arm count"); 692 693 return TEST_SUCCESS; 694 } 695 696 static int 697 _arm_wrapper_burst(void *arg) 698 { 699 RTE_SET_USED(arg); 700 701 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 702 "Failed to arm timers"); 703 704 return TEST_SUCCESS; 705 } 706 707 static inline int 708 test_timer_arm_burst_multicore(void) 709 { 710 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1); 711 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2); 712 713 rte_eal_mp_wait_lcore(); 714 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 715 "Timer triggered count doesn't match arm count"); 716 717 return TEST_SUCCESS; 718 } 719 720 static inline int 721 test_timer_cancel_periodic(void) 722 { 723 uint64_t i; 724 struct rte_event_timer *ev_tim; 725 const struct rte_event_timer tim = { 726 .ev.op = RTE_EVENT_OP_NEW, 727 .ev.queue_id = 0, 728 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 729 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 730 .ev.event_type = RTE_EVENT_TYPE_TIMER, 731 .state = RTE_EVENT_TIMER_NOT_ARMED, 732 .timeout_ticks = CALC_TICKS(1), 733 }; 734 735 for (i = 0; i < MAX_TIMERS; i++) { 736 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 737 (void **)&ev_tim), 738 "mempool alloc failed"); 739 *ev_tim = tim; 740 ev_tim->ev.event_ptr = ev_tim; 741 742 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 743 1), 1, "Failed to arm timer %d", 744 rte_errno); 745 746 rte_delay_us(100 + (i % 5000)); 747 748 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, 749 &ev_tim, 1), 1, 750 "Failed to cancel event timer %d", rte_errno); 751 rte_mempool_put(eventdev_test_mempool, ev_tim); 752 } 753 754 755 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 756 MAX_TIMERS), 757 "Timer triggered count doesn't match arm, cancel count"); 758 759 return TEST_SUCCESS; 760 } 761 762 static inline int 763 test_timer_cancel(void) 764 { 765 uint64_t i; 766 struct rte_event_timer *ev_tim; 767 const struct rte_event_timer tim = { 768 .ev.op = RTE_EVENT_OP_NEW, 769 .ev.queue_id = 0, 770 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 771 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 772 .ev.event_type = RTE_EVENT_TYPE_TIMER, 773 .state = RTE_EVENT_TIMER_NOT_ARMED, 774 .timeout_ticks = CALC_TICKS(20), 775 }; 776 777 for (i = 0; i < MAX_TIMERS; i++) { 778 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 779 (void **)&ev_tim), 780 "mempool alloc failed"); 781 *ev_tim = tim; 782 ev_tim->ev.event_ptr = ev_tim; 783 784 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 785 1), 1, "Failed to arm timer %d", 786 rte_errno); 787 788 rte_delay_us(100 + (i % 5000)); 789 790 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, 791 &ev_tim, 1), 1, 792 "Failed to cancel event timer %d", rte_errno); 793 rte_mempool_put(eventdev_test_mempool, ev_tim); 794 } 795 796 797 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 798 MAX_TIMERS), 799 "Timer triggered count doesn't match arm, cancel count"); 800 801 return TEST_SUCCESS; 802 } 803 804 static int 805 _cancel_producer(uint64_t timeout_tcks, uint64_t timers) 806 { 807 uint64_t i; 808 struct rte_event_timer *ev_tim; 809 const struct rte_event_timer tim = 
{ 810 .ev.op = RTE_EVENT_OP_NEW, 811 .ev.queue_id = 0, 812 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 813 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 814 .ev.event_type = RTE_EVENT_TYPE_TIMER, 815 .state = RTE_EVENT_TIMER_NOT_ARMED, 816 .timeout_ticks = CALC_TICKS(timeout_tcks), 817 }; 818 819 for (i = 0; i < timers; i++) { 820 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 821 (void **)&ev_tim), 822 "mempool alloc failed"); 823 824 *ev_tim = tim; 825 ev_tim->ev.event_ptr = ev_tim; 826 827 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 828 1), 1, "Failed to arm timer %d", 829 rte_errno); 830 831 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, 832 "Failed to arm event timer"); 833 834 while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0) 835 ; 836 } 837 838 return TEST_SUCCESS; 839 } 840 841 static int 842 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers) 843 { 844 845 uint64_t i; 846 int j, ret; 847 struct rte_event_timer *ev_tim[MAX_BURST]; 848 const struct rte_event_timer tim = { 849 .ev.op = RTE_EVENT_OP_NEW, 850 .ev.queue_id = 0, 851 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 852 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 853 .ev.event_type = RTE_EVENT_TYPE_TIMER, 854 .state = RTE_EVENT_TIMER_NOT_ARMED, 855 .timeout_ticks = CALC_TICKS(timeout_tcks), 856 }; 857 int arm_count = 0; 858 859 for (i = 0; i < timers / MAX_BURST; i++) { 860 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 861 eventdev_test_mempool, 862 (void **)ev_tim, MAX_BURST), 863 "mempool alloc failed"); 864 865 for (j = 0; j < MAX_BURST; j++) { 866 *ev_tim[j] = tim; 867 ev_tim[j]->ev.event_ptr = ev_tim[j]; 868 } 869 870 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 871 ev_tim, tim.timeout_ticks, MAX_BURST), 872 MAX_BURST, "Failed to arm timer %d", rte_errno); 873 874 for (j = 0; j < MAX_BURST; j++) 875 TEST_ASSERT_EQUAL(ev_tim[j]->state, 876 RTE_EVENT_TIMER_ARMED, 877 "Event timer not armed, state = %d", 878 ev_tim[j]->state); 879 880 ret = rte_ring_enqueue_bulk(timer_producer_ring, 881 (void **)ev_tim, MAX_BURST, NULL); 882 TEST_ASSERT_EQUAL(ret, MAX_BURST, 883 "Failed to enqueue event timers to ring"); 884 arm_count += ret; 885 } 886 887 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS, 888 "Failed to arm expected number of event timers"); 889 890 return TEST_SUCCESS; 891 } 892 893 static int 894 _cancel_producer_wrapper(void *args) 895 { 896 RTE_SET_USED(args); 897 898 return _cancel_producer(20, MAX_TIMERS); 899 } 900 901 static int 902 _cancel_producer_burst_wrapper(void *args) 903 { 904 RTE_SET_USED(args); 905 906 return _cancel_producer_burst(100, MAX_TIMERS); 907 } 908 909 static int 910 _cancel_thread(void *args) 911 { 912 RTE_SET_USED(args); 913 struct rte_event_timer *ev_tim = NULL; 914 uint16_t ret; 915 916 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 917 if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim)) 918 continue; 919 920 ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); 921 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); 922 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 923 } 924 925 return TEST_SUCCESS; 926 } 927 928 static int 929 _cancel_burst_thread(void *args) 930 { 931 RTE_SET_USED(args); 932 933 int ret, i, n; 934 struct rte_event_timer *ev_tim[MAX_BURST]; 935 uint64_t cancel_count = 0; 936 uint64_t dequeue_count = 0; 937 938 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 939 n = rte_ring_dequeue_burst(timer_producer_ring, 940 (void **)ev_tim, MAX_BURST, NULL); 941 if (!n) 942 
continue; 943 944 dequeue_count += n; 945 946 for (i = 0; i < n; i++) 947 TEST_ASSERT_EQUAL(ev_tim[i]->state, 948 RTE_EVENT_TIMER_ARMED, 949 "Event timer not armed, state = %d", 950 ev_tim[i]->state); 951 952 ret = rte_event_timer_cancel_burst(timdev, ev_tim, n); 953 TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of " 954 "event timers"); 955 rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim, 956 RTE_MIN(ret, MAX_BURST)); 957 958 cancel_count += ret; 959 } 960 961 TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS, 962 "Failed to cancel expected number of timers: " 963 "expected = %d, cancel_count = %"PRIu64", " 964 "dequeue_count = %"PRIu64"\n", MAX_TIMERS, 965 cancel_count, dequeue_count); 966 967 return TEST_SUCCESS; 968 } 969 970 static inline int 971 test_timer_cancel_multicore(void) 972 { 973 arm_done = 0; 974 timer_producer_ring = rte_ring_create("timer_cancel_queue", 975 MAX_TIMERS * 2, rte_socket_id(), 0); 976 TEST_ASSERT_NOT_NULL(timer_producer_ring, 977 "Unable to reserve memory for ring"); 978 979 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3); 980 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1); 981 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2); 982 983 rte_eal_wait_lcore(test_lcore1); 984 rte_eal_wait_lcore(test_lcore2); 985 arm_done = 1; 986 rte_eal_wait_lcore(test_lcore3); 987 rte_ring_free(timer_producer_ring); 988 989 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2, 990 MAX_TIMERS * 2), 991 "Timer triggered count doesn't match arm count"); 992 993 return TEST_SUCCESS; 994 } 995 996 static inline int 997 test_timer_cancel_burst_multicore(void) 998 { 999 arm_done = 0; 1000 timer_producer_ring = rte_ring_create("timer_cancel_queue", 1001 MAX_TIMERS * 2, rte_socket_id(), 0); 1002 TEST_ASSERT_NOT_NULL(timer_producer_ring, 1003 "Unable to reserve memory for ring"); 1004 1005 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2); 1006 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL, 1007 test_lcore1); 1008 1009 rte_eal_wait_lcore(test_lcore1); 1010 arm_done = 1; 1011 rte_eal_wait_lcore(test_lcore2); 1012 rte_ring_free(timer_producer_ring); 1013 1014 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 1015 MAX_TIMERS), 1016 "Timer triggered count doesn't match arm count"); 1017 1018 return TEST_SUCCESS; 1019 } 1020 1021 static inline int 1022 test_timer_cancel_random(void) 1023 { 1024 uint64_t i; 1025 uint64_t events_canceled = 0; 1026 struct rte_event_timer *ev_tim; 1027 const struct rte_event_timer tim = { 1028 .ev.op = RTE_EVENT_OP_NEW, 1029 .ev.queue_id = 0, 1030 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1031 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1032 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1033 .state = RTE_EVENT_TIMER_NOT_ARMED, 1034 .timeout_ticks = CALC_TICKS(20), 1035 }; 1036 1037 for (i = 0; i < MAX_TIMERS; i++) { 1038 1039 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 1040 (void **)&ev_tim), 1041 "mempool alloc failed"); 1042 *ev_tim = tim; 1043 ev_tim->ev.event_ptr = ev_tim; 1044 1045 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1046 1), 1, "Failed to arm timer %d", 1047 rte_errno); 1048 1049 if (rte_rand() & 1) { 1050 rte_delay_us(100 + (i % 5000)); 1051 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst( 1052 timdev, 1053 &ev_tim, 1), 1, 1054 "Failed to cancel event timer %d", rte_errno); 1055 rte_mempool_put(eventdev_test_mempool, ev_tim); 1056 events_canceled++; 1057 } 1058 } 1059 1060 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, 
MAX_TIMERS, 1061 events_canceled), 1062 "Timer triggered count doesn't match arm, cancel count"); 1063 1064 return TEST_SUCCESS; 1065 } 1066 1067 /* Check that the adapter can be created correctly */ 1068 static int 1069 adapter_create(void) 1070 { 1071 int adapter_id = 0; 1072 struct rte_event_timer_adapter *adapter, *adapter2; 1073 1074 struct rte_event_timer_adapter_conf conf = { 1075 .event_dev_id = evdev + 1, // invalid event dev id 1076 .timer_adapter_id = adapter_id, 1077 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 1078 .timer_tick_ns = NSECPERSEC / 10, 1079 .max_tmo_ns = 180 * NSECPERSEC, 1080 .nb_timers = MAX_TIMERS, 1081 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 1082 }; 1083 uint32_t caps = 0; 1084 1085 /* Test invalid conf */ 1086 adapter = rte_event_timer_adapter_create(&conf); 1087 TEST_ASSERT_NULL(adapter, "Created adapter with invalid " 1088 "event device id"); 1089 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for " 1090 "invalid event device id"); 1091 1092 /* Test valid conf */ 1093 conf.event_dev_id = evdev; 1094 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 1095 "failed to get adapter capabilities"); 1096 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) 1097 adapter = rte_event_timer_adapter_create_ext(&conf, 1098 test_port_conf_cb, 1099 NULL); 1100 else 1101 adapter = rte_event_timer_adapter_create(&conf); 1102 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid " 1103 "configuration"); 1104 1105 /* Test existing id */ 1106 adapter2 = rte_event_timer_adapter_create(&conf); 1107 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id"); 1108 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing " 1109 "id"); 1110 1111 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter), 1112 "Failed to free adapter"); 1113 1114 return TEST_SUCCESS; 1115 } 1116 1117 1118 /* Test that adapter can be freed correctly. */ 1119 static int 1120 adapter_free(void) 1121 { 1122 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 1123 "Failed to stop adapter"); 1124 1125 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 1126 "Failed to free valid adapter"); 1127 1128 /* Test free of already freed adapter */ 1129 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), 1130 "Freed adapter that was already freed"); 1131 1132 /* Test free of null adapter */ 1133 timdev = NULL; 1134 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), 1135 "Freed null adapter"); 1136 1137 rte_mempool_free(eventdev_test_mempool); 1138 1139 return TEST_SUCCESS; 1140 } 1141 1142 /* Test that adapter info can be retrieved and is correct. */ 1143 static int 1144 adapter_get_info(void) 1145 { 1146 struct rte_event_timer_adapter_info info; 1147 1148 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info), 1149 "Failed to get adapter info"); 1150 1151 if (using_services) 1152 TEST_ASSERT_EQUAL(info.event_dev_port_id, 1, 1153 "Expected port id = 1, got port id = %d", 1154 info.event_dev_port_id); 1155 1156 return TEST_SUCCESS; 1157 } 1158 1159 /* Test adapter lookup via adapter ID. 
*/ 1160 static int 1161 adapter_lookup(void) 1162 { 1163 struct rte_event_timer_adapter *adapter; 1164 1165 adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID); 1166 TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter"); 1167 1168 return TEST_SUCCESS; 1169 } 1170 1171 static int 1172 adapter_start(void) 1173 { 1174 TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, 1175 RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES), 1176 "Failed to start adapter"); 1177 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY, 1178 "Timer adapter started without call to stop."); 1179 1180 return TEST_SUCCESS; 1181 } 1182 1183 /* Test that adapter stops correctly. */ 1184 static int 1185 adapter_stop(void) 1186 { 1187 struct rte_event_timer_adapter *l_adapter = NULL; 1188 1189 /* Test adapter stop */ 1190 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 1191 "Failed to stop event adapter"); 1192 1193 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter), 1194 "Erroneously stopped null event adapter"); 1195 1196 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 1197 "Failed to free adapter"); 1198 1199 rte_mempool_free(eventdev_test_mempool); 1200 1201 return TEST_SUCCESS; 1202 } 1203 1204 /* Test increment and reset of ev_enq_count stat */ 1205 static int 1206 stat_inc_reset_ev_enq(void) 1207 { 1208 int ret, i, n; 1209 int num_evtims = MAX_TIMERS; 1210 struct rte_event_timer *evtims[num_evtims]; 1211 struct rte_event evs[BATCH_SIZE]; 1212 struct rte_event_timer_adapter_stats stats; 1213 const struct rte_event_timer init_tim = { 1214 .ev.op = RTE_EVENT_OP_NEW, 1215 .ev.queue_id = TEST_QUEUE_ID, 1216 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1217 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1218 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1219 .state = RTE_EVENT_TIMER_NOT_ARMED, 1220 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1221 }; 1222 1223 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1224 num_evtims); 1225 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1226 ret); 1227 1228 for (i = 0; i < num_evtims; i++) { 1229 *evtims[i] = init_tim; 1230 evtims[i]->ev.event_ptr = evtims[i]; 1231 } 1232 1233 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1234 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1235 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at " 1236 "startup"); 1237 1238 /* Test with the max value for the adapter */ 1239 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1240 TEST_ASSERT_EQUAL(ret, num_evtims, 1241 "Failed to arm all event timers: attempted = %d, " 1242 "succeeded = %d, rte_errno = %s", 1243 num_evtims, ret, rte_strerror(rte_errno)); 1244 1245 rte_delay_ms(1000); 1246 1247 #define MAX_TRIES num_evtims 1248 int sum = 0; 1249 int tries = 0; 1250 bool done = false; 1251 while (!done) { 1252 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1253 RTE_DIM(evs), 10); 1254 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1255 done = true; 1256 1257 rte_delay_ms(10); 1258 } 1259 1260 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1261 "got %d", num_evtims, sum); 1262 1263 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1264 1265 rte_delay_ms(100); 1266 1267 /* Make sure the eventdev is still empty */ 1268 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1269 10); 1270 1271 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1272 "events from event device"); 1273 1274 /* Check stats again 
*/ 1275 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1276 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1277 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims, 1278 "Expected enqueue stat = %d; got %d", num_evtims, 1279 (int)stats.ev_enq_count); 1280 1281 /* Reset and check again */ 1282 ret = rte_event_timer_adapter_stats_reset(timdev); 1283 TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats"); 1284 1285 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1286 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1287 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, 1288 "Expected enqueue stat = %d; got %d", 0, 1289 (int)stats.ev_enq_count); 1290 1291 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1292 num_evtims); 1293 1294 return TEST_SUCCESS; 1295 } 1296 1297 /* Test various cases in arming timers */ 1298 static int 1299 event_timer_arm(void) 1300 { 1301 uint16_t n; 1302 int ret; 1303 struct rte_event_timer_adapter *adapter = timdev; 1304 struct rte_event_timer *evtim = NULL; 1305 struct rte_event evs[BATCH_SIZE]; 1306 const struct rte_event_timer init_tim = { 1307 .ev.op = RTE_EVENT_OP_NEW, 1308 .ev.queue_id = TEST_QUEUE_ID, 1309 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1310 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1311 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1312 .state = RTE_EVENT_TIMER_NOT_ARMED, 1313 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1314 }; 1315 1316 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1317 if (evtim == NULL) { 1318 /* Failed to get an event timer object */ 1319 return TEST_FAILED; 1320 } 1321 1322 /* Set up a timer */ 1323 *evtim = init_tim; 1324 evtim->ev.event_ptr = evtim; 1325 1326 /* Test single timer arm succeeds */ 1327 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1328 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1329 rte_strerror(rte_errno)); 1330 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer " 1331 "in incorrect state"); 1332 1333 /* Test arm of armed timer fails */ 1334 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1335 TEST_ASSERT_EQUAL(ret, 0, "expected return value from " 1336 "rte_event_timer_arm_burst: 0, got: %d", ret); 1337 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1338 "after arming already armed timer"); 1339 1340 /* Let timer expire */ 1341 rte_delay_ms(1000); 1342 1343 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1344 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1345 "events from event device"); 1346 1347 rte_mempool_put(eventdev_test_mempool, evtim); 1348 1349 return TEST_SUCCESS; 1350 } 1351 1352 /* This test checks that repeated references to the same event timer in the 1353 * arm request work as expected; only the first one through should succeed. 
1354 */ 1355 static int 1356 event_timer_arm_double(void) 1357 { 1358 uint16_t n; 1359 int ret; 1360 struct rte_event_timer_adapter *adapter = timdev; 1361 struct rte_event_timer *evtim = NULL; 1362 struct rte_event evs[BATCH_SIZE]; 1363 const struct rte_event_timer init_tim = { 1364 .ev.op = RTE_EVENT_OP_NEW, 1365 .ev.queue_id = TEST_QUEUE_ID, 1366 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1367 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1368 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1369 .state = RTE_EVENT_TIMER_NOT_ARMED, 1370 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1371 }; 1372 1373 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1374 if (evtim == NULL) { 1375 /* Failed to get an event timer object */ 1376 return TEST_FAILED; 1377 } 1378 1379 /* Set up a timer */ 1380 *evtim = init_tim; 1381 evtim->ev.event_ptr = evtim; 1382 1383 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1384 ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr)); 1385 TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from " 1386 "rte_event_timer_arm_burst"); 1387 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1388 "after double-arm"); 1389 1390 /* Let timer expire */ 1391 rte_delay_ms(600); 1392 1393 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1394 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " 1395 "expected: 1, actual: %d", n); 1396 1397 rte_mempool_put(eventdev_test_mempool, evtim); 1398 1399 return TEST_SUCCESS; 1400 } 1401 1402 /* Test the timer expiry event is generated at the expected time. */ 1403 static int 1404 event_timer_arm_expiry(void) 1405 { 1406 uint16_t n; 1407 int ret; 1408 struct rte_event_timer_adapter *adapter = timdev; 1409 struct rte_event_timer *evtim = NULL; 1410 struct rte_event_timer *evtim2 = NULL; 1411 struct rte_event evs[BATCH_SIZE]; 1412 const struct rte_event_timer init_tim = { 1413 .ev.op = RTE_EVENT_OP_NEW, 1414 .ev.queue_id = TEST_QUEUE_ID, 1415 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1416 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1417 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1418 .state = RTE_EVENT_TIMER_NOT_ARMED, 1419 }; 1420 1421 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1422 if (evtim == NULL) { 1423 /* Failed to get an event timer object */ 1424 return TEST_FAILED; 1425 } 1426 1427 /* Set up an event timer */ 1428 *evtim = init_tim; 1429 evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs 1430 evtim->ev.event_ptr = evtim; 1431 1432 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1433 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s", 1434 rte_strerror(rte_errno)); 1435 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " 1436 "timer in incorrect state"); 1437 1438 rte_delay_ms(2999); 1439 1440 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1441 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); 1442 1443 /* Delay 100 ms to account for the adapter tick window - should let us 1444 * dequeue one event 1445 */ 1446 rte_delay_ms(100); 1447 1448 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1449 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " 1450 "expiry events", n); 1451 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, 1452 "Dequeued unexpected type of event"); 1453 1454 /* Check that we recover the original event timer and then free it */ 1455 evtim2 = evs[0].event_ptr; 1456 TEST_ASSERT_EQUAL(evtim, evtim2, 1457 
"Failed to recover pointer to original event timer"); 1458 rte_mempool_put(eventdev_test_mempool, evtim2); 1459 1460 return TEST_SUCCESS; 1461 } 1462 1463 /* Check that rearming a timer works as expected. */ 1464 static int 1465 event_timer_arm_rearm(void) 1466 { 1467 uint16_t n; 1468 int ret; 1469 struct rte_event_timer *evtim = NULL; 1470 struct rte_event_timer *evtim2 = NULL; 1471 struct rte_event evs[BATCH_SIZE]; 1472 const struct rte_event_timer init_tim = { 1473 .ev.op = RTE_EVENT_OP_NEW, 1474 .ev.queue_id = TEST_QUEUE_ID, 1475 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1476 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1477 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1478 .state = RTE_EVENT_TIMER_NOT_ARMED, 1479 }; 1480 1481 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1482 if (evtim == NULL) { 1483 /* Failed to get an event timer object */ 1484 return TEST_FAILED; 1485 } 1486 1487 /* Set up a timer */ 1488 *evtim = init_tim; 1489 evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec 1490 evtim->ev.event_ptr = evtim; 1491 1492 /* Arm it */ 1493 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1494 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1495 rte_strerror(rte_errno)); 1496 1497 /* Add 100ms to account for the adapter tick window */ 1498 rte_delay_ms(100 + 100); 1499 1500 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1501 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1502 "events from event device"); 1503 1504 /* Recover the timer through the event that was dequeued. */ 1505 evtim2 = evs[0].event_ptr; 1506 TEST_ASSERT_EQUAL(evtim, evtim2, 1507 "Failed to recover pointer to original event timer"); 1508 1509 /* Need to reset state in case implementation can't do it */ 1510 evtim2->state = RTE_EVENT_TIMER_NOT_ARMED; 1511 1512 /* Rearm it */ 1513 ret = rte_event_timer_arm_burst(timdev, &evtim2, 1); 1514 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1515 rte_strerror(rte_errno)); 1516 1517 /* Add 100ms to account for the adapter tick window */ 1518 rte_delay_ms(100 + 100); 1519 1520 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1521 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1522 "events from event device"); 1523 1524 /* Free it */ 1525 evtim2 = evs[0].event_ptr; 1526 TEST_ASSERT_EQUAL(evtim, evtim2, 1527 "Failed to recover pointer to original event timer"); 1528 rte_mempool_put(eventdev_test_mempool, evtim2); 1529 1530 return TEST_SUCCESS; 1531 } 1532 1533 /* Check that the adapter handles the max specified number of timers as 1534 * expected. 
 */
static int
event_timer_arm_max(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	rte_delay_ms(1000);

#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with incorrect event sched type fails. */
static int
event_timer_arm_invalid_sched_type(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	if (!using_services)
		return -ENOTSUP;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"sched type, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid queue");

	/* Return the timer object itself, not the address of the local
	 * pointer variable.
	 */
	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with a timeout value that is too small or
 * too big fails.
1647 */ 1648 static int 1649 event_timer_arm_invalid_timeout(void) 1650 { 1651 int ret; 1652 struct rte_event_timer *evtim = NULL; 1653 const struct rte_event_timer init_tim = { 1654 .ev.op = RTE_EVENT_OP_NEW, 1655 .ev.queue_id = TEST_QUEUE_ID, 1656 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1657 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1658 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1659 .state = RTE_EVENT_TIMER_NOT_ARMED, 1660 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1661 }; 1662 1663 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1664 if (evtim == NULL) { 1665 /* Failed to get an event timer object */ 1666 return TEST_FAILED; 1667 } 1668 1669 *evtim = init_tim; 1670 evtim->ev.event_ptr = evtim; 1671 evtim->timeout_ticks = 0; // timeout too small 1672 1673 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1674 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1675 "timeout, but didn't"); 1676 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1677 " arm fail with invalid timeout"); 1678 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY, 1679 "Unexpected event timer state"); 1680 1681 *evtim = init_tim; 1682 evtim->ev.event_ptr = evtim; 1683 evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big 1684 1685 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1686 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1687 "timeout, but didn't"); 1688 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1689 " arm fail with invalid timeout"); 1690 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 1691 "Unexpected event timer state"); 1692 1693 rte_mempool_put(eventdev_test_mempool, evtim); 1694 1695 return TEST_SUCCESS; 1696 } 1697 1698 static int 1699 event_timer_cancel(void) 1700 { 1701 uint16_t n; 1702 int ret; 1703 struct rte_event_timer_adapter *adapter = timdev; 1704 struct rte_event_timer *evtim = NULL; 1705 struct rte_event evs[BATCH_SIZE]; 1706 const struct rte_event_timer init_tim = { 1707 .ev.op = RTE_EVENT_OP_NEW, 1708 .ev.queue_id = TEST_QUEUE_ID, 1709 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1710 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1711 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1712 .state = RTE_EVENT_TIMER_NOT_ARMED, 1713 }; 1714 1715 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1716 if (evtim == NULL) { 1717 /* Failed to get an event timer object */ 1718 return TEST_FAILED; 1719 } 1720 1721 /* Check that cancelling an uninited timer fails */ 1722 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1723 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1724 "uninited timer"); 1725 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1726 "cancelling uninited timer"); 1727 1728 /* Set up a timer */ 1729 *evtim = init_tim; 1730 evtim->ev.event_ptr = evtim; 1731 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec 1732 1733 /* Check that cancelling an inited but unarmed timer fails */ 1734 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1735 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1736 "unarmed timer"); 1737 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1738 "cancelling unarmed timer"); 1739 1740 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1741 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1742 rte_strerror(rte_errno)); 1743 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1744 "evtim in incorrect state"); 
1745 1746 /* Delay 1 sec */ 1747 rte_delay_ms(1000); 1748 1749 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1750 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n", 1751 rte_strerror(rte_errno)); 1752 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, 1753 "evtim in incorrect state"); 1754 1755 rte_delay_ms(3000); 1756 1757 /* Make sure that no expiry event was generated */ 1758 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1759 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1760 1761 rte_mempool_put(eventdev_test_mempool, evtim); 1762 1763 return TEST_SUCCESS; 1764 } 1765 1766 static int 1767 event_timer_cancel_double(void) 1768 { 1769 uint16_t n; 1770 int ret; 1771 struct rte_event_timer_adapter *adapter = timdev; 1772 struct rte_event_timer *evtim = NULL; 1773 struct rte_event evs[BATCH_SIZE]; 1774 const struct rte_event_timer init_tim = { 1775 .ev.op = RTE_EVENT_OP_NEW, 1776 .ev.queue_id = TEST_QUEUE_ID, 1777 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1778 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1779 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1780 .state = RTE_EVENT_TIMER_NOT_ARMED, 1781 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1782 }; 1783 1784 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1785 if (evtim == NULL) { 1786 /* Failed to get an event timer object */ 1787 return TEST_FAILED; 1788 } 1789 1790 /* Set up a timer */ 1791 *evtim = init_tim; 1792 evtim->ev.event_ptr = evtim; 1793 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec 1794 1795 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1796 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1797 rte_strerror(rte_errno)); 1798 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1799 "timer in unexpected state"); 1800 1801 /* Now, test that referencing the same timer twice in the same call 1802 * fails 1803 */ 1804 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1805 ret = rte_event_timer_cancel_burst(adapter, evtim_arr, 1806 RTE_DIM(evtim_arr)); 1807 1808 /* Two requests to cancel same timer, only one should succeed */ 1809 TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer " 1810 "twice"); 1811 1812 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1813 "after double-cancel: rte_errno = %d", rte_errno); 1814 1815 rte_delay_ms(3000); 1816 1817 /* Still make sure that no expiry event was generated */ 1818 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1819 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1820 1821 rte_mempool_put(eventdev_test_mempool, evtim); 1822 1823 return TEST_SUCCESS; 1824 } 1825 1826 /* Check that event timer adapter tick resolution works as expected by testing 1827 * the number of adapter ticks that occur within a particular time interval. 
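 * The msec setup used for this case programs a 100 ms timer tick, so about
 * ten adapter ticks should elapse per second.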
1828 */ 1829 static int 1830 adapter_tick_resolution(void) 1831 { 1832 struct rte_event_timer_adapter_stats stats; 1833 uint64_t adapter_tick_count; 1834 1835 /* Only run this test in the software driver case */ 1836 if (!using_services) 1837 return -ENOTSUP; 1838 1839 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev), 1840 "Failed to reset stats"); 1841 1842 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1843 &stats), "Failed to get adapter stats"); 1844 TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count " 1845 "not zeroed out"); 1846 1847 /* Delay 1 second; should let at least 10 ticks occur with the default 1848 * adapter configuration used by this test. 1849 */ 1850 rte_delay_ms(1000); 1851 1852 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1853 &stats), "Failed to get adapter stats"); 1854 1855 adapter_tick_count = stats.adapter_tick_count; 1856 TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12, 1857 "Expected 10-12 adapter ticks, got %"PRIu64"\n", 1858 adapter_tick_count); 1859 1860 return TEST_SUCCESS; 1861 } 1862 1863 static int 1864 adapter_create_max(void) 1865 { 1866 int i; 1867 uint32_t svc_start_count, svc_end_count; 1868 struct rte_event_timer_adapter *adapters[ 1869 RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1]; 1870 1871 struct rte_event_timer_adapter_conf conf = { 1872 .event_dev_id = evdev, 1873 // timer_adapter_id set in loop 1874 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 1875 .timer_tick_ns = NSECPERSEC / 10, 1876 .max_tmo_ns = 180 * NSECPERSEC, 1877 .nb_timers = MAX_TIMERS, 1878 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 1879 }; 1880 1881 if (!using_services) 1882 return -ENOTSUP; 1883 1884 svc_start_count = rte_service_get_count(); 1885 1886 /* This test expects that there are sufficient service IDs available 1887 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to 1888 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service 1889 * (the SW event device, for example). 
static int
adapter_create_max(void)
{
	int i;
	uint32_t svc_start_count, svc_end_count;
	struct rte_event_timer_adapter *adapters[
					RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev,
		// timer_adapter_id set in loop
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};

	if (!using_services)
		return -ENOTSUP;

	svc_start_count = rte_service_get_count();

	/* This test expects that there are sufficient service IDs available
	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
	 * (the SW event device, for example).
	 */
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
		conf.timer_adapter_id = i;
		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb, NULL);
		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
				"%d", i);
	}

	conf.timer_adapter_id = i;
	adapters[i] = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");

	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
	 * have been created
	 */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			"Failed to create expected number of services");

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
				"Failed to free adapter %d", i);

	/* Check that service count is back to where it was at start */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
			"correct number of services");

	return TEST_SUCCESS;
}

static inline int
test_timer_ticks_remaining(void)
{
	uint64_t ticks_remaining = UINT64_MAX;
	struct rte_event_timer *ev_tim;
	struct rte_event ev;
	int ret, i;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	/* Failed to get an event timer object */
	if (rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim) != 0)
		return TEST_FAILED;

	*ev_tim = tim;
	ev_tim->ev.event_ptr = ev_tim;
#define TEST_TICKS 5
	ev_tim->timeout_ticks = CALC_TICKS(TEST_TICKS);

	ret = rte_event_timer_remaining_ticks_get(timdev, ev_tim,
						  &ticks_remaining);
	if (ret == -ENOTSUP) {
		rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
		printf("API not supported, skipping test\n");
		return TEST_SKIPPED;
	}

	/* Test that an unarmed timer returns an error */
	TEST_ASSERT_FAIL(ret,
			 "Didn't fail to get ticks for unarmed event timer");

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			  "Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
			  "Improper timer state set expected %d returned %d",
			  RTE_EVENT_TIMER_ARMED, ev_tim->state);

	for (i = 0; i < TEST_TICKS; i++) {
		ret = rte_event_timer_remaining_ticks_get(timdev, ev_tim,
							  &ticks_remaining);
		if (ret < 0) {
			rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
			return TEST_FAILED;
		}

		TEST_ASSERT_EQUAL((int)ticks_remaining, TEST_TICKS - i,
				  "Expected %d ticks remaining, got %"PRIu64"",
				  TEST_TICKS - i, ticks_remaining);

		rte_delay_ms(100);
	}

	rte_delay_ms(100);

	TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
			  "Armed timer failed to trigger.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_NOT_ARMED,
			  "Improper timer state set expected %d returned %d",
			  RTE_EVENT_TIMER_NOT_ARMED, ev_tim->state);

	/* Test that a timer that has already fired returns an error */
	TEST_ASSERT_FAIL(rte_event_timer_remaining_ticks_get(timdev, ev_tim,
							     &ticks_remaining),
			 "Didn't fail to get ticks for expired event timer");

	rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);

#undef TEST_TICKS
	return TEST_SUCCESS;
}

static struct unit_test_suite event_timer_adptr_functional_testsuite = {
	.suite_name = "event timer functional test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_state),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_periodic),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm_burst),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_burst_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel),
		TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown,
				test_timer_cancel_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel_random),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_multicore),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_burst_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_burst_multicore),
		TEST_CASE(adapter_create),
		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_get_info),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_lookup),
		TEST_CASE_ST(NULL, timdev_teardown,
				adapter_start),
		TEST_CASE_ST(timdev_setup_msec, NULL,
				adapter_stop),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				stat_inc_reset_ev_enq),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_expiry),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_rearm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_sched_type),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_timeout),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_tick_resolution),
		TEST_CASE(adapter_create_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				test_timer_ticks_remaining),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_timer_adapter_func(void)
{
	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
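
/* Usage note (illustrative; exact EAL arguments depend on the platform): the
 * suite runs from the dpdk-test application under the command name registered
 * above, e.g. by launching dpdk-test with enough lcores for the multicore and
 * service-core cases and issuing "event_timer_adapter_test" at the prompt,
 * or by setting DPDK_TEST=event_timer_adapter_test in the environment.
 */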