/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 * Copyright(c) 2017-2018 Intel Corporation.
 */

#include <math.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>
#include <rte_mempool.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_service.h>
#include <stdbool.h>

#include "test.h"

/* 4K timers correspond to sw evdev max inflight events */
#define MAX_TIMERS (4 * 1024)
#define BKT_TCK_NSEC

#define NSECPERSEC 1E9
#define BATCH_SIZE 16
/* Both the app lcore and adapter ports are linked to this queue */
#define TEST_QUEUE_ID 0
/* Port the application dequeues from */
#define TEST_PORT_ID 0
#define TEST_ADAPTER_ID 0

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)

static int evdev;
static struct rte_event_timer_adapter *timdev;
static struct rte_mempool *eventdev_test_mempool;
static struct rte_ring *timer_producer_ring;
static uint64_t global_bkt_tck_ns;
static uint64_t global_info_bkt_tck_ns;
static volatile uint8_t arm_done;

#define CALC_TICKS(tks) \
	ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)

static bool using_services;
static uint32_t test_lcore1;
static uint32_t test_lcore2;
static uint32_t test_lcore3;
static uint32_t sw_evdev_slcore;
static uint32_t sw_adptr_slcore;

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = 1;
	dev_conf->nb_event_queues = 1;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

static inline int
eventdev_setup(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	uint32_t service_id;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events < 0 ||
			info.max_num_events >= (int32_t)MAX_TIMERS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_TIMERS);

	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	ret = rte_event_queue_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);

	/* Configure event port */
	ret = rte_event_port_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);

	/* If this is a software event device, map and start its service */
	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
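		/* event_sw schedules via a service function, so map and run
		 * it on a dedicated service core; otherwise no events would
		 * be scheduled while the test lcore polls for them.
		 */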
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(
				sw_evdev_slcore),
				"Failed to start service core");
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
				service_id, sw_evdev_slcore, 1),
				"Failed to map evdev service");
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
				service_id, 1),
				"Failed to start evdev service");
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
	/* Some of the multithreaded tests require 3 other lcores to run */
	unsigned int required_lcore_count = 4;
	uint32_t service_id;

	/* To make it easier to map services later if needed, just reset
	 * service core state.
	 */
	(void) rte_service_lcore_reset_all();

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... testing with"
				" event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				"Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		/* A software event device will use a software event timer
		 * adapter as well. 2 more cores required to convert to
		 * service cores.
		 */
		required_lcore_count += 2;
		using_services = true;
	}

	if (rte_lcore_count() < required_lcore_count) {
		printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
				required_lcore_count);
		return TEST_SKIPPED;
	}

	/* Assign lcores for various tasks */
	test_lcore1 = rte_get_next_lcore(-1, 1, 0);
	test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
	test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
	if (using_services) {
		sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
		sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
	}

	return eventdev_setup();
}

static void
testsuite_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
}

static int
setup_adapter_service(struct rte_event_timer_adapter *adptr)
{
	uint32_t adapter_service_id;
	int ret;

	/* retrieve service ids */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
			&adapter_service_id), "Failed to get event timer "
			"adapter service id");
	/* add a service core and start it */
	ret = rte_service_lcore_add(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to add service core");
	ret = rte_service_lcore_start(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to start service core");

	/* map services to it */
	TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
			sw_adptr_slcore, 1),
			"Failed to map adapter service");

	/* set services to running */
	TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
			"Failed to start event timer adapter service");

	return TEST_SUCCESS;
}

static int
test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		void *conf_arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	uint32_t started;
	static int port_allocated;
	static uint8_t port_id;
	int ret;

	if (port_allocated) {
		*event_port_id = port_id;
		return 0;
	}

	RTE_SET_USED(id);

	ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
			&started);
	if (ret < 0)
		return ret;

	if (started)
		rte_event_dev_stop(event_dev_id);

	ret = rte_event_dev_info_get(evdev, &info);
	if (ret < 0)
		return ret;

	devconf_set_default_sane_values(&dev_conf, &info);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports++;

	ret = rte_event_dev_configure(event_dev_id, &dev_conf);
	if (ret < 0) {
		if (started)
			rte_event_dev_start(event_dev_id);
		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(event_dev_id, port_id,
				port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
	if (ret < 0)
		return ret;

	*event_port_id = port_id;

	if (started)
		rte_event_dev_start(event_dev_id);

	/* Reuse this port number next time this is called */
	port_allocated = 1;

	return 0;
}

static int
_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags)
{
	struct rte_event_timer_adapter_info info;
	struct rte_event_timer_adapter_conf config = {
		.event_dev_id = evdev,
		.timer_adapter_id = TEST_ADAPTER_ID,
		.timer_tick_ns = bkt_tck_ns,
		.max_tmo_ns = max_tmo_ns,
		.nb_timers = MAX_TIMERS * 10,
		.flags = flags,
	};
	uint32_t caps = 0;
	const char *pool_name = "timdev_test_pool";

	global_bkt_tck_ns = bkt_tck_ns;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
			"failed to get adapter capabilities");

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC &&
	    !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC))
		return -ENOTSUP;

	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		timdev = rte_event_timer_adapter_create_ext(&config,
				test_port_conf_cb,
				NULL);
		setup_adapter_service(timdev);
		using_services = true;
	} else
		timdev = rte_event_timer_adapter_create(&config);

	TEST_ASSERT_NOT_NULL(timdev,
			"failed to create event timer ring");

	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
			"failed to start event timer adapter");

	/* Create event timer mempool */
	eventdev_test_mempool = rte_mempool_create(pool_name,
			MAX_TIMERS * 2,
			sizeof(struct rte_event_timer), /* element size */
			0, /* cache size */
			0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	rte_event_timer_adapter_get_info(timdev, &info);

	global_info_bkt_tck_ns = info.min_resolution_ns;

	return TEST_SUCCESS;
}

static int
timdev_setup_usec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	return using_services ?
		/* Max timeout is 10,000us and bucket interval is 100us */
		_timdev_setup(1E7, 1E5, flags) :
		/* Max timeout is 100us and bucket interval is 1us */
		_timdev_setup(1E5, 1E3, flags);
}

static int
timdev_setup_usec_multicore(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	return using_services ?
		/* Max timeout is 10,000us and bucket interval is 100us */
		_timdev_setup(1E7, 1E5, flags) :
		/* Max timeout is 100us and bucket interval is 1us */
		_timdev_setup(1E5, 1E3, flags);
}

static int
timdev_setup_msec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 3 mins, and bucket interval is 100 ms */
	return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags);
}

static int
timdev_setup_msec_periodic(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
			 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;

	/* Periodic mode with 100 ms resolution */
	return _timdev_setup(0, NSECPERSEC / 10, flags);
}

static int
timdev_setup_sec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 100sec and bucket interval is 1sec */
	return _timdev_setup(1E11, 1E9, flags);
}

static int
timdev_setup_sec_periodic(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
			 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;

	/* Periodic mode with 1 sec resolution */
	return _timdev_setup(0, NSECPERSEC, flags);
}

static int
timdev_setup_sec_multicore(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 100sec and bucket interval is 1sec */
	return _timdev_setup(1E11, 1E9, flags);
}

static void
timdev_teardown(void)
{
	rte_event_timer_adapter_stop(timdev);
	rte_event_timer_adapter_free(timdev);

	rte_mempool_free(eventdev_test_mempool);
}

static inline int
test_timer_state(void)
{
	struct rte_event_timer *ev_tim;
	struct rte_event ev;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
	*ev_tim = tim;
	ev_tim->ev.event_ptr = ev_tim;
	ev_tim->timeout_ticks = CALC_TICKS(120);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
			"Armed timer exceeding max_timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(10);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ARMED, ev_tim->state);

	if (!using_services)
		rte_delay_us(20);
	else
		rte_delay_us(1000 + 200);
	TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
			"Armed timer failed to trigger.");

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(90);
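
	/* Re-arm the timer with a valid timeout, then cancel it before it
	 * can expire.
	 */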
	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
			1, "Failed to cancel armed timer");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_CANCELED, ev_tim->state);

	rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);

	return TEST_SUCCESS;
}

static inline int
_arm_timers(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);
	}

	return TEST_SUCCESS;
}

static inline int
_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
		uint64_t cancel_count)
{
	uint8_t valid_event;
	uint64_t events = 0;
	uint64_t wait_start, max_wait;
	struct rte_event ev;

	max_wait = rte_get_timer_hz() * wait_sec;
	wait_start = rte_get_timer_cycles();
	while (1) {
		if (rte_get_timer_cycles() - wait_start > max_wait) {
			if (events + cancel_count != arm_count)
				TEST_ASSERT_SUCCESS(max_wait,
						"Max time limit for timers exceeded.");
			break;
		}

		valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (!valid_event)
			continue;

		rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
		events++;
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

static inline int
test_timer_arm_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

static int
_arm_wrapper(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_multicore(void)
{
	uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
	uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);

	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

#define MAX_BURST 16
static inline int
_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	int j;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static int
_arm_wrapper_burst(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_multicore(void)
{
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_periodic(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(1),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

static int
_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");

		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
				"Failed to arm event timer");

		while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
			;
	}

	return TEST_SUCCESS;
}

static int
_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	int j, ret;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};
	int arm_count = 0;

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);

		for (j = 0; j < MAX_BURST; j++)
			TEST_ASSERT_EQUAL(ev_tim[j]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[j]->state);

		ret = rte_ring_enqueue_bulk(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		TEST_ASSERT_EQUAL(ret, MAX_BURST,
				"Failed to enqueue event timers to ring");
		arm_count += ret;
	}

	TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
			"Failed to arm expected number of event timers");

	return TEST_SUCCESS;
}

static int
_cancel_producer_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer(20, MAX_TIMERS);
}

static int
_cancel_producer_burst_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer_burst(100, MAX_TIMERS);
}

static int
_cancel_thread(void *args)
{
	RTE_SET_USED(args);
	struct rte_event_timer *ev_tim = NULL;
	uint64_t cancel_count = 0;
	uint16_t ret;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
			continue;

		ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
		TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
		rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
		cancel_count++;
	}

	return TEST_SUCCESS;
}

static int
_cancel_burst_thread(void *args)
{
	RTE_SET_USED(args);

	int ret, i, n;
	struct rte_event_timer *ev_tim[MAX_BURST];
	uint64_t cancel_count = 0;
	uint64_t dequeue_count = 0;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		n = rte_ring_dequeue_burst(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		if (!n)
			continue;

		dequeue_count += n;

		for (i = 0; i < n; i++)
			TEST_ASSERT_EQUAL(ev_tim[i]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[i]->state);

		ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
		TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
				"event timers");
		rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
				RTE_MIN(ret, MAX_BURST));

		cancel_count += ret;
	}

	TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
			"Failed to cancel expected number of timers: "
			"expected = %d, cancel_count = %"PRIu64", "
			"dequeue_count = %"PRIu64"\n", MAX_TIMERS,
			cancel_count, dequeue_count);

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);

	rte_eal_wait_lcore(test_lcore1);
	rte_eal_wait_lcore(test_lcore2);
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore3);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
			MAX_TIMERS * 2),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_burst_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
	rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
			test_lcore1);

	rte_eal_wait_lcore(test_lcore1);
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore2);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_random(void)
{
	uint64_t i;
	uint64_t events_canceled = 0;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		if (rte_rand() & 1) {
			rte_delay_us(100 + (i % 5000));
			TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
					timdev,
					&ev_tim, 1), 1,
					"Failed to cancel event timer %d", rte_errno);
			rte_mempool_put(eventdev_test_mempool, ev_tim);
			events_canceled++;
		}
	}

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			events_canceled),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

/* Check that the adapter can be created correctly */
static int
adapter_create(void)
{
	int adapter_id = 0;
	struct rte_event_timer_adapter *adapter, *adapter2;

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev + 1, // invalid event dev id
		.timer_adapter_id = adapter_id,
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	uint32_t caps = 0;

	/* Test invalid conf */
	adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
			"event device id");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
			"invalid event device id");

	/* Test valid conf */
	conf.event_dev_id = evdev;
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
			"failed to get adapter capabilities");
	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
		adapter = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb,
				NULL);
	else
		adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
			"configuration");

	/* Test existing id */
	adapter2 = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
	TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
			"id");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
			"Failed to free adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test that adapter can be freed correctly. */
static int
adapter_free(void)
{
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test that adapter info can be retrieved and is correct. */
static int
adapter_get_info(void)
{
	struct rte_event_timer_adapter_info info;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
			"Failed to get adapter info");

	if (using_services)
		TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
				"Expected port id = 1, got port id = %d",
				info.event_dev_port_id);

	return TEST_SUCCESS;
}

/* Test adapter lookup via adapter ID. */
static int
adapter_lookup(void)
{
	struct rte_event_timer_adapter *adapter;

	adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");

	return TEST_SUCCESS;
}

static int
adapter_start(void)
{
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10,
			RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES),
			"Failed to start adapter");
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}

/* Test that adapter stops correctly. */
static int
adapter_stop(void)
{
	struct rte_event_timer_adapter *l_adapter = NULL;

	/* Test adapter stop */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop event adapter");

	TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
			"Erroneously stopped null event adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test increment and reset of ev_enq_count stat */
static int
stat_inc_reset_ev_enq(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	struct rte_event_timer_adapter_stats stats;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
			"startup");

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	rte_delay_ms(1000);

#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	/* Check stats again */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
			"Expected enqueue stat = %d; got %d", num_evtims,
			(int)stats.ev_enq_count);

	/* Reset and check again */
	ret = rte_event_timer_adapter_stats_reset(timdev);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
			"Expected enqueue stat = %d; got %d", 0,
			(int)stats.ev_enq_count);

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}

/* Test various cases in arming timers */
static int
event_timer_arm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Test single timer arm succeeds */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
			"in incorrect state");

	/* Test arm of armed timer fails */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
			"rte_event_timer_arm_burst: 0, got: %d", ret);
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after arming already armed timer");

	/* Let timer expire */
	rte_delay_ms(1000);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* This test checks that repeated references to the same event timer in the
 * arm request work as expected; only the first one through should succeed.
 */
static int
event_timer_arm_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
	TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
			"rte_event_timer_arm_burst");
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-arm");

	/* Let timer expire */
	rte_delay_ms(600);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
			"expected: 1, actual: %d", n);

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Test the timer expiry event is generated at the expected time. */
static int
event_timer_arm_expiry(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up an event timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 secs
	evtim->ev.event_ptr = evtim;

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
			"timer in incorrect state");

	rte_delay_ms(2999);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");

	/* Delay 100 ms to account for the adapter tick window - should let us
	 * dequeue one event
	 */
	rte_delay_ms(100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
			"expiry events", n);
	TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
			"Dequeued unexpected type of event");

	/* Check that we recover the original event timer and then free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}

/* Check that rearming a timer works as expected. */
static int
event_timer_arm_rearm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec
	evtim->ev.event_ptr = evtim;

	/* Arm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Recover the timer through the event that was dequeued. */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");

	/* Need to reset state in case implementation can't do it */
	evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;

	/* Rearm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}

/* Check that the adapter handles the max specified number of timers as
 * expected.
 */
static int
event_timer_arm_max(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	rte_delay_ms(1000);

#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with incorrect event sched type fails. */
static int
event_timer_arm_invalid_sched_type(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	if (!using_services)
		return -ENOTSUP;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"sched type, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid queue");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with a timeout value that is too small or
 * too big fails.
 */
static int
event_timer_arm_invalid_timeout(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = 0; // timeout too small

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
			"Unexpected event timer state");

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Unexpected event timer state");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

static int
event_timer_cancel(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Check that cancelling an uninited timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"uninited timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling uninited timer");

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	/* Check that cancelling an inited but unarmed timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"unarmed timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling unarmed timer");

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"evtim in incorrect state");
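
	/* Cancel partway through the 3 sec timeout; a successfully canceled
	 * timer must never generate an expiry event.
	 */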

	/* Delay 1 sec */
	rte_delay_ms(1000);

	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
			"evtim in incorrect state");

	rte_delay_ms(3000);

	/* Make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

static int
event_timer_cancel_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"timer in unexpected state");

	/* Now, test that referencing the same timer twice in the same call
	 * fails
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
			RTE_DIM(evtim_arr));

	/* Two requests to cancel same timer, only one should succeed */
	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
			"twice");

	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-cancel: rte_errno = %d", rte_errno);

	rte_delay_ms(3000);

	/* Still make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that event timer adapter tick resolution works as expected by testing
 * the number of adapter ticks that occur within a particular time interval.
 */
static int
adapter_tick_resolution(void)
{
	struct rte_event_timer_adapter_stats stats;
	uint64_t adapter_tick_count;

	/* Only run this test in the software driver case */
	if (!using_services)
		return -ENOTSUP;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
			"Failed to reset stats");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");
	TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
			"not zeroed out");

	/* Delay 1 second; should let at least 10 ticks occur with the default
	 * adapter configuration used by this test.
	 */
	rte_delay_ms(1000);

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");

	adapter_tick_count = stats.adapter_tick_count;
	TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
			"Expected 10-12 adapter ticks, got %"PRIu64"\n",
			adapter_tick_count);

	return TEST_SUCCESS;
}

static int
adapter_create_max(void)
{
	int i;
	uint32_t svc_start_count, svc_end_count;
	struct rte_event_timer_adapter *adapters[
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev,
		// timer_adapter_id set in loop
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};

	if (!using_services)
		return -ENOTSUP;

	svc_start_count = rte_service_get_count();

	/* This test expects that there are sufficient service IDs available
	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
	 * (the SW event device, for example).
	 */
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
		conf.timer_adapter_id = i;
		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb, NULL);
		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
				"%d", i);
	}

	conf.timer_adapter_id = i;
	adapters[i] = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");

	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
	 * have been created
	 */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			"Failed to create expected number of services");

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
				"Failed to free adapter %d", i);

	/* Check that service count is back to where it was at start */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
			"correct number of services");

	return TEST_SUCCESS;
}

static struct unit_test_suite event_timer_adptr_functional_testsuite = {
	.suite_name = "event timer functional test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_state),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_periodic),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm_burst),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_burst_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel),
		TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown,
				test_timer_cancel_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel_random),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_multicore),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_burst_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_burst_multicore),
		TEST_CASE(adapter_create),
		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_get_info),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_lookup),
		TEST_CASE_ST(NULL, timdev_teardown,
				adapter_start),
		TEST_CASE_ST(timdev_setup_msec, NULL,
				adapter_stop),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				stat_inc_reset_ev_enq),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_expiry),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_rearm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_sched_type),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_timeout),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_tick_resolution),
		TEST_CASE(adapter_create_max),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_timer_adapter_func(void)
{
	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
}

REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);