1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 * Copyright(c) 2017-2018 Intel Corporation. 4 */ 5 6 #include <math.h> 7 8 #include <rte_common.h> 9 #include <rte_cycles.h> 10 #include <rte_debug.h> 11 #include <rte_eal.h> 12 #include <rte_ethdev.h> 13 #include <rte_eventdev.h> 14 #include <rte_event_timer_adapter.h> 15 #include <rte_mempool.h> 16 #include <rte_launch.h> 17 #include <rte_lcore.h> 18 #include <rte_per_lcore.h> 19 #include <rte_random.h> 20 #include <rte_bus_vdev.h> 21 #include <rte_service.h> 22 #include <stdbool.h> 23 24 #include "test.h" 25 26 /* 4K timers corresponds to sw evdev max inflight events */ 27 #define MAX_TIMERS (4 * 1024) 28 #define BKT_TCK_NSEC 29 30 #define NSECPERSEC 1E9 31 #define BATCH_SIZE 16 32 /* Both the app lcore and adapter ports are linked to this queue */ 33 #define TEST_QUEUE_ID 0 34 /* Port the application dequeues from */ 35 #define TEST_PORT_ID 0 36 #define TEST_ADAPTER_ID 0 37 38 /* Handle log statements in same manner as test macros */ 39 #define LOG_DBG(...) 
RTE_LOG(DEBUG, EAL, __VA_ARGS__) 40 41 static int evdev; 42 static struct rte_event_timer_adapter *timdev; 43 static struct rte_mempool *eventdev_test_mempool; 44 static struct rte_ring *timer_producer_ring; 45 static uint64_t global_bkt_tck_ns; 46 static uint64_t global_info_bkt_tck_ns; 47 static volatile uint8_t arm_done; 48 49 #define CALC_TICKS(tks) \ 50 ceil((double)(tks * global_bkt_tck_ns) / global_info_bkt_tck_ns) 51 52 53 static bool using_services; 54 static uint32_t test_lcore1; 55 static uint32_t test_lcore2; 56 static uint32_t test_lcore3; 57 static uint32_t sw_evdev_slcore; 58 static uint32_t sw_adptr_slcore; 59 60 static inline void 61 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, 62 struct rte_event_dev_info *info) 63 { 64 memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); 65 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; 66 dev_conf->nb_event_ports = 1; 67 dev_conf->nb_event_queues = 1; 68 dev_conf->nb_event_queue_flows = info->max_event_queue_flows; 69 dev_conf->nb_event_port_dequeue_depth = 70 info->max_event_port_dequeue_depth; 71 dev_conf->nb_event_port_enqueue_depth = 72 info->max_event_port_enqueue_depth; 73 dev_conf->nb_event_port_enqueue_depth = 74 info->max_event_port_enqueue_depth; 75 dev_conf->nb_events_limit = 76 info->max_num_events; 77 } 78 79 static inline int 80 eventdev_setup(void) 81 { 82 int ret; 83 struct rte_event_dev_config dev_conf; 84 struct rte_event_dev_info info; 85 uint32_t service_id; 86 87 ret = rte_event_dev_info_get(evdev, &info); 88 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info"); 89 TEST_ASSERT(info.max_num_events < 0 || 90 info.max_num_events >= (int32_t)MAX_TIMERS, 91 "ERROR max_num_events=%d < max_events=%d", 92 info.max_num_events, MAX_TIMERS); 93 94 devconf_set_default_sane_values(&dev_conf, &info); 95 ret = rte_event_dev_configure(evdev, &dev_conf); 96 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev"); 97 98 ret = rte_event_queue_setup(evdev, 0, 
			NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);

	/* Configure event port */
	ret = rte_event_port_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);

	/* If this is a software event device, map and start its service */
	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(
				sw_evdev_slcore),
				"Failed to start service core");
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
				service_id, sw_evdev_slcore, 1),
				"Failed to map evdev service");
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
				service_id, 1),
				"Failed to start evdev service");
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}

/* Suite setup: pick (or create) an event device, reserve lcores for the
 * multicore tests and any service cores needed, then configure and
 * start the device.
 */
static int
testsuite_setup(void)
{
	/* Some of the multithreaded tests require 3 other lcores to run */
	unsigned int required_lcore_count = 4;
	uint32_t service_id;

	/* To make it easier to map services later if needed, just reset
	 * service core state.
	 */
	(void) rte_service_lcore_reset_all();

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... testing with"
			" event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				"Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		/* A software event device will use a software event timer
		 * adapter as well. 2 more cores required to convert to
		 * service cores.
		 */
		required_lcore_count += 2;
		using_services = true;
	}

	if (rte_lcore_count() < required_lcore_count) {
		printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
				required_lcore_count);
		return TEST_SKIPPED;
	}

	/* Assign lcores for various tasks */
	test_lcore1 = rte_get_next_lcore(-1, 1, 0);
	test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
	test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
	if (using_services) {
		sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
		sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
	}

	return eventdev_setup();
}

static void
testsuite_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
}

/* Map the timer adapter's service onto sw_adptr_slcore and set it
 * running.  -EALREADY from lcore add/start is tolerated because the
 * core may have been registered by an earlier test.
 */
static int
setup_adapter_service(struct rte_event_timer_adapter *adptr)
{
	uint32_t adapter_service_id;
	int ret;

	/* retrieve service ids */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
			&adapter_service_id), "Failed to get event timer "
			"adapter service id");
	/* add a service core and start it */
	ret = rte_service_lcore_add(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to add service core");
	ret = rte_service_lcore_start(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to start service core");

	/* map services to it */
	TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
			sw_adptr_slcore, 1),
			"Failed to map adapter service");

	/* set services to running */
	TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
			"Failed to start event timer adapter service");

	return TEST_SUCCESS;
}

/* Port-configuration callback for rte_event_timer_adapter_create_ext():
 * allocates one extra event port for the adapter by stopping (if
 * needed) and reconfiguring the device with one more port.  The port
 * number is cached in static storage and reused on subsequent calls.
 */
static int
test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		void *conf_arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	uint32_t started;
	static int port_allocated;
	static uint8_t port_id;
	int ret;

	if (port_allocated) {
		*event_port_id = port_id;
		return 0;
	}

	RTE_SET_USED(id);

	ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
			&started);
	if (ret < 0)
		return ret;

	/* The device must be stopped before it can be reconfigured */
	if (started)
		rte_event_dev_stop(event_dev_id);

	ret = rte_event_dev_info_get(evdev, &info);
	if (ret < 0)
		return ret;

	devconf_set_default_sane_values(&dev_conf, &info);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports++;

	ret = rte_event_dev_configure(event_dev_id, &dev_conf);
	if (ret < 0) {
		/* Restore the previous running state on failure */
		if (started)
			rte_event_dev_start(event_dev_id);
		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(event_dev_id, port_id,
				port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
	if (ret < 0)
		return ret;

	*event_port_id = port_id;

	if (started)
		rte_event_dev_start(event_dev_id);

	/* Reuse this port number next time this is called */
	port_allocated = 1;

	return 0;
}

/* Create and start a timer adapter with the given max timeout, tick
 * interval, and flags, and create the mempool the tests draw event
 * timers from.  Records the requested and granted tick resolutions for
 * CALC_TICKS().
 */
static int
_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags)
{
	struct rte_event_timer_adapter_info info;
	struct rte_event_timer_adapter_conf config = {
		.event_dev_id = evdev,
		.timer_adapter_id = TEST_ADAPTER_ID,
		.timer_tick_ns = bkt_tck_ns,
		.max_tmo_ns = max_tmo_ns,
		.nb_timers = MAX_TIMERS * 10,
		.flags = flags,
	};
	uint32_t caps = 0;
	const char *pool_name = "timdev_test_pool";

	global_bkt_tck_ns = bkt_tck_ns;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
302 "failed to get adapter capabilities"); 303 304 if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC && 305 !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC)) 306 return -ENOTSUP; 307 308 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { 309 timdev = rte_event_timer_adapter_create_ext(&config, 310 test_port_conf_cb, 311 NULL); 312 setup_adapter_service(timdev); 313 using_services = true; 314 } else 315 timdev = rte_event_timer_adapter_create(&config); 316 317 TEST_ASSERT_NOT_NULL(timdev, 318 "failed to create event timer ring"); 319 320 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0, 321 "failed to Start event timer adapter"); 322 323 /* Create event timer mempool */ 324 eventdev_test_mempool = rte_mempool_create(pool_name, 325 MAX_TIMERS * 2, 326 sizeof(struct rte_event_timer), /* element size*/ 327 0, /* cache size*/ 328 0, NULL, NULL, NULL, NULL, 329 rte_socket_id(), 0); 330 if (!eventdev_test_mempool) { 331 printf("ERROR creating mempool\n"); 332 return TEST_FAILED; 333 } 334 335 rte_event_timer_adapter_get_info(timdev, &info); 336 337 global_info_bkt_tck_ns = info.min_resolution_ns; 338 339 return TEST_SUCCESS; 340 } 341 342 static int 343 timdev_setup_usec(void) 344 { 345 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 346 347 return using_services ? 348 /* Max timeout is 10,000us and bucket interval is 100us */ 349 _timdev_setup(1E7, 1E5, flags) : 350 /* Max timeout is 100us and bucket interval is 1us */ 351 _timdev_setup(1E5, 1E3, flags); 352 } 353 354 static int 355 timdev_setup_usec_multicore(void) 356 { 357 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 358 359 return using_services ? 
360 /* Max timeout is 10,000us and bucket interval is 100us */ 361 _timdev_setup(1E7, 1E5, flags) : 362 /* Max timeout is 100us and bucket interval is 1us */ 363 _timdev_setup(1E5, 1E3, flags); 364 } 365 366 static int 367 timdev_setup_msec(void) 368 { 369 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 370 371 /* Max timeout is 3 mins, and bucket interval is 100 ms */ 372 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags); 373 } 374 375 static int 376 timdev_setup_msec_periodic(void) 377 { 378 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES | 379 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC; 380 381 /* Periodic mode with 100 ms resolution */ 382 return _timdev_setup(0, NSECPERSEC / 10, flags); 383 } 384 385 static int 386 timdev_setup_sec(void) 387 { 388 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 389 390 /* Max timeout is 100sec and bucket interval is 1sec */ 391 return _timdev_setup(1E11, 1E9, flags); 392 } 393 394 static int 395 timdev_setup_sec_periodic(void) 396 { 397 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES | 398 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC; 399 400 /* Periodic mode with 1 sec resolution */ 401 return _timdev_setup(0, NSECPERSEC, flags); 402 } 403 404 static int 405 timdev_setup_sec_multicore(void) 406 { 407 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; 408 409 /* Max timeout is 100sec and bucket interval is 1sec */ 410 return _timdev_setup(1E11, 1E9, flags); 411 } 412 413 static void 414 timdev_teardown(void) 415 { 416 rte_event_timer_adapter_stop(timdev); 417 rte_event_timer_adapter_free(timdev); 418 419 rte_mempool_free(eventdev_test_mempool); 420 } 421 422 static inline int 423 test_timer_state(void) 424 { 425 struct rte_event_timer *ev_tim; 426 struct rte_event ev; 427 const struct rte_event_timer tim = { 428 .ev.op = RTE_EVENT_OP_NEW, 429 .ev.queue_id = 0, 430 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 431 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 432 .ev.event_type = RTE_EVENT_TYPE_TIMER, 433 
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};


	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
	*ev_tim = tim;
	ev_tim->ev.event_ptr = ev_tim;
	/* 120 ticks exceeds the max timeout configured by the fixture */
	ev_tim->timeout_ticks = CALC_TICKS(120);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
			"Armed timer exceeding max_timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(10);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ARMED, ev_tim->state);

	/* Wait long enough for the 10-tick timer to expire; the service-
	 * backed adapter uses a coarser resolution, hence the longer wait.
	 */
	if (!using_services)
		rte_delay_us(20);
	else
		rte_delay_us(1000 + 200);
	TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
			"Armed timer failed to trigger.");

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(90);
	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
			1, "Failed to cancel armed timer");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_CANCELED, ev_tim->state);

	rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);

	return TEST_SUCCESS;
}

/* Arm 'timers' timers one at a time, each expiring after
 * 'timeout_tcks' ticks.  Timer objects come from the test mempool and
 * are released by _wait_timer_triggers() when the expiry events are
 * dequeued.
 */
static inline int
_arm_timers(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);
	}

	return TEST_SUCCESS;
}

/* Dequeue expiry events, returning each timer object to the mempool,
 * until (arm_count - cancel_count) events have been seen; fail if
 * wait_sec seconds elapse first.
 */
static inline int
_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
		uint64_t cancel_count)
{
	uint8_t valid_event;
	uint64_t events = 0;
	uint64_t wait_start, max_wait;
	struct rte_event ev;

	max_wait = rte_get_timer_hz() * wait_sec;
	wait_start = rte_get_timer_cycles();
	while (1) {
		if (rte_get_timer_cycles() - wait_start > max_wait) {
			/* Time's up: only fail if events are missing */
			if (events + cancel_count != arm_count)
				TEST_ASSERT_SUCCESS(max_wait,
					"Max time limit for timers exceeded.");
			break;
		}

		valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (!valid_event)
			continue;

		rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
		events++;
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

static inline int
test_timer_arm_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

/* remote-launch entry point for test_timer_arm_multicore() */
static int
_arm_wrapper(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_multicore(void)
{

	uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
	uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);

	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

#define MAX_BURST 16
/* Burst variant of _arm_timers(): arm MAX_BURST timers per API call. */
static inline int
_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	int j;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

/* remote-launch entry point for test_timer_arm_burst_multicore() */
static int
_arm_wrapper_burst(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_multicore(void)
{
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

/* Arm periodic timers one at a time and cancel each after a short
 * delay; expect every arm to be balanced by a cancel.
 */
static inline int
test_timer_cancel_periodic(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(1),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

/* One-shot version of the arm-then-cancel test above. */
static inline int
test_timer_cancel(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

/* Producer side of the multicore cancel test: arm timers and hand each
 * armed timer to the canceling thread through timer_producer_ring.
 */
static int
_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");

		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
				"Failed to arm event timer");

		/* Spin until the consumer drains space in the ring */
		while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
			;
	}

	return TEST_SUCCESS;
}

/* Burst variant of _cancel_producer(): arm and enqueue MAX_BURST
 * timers at a time.
 */
static int
_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
{

	uint64_t i;
	int j, ret;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};
	int arm_count = 0;

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);

		for (j = 0; j < MAX_BURST; j++)
			TEST_ASSERT_EQUAL(ev_tim[j]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[j]->state);

		ret = rte_ring_enqueue_bulk(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		TEST_ASSERT_EQUAL(ret, MAX_BURST,
				"Failed to enqueue event timers to ring");
		arm_count += ret;
	}

	TEST_ASSERT_EQUAL(arm_count,
			MAX_TIMERS,
			"Failed to arm expected number of event timers");

	return TEST_SUCCESS;
}

static int
_cancel_producer_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer(20, MAX_TIMERS);
}

static int
_cancel_producer_burst_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer_burst(100, MAX_TIMERS);
}

/* Consumer side: cancel timers handed over on timer_producer_ring until
 * the producers signal completion via arm_done and the ring is empty.
 */
static int
_cancel_thread(void *args)
{
	RTE_SET_USED(args);
	struct rte_event_timer *ev_tim = NULL;
	uint64_t cancel_count = 0;
	uint16_t ret;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
			continue;

		ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
		TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
		rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
		cancel_count++;
	}

	return TEST_SUCCESS;
}

/* Burst variant of _cancel_thread(); additionally verifies that every
 * dequeued timer is still in the ARMED state and that the total cancel
 * count matches MAX_TIMERS.
 */
static int
_cancel_burst_thread(void *args)
{
	RTE_SET_USED(args);

	int ret, i, n;
	struct rte_event_timer *ev_tim[MAX_BURST];
	uint64_t cancel_count = 0;
	uint64_t dequeue_count = 0;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		n = rte_ring_dequeue_burst(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		if (!n)
			continue;

		dequeue_count += n;

		for (i = 0; i < n; i++)
			TEST_ASSERT_EQUAL(ev_tim[i]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[i]->state);

		ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
		TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
				"event timers");
		rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
				RTE_MIN(ret, MAX_BURST));

		cancel_count += ret;
	}

	TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
			"Failed to cancel expected number of timers: "
			"expected = %d, cancel_count = %"PRIu64", "
			"dequeue_count = %"PRIu64"\n", MAX_TIMERS,
			cancel_count, dequeue_count);

	return TEST_SUCCESS;
}

/* Two producers arm timers while a third lcore cancels them; at the
 * end every armed timer must have been canceled.
 */
static inline int
test_timer_cancel_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);

	rte_eal_wait_lcore(test_lcore1);
	rte_eal_wait_lcore(test_lcore2);
	/* Producers finished; let the consumer drain the ring and exit */
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore3);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
			MAX_TIMERS * 2),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_burst_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
	rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
			test_lcore1);

	rte_eal_wait_lcore(test_lcore1);
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore2);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

/* Randomly cancel roughly half of the armed timers; the remainder must
 * fire and be dequeued.
 */
static inline int
test_timer_cancel_random(void)
{
	uint64_t i;
	uint64_t events_canceled = 0;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		if (rte_rand() & 1) {
			rte_delay_us(100 + (i % 5000));
			TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
					timdev,
					&ev_tim, 1), 1,
					"Failed to cancel event timer %d", rte_errno);
			rte_mempool_put(eventdev_test_mempool, ev_tim);
			events_canceled++;
		}
	}

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			events_canceled),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

/* Check that the adapter can be created correctly */
static int
adapter_create(void)
{
	int adapter_id = 0;
	struct rte_event_timer_adapter *adapter, *adapter2;

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev + 1, // invalid event dev id
		.timer_adapter_id = adapter_id,
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	uint32_t caps = 0;

	/* Test invalid conf */
	adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
			"event device id");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
			"invalid event device id");

	/* Test valid conf */
	conf.event_dev_id = evdev;
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
			"failed to get adapter capabilities");
	/* Without an internal port, the adapter needs a port allocated
	 * through the ext-create callback.
	 */
	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
		adapter = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb,
				NULL);
	else
		adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
			"configuration");

	/* Test existing id */
	adapter2 = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
	TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
			"id");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
			"Failed to free adapter");

	return TEST_SUCCESS;
}


/* Test that adapter can be freed correctly. */
static int
adapter_free(void)
{
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test that adapter info can be retrieved and is correct.
 */
static int
adapter_get_info(void)
{
	struct rte_event_timer_adapter_info info;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
			"Failed to get adapter info");

	/* When services are used, an extra producer port was allocated
	 * by test_port_conf_cb and is expected to be port 1.
	 */
	if (using_services)
		TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
				"Expected port id = 1, got port id = %d",
				info.event_dev_port_id);

	return TEST_SUCCESS;
}

/* Test adapter lookup via adapter ID. */
static int
adapter_lookup(void)
{
	struct rte_event_timer_adapter *adapter;

	adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");

	return TEST_SUCCESS;
}

static int
adapter_start(void)
{
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10,
			RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES),
			"Failed to start adapter");
	/* A second start on an already-running adapter must report -EALREADY */
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}

/* Test that adapter stops correctly.
*/ 1142 static int 1143 adapter_stop(void) 1144 { 1145 struct rte_event_timer_adapter *l_adapter = NULL; 1146 1147 /* Test adapter stop */ 1148 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 1149 "Failed to stop event adapter"); 1150 1151 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter), 1152 "Erroneously stopped null event adapter"); 1153 1154 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 1155 "Failed to free adapter"); 1156 1157 rte_mempool_free(eventdev_test_mempool); 1158 1159 return TEST_SUCCESS; 1160 } 1161 1162 /* Test increment and reset of ev_enq_count stat */ 1163 static int 1164 stat_inc_reset_ev_enq(void) 1165 { 1166 int ret, i, n; 1167 int num_evtims = MAX_TIMERS; 1168 struct rte_event_timer *evtims[num_evtims]; 1169 struct rte_event evs[BATCH_SIZE]; 1170 struct rte_event_timer_adapter_stats stats; 1171 const struct rte_event_timer init_tim = { 1172 .ev.op = RTE_EVENT_OP_NEW, 1173 .ev.queue_id = TEST_QUEUE_ID, 1174 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1175 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1176 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1177 .state = RTE_EVENT_TIMER_NOT_ARMED, 1178 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1179 }; 1180 1181 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1182 num_evtims); 1183 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1184 ret); 1185 1186 for (i = 0; i < num_evtims; i++) { 1187 *evtims[i] = init_tim; 1188 evtims[i]->ev.event_ptr = evtims[i]; 1189 } 1190 1191 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1192 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1193 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at " 1194 "startup"); 1195 1196 /* Test with the max value for the adapter */ 1197 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1198 TEST_ASSERT_EQUAL(ret, num_evtims, 1199 "Failed to arm all event timers: attempted = %d, " 1200 "succeeded = %d, rte_errno = %s", 1201 
num_evtims, ret, rte_strerror(rte_errno)); 1202 1203 rte_delay_ms(1000); 1204 1205 #define MAX_TRIES num_evtims 1206 int sum = 0; 1207 int tries = 0; 1208 bool done = false; 1209 while (!done) { 1210 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1211 RTE_DIM(evs), 10); 1212 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1213 done = true; 1214 1215 rte_delay_ms(10); 1216 } 1217 1218 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1219 "got %d", num_evtims, sum); 1220 1221 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1222 1223 rte_delay_ms(100); 1224 1225 /* Make sure the eventdev is still empty */ 1226 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1227 10); 1228 1229 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1230 "events from event device"); 1231 1232 /* Check stats again */ 1233 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1234 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1235 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims, 1236 "Expected enqueue stat = %d; got %d", num_evtims, 1237 (int)stats.ev_enq_count); 1238 1239 /* Reset and check again */ 1240 ret = rte_event_timer_adapter_stats_reset(timdev); 1241 TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats"); 1242 1243 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1244 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1245 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, 1246 "Expected enqueue stat = %d; got %d", 0, 1247 (int)stats.ev_enq_count); 1248 1249 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1250 num_evtims); 1251 1252 return TEST_SUCCESS; 1253 } 1254 1255 /* Test various cases in arming timers */ 1256 static int 1257 event_timer_arm(void) 1258 { 1259 uint16_t n; 1260 int ret; 1261 struct rte_event_timer_adapter *adapter = timdev; 1262 struct rte_event_timer *evtim = NULL; 1263 struct rte_event evs[BATCH_SIZE]; 1264 const struct rte_event_timer init_tim = { 1265 .ev.op = 
RTE_EVENT_OP_NEW, 1266 .ev.queue_id = TEST_QUEUE_ID, 1267 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1268 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1269 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1270 .state = RTE_EVENT_TIMER_NOT_ARMED, 1271 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1272 }; 1273 1274 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1275 if (evtim == NULL) { 1276 /* Failed to get an event timer object */ 1277 return TEST_FAILED; 1278 } 1279 1280 /* Set up a timer */ 1281 *evtim = init_tim; 1282 evtim->ev.event_ptr = evtim; 1283 1284 /* Test single timer arm succeeds */ 1285 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1286 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1287 rte_strerror(rte_errno)); 1288 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer " 1289 "in incorrect state"); 1290 1291 /* Test arm of armed timer fails */ 1292 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1293 TEST_ASSERT_EQUAL(ret, 0, "expected return value from " 1294 "rte_event_timer_arm_burst: 0, got: %d", ret); 1295 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1296 "after arming already armed timer"); 1297 1298 /* Let timer expire */ 1299 rte_delay_ms(1000); 1300 1301 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1302 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1303 "events from event device"); 1304 1305 rte_mempool_put(eventdev_test_mempool, evtim); 1306 1307 return TEST_SUCCESS; 1308 } 1309 1310 /* This test checks that repeated references to the same event timer in the 1311 * arm request work as expected; only the first one through should succeed. 
1312 */ 1313 static int 1314 event_timer_arm_double(void) 1315 { 1316 uint16_t n; 1317 int ret; 1318 struct rte_event_timer_adapter *adapter = timdev; 1319 struct rte_event_timer *evtim = NULL; 1320 struct rte_event evs[BATCH_SIZE]; 1321 const struct rte_event_timer init_tim = { 1322 .ev.op = RTE_EVENT_OP_NEW, 1323 .ev.queue_id = TEST_QUEUE_ID, 1324 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1325 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1326 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1327 .state = RTE_EVENT_TIMER_NOT_ARMED, 1328 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1329 }; 1330 1331 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1332 if (evtim == NULL) { 1333 /* Failed to get an event timer object */ 1334 return TEST_FAILED; 1335 } 1336 1337 /* Set up a timer */ 1338 *evtim = init_tim; 1339 evtim->ev.event_ptr = evtim; 1340 1341 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1342 ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr)); 1343 TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from " 1344 "rte_event_timer_arm_burst"); 1345 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1346 "after double-arm"); 1347 1348 /* Let timer expire */ 1349 rte_delay_ms(600); 1350 1351 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1352 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " 1353 "expected: 1, actual: %d", n); 1354 1355 rte_mempool_put(eventdev_test_mempool, evtim); 1356 1357 return TEST_SUCCESS; 1358 } 1359 1360 /* Test the timer expiry event is generated at the expected time. 
*/ 1361 static int 1362 event_timer_arm_expiry(void) 1363 { 1364 uint16_t n; 1365 int ret; 1366 struct rte_event_timer_adapter *adapter = timdev; 1367 struct rte_event_timer *evtim = NULL; 1368 struct rte_event_timer *evtim2 = NULL; 1369 struct rte_event evs[BATCH_SIZE]; 1370 const struct rte_event_timer init_tim = { 1371 .ev.op = RTE_EVENT_OP_NEW, 1372 .ev.queue_id = TEST_QUEUE_ID, 1373 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1374 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1375 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1376 .state = RTE_EVENT_TIMER_NOT_ARMED, 1377 }; 1378 1379 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1380 if (evtim == NULL) { 1381 /* Failed to get an event timer object */ 1382 return TEST_FAILED; 1383 } 1384 1385 /* Set up an event timer */ 1386 *evtim = init_tim; 1387 evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs 1388 evtim->ev.event_ptr = evtim; 1389 1390 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1391 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s", 1392 rte_strerror(rte_errno)); 1393 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " 1394 "timer in incorrect state"); 1395 1396 rte_delay_ms(2999); 1397 1398 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1399 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); 1400 1401 /* Delay 100 ms to account for the adapter tick window - should let us 1402 * dequeue one event 1403 */ 1404 rte_delay_ms(100); 1405 1406 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1407 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " 1408 "expiry events", n); 1409 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, 1410 "Dequeued unexpected type of event"); 1411 1412 /* Check that we recover the original event timer and then free it */ 1413 evtim2 = evs[0].event_ptr; 1414 TEST_ASSERT_EQUAL(evtim, evtim2, 1415 "Failed to recover pointer to original event timer"); 1416 
rte_mempool_put(eventdev_test_mempool, evtim2); 1417 1418 return TEST_SUCCESS; 1419 } 1420 1421 /* Check that rearming a timer works as expected. */ 1422 static int 1423 event_timer_arm_rearm(void) 1424 { 1425 uint16_t n; 1426 int ret; 1427 struct rte_event_timer *evtim = NULL; 1428 struct rte_event_timer *evtim2 = NULL; 1429 struct rte_event evs[BATCH_SIZE]; 1430 const struct rte_event_timer init_tim = { 1431 .ev.op = RTE_EVENT_OP_NEW, 1432 .ev.queue_id = TEST_QUEUE_ID, 1433 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1434 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1435 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1436 .state = RTE_EVENT_TIMER_NOT_ARMED, 1437 }; 1438 1439 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1440 if (evtim == NULL) { 1441 /* Failed to get an event timer object */ 1442 return TEST_FAILED; 1443 } 1444 1445 /* Set up a timer */ 1446 *evtim = init_tim; 1447 evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec 1448 evtim->ev.event_ptr = evtim; 1449 1450 /* Arm it */ 1451 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1452 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1453 rte_strerror(rte_errno)); 1454 1455 /* Add 100ms to account for the adapter tick window */ 1456 rte_delay_ms(100 + 100); 1457 1458 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1459 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1460 "events from event device"); 1461 1462 /* Recover the timer through the event that was dequeued. 
*/ 1463 evtim2 = evs[0].event_ptr; 1464 TEST_ASSERT_EQUAL(evtim, evtim2, 1465 "Failed to recover pointer to original event timer"); 1466 1467 /* Need to reset state in case implementation can't do it */ 1468 evtim2->state = RTE_EVENT_TIMER_NOT_ARMED; 1469 1470 /* Rearm it */ 1471 ret = rte_event_timer_arm_burst(timdev, &evtim2, 1); 1472 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1473 rte_strerror(rte_errno)); 1474 1475 /* Add 100ms to account for the adapter tick window */ 1476 rte_delay_ms(100 + 100); 1477 1478 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1479 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1480 "events from event device"); 1481 1482 /* Free it */ 1483 evtim2 = evs[0].event_ptr; 1484 TEST_ASSERT_EQUAL(evtim, evtim2, 1485 "Failed to recover pointer to original event timer"); 1486 rte_mempool_put(eventdev_test_mempool, evtim2); 1487 1488 return TEST_SUCCESS; 1489 } 1490 1491 /* Check that the adapter handles the max specified number of timers as 1492 * expected. 
1493 */ 1494 static int 1495 event_timer_arm_max(void) 1496 { 1497 int ret, i, n; 1498 int num_evtims = MAX_TIMERS; 1499 struct rte_event_timer *evtims[num_evtims]; 1500 struct rte_event evs[BATCH_SIZE]; 1501 const struct rte_event_timer init_tim = { 1502 .ev.op = RTE_EVENT_OP_NEW, 1503 .ev.queue_id = TEST_QUEUE_ID, 1504 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1505 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1506 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1507 .state = RTE_EVENT_TIMER_NOT_ARMED, 1508 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1509 }; 1510 1511 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1512 num_evtims); 1513 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1514 ret); 1515 1516 for (i = 0; i < num_evtims; i++) { 1517 *evtims[i] = init_tim; 1518 evtims[i]->ev.event_ptr = evtims[i]; 1519 } 1520 1521 /* Test with the max value for the adapter */ 1522 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1523 TEST_ASSERT_EQUAL(ret, num_evtims, 1524 "Failed to arm all event timers: attempted = %d, " 1525 "succeeded = %d, rte_errno = %s", 1526 num_evtims, ret, rte_strerror(rte_errno)); 1527 1528 rte_delay_ms(1000); 1529 1530 #define MAX_TRIES num_evtims 1531 int sum = 0; 1532 int tries = 0; 1533 bool done = false; 1534 while (!done) { 1535 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1536 RTE_DIM(evs), 10); 1537 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1538 done = true; 1539 1540 rte_delay_ms(10); 1541 } 1542 1543 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1544 "got %d", num_evtims, sum); 1545 1546 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1547 1548 rte_delay_ms(100); 1549 1550 /* Make sure the eventdev is still empty */ 1551 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1552 10); 1553 1554 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1555 "events from event device"); 1556 1557 
rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1558 num_evtims); 1559 1560 return TEST_SUCCESS; 1561 } 1562 1563 /* Check that creating an event timer with incorrect event sched type fails. */ 1564 static int 1565 event_timer_arm_invalid_sched_type(void) 1566 { 1567 int ret; 1568 struct rte_event_timer *evtim = NULL; 1569 const struct rte_event_timer init_tim = { 1570 .ev.op = RTE_EVENT_OP_NEW, 1571 .ev.queue_id = TEST_QUEUE_ID, 1572 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1573 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1574 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1575 .state = RTE_EVENT_TIMER_NOT_ARMED, 1576 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1577 }; 1578 1579 if (!using_services) 1580 return -ENOTSUP; 1581 1582 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1583 if (evtim == NULL) { 1584 /* Failed to get an event timer object */ 1585 return TEST_FAILED; 1586 } 1587 1588 *evtim = init_tim; 1589 evtim->ev.event_ptr = evtim; 1590 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type 1591 1592 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1593 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1594 "sched type, but didn't"); 1595 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1596 " arm fail with invalid queue"); 1597 1598 rte_mempool_put(eventdev_test_mempool, &evtim); 1599 1600 return TEST_SUCCESS; 1601 } 1602 1603 /* Check that creating an event timer with a timeout value that is too small or 1604 * too big fails. 
1605 */ 1606 static int 1607 event_timer_arm_invalid_timeout(void) 1608 { 1609 int ret; 1610 struct rte_event_timer *evtim = NULL; 1611 const struct rte_event_timer init_tim = { 1612 .ev.op = RTE_EVENT_OP_NEW, 1613 .ev.queue_id = TEST_QUEUE_ID, 1614 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1615 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1616 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1617 .state = RTE_EVENT_TIMER_NOT_ARMED, 1618 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec 1619 }; 1620 1621 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1622 if (evtim == NULL) { 1623 /* Failed to get an event timer object */ 1624 return TEST_FAILED; 1625 } 1626 1627 *evtim = init_tim; 1628 evtim->ev.event_ptr = evtim; 1629 evtim->timeout_ticks = 0; // timeout too small 1630 1631 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1632 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1633 "timeout, but didn't"); 1634 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1635 " arm fail with invalid timeout"); 1636 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY, 1637 "Unexpected event timer state"); 1638 1639 *evtim = init_tim; 1640 evtim->ev.event_ptr = evtim; 1641 evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big 1642 1643 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1644 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1645 "timeout, but didn't"); 1646 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1647 " arm fail with invalid timeout"); 1648 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 1649 "Unexpected event timer state"); 1650 1651 rte_mempool_put(eventdev_test_mempool, evtim); 1652 1653 return TEST_SUCCESS; 1654 } 1655 1656 static int 1657 event_timer_cancel(void) 1658 { 1659 uint16_t n; 1660 int ret; 1661 struct rte_event_timer_adapter *adapter = timdev; 1662 struct rte_event_timer *evtim = NULL; 1663 struct rte_event 
	evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Check that cancelling an uninited timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"uninited timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling uninited timer");

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	/* Check that cancelling an inited but unarmed timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"unarmed timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling unarmed timer");

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"evtim in incorrect state");

	/* Delay 1 sec */
	rte_delay_ms(1000);

	/* Cancel before the 3-sec expiry; state must become CANCELED */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
			"evtim in incorrect state");

	/* Wait past the original deadline */
	rte_delay_ms(3000);

	/* Make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that referencing the same timer twice in one cancel burst only
 * succeeds once.
 */
static int
event_timer_cancel_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"timer in unexpected state");

	/* Now, test that referencing the same timer twice in the same call
	 * fails
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
			RTE_DIM(evtim_arr));

	/* Two requests to cancel same timer, only one should succeed */
	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
			"twice");

	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-cancel: rte_errno = %d", rte_errno);

	rte_delay_ms(3000);

	/*
Still make sure that no expiry event was generated */ 1776 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1777 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1778 1779 rte_mempool_put(eventdev_test_mempool, evtim); 1780 1781 return TEST_SUCCESS; 1782 } 1783 1784 /* Check that event timer adapter tick resolution works as expected by testing 1785 * the number of adapter ticks that occur within a particular time interval. 1786 */ 1787 static int 1788 adapter_tick_resolution(void) 1789 { 1790 struct rte_event_timer_adapter_stats stats; 1791 uint64_t adapter_tick_count; 1792 1793 /* Only run this test in the software driver case */ 1794 if (!using_services) 1795 return -ENOTSUP; 1796 1797 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev), 1798 "Failed to reset stats"); 1799 1800 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1801 &stats), "Failed to get adapter stats"); 1802 TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count " 1803 "not zeroed out"); 1804 1805 /* Delay 1 second; should let at least 10 ticks occur with the default 1806 * adapter configuration used by this test. 
1807 */ 1808 rte_delay_ms(1000); 1809 1810 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1811 &stats), "Failed to get adapter stats"); 1812 1813 adapter_tick_count = stats.adapter_tick_count; 1814 TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12, 1815 "Expected 10-12 adapter ticks, got %"PRIu64"\n", 1816 adapter_tick_count); 1817 1818 return TEST_SUCCESS; 1819 } 1820 1821 static int 1822 adapter_create_max(void) 1823 { 1824 int i; 1825 uint32_t svc_start_count, svc_end_count; 1826 struct rte_event_timer_adapter *adapters[ 1827 RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1]; 1828 1829 struct rte_event_timer_adapter_conf conf = { 1830 .event_dev_id = evdev, 1831 // timer_adapter_id set in loop 1832 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 1833 .timer_tick_ns = NSECPERSEC / 10, 1834 .max_tmo_ns = 180 * NSECPERSEC, 1835 .nb_timers = MAX_TIMERS, 1836 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES, 1837 }; 1838 1839 if (!using_services) 1840 return -ENOTSUP; 1841 1842 svc_start_count = rte_service_get_count(); 1843 1844 /* This test expects that there are sufficient service IDs available 1845 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to 1846 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service 1847 * (the SW event device, for example). 
1848 */ 1849 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) { 1850 conf.timer_adapter_id = i; 1851 adapters[i] = rte_event_timer_adapter_create_ext(&conf, 1852 test_port_conf_cb, NULL); 1853 TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter " 1854 "%d", i); 1855 } 1856 1857 conf.timer_adapter_id = i; 1858 adapters[i] = rte_event_timer_adapter_create(&conf); 1859 TEST_ASSERT_NULL(adapters[i], "Created too many adapters"); 1860 1861 /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services 1862 * have been created 1863 */ 1864 svc_end_count = rte_service_get_count(); 1865 TEST_ASSERT_EQUAL(svc_end_count - svc_start_count, 1866 RTE_EVENT_TIMER_ADAPTER_NUM_MAX, 1867 "Failed to create expected number of services"); 1868 1869 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) 1870 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]), 1871 "Failed to free adapter %d", i); 1872 1873 /* Check that service count is back to where it was at start */ 1874 svc_end_count = rte_service_get_count(); 1875 TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release " 1876 "correct number of services"); 1877 1878 return TEST_SUCCESS; 1879 } 1880 1881 static struct unit_test_suite event_timer_adptr_functional_testsuite = { 1882 .suite_name = "event timer functional test suite", 1883 .setup = testsuite_setup, 1884 .teardown = testsuite_teardown, 1885 .unit_test_cases = { 1886 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1887 test_timer_state), 1888 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1889 test_timer_arm), 1890 TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown, 1891 test_timer_arm_periodic), 1892 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1893 test_timer_arm_burst), 1894 TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown, 1895 test_timer_arm_burst_periodic), 1896 TEST_CASE_ST(timdev_setup_sec, timdev_teardown, 1897 test_timer_cancel), 1898 TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown, 1899 
				test_timer_cancel_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel_random),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_multicore),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_burst_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_burst_multicore),
		TEST_CASE(adapter_create),
		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_get_info),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_lookup),
		/* adapter_start does its own setup */
		TEST_CASE_ST(NULL, timdev_teardown,
				adapter_start),
		/* adapter_stop frees the adapter itself; no teardown */
		TEST_CASE_ST(timdev_setup_msec, NULL,
				adapter_stop),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				stat_inc_reset_ev_enq),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_expiry),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_rearm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_sched_type),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_timeout),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_tick_resolution),
		TEST_CASE(adapter_create_max),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

/* Entry point invoked by the test framework; runs the whole suite. */
static int
test_event_timer_adapter_func(void)
{
	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
}

REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);