1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 * Copyright(c) 2017-2018 Intel Corporation.
4 */
5
6 #include "test.h"
7
8 #include <math.h>
9
10 #include <rte_common.h>
11 #include <rte_cycles.h>
12 #include <rte_debug.h>
13 #include <rte_eal.h>
14 #include <rte_ethdev.h>
15
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
test_event_timer_adapter_func(void)18 test_event_timer_adapter_func(void)
19 {
20 printf("event_timer_adapter not supported on Windows, skipping test\n");
21 return TEST_SKIPPED;
22 }
23
24 #else
25
26 #include <rte_eventdev.h>
27 #include <rte_event_timer_adapter.h>
28 #include <rte_mempool.h>
29 #include <rte_launch.h>
30 #include <rte_lcore.h>
31 #include <rte_per_lcore.h>
32 #include <rte_random.h>
33 #include <rte_bus_vdev.h>
34 #include <rte_service.h>
35 #include <stdbool.h>
36
37 /* 4K timers corresponds to sw evdev max inflight events */
38 #define MAX_TIMERS (4 * 1024)
39 #define BKT_TCK_NSEC
40
41 #define NSECPERSEC 1E9
42 #define BATCH_SIZE 16
43 /* Both the app lcore and adapter ports are linked to this queue */
44 #define TEST_QUEUE_ID 0
45 /* Port the application dequeues from */
46 #define TEST_PORT_ID 0
47 #define TEST_ADAPTER_ID 0
48
49 /* Handle log statements in same manner as test macros */
50 #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
51
52 static int evdev;
53 static struct rte_event_timer_adapter *timdev;
54 static struct rte_mempool *eventdev_test_mempool;
55 static struct rte_ring *timer_producer_ring;
56 static uint64_t global_bkt_tck_ns;
57 static uint64_t global_info_bkt_tck_ns;
58 static volatile uint8_t arm_done;
59
60 #define CALC_TICKS(tks) ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)
61
62 /* Wait double timeout ticks for software and an extra tick for hardware */
63 #define WAIT_TICKS(tks) (using_services ? 2 * (tks) : tks + 1)
64
65 static bool using_services;
66 static uint32_t test_lcore1;
67 static uint32_t test_lcore2;
68 static uint32_t test_lcore3;
69 static uint32_t sw_evdev_slcore;
70 static uint32_t sw_adptr_slcore;
71
72 static inline void
devconf_set_default_sane_values(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)73 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
74 struct rte_event_dev_info *info)
75 {
76 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
77 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
78 dev_conf->nb_event_ports = 1;
79 dev_conf->nb_event_queues = 1;
80 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
81 dev_conf->nb_event_port_dequeue_depth =
82 info->max_event_port_dequeue_depth;
83 dev_conf->nb_event_port_enqueue_depth =
84 info->max_event_port_enqueue_depth;
85 dev_conf->nb_event_port_enqueue_depth =
86 info->max_event_port_enqueue_depth;
87 dev_conf->nb_events_limit =
88 info->max_num_events;
89 }
90
/*
 * Configure and start the event device under test: verify it can hold
 * MAX_TIMERS in-flight events, apply the default configuration, set up
 * one queue and one port, link them, and (for a software eventdev) map
 * and start the device's service on sw_evdev_slcore.
 */
static inline int
eventdev_setup(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	uint32_t service_id;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	/* A negative max_num_events is accepted here (no fixed limit). */
	TEST_ASSERT(info.max_num_events < 0 ||
			info.max_num_events >= (int32_t)MAX_TIMERS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_TIMERS);

	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	ret = rte_event_queue_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);

	/* Configure event port */
	ret = rte_event_port_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
	/* NULL queue list links the port to all queues. */
	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);

	/* If this is a software event device, map and start its service */
	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(
				sw_evdev_slcore),
				"Failed to start service core");
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
				service_id, sw_evdev_slcore, 1),
				"Failed to map evdev service");
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
				service_id, 1),
				"Failed to start evdev service");
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}
139
/*
 * Suite-level setup: ensure an event device exists (creating an event_sw
 * vdev if none is present), decide whether service cores are needed,
 * reserve the worker/service lcores the tests rely on, and bring the
 * device up via eventdev_setup().  Returns TEST_SKIPPED when not enough
 * lcores are available.
 */
static int
testsuite_setup(void)
{
	/* Some of the multithreaded tests require 3 other lcores to run */
	unsigned int required_lcore_count = 4;
	uint32_t service_id;

	/* To make it easier to map services later if needed, just reset
	 * service core state.
	 */
	(void) rte_service_lcore_reset_all();

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... testing with"
			" event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				"Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		/* A software event device will use a software event timer
		 * adapter as well. 2 more cores required to convert to
		 * service cores.
		 */
		required_lcore_count += 2;
		using_services = true;
	}

	if (rte_lcore_count() < required_lcore_count) {
		printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
				required_lcore_count);
		return TEST_SKIPPED;
	}

	/* Assign lcores for various tasks */
	test_lcore1 = rte_get_next_lcore(-1, 1, 0);
	test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
	test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
	if (using_services) {
		/* Two extra lcores become service cores for the sw evdev
		 * and the sw timer adapter respectively.
		 */
		sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
		sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
	}

	return eventdev_setup();
}
190
191 static void
testsuite_teardown(void)192 testsuite_teardown(void)
193 {
194 rte_event_dev_stop(evdev);
195 rte_event_dev_close(evdev);
196 }
197
/*
 * Map the software timer adapter's service onto sw_adptr_slcore and set
 * it running.  Tolerates the service lcore already having been added or
 * started by a previous fixture (-EALREADY).
 */
static int
setup_adapter_service(struct rte_event_timer_adapter *adptr)
{
	uint32_t adapter_service_id;
	int ret;

	/* retrieve service ids */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
			&adapter_service_id), "Failed to get event timer "
			"adapter service id");
	/* add a service core and start it */
	ret = rte_service_lcore_add(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to add service core");
	ret = rte_service_lcore_start(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to start service core");

	/* map services to it */
	TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
			sw_adptr_slcore, 1),
			"Failed to map adapter service");

	/* set services to running */
	TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
			"Failed to start event timer adapter service");

	return TEST_SUCCESS;
}
227
228 static int
test_port_conf_cb(uint16_t id,uint8_t event_dev_id,uint8_t * event_port_id,void * conf_arg)229 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
230 void *conf_arg)
231 {
232 struct rte_event_dev_config dev_conf;
233 struct rte_event_dev_info info;
234 struct rte_event_port_conf *port_conf, def_port_conf = {0};
235 uint32_t started;
236 static int port_allocated;
237 static uint8_t port_id;
238 int ret;
239
240 if (port_allocated) {
241 *event_port_id = port_id;
242 return 0;
243 }
244
245 RTE_SET_USED(id);
246
247 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
248 &started);
249 if (ret < 0)
250 return ret;
251
252 if (started)
253 rte_event_dev_stop(event_dev_id);
254
255 ret = rte_event_dev_info_get(evdev, &info);
256 if (ret < 0)
257 return ret;
258
259 devconf_set_default_sane_values(&dev_conf, &info);
260
261 port_id = dev_conf.nb_event_ports;
262 dev_conf.nb_event_ports++;
263
264 ret = rte_event_dev_configure(event_dev_id, &dev_conf);
265 if (ret < 0) {
266 if (started)
267 rte_event_dev_start(event_dev_id);
268 return ret;
269 }
270
271 if (conf_arg != NULL)
272 port_conf = conf_arg;
273 else {
274 port_conf = &def_port_conf;
275 ret = rte_event_port_default_conf_get(event_dev_id, port_id,
276 port_conf);
277 if (ret < 0)
278 return ret;
279 }
280
281 ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
282 if (ret < 0)
283 return ret;
284
285 *event_port_id = port_id;
286
287 if (started)
288 rte_event_dev_start(event_dev_id);
289
290 /* Reuse this port number next time this is called */
291 port_allocated = 1;
292
293 return 0;
294 }
295
296 static int
_timdev_setup(uint64_t max_tmo_ns,uint64_t bkt_tck_ns,uint64_t flags)297 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags)
298 {
299 struct rte_event_timer_adapter_info info;
300 struct rte_event_timer_adapter_conf config = {
301 .event_dev_id = evdev,
302 .timer_adapter_id = TEST_ADAPTER_ID,
303 .timer_tick_ns = bkt_tck_ns,
304 .max_tmo_ns = max_tmo_ns,
305 .nb_timers = MAX_TIMERS * 10,
306 .flags = flags,
307 };
308 uint32_t caps = 0;
309 const char *pool_name = "timdev_test_pool";
310
311 global_bkt_tck_ns = bkt_tck_ns;
312
313 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
314 "failed to get adapter capabilities");
315
316 if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC &&
317 !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC))
318 return -ENOTSUP;
319
320 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
321 timdev = rte_event_timer_adapter_create_ext(&config,
322 test_port_conf_cb,
323 NULL);
324 setup_adapter_service(timdev);
325 using_services = true;
326 } else
327 timdev = rte_event_timer_adapter_create(&config);
328
329 TEST_ASSERT_NOT_NULL(timdev,
330 "failed to create event timer ring");
331
332 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
333 "failed to Start event timer adapter");
334
335 /* Create event timer mempool */
336 eventdev_test_mempool = rte_mempool_create(pool_name,
337 MAX_TIMERS * 2,
338 sizeof(struct rte_event_timer), /* element size*/
339 0, /* cache size*/
340 0, NULL, NULL, NULL, NULL,
341 rte_socket_id(), 0);
342 if (!eventdev_test_mempool) {
343 printf("ERROR creating mempool\n");
344 return TEST_FAILED;
345 }
346
347 rte_event_timer_adapter_get_info(timdev, &info);
348
349 global_info_bkt_tck_ns = info.min_resolution_ns;
350
351 return TEST_SUCCESS;
352 }
353
354 static int
timdev_setup_usec(void)355 timdev_setup_usec(void)
356 {
357 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
358
359 return using_services ?
360 /* Max timeout is 10,000us and bucket interval is 100us */
361 _timdev_setup(1E7, 1E5, flags) :
362 /* Max timeout is 100us and bucket interval is 1us */
363 _timdev_setup(1E5, 1E3, flags);
364 }
365
366 static int
timdev_setup_usec_multicore(void)367 timdev_setup_usec_multicore(void)
368 {
369 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
370
371 return using_services ?
372 /* Max timeout is 10,000us and bucket interval is 100us */
373 _timdev_setup(1E7, 1E5, flags) :
374 /* Max timeout is 100us and bucket interval is 1us */
375 _timdev_setup(1E5, 1E3, flags);
376 }
377
378 static int
timdev_setup_msec(void)379 timdev_setup_msec(void)
380 {
381 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
382
383 /* Max timeout is 3 mins, and bucket interval is 100 ms */
384 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags);
385 }
386
387 static int
timdev_setup_msec_periodic(void)388 timdev_setup_msec_periodic(void)
389 {
390 uint32_t caps = 0;
391 uint64_t max_tmo_ns;
392
393 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
394 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;
395
396 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
397 "failed to get adapter capabilities");
398
399 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)
400 max_tmo_ns = 0;
401 else
402 max_tmo_ns = 180 * NSECPERSEC;
403
404 /* Periodic mode with 100 ms resolution */
405 return _timdev_setup(max_tmo_ns, NSECPERSEC / 10, flags);
406 }
407
408 static int
timdev_setup_sec(void)409 timdev_setup_sec(void)
410 {
411 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
412
413 /* Max timeout is 100sec and bucket interval is 1sec */
414 return _timdev_setup(1E11, 1E9, flags);
415 }
416
417 static int
timdev_setup_sec_periodic(void)418 timdev_setup_sec_periodic(void)
419 {
420 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
421 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;
422
423 /* Periodic mode with 1 sec resolution */
424 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC, flags);
425 }
426
427 static int
timdev_setup_sec_multicore(void)428 timdev_setup_sec_multicore(void)
429 {
430 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
431
432 /* Max timeout is 100sec and bucket interval is 1sec */
433 return _timdev_setup(1E11, 1E9, flags);
434 }
435
436 static void
timdev_teardown(void)437 timdev_teardown(void)
438 {
439 rte_event_timer_adapter_stop(timdev);
440 rte_event_timer_adapter_free(timdev);
441
442 rte_mempool_free(eventdev_test_mempool);
443 }
444
/*
 * Dequeue up to @nb_evs expiry events into @evs, polling for at most
 * @ticks adapter ticks (converted to a TSC-cycle deadline).  Software
 * adapters get double the budget when draining a full MAX_TIMERS batch.
 * Returns the number of events actually dequeued.
 */
static inline uint16_t
timeout_event_dequeue(struct rte_event *evs, uint64_t nb_evs, uint64_t ticks)
{
	uint16_t ev_cnt = 0;
	uint64_t end_cycle;

	if (using_services && nb_evs == MAX_TIMERS)
		ticks = 2 * ticks;

	/* Convert the tick budget (ticks * ns-per-tick) to TSC cycles. */
	end_cycle = rte_rdtsc() + ticks * global_bkt_tck_ns * rte_get_tsc_hz() / 1E9;

	while (ev_cnt < nb_evs && rte_rdtsc() < end_cycle) {
		ev_cnt += rte_event_dequeue_burst(evdev, TEST_PORT_ID, &evs[ev_cnt], nb_evs, 0);
		rte_pause();
	}

	return ev_cnt;
}
463
/*
 * Exercise the timer state machine: arming past max_tmo must report
 * ERROR_TOOLATE, a valid arm must reach ARMED and then fire, and an
 * armed timer must be cancellable into the CANCELED state.
 */
static inline int
test_timer_state(void)
{
	struct rte_event_timer *ev_tim;
	const uint64_t max_ticks = 100;
	uint64_t ticks, wait_ticks;
	struct rte_event ev;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
	*ev_tim = tim;
	ev_tim->ev.event_ptr = ev_tim;
	/* Deliberately exceed the adapter's max timeout (100 ticks). */
	ev_tim->timeout_ticks = CALC_TICKS(max_ticks + 20);

	/* Arm must reject the too-late timeout (0 timers armed). */
	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
			"Armed timer exceeding max_timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);

	/* Now arm with a valid 10-tick timeout and expect it to fire. */
	ticks = 10;
	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(ticks);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ARMED, ev_tim->state);

	/* Non-service (HW) adapters get a larger dequeue budget here. */
	if (!using_services)
		wait_ticks = 2 * ticks;
	else
		wait_ticks = ticks;

	TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(wait_ticks)), 1,
			"Armed timer failed to trigger.");

	/* Finally, arm within range and cancel before expiry. */
	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(max_ticks - 10);
	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
			1, "Failed to cancel armed timer");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_CANCELED, ev_tim->state);

	rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);

	return TEST_SUCCESS;
}
523
/*
 * Arm @timers one-shot timers, each expiring after @timeout_tcks adapter
 * ticks.  Timer structs come from eventdev_test_mempool; each carries a
 * pointer to itself in event_ptr so consumers can return it to the pool.
 */
static inline int
_arm_timers(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);
	}

	return TEST_SUCCESS;
}
554
/*
 * Dequeue timer events for @wait_sec seconds, returning each received
 * timer to the mempool, then verify that exactly @arm_count -
 * @cancel_count events arrived.  Fails (via the always-false assert
 * below) if the counts don't reconcile when the wait expires.
 */
static inline int
_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
		uint64_t cancel_count)
{
	uint8_t valid_event;
	uint64_t events = 0;
	uint64_t wait_start, max_wait;
	struct rte_event ev;

	max_wait = rte_get_timer_hz() * wait_sec;
	wait_start = rte_get_timer_cycles();
	while (1) {
		if (rte_get_timer_cycles() - wait_start > max_wait) {
			/* max_wait is nonzero, so this assert always fires
			 * when the event count doesn't reconcile in time.
			 */
			if (events + cancel_count != arm_count)
				TEST_ASSERT_SUCCESS(max_wait,
						"Max time limit for timers exceeded.");
			break;
		}

		valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (!valid_event)
			continue;

		/* Give the expired timer back to the pool. */
		rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
		events++;
	}

	return TEST_SUCCESS;
}
584
585 static inline int
test_timer_arm(void)586 test_timer_arm(void)
587 {
588 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
589 "Failed to arm timers");
590 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
591 "Timer triggered count doesn't match arm count");
592 return TEST_SUCCESS;
593 }
594
595 static inline int
test_timer_arm_periodic(void)596 test_timer_arm_periodic(void)
597 {
598 uint32_t caps = 0;
599 uint32_t timeout_count = 0;
600
601 TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS),
602 "Failed to arm timers");
603 /* With a resolution of 100ms and wait time of 1sec,
604 * there will be 10 * MAX_TIMERS periodic timer triggers.
605 */
606 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
607 "failed to get adapter capabilities");
608
609 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)
610 timeout_count = 10;
611 else
612 timeout_count = 9;
613
614 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, timeout_count * MAX_TIMERS, 0),
615 "Timer triggered count doesn't match arm count");
616 return TEST_SUCCESS;
617 }
618
619 static int
_arm_wrapper(void * arg)620 _arm_wrapper(void *arg)
621 {
622 RTE_SET_USED(arg);
623
624 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
625 "Failed to arm timers");
626
627 return TEST_SUCCESS;
628 }
629
630 static inline int
test_timer_arm_multicore(void)631 test_timer_arm_multicore(void)
632 {
633
634 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
635 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);
636
637 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
638 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);
639
640 rte_eal_mp_wait_lcore();
641 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
642 "Timer triggered count doesn't match arm count");
643
644 return TEST_SUCCESS;
645 }
646
647 #define MAX_BURST 16
/*
 * Arm @timers timers in bursts of MAX_BURST using the tmo_tick burst API
 * (all timers in a burst share the same @timeout_tcks timeout).  Any
 * remainder of @timers modulo MAX_BURST is not armed.
 */
static inline int
_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	int j;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		/* Initialize every timer in the burst from the template. */
		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);
	}

	return TEST_SUCCESS;
}
682
683 static inline int
test_timer_arm_burst(void)684 test_timer_arm_burst(void)
685 {
686 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
687 "Failed to arm timers");
688 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
689 "Timer triggered count doesn't match arm count");
690
691 return TEST_SUCCESS;
692 }
693
694 static inline int
test_timer_arm_burst_periodic(void)695 test_timer_arm_burst_periodic(void)
696 {
697 uint32_t caps = 0;
698 uint32_t timeout_count = 0;
699
700 TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS),
701 "Failed to arm timers");
702 /* With a resolution of 100ms and wait time of 1sec,
703 * there will be 10 * MAX_TIMERS periodic timer triggers.
704 */
705 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
706 "failed to get adapter capabilities");
707
708 if (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)
709 timeout_count = 10;
710 else
711 timeout_count = 9;
712
713 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, timeout_count * MAX_TIMERS, 0),
714 "Timer triggered count doesn't match arm count");
715
716 return TEST_SUCCESS;
717 }
718
719 static int
_arm_wrapper_burst(void * arg)720 _arm_wrapper_burst(void *arg)
721 {
722 RTE_SET_USED(arg);
723
724 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
725 "Failed to arm timers");
726
727 return TEST_SUCCESS;
728 }
729
730 static inline int
test_timer_arm_burst_multicore(void)731 test_timer_arm_burst_multicore(void)
732 {
733 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
734 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);
735
736 rte_eal_mp_wait_lcore();
737 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
738 "Timer triggered count doesn't match arm count");
739
740 return TEST_SUCCESS;
741 }
742
/*
 * Periodic variant of the arm-then-cancel test: arm MAX_TIMERS 1-tick
 * timers one at a time, cancelling each after a staggered delay, then
 * verify no events are delivered (cancel count equals arm count).
 */
static inline int
test_timer_cancel_periodic(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(1),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		/* Staggered (deterministic) delay before cancelling. */
		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	/* All armed timers were cancelled, so zero events are expected. */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}
784
/*
 * Arm MAX_TIMERS 20-tick timers one at a time, cancelling each after a
 * staggered delay, then verify that no expiry events are delivered
 * (cancel count equals arm count).
 */
static inline int
test_timer_cancel(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		/* Staggered (deterministic) delay before cancelling. */
		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	/* All armed timers were cancelled, so zero events are expected. */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}
826
/*
 * Producer side of the multicore cancel test: arm @timers timers with a
 * @timeout_tcks tick timeout, verify each reaches the ARMED state, and
 * hand it to the canceller via timer_producer_ring (busy-waiting until
 * the ring has room).
 */
static int
_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");

		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
				"Failed to arm event timer");

		/* Spin until the canceller makes room in the ring. */
		while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
			;
	}

	return TEST_SUCCESS;
}
863
864 static int
_cancel_producer_burst(uint64_t timeout_tcks,uint64_t timers)865 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
866 {
867
868 uint64_t i;
869 int j, ret;
870 struct rte_event_timer *ev_tim[MAX_BURST];
871 const struct rte_event_timer tim = {
872 .ev.op = RTE_EVENT_OP_NEW,
873 .ev.queue_id = 0,
874 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
875 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
876 .ev.event_type = RTE_EVENT_TYPE_TIMER,
877 .state = RTE_EVENT_TIMER_NOT_ARMED,
878 .timeout_ticks = CALC_TICKS(timeout_tcks),
879 };
880 int arm_count = 0;
881
882 for (i = 0; i < timers / MAX_BURST; i++) {
883 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
884 eventdev_test_mempool,
885 (void **)ev_tim, MAX_BURST),
886 "mempool alloc failed");
887
888 for (j = 0; j < MAX_BURST; j++) {
889 *ev_tim[j] = tim;
890 ev_tim[j]->ev.event_ptr = ev_tim[j];
891 }
892
893 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
894 ev_tim, tim.timeout_ticks, MAX_BURST),
895 MAX_BURST, "Failed to arm timer %d", rte_errno);
896
897 for (j = 0; j < MAX_BURST; j++)
898 TEST_ASSERT_EQUAL(ev_tim[j]->state,
899 RTE_EVENT_TIMER_ARMED,
900 "Event timer not armed, state = %d",
901 ev_tim[j]->state);
902
903 ret = rte_ring_enqueue_bulk(timer_producer_ring,
904 (void **)ev_tim, MAX_BURST, NULL);
905 TEST_ASSERT_EQUAL(ret, MAX_BURST,
906 "Failed to enqueue event timers to ring");
907 arm_count += ret;
908 }
909
910 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
911 "Failed to arm expected number of event timers");
912
913 return TEST_SUCCESS;
914 }
915
916 static int
_cancel_producer_wrapper(void * args)917 _cancel_producer_wrapper(void *args)
918 {
919 RTE_SET_USED(args);
920
921 return _cancel_producer(20, MAX_TIMERS);
922 }
923
924 static int
_cancel_producer_burst_wrapper(void * args)925 _cancel_producer_burst_wrapper(void *args)
926 {
927 RTE_SET_USED(args);
928
929 return _cancel_producer_burst(100, MAX_TIMERS);
930 }
931
/*
 * Consumer lcore: cancel timers received over timer_producer_ring, one
 * at a time, until the producers signal completion via arm_done AND the
 * ring is fully drained.  Cancelled timers go back to the mempool.
 */
static int
_cancel_thread(void *args)
{
	RTE_SET_USED(args);
	struct rte_event_timer *ev_tim = NULL;
	uint16_t ret;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
			continue;

		ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
		TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
		rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
	}

	return TEST_SUCCESS;
}
950
/*
 * Burst consumer lcore: dequeue bursts of armed timers from
 * timer_producer_ring, verify each is ARMED, cancel the whole burst, and
 * return the structs to the mempool.  Runs until the producer signals
 * arm_done and the ring is drained, then checks the total cancel count.
 */
static int
_cancel_burst_thread(void *args)
{
	RTE_SET_USED(args);

	int ret, i, n;
	struct rte_event_timer *ev_tim[MAX_BURST];
	uint64_t cancel_count = 0;
	uint64_t dequeue_count = 0;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		n = rte_ring_dequeue_burst(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		if (!n)
			continue;

		dequeue_count += n;

		for (i = 0; i < n; i++)
			TEST_ASSERT_EQUAL(ev_tim[i]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[i]->state);

		ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
		TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
				"event timers");
		/* ret <= n <= MAX_BURST, so the RTE_MIN is a safety clamp. */
		rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
				RTE_MIN(ret, MAX_BURST));

		cancel_count += ret;
	}

	TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
			"Failed to cancel expected number of timers: "
			"expected = %d, cancel_count = %"PRIu64", "
			"dequeue_count = %"PRIu64"\n", MAX_TIMERS,
			cancel_count, dequeue_count);

	return TEST_SUCCESS;
}
992
993 static inline int
test_timer_cancel_multicore(void)994 test_timer_cancel_multicore(void)
995 {
996 arm_done = 0;
997 timer_producer_ring = rte_ring_create("timer_cancel_queue",
998 MAX_TIMERS * 2, rte_socket_id(), 0);
999 TEST_ASSERT_NOT_NULL(timer_producer_ring,
1000 "Unable to reserve memory for ring");
1001
1002 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
1003 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
1004 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);
1005
1006 rte_eal_wait_lcore(test_lcore1);
1007 rte_eal_wait_lcore(test_lcore2);
1008 arm_done = 1;
1009 rte_eal_wait_lcore(test_lcore3);
1010 rte_ring_free(timer_producer_ring);
1011
1012 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
1013 MAX_TIMERS * 2),
1014 "Timer triggered count doesn't match arm count");
1015
1016 return TEST_SUCCESS;
1017 }
1018
1019 static inline int
test_timer_cancel_burst_multicore(void)1020 test_timer_cancel_burst_multicore(void)
1021 {
1022 arm_done = 0;
1023 timer_producer_ring = rte_ring_create("timer_cancel_queue",
1024 MAX_TIMERS * 2, rte_socket_id(), 0);
1025 TEST_ASSERT_NOT_NULL(timer_producer_ring,
1026 "Unable to reserve memory for ring");
1027
1028 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
1029 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
1030 test_lcore1);
1031
1032 rte_eal_wait_lcore(test_lcore1);
1033 arm_done = 1;
1034 rte_eal_wait_lcore(test_lcore2);
1035 rte_ring_free(timer_producer_ring);
1036
1037 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
1038 MAX_TIMERS),
1039 "Timer triggered count doesn't match arm count");
1040
1041 return TEST_SUCCESS;
1042 }
1043
/*
 * Arm MAX_TIMERS 20-tick timers and cancel a pseudo-random subset of
 * them (roughly half, chosen by rte_rand()), then verify the delivered
 * event count equals the number of timers left un-cancelled.
 */
static inline int
test_timer_cancel_random(void)
{
	uint64_t i;
	uint64_t events_canceled = 0;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		/* Cancel with ~50% probability after a staggered delay. */
		if (rte_rand() & 1) {
			rte_delay_us(100 + (i % 5000));
			TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
					timdev,
					&ev_tim, 1), 1,
					"Failed to cancel event timer %d", rte_errno);
			rte_mempool_put(eventdev_test_mempool, ev_tim);
			events_canceled++;
		}
	}

	/* Expect exactly (armed - cancelled) expiry events. */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			events_canceled),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}
1089
1090 /* Check that the adapter can be created correctly */
1091 static int
adapter_create(void)1092 adapter_create(void)
1093 {
1094 int adapter_id = 0;
1095 struct rte_event_timer_adapter *adapter, *adapter2;
1096
1097 struct rte_event_timer_adapter_conf conf = {
1098 .event_dev_id = evdev + 1, // invalid event dev id
1099 .timer_adapter_id = adapter_id,
1100 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
1101 .timer_tick_ns = NSECPERSEC / 10,
1102 .max_tmo_ns = 180 * NSECPERSEC,
1103 .nb_timers = MAX_TIMERS,
1104 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
1105 };
1106 uint32_t caps = 0;
1107
1108 /* Test invalid conf */
1109 adapter = rte_event_timer_adapter_create(&conf);
1110 TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
1111 "event device id");
1112 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
1113 "invalid event device id");
1114
1115 /* Test valid conf */
1116 conf.event_dev_id = evdev;
1117 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
1118 "failed to get adapter capabilities");
1119 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
1120 adapter = rte_event_timer_adapter_create_ext(&conf,
1121 test_port_conf_cb,
1122 NULL);
1123 else
1124 adapter = rte_event_timer_adapter_create(&conf);
1125 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
1126 "configuration");
1127
1128 /* Test existing id */
1129 adapter2 = rte_event_timer_adapter_create(&conf);
1130 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
1131 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
1132 "id");
1133
1134 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
1135 "Failed to free adapter");
1136
1137 return TEST_SUCCESS;
1138 }
1139
1140
/* Test that adapter can be freed correctly. */
static int
adapter_free(void)
{
	/* A running adapter must be stopped before it can be freed */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter (timdev is now stale) */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	/* Release the timer-object pool allocated by the setup routine */
	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}
1164
1165 /* Test that adapter info can be retrieved and is correct. */
1166 static int
adapter_get_info(void)1167 adapter_get_info(void)
1168 {
1169 struct rte_event_timer_adapter_info info;
1170
1171 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
1172 "Failed to get adapter info");
1173
1174 if (using_services)
1175 TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
1176 "Expected port id = 1, got port id = %d",
1177 info.event_dev_port_id);
1178
1179 return TEST_SUCCESS;
1180 }
1181
1182 /* Test adapter lookup via adapter ID. */
1183 static int
adapter_lookup(void)1184 adapter_lookup(void)
1185 {
1186 struct rte_event_timer_adapter *adapter;
1187
1188 adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
1189 TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");
1190
1191 return TEST_SUCCESS;
1192 }
1193
/* Verify that starting an already-started adapter reports -EALREADY. */
static int
adapter_start(void)
{
	/* _timdev_setup() starts the adapter as part of setup */
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10,
			RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES),
			"Failed to start adapter");
	/* A second explicit start without an intervening stop must fail */
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}
1205
1206 /* Test that adapter stops correctly. */
1207 static int
adapter_stop(void)1208 adapter_stop(void)
1209 {
1210 struct rte_event_timer_adapter *l_adapter = NULL;
1211
1212 /* Test adapter stop */
1213 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
1214 "Failed to stop event adapter");
1215
1216 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
1217 "Erroneously stopped null event adapter");
1218
1219 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
1220 "Failed to free adapter");
1221
1222 rte_mempool_free(eventdev_test_mempool);
1223
1224 return TEST_SUCCESS;
1225 }
1226
/* Test increment and reset of ev_enq_count stat */
static int
stat_inc_reset_ev_enq(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[num_evtims];
	struct rte_event_timer_adapter_stats stats;
	uint64_t ticks = 5;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	/* Each timer's event payload points back at the timer itself */
	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Baseline: the enqueue counter must start at zero */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
			"startup");

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	/* Drain every expiry event before checking the counter */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d",
			num_evtims, n);

	/* Make sure the eventdev is still empty */
	n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1));

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	/* Check stats again: one enqueue counted per expired timer */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
			"Expected enqueue stat = %d; got %d", num_evtims,
			(int)stats.ev_enq_count);

	/* Reset and check again */
	ret = rte_event_timer_adapter_stats_reset(timdev);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
			"Expected enqueue stat = %d; got %d", 0,
			(int)stats.ev_enq_count);

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}
1301
/* Test various cases in arming timers */
static int
event_timer_arm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	uint64_t ticks = 5;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Test single timer arm succeeds */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
			"in incorrect state");

	/* Test arm of armed timer fails: 0 armed, rte_errno = EALREADY */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
			"rte_event_timer_arm_burst: 0, got: %d", ret);
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after arming already armed timer");

	/* The rejected re-arm must not produce a second expiry event */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1354
/* This test checks that repeated references to the same event timer in the
 * arm request work as expected; only the first one through should succeed.
 */
static int
event_timer_arm_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	uint64_t ticks = 5;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Arm the same timer twice in one burst: expect exactly one success
	 * and rte_errno = EALREADY for the duplicate.
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
	TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
			"rte_event_timer_arm_burst");
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-arm");

	/* Only a single expiry event should be generated */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
			"expected: 1, actual: %d", n);

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1402
/* Test the timer expiry event is generated at the expected time. */
static int
event_timer_arm_expiry(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};
	uint64_t ticks = 30;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up an event timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 secs */
	evtim->ev.event_ptr = evtim;

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
			"timer in incorrect state");

	/* Nothing may fire before the timeout: poll one tick short of it */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), ticks - 1);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");

	/* The expiry must then arrive within the remaining tick window */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(1));
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
			"expiry events", n);
	TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
			"Dequeued unexpected type of event");

	/* Check that we recover the original event timer and then free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}
1457
/* Check that rearming a timer works as expected. */
static int
event_timer_arm_rearm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};
	uint64_t ticks = 1;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 0.1 sec */
	evtim->ev.event_ptr = evtim;

	/* Arm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Recover the timer through the event that was dequeued. */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");

	/* Need to reset state in case implementation can't do it */
	evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;

	/* Rearm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	/* The rearmed timer should expire a second time */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}
1522
/* Check that the adapter handles the max specified number of timers as
 * expected.
 */
static int
event_timer_arm_max(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[num_evtims];
	uint64_t ticks = 5;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	/* Each timer's event payload points back at the timer itself */
	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	/* All armed timers must generate exactly one expiry each */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d",
			num_evtims, n);

	/* Make sure the eventdev is still empty */
	n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1));

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}
1576
1577 /* Check that creating an event timer with incorrect event sched type fails. */
1578 static int
event_timer_arm_invalid_sched_type(void)1579 event_timer_arm_invalid_sched_type(void)
1580 {
1581 int ret;
1582 struct rte_event_timer *evtim = NULL;
1583 const struct rte_event_timer init_tim = {
1584 .ev.op = RTE_EVENT_OP_NEW,
1585 .ev.queue_id = TEST_QUEUE_ID,
1586 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1587 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1588 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1589 .state = RTE_EVENT_TIMER_NOT_ARMED,
1590 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1591 };
1592
1593 if (!using_services)
1594 return -ENOTSUP;
1595
1596 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1597 if (evtim == NULL) {
1598 /* Failed to get an event timer object */
1599 return TEST_FAILED;
1600 }
1601
1602 *evtim = init_tim;
1603 evtim->ev.event_ptr = evtim;
1604 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type
1605
1606 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1607 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
1608 "sched type, but didn't");
1609 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
1610 " arm fail with invalid queue");
1611
1612 rte_mempool_put(eventdev_test_mempool, &evtim);
1613
1614 return TEST_SUCCESS;
1615 }
1616
/* Check that creating an event timer with a timeout value that is too small or
 * too big fails.
 */
static int
event_timer_arm_invalid_timeout(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = 0; // timeout too small

	/* Too-small timeout: arm fails, state goes to ERROR_TOOEARLY */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
			"Unexpected event timer state");

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big

	/* Too-large timeout (past max_tmo): state goes to ERROR_TOOLATE */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Unexpected event timer state");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1669
/* Exercise cancel: uninited, unarmed, and armed timers. */
static int
event_timer_cancel(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};
	uint64_t ticks = 30;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Check that cancelling an uninited timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"uninited timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling uninited timer");

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */

	/* Check that cancelling an inited but unarmed timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"unarmed timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling unarmed timer");

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"evtim in incorrect state");

	/* Delay 1 sec — cancel while the 3-sec timer is still pending */
	rte_delay_ms(1000);

	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
			"evtim in incorrect state");

	/* Make sure that no expiry event was generated */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1736
/* Check that canceling the same timer twice in one burst only succeeds once. */
static int
event_timer_cancel_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};
	uint64_t ticks = 30;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"timer in unexpected state");

	/* Now, test that referencing the same timer twice in the same call
	 * fails
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
			RTE_DIM(evtim_arr));

	/* Two requests to cancel same timer, only one should succeed */
	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
			"twice");

	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-cancel: rte_errno = %d", rte_errno);

	/* Still make sure that no expiry event was generated */
	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1794
/* Check that event timer adapter tick resolution works as expected by testing
 * the number of adapter ticks that occur within a particular time interval.
 */
static int
adapter_tick_resolution(void)
{
	struct rte_event_timer_adapter_stats stats;
	uint64_t adapter_tick_count;

	/* Only run this test in the software driver case */
	if (!using_services)
		return -ENOTSUP;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
			"Failed to reset stats");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");
	TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
			"not zeroed out");

	/* Delay 1 second; should let at least 10 ticks occur with the default
	 * adapter configuration used by this test.
	 */
	rte_delay_ms(1000);

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");

	/* Allow a small overshoot (10-12) to absorb scheduling jitter */
	adapter_tick_count = stats.adapter_tick_count;
	TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
			"Expected 10-12 adapter ticks, got %"PRIu64"\n",
			adapter_tick_count);

	return TEST_SUCCESS;
}
1831
/* Create the maximum number of adapters, verify the next create fails, and
 * check that service registration/unregistration is balanced.
 */
static int
adapter_create_max(void)
{
	int i;
	uint32_t svc_start_count, svc_end_count;
	struct rte_event_timer_adapter *adapters[
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev,
		// timer_adapter_id set in loop
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};

	if (!using_services)
		return -ENOTSUP;

	svc_start_count = rte_service_get_count();

	/* This test expects that there are sufficient service IDs available
	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
	 * (the SW event device, for example).
	 */
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
		conf.timer_adapter_id = i;
		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb, NULL);
		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
				"%d", i);
	}

	/* One adapter beyond the maximum must be rejected */
	conf.timer_adapter_id = i;
	adapters[i] = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");

	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
	 * have been created
	 */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			"Failed to create expected number of services");

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
				"Failed to free adapter %d", i);

	/* Check that service count is back to where it was at start */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
			"correct number of services");

	return TEST_SUCCESS;
}
1891
1892 static inline int
test_timer_ticks_remaining(void)1893 test_timer_ticks_remaining(void)
1894 {
1895 uint64_t ticks_remaining = UINT64_MAX;
1896 struct rte_event_timer *ev_tim;
1897 struct rte_event ev;
1898 int ret, i;
1899 const struct rte_event_timer tim = {
1900 .ev.op = RTE_EVENT_OP_NEW,
1901 .ev.queue_id = 0,
1902 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1903 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1904 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1905 .state = RTE_EVENT_TIMER_NOT_ARMED,
1906 };
1907
1908 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
1909 *ev_tim = tim;
1910 ev_tim->ev.event_ptr = ev_tim;
1911 #define TEST_TICKS 5
1912 ev_tim->timeout_ticks = CALC_TICKS(TEST_TICKS);
1913
1914 ret = rte_event_timer_remaining_ticks_get(timdev, ev_tim,
1915 &ticks_remaining);
1916 if (ret == -ENOTSUP) {
1917 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
1918 printf("API not supported, skipping test\n");
1919 return TEST_SKIPPED;
1920 }
1921
1922 /* Test that unarmed timer returns error */
1923 TEST_ASSERT_FAIL(ret,
1924 "Didn't fail to get ticks for unarmed event timer");
1925
1926 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
1927 "Failed to arm timer with proper timeout.");
1928 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
1929 "Improper timer state set expected %d returned %d",
1930 RTE_EVENT_TIMER_ARMED, ev_tim->state);
1931
1932 for (i = 0; i < TEST_TICKS; i++) {
1933 ret = rte_event_timer_remaining_ticks_get(timdev, ev_tim,
1934 &ticks_remaining);
1935 if (ret < 0)
1936 return TEST_FAILED;
1937
1938 TEST_ASSERT_EQUAL((int)ticks_remaining, TEST_TICKS - i,
1939 "Expected %d ticks remaining, got %"PRIu64"",
1940 TEST_TICKS - i, ticks_remaining);
1941
1942 rte_delay_ms(100);
1943 }
1944
1945 TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(1)), 1,
1946 "Armed timer failed to trigger.");
1947
1948 if (ev_tim->state != RTE_EVENT_TIMER_NOT_ARMED)
1949 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
1950
1951 /* Test that timer that fired returns error */
1952 TEST_ASSERT_FAIL(rte_event_timer_remaining_ticks_get(timdev, ev_tim,
1953 &ticks_remaining),
1954 "Didn't fail to get ticks for unarmed event timer");
1955
1956 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
1957
1958 #undef TEST_TICKS
1959 return TEST_SUCCESS;
1960 }
1961
1962
1963 static struct unit_test_suite event_timer_adptr_functional_testsuite = {
1964 .suite_name = "event timer functional test suite",
1965 .setup = testsuite_setup,
1966 .teardown = testsuite_teardown,
1967 .unit_test_cases = {
1968 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1969 test_timer_state),
1970 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1971 test_timer_arm),
1972 TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
1973 test_timer_arm_periodic),
1974 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1975 test_timer_arm_burst),
1976 TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
1977 test_timer_arm_burst_periodic),
1978 TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
1979 test_timer_cancel),
1980 TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown,
1981 test_timer_cancel_periodic),
1982 TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
1983 test_timer_cancel_random),
1984 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
1985 test_timer_arm_multicore),
1986 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
1987 test_timer_arm_burst_multicore),
1988 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
1989 test_timer_cancel_multicore),
1990 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
1991 test_timer_cancel_burst_multicore),
1992 TEST_CASE(adapter_create),
1993 TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
1994 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1995 adapter_get_info),
1996 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1997 adapter_lookup),
1998 TEST_CASE_ST(NULL, timdev_teardown,
1999 adapter_start),
2000 TEST_CASE_ST(timdev_setup_msec, NULL,
2001 adapter_stop),
2002 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2003 stat_inc_reset_ev_enq),
2004 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2005 event_timer_arm),
2006 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2007 event_timer_arm_double),
2008 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2009 event_timer_arm_expiry),
2010 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2011 event_timer_arm_rearm),
2012 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2013 event_timer_arm_max),
2014 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2015 event_timer_arm_invalid_sched_type),
2016 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2017 event_timer_arm_invalid_timeout),
2018 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2019 event_timer_cancel),
2020 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2021 event_timer_cancel_double),
2022 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2023 adapter_tick_resolution),
2024 TEST_CASE(adapter_create_max),
2025 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
2026 test_timer_ticks_remaining),
2027 TEST_CASES_END() /**< NULL terminate unit test array */
2028 }
2029 };
2030
2031 static int
test_event_timer_adapter_func(void)2032 test_event_timer_adapter_func(void)
2033 {
2034 return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
2035 }
2036
#endif /* !RTE_EXEC_ENV_WINDOWS */

/* Expose the suite to the dpdk-test harness under the command name
 * "event_timer_adapter_test" (on Windows the stub above reports SKIPPED). */
REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
2040