/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include "test.h"

/*
 * Timer
 * =====
 *
 * #. Stress test 1.
 *
 * The objective of the timer stress tests is to check that there are no
 * race conditions in list and status management. This test launches,
 * resets and stops the timer very often on many cores at the same
 * time.
 *
 * - Only one timer is used for this test.
 * - On each core, the rte_timer_manage() function is called from the main
 *   loop approximately every microsecond.
 * - In the main loop, the timer may be reset (randomly, with a
 *   probability of 0.5 %) 100 microseconds later on a random core, or
 *   stopped (with a probability of 0.5 % also).
 * - In the callback, the timer can be reset (randomly, with a
 *   probability of 0.5 %) one second later on the same core or
 *   on another core (same probability), or stopped (same
 *   probability).
 *
 * #. Stress test 2.
 *
 * The objective of this test is similar to the first in that it attempts
 * to find if there are any race conditions in the timer library. However,
 * it is less complex in terms of operations performed and duration, as it
 * is designed to have a predictable outcome that can be tested.
 *
 * - A set of timers is initialized for use by the test.
 * - All cores then simultaneously schedule all the timers to expire at
 *   the same time, so conflicts should occur.
 * - Then there is a delay while we wait for the timers to expire.
 * - Then the main lcore calls rte_timer_manage() and we check that all
 *   timers have had their callbacks called exactly once - no more, no less.
 * - Then we repeat the process, except that after setting up the timers, we
 *   have all cores randomly reschedule them.
 * - Again we check that the expected number of callbacks has occurred when
 *   we call rte_timer_manage().
 *
 * #. Basic test.
 *
 * This test performs basic functional checks of the timers. The test
 * uses four different timers that are loaded and stopped under
 * specific conditions in specific contexts.
 *
 * - Four timers are used for this test.
 * - On each core, the rte_timer_manage() function is called from the main
 *   loop every 3 microseconds.
 *
 * The autotest python script checks that the behavior is correct:
 *
 * - timer0
 *
 *   - At initialization, timer0 is loaded by the main core, on the
 *     main core in "single" mode (time = 1 second).
 *   - In the first 19 callbacks, timer0 is reloaded on the same core,
 *     then it is explicitly stopped at the 20th call.
 *   - At t=25s, timer0 is reloaded once by timer2.
 *
 * - timer1
 *
 *   - At initialization, timer1 is loaded by the main core, on the
 *     main core in "single" mode (time = 2 seconds).
 *   - In the first 9 callbacks, timer1 is reloaded on another
 *     core. After the 10th callback, timer1 is not reloaded anymore.
 *
 * - timer2
 *
 *   - At initialization, timer2 is loaded by the main core, on the
 *     main core in "periodical" mode (time = 1 second).
 *   - In the callback, when t=25s, it stops timer3 and reloads timer0
 *     on the current core.
 *
 * - timer3
 *
 *   - At initialization, timer3 is loaded by the main core, on
 *     another core in "periodical" mode (time = 1 second).
 *   - It is stopped at t=25s by timer2.
 */

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <math.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_timer.h>
#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_pause.h>

#define TEST_DURATION_S 1 /* in seconds */
#define NB_TIMER 4

#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3

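/*
 * Shared test state: end_time is the timer-cycle value at which the
 * per-lcore test loops stop, and test_failed is set by any lcore on
 * error so that the others can bail out.
 */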
static volatile uint64_t end_time;
static volatile int test_failed;

struct mytimerinfo {
	struct rte_timer tim;
	unsigned id;
	unsigned count;
};

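/* Timers under test; mytiminfo[0] is also reused by the first stress test. */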
static struct mytimerinfo mytiminfo[NB_TIMER];

static void timer_basic_cb(struct rte_timer *tim, void *arg);

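/* Helper: (re)arm a test timer, passing its mytimerinfo as the callback argument. */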
static void
mytimer_reset(struct mytimerinfo *timinfo, uint64_t ticks,
	      enum rte_timer_type type, unsigned tim_lcore,
	      rte_timer_cb_t fct)
{
	rte_timer_reset_sync(&timinfo->tim, ticks, type, tim_lcore,
			     fct, timinfo);
}

/* timer callback for the first stress test */
static void
timer_stress_cb(__rte_unused struct rte_timer *tim,
		__rte_unused void *arg)
{
	long r;
	unsigned lcore_id = rte_lcore_id();
	uint64_t hz = rte_get_timer_hz();

	if (rte_timer_pending(tim))
		return;

	r = rte_rand();
	if ((r & 0xff) == 0) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_stress_cb);
	}
	else if ((r & 0xff) == 1) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_stress_cb);
	}
	else if ((r & 0xff) == 2) {
		rte_timer_stop(&mytiminfo[0].tim);
	}
}

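/*
 * Per-lcore loop for the first stress test: every core services the timer
 * list and randomly resets or stops mytiminfo[0] until end_time is reached.
 */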
static int
timer_stress_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;
	long r;

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (1 us = 2000 cycles at 2 GHz) */
		rte_delay_us(1);

		/* randomly stop or reset timer */
		r = rte_rand();
		lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
		if ((r & 0xff) == 0) {
			/* 100 us */
			mytimer_reset(&mytiminfo[0], hz/10000, SINGLE, lcore_id,
				      timer_stress_cb);
		}
		else if ((r & 0xff) == 1) {
			rte_timer_stop_sync(&mytiminfo[0].tim);
		}
		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}

	lcore_id = rte_lcore_id();
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
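/*
 * Per-lcore handshake state: the main lcore moves each worker from
 * WORKER_WAITING to WORKER_RUN_SIGNAL, the worker acknowledges with
 * WORKER_RUNNING, and finally reports WORKER_FINISHED so the main lcore
 * can safely reuse shared data.
 */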
static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];

static void
main_init_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
					  rte_memory_order_relaxed);
	}
}

static void
main_start_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
					  rte_memory_order_release);
	}
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
					rte_memory_order_acquire);
	}
}

static void
main_wait_for_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
					rte_memory_order_acquire);
	}
}

static void
worker_wait_to_start(void)
{
	unsigned lcore_id = rte_lcore_id();

	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
				rte_memory_order_acquire);
	rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
				  rte_memory_order_release);
}

static void
worker_finish(void)
{
	unsigned lcore_id = rte_lcore_id();

	rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
				  rte_memory_order_release);
}


static volatile int cb_count = 0;

/* callback for second stress test. will only be called
 * on main lcore
 */
static void
timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
{
	cb_count++;
}

#define NB_STRESS2_TIMERS 8192

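/*
 * Second stress test: all lcores schedule the same block of timers on the
 * main lcore, wait for them to expire, and the main lcore then checks that
 * each callback ran exactly once. The check is repeated after the cores
 * randomly stop and reschedule the timers.
 */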
static int
timer_stress2_main_loop(__rte_unused void *arg)
{
	static struct rte_timer *timers;
	int i, ret;
	uint64_t delay = rte_get_timer_hz() / 20;
	unsigned int lcore_id = rte_lcore_id();
	unsigned int main_lcore = rte_get_main_lcore();
	int32_t my_collisions = 0;
	static RTE_ATOMIC(uint32_t) collisions;

	if (lcore_id == main_lcore) {
		cb_count = 0;
		test_failed = 0;
		rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
		timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
		if (timers == NULL) {
			printf("Test Failed\n");
			printf("- Cannot allocate memory for timers\n");
			test_failed = 1;
			main_start_workers();
			goto cleanup;
		}
		for (i = 0; i < NB_STRESS2_TIMERS; i++)
			rte_timer_init(&timers[i]);
		main_start_workers();
	} else {
		worker_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* have all cores schedule all timers on the main lcore */
	for (i = 0; i < NB_STRESS2_TIMERS; i++) {
		ret = rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
				      timer_stress2_cb, NULL);
		/* there will be collisions when multiple cores simultaneously
		 * configure the same timers */
		if (ret != 0)
			my_collisions++;
	}
	if (my_collisions != 0)
		rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* all cores rendezvous */
	if (lcore_id == main_lcore) {
		main_wait_for_workers();
	} else {
		worker_finish();
	}

	/* now check that we get the right number of callbacks */
	if (lcore_id == main_lcore) {
		my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
		if (my_collisions != 0)
			printf("- %d timer reset collisions (OK)\n", my_collisions);
		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 1 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
			       cb_count);
			test_failed = 1;
			main_start_workers();
			goto cleanup;
		}
		cb_count = 0;

		/* proceed */
		main_start_workers();
	} else {
		/* proceed */
		worker_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* now test again, just stop and restart timers at random after init */
	for (i = 0; i < NB_STRESS2_TIMERS; i++)
		rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
				timer_stress2_cb, NULL);

	/* pick random timers to reset, stopping them first half the time */
	for (i = 0; i < 100000; i++) {
		int r = rand() % NB_STRESS2_TIMERS;
		if (i % 2)
			rte_timer_stop(&timers[r]);
		rte_timer_reset(&timers[r], delay, SINGLE, main_lcore,
				timer_stress2_cb, NULL);
	}

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* now check that we get the right number of callbacks */
	if (lcore_id == main_lcore) {
		main_wait_for_workers();

		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 2 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
			       cb_count);
			test_failed = 1;
		} else {
			printf("Test OK\n");
		}
	}

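	/* Final rendezvous: the main lcore waits for all workers before
	 * freeing the shared timer array; workers simply report completion. */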
cleanup:
	if (lcore_id == main_lcore) {
		main_wait_for_workers();
		if (timers != NULL) {
			rte_free(timers);
			timers = NULL;
		}
	} else {
		worker_finish();
	}

	return 0;
}

/* timer callback for the basic test */
static void
timer_basic_cb(struct rte_timer *tim, void *arg)
{
	struct mytimerinfo *timinfo = arg;
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time = rte_get_timer_cycles();

	if (rte_timer_pending(tim))
		return;

	timinfo->count++;

	RTE_LOG(INFO, TESTTIMER,
		"%"PRIu64": callback id=%u count=%u on core %u\n",
		cur_time, timinfo->id, timinfo->count, lcore_id);

	/* reload timer 0 on the same core */
	if (timinfo->id == 0 && timinfo->count < 20) {
		mytimer_reset(timinfo, hz, SINGLE, lcore_id, timer_basic_cb);
		return;
	}

	/* reload timer 1 on the next core */
	if (timinfo->id == 1 && timinfo->count < 10) {
		mytimer_reset(timinfo, hz*2, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
		return;
	}

	/* Explicitly stop timer 0. Once stop() has been called, we can even
	 * erase the content of the structure: it is not referenced
	 * anymore by any code (in the case of a dynamically allocated
	 * structure, it could be freed). */
	if (timinfo->id == 0 && timinfo->count == 20) {

		/* stop_sync() is not needed, because we know that the
		 * status of the timer is only modified by this core */
		rte_timer_stop(tim);
		memset(tim, 0xAA, sizeof(struct rte_timer));
		return;
	}

	/* stop timer3, and restart a new timer0 (it was removed 5
	 * seconds ago) for a single shot */
	if (timinfo->id == 2 && timinfo->count == 25) {
		rte_timer_stop_sync(&mytiminfo[3].tim);

		/* need to reinit because the structure was erased with 0xAA */
		rte_timer_init(&mytiminfo[0].tim);
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_basic_cb);
	}
}

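/*
 * Per-lcore loop for the basic test: the main lcore arms the four timers,
 * then every lcore services the timer list until end_time is reached.
 */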
static int
timer_basic_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;

	/* the main lcore arms all four timers */
	if (lcore_id == rte_get_main_lcore()) {
		mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[2], hz/4, PERIODICAL, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[3], hz/4, PERIODICAL,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
	}

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (3 us = 6000 cycles at 2 GHz) */
		rte_delay_us(3);

		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

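/*
 * When the HPET is the EAL timer source, cross-check that elapsed time
 * measured with the TSC and with the HPET agrees to within 0.1%.
 */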
static int
timer_sanity_check(void)
{
#ifdef RTE_LIBEAL_USE_HPET
	if (eal_timer_source != EAL_TIMER_HPET) {
		printf("Not using HPET, can't sanity check timer sources\n");
		return 0;
	}

	const uint64_t t_hz = rte_get_tsc_hz();
	const uint64_t h_hz = rte_get_hpet_hz();
	printf("Hertz values: TSC = %"PRIu64", HPET = %"PRIu64"\n", t_hz, h_hz);

	const uint64_t tsc_start = rte_get_tsc_cycles();
	const uint64_t hpet_start = rte_get_hpet_cycles();
	rte_delay_ms(100); /* delay 1/10 second */
	const uint64_t tsc_end = rte_get_tsc_cycles();
	const uint64_t hpet_end = rte_get_hpet_cycles();
	printf("Measured cycles: TSC = %"PRIu64", HPET = %"PRIu64"\n",
	       tsc_end - tsc_start, hpet_end - hpet_start);

	const double tsc_time = (double)(tsc_end - tsc_start)/t_hz;
	const double hpet_time = (double)(hpet_end - hpet_start)/h_hz;
	/* get the percentage that the times differ by */
	const double time_diff = fabs(tsc_time - hpet_time)*100/tsc_time;
	printf("Measured time: TSC = %.4f, HPET = %.4f\n", tsc_time, hpet_time);

	printf("Elapsed time measured by TSC and HPET differ by %f%%\n",
	       time_diff);
	if (time_diff > 0.1) {
		printf("Error: times differ by >0.1%%\n");
		return -1;
	}
#endif
	return 0;
}

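/*
 * Test entry point: checks the timer sources, runs the two stress tests
 * and the basic test, then dumps the timer statistics.
 */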
static int
test_timer(void)
{
	unsigned i;
	uint64_t cur_time;
	uint64_t hz;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for timer_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	/* sanity check our timer sources and timer config values */
	if (timer_sanity_check() < 0) {
		printf("Timer sanity checks failed\n");
		return TEST_FAILED;
	}

	/* init timer */
	for (i = 0; i < NB_TIMER; i++) {
		memset(&mytiminfo[i], 0, sizeof(struct mytimerinfo));
		mytiminfo[i].id = i;
		rte_timer_init(&mytiminfo[i].tim);
	}

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("Start timer stress tests\n");
	rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();

	/* stop timer 0 used for stress test */
	rte_timer_stop_sync(&mytiminfo[0].tim);

	/* run a second, slightly different set of stress tests */
	printf("\nStart timer stress tests 2\n");
	test_failed = 0;
	main_init_workers();
	rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();
	if (test_failed)
		return TEST_FAILED;

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("\nStart timer basic tests\n");
	rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();

	/* stop all timers */
	for (i = 0; i < NB_TIMER; i++) {
		rte_timer_stop_sync(&mytiminfo[i].tim);
	}

	rte_timer_dump_stats(stdout);

	return TEST_SUCCESS;
}

REGISTER_FAST_TEST(timer_autotest, false, true, test_timer);