/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include "test.h"

/*
 * Timer
 * =====
 *
 * #. Stress test 1.
 *
 *    The objective of the timer stress tests is to check that there are no
 *    race conditions in list and status management. This test launches,
 *    resets and stops the timer very often on many cores at the same
 *    time.
 *
 *    - Only one timer is used for this test.
 *    - On each core, the rte_timer_manage() function is called from the main
 *      loop every 3 microseconds.
 *    - In the main loop, the timer may be reset (randomly, with a
 *      probability of 0.5 %) 100 microseconds later on a random core, or
 *      stopped (with a probability of 0.5 % also).
 *    - In the callback, the timer can be reset (randomly, with a
 *      probability of 0.5 %) 100 microseconds later on the same core or
 *      on another core (same probability), or stopped (same
 *      probability).
 *
 * #. Stress test 2.
 *
 *    The objective of this test is similar to the first in that it attempts
 *    to find if there are any race conditions in the timer library. However,
 *    it is less complex in terms of operations performed and duration, as it
 *    is designed to have a predictable outcome that can be tested.
 *
 *    - A set of timers is initialized for use by the test.
 *    - All cores then simultaneously schedule all the timers at the same
 *      time, so conflicts should occur.
 *    - Then there is a delay while we wait for the timers to expire.
 *    - Then the main lcore calls rte_timer_manage() and we check that all
 *      timers have had their callbacks called exactly once - no more, no less.
 *    - Then we repeat the process, except that after setting up the timers,
 *      we have all cores randomly reschedule them.
 *    - Again we check that the expected number of callbacks has occurred when
 *      we call rte_timer_manage().
 *
 * #. Basic test.
 *
 *    This test performs basic functional checks of the timers. The test
 *    uses four different timers that are loaded and stopped under
 *    specific conditions in specific contexts.
 *
 *    - Four timers are used for this test.
 *    - On each core, the rte_timer_manage() function is called from the main
 *      loop every 3 microseconds.
 *
 *    The autotest Python script checks that the behavior is correct:
 *
 *    - timer0
 *
 *      - At initialization, timer0 is loaded by the main core, on the
 *        main core in "single" mode (time = 1 second).
 *      - In the first 19 callbacks, timer0 is reloaded on the same core;
 *        it is then explicitly stopped at the 20th call.
 *      - At t=25s, timer0 is reloaded once by timer2.
 *
 *    - timer1
 *
 *      - At initialization, timer1 is loaded by the main core, on the
 *        main core in "single" mode (time = 2 seconds).
 *      - In the first 9 callbacks, timer1 is reloaded on another
 *        core. After the 10th callback, timer1 is not reloaded anymore.
 *
 *    - timer2
 *
 *      - At initialization, timer2 is loaded by the main core, on the
 *        main core in "periodical" mode (time = 1 second).
 *      - In the callback, when t=25s, it stops timer3 and reloads timer0
 *        on the current core.
 *
 *    - timer3
 *
 *      - At initialization, timer3 is loaded by the main core, on
 *        another core in "periodical" mode (time = 1 second).
 *      - It is stopped at t=25s by timer2.
 */
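
/*
 * For reference, the rte_timer call pattern that all of these tests
 * exercise is, in outline (an illustrative sketch only, not compiled as
 * part of the test; cb, arg and lcore_id stand for the application's
 * callback, argument and target lcore):
 *
 *	struct rte_timer tim;
 *
 *	rte_timer_init(&tim);
 *	rte_timer_reset(&tim, rte_get_timer_hz(), SINGLE, lcore_id, cb, arg);
 *	...
 *	while (running)
 *		rte_timer_manage();     (expired callbacks fire here)
 */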

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <math.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_timer.h>
#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_pause.h>

#define TEST_DURATION_S 1 /* in seconds */
#define NB_TIMER 4

#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3

static volatile uint64_t end_time;
static volatile int test_failed;

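/*
 * Per-timer test state: the rte_timer handle itself, the timer's index in
 * the mytiminfo[] array, and the number of times its callback has fired.
 */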
struct mytimerinfo {
	struct rte_timer tim;
	unsigned id;
	unsigned count;
};

static struct mytimerinfo mytiminfo[NB_TIMER];

static void timer_basic_cb(struct rte_timer *tim, void *arg);

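/*
 * Helper: synchronously (re)arm the timer embedded in timinfo, passing the
 * mytimerinfo structure itself as the callback argument.
 */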
static void
mytimer_reset(struct mytimerinfo *timinfo, uint64_t ticks,
	      enum rte_timer_type type, unsigned tim_lcore,
	      rte_timer_cb_t fct)
{
	rte_timer_reset_sync(&timinfo->tim, ticks, type, tim_lcore,
			     fct, timinfo);
}

/* timer callback for stress tests */
static void
timer_stress_cb(__rte_unused struct rte_timer *tim,
		__rte_unused void *arg)
{
	long r;
	unsigned lcore_id = rte_lcore_id();
	uint64_t hz = rte_get_timer_hz();

	if (rte_timer_pending(tim))
		return;

	r = rte_rand();
	if ((r & 0xff) == 0) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_stress_cb);
	} else if ((r & 0xff) == 1) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_stress_cb);
	} else if ((r & 0xff) == 2) {
		rte_timer_stop(&mytiminfo[0].tim);
	}
}

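/*
 * Main loop of stress test 1, run on every lcore: repeatedly call
 * rte_timer_manage() and, at random, rearm the shared timer 100 us ahead
 * on a rotating target lcore or stop it, until the test duration elapses.
 */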
static int
timer_stress_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;
	long r;

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (1 us = 2000 cycles at 2 GHz) */
		rte_delay_us(1);

		/* randomly stop or reset timer */
		r = rte_rand();
		lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
		if ((r & 0xff) == 0) {
			/* 100 us */
			mytimer_reset(&mytiminfo[0], hz/10000, SINGLE, lcore_id,
				      timer_stress_cb);
		} else if ((r & 0xff) == 1) {
			rte_timer_stop_sync(&mytiminfo[0].tim);
		}
		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}

	lcore_id = rte_lcore_id();
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
static rte_atomic16_t lcore_state[RTE_MAX_LCORE];

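/* Main lcore: mark every worker lcore as WAITING before a test run. */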
static void
main_init_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_atomic16_set(&lcore_state[i], WORKER_WAITING);
	}
}

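/*
 * Main lcore: signal all workers to run, then wait until each one has
 * acknowledged the signal by moving to the RUNNING state.
 */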
static void
main_start_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_atomic16_set(&lcore_state[i], WORKER_RUN_SIGNAL);
	}
	RTE_LCORE_FOREACH_WORKER(i) {
		while (rte_atomic16_read(&lcore_state[i]) != WORKER_RUNNING)
			rte_pause();
	}
}

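/* Main lcore: block until every worker lcore has reached FINISHED. */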
static void
main_wait_for_workers(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_WORKER(i) {
		while (rte_atomic16_read(&lcore_state[i]) != WORKER_FINISHED)
			rte_pause();
	}
}

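/* Worker lcore: spin until the main lcore signals the run, then acknowledge. */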
static void
worker_wait_to_start(void)
{
	unsigned lcore_id = rte_lcore_id();

	while (rte_atomic16_read(&lcore_state[lcore_id]) != WORKER_RUN_SIGNAL)
		rte_pause();
	rte_atomic16_set(&lcore_state[lcore_id], WORKER_RUNNING);
}

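/* Worker lcore: report that this lcore has finished the current step. */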
static void
worker_finish(void)
{
	unsigned lcore_id = rte_lcore_id();

	rte_atomic16_set(&lcore_state[lcore_id], WORKER_FINISHED);
}


static volatile int cb_count = 0;

/* callback for the second stress test; only ever called
 * on the main lcore
 */
static void
timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
{
	cb_count++;
}

#define NB_STRESS2_TIMERS 8192

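/*
 * Main loop of stress test 2, run on every lcore. Phase 1: all lcores
 * schedule the same NB_STRESS2_TIMERS timers on the main lcore, the timers
 * are left to expire, and the main lcore verifies that each callback ran
 * exactly once. Phase 2: the same check is repeated after all lcores have
 * randomly stopped and rescheduled the timers.
 */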
static int
timer_stress2_main_loop(__rte_unused void *arg)
{
	static struct rte_timer *timers;
	int i, ret;
	uint64_t delay = rte_get_timer_hz() / 20;
	unsigned int lcore_id = rte_lcore_id();
	unsigned int main_lcore = rte_get_main_lcore();
	int32_t my_collisions = 0;
	static rte_atomic32_t collisions;

	if (lcore_id == main_lcore) {
		cb_count = 0;
		test_failed = 0;
		rte_atomic32_set(&collisions, 0);
		main_init_workers();
		timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
		if (timers == NULL) {
			printf("Test Failed\n");
			printf("- Cannot allocate memory for timers\n");
			test_failed = 1;
			main_start_workers();
			goto cleanup;
		}
		for (i = 0; i < NB_STRESS2_TIMERS; i++)
			rte_timer_init(&timers[i]);
		main_start_workers();
	} else {
		worker_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* have all cores schedule all timers on main lcore */
	for (i = 0; i < NB_STRESS2_TIMERS; i++) {
		ret = rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
				      timer_stress2_cb, NULL);
		/* there will be collisions when multiple cores simultaneously
		 * configure the same timers */
		if (ret != 0)
			my_collisions++;
	}
	if (my_collisions != 0)
		rte_atomic32_add(&collisions, my_collisions);

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* all cores rendezvous */
	if (lcore_id == main_lcore) {
		main_wait_for_workers();
	} else {
		worker_finish();
	}

	/* now check that we get the right number of callbacks */
	if (lcore_id == main_lcore) {
		my_collisions = rte_atomic32_read(&collisions);
		if (my_collisions != 0)
			printf("- %d timer reset collisions (OK)\n", my_collisions);
		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 1 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
			       cb_count);
			test_failed = 1;
			main_start_workers();
			goto cleanup;
		}
		cb_count = 0;

		/* proceed */
		main_start_workers();
	} else {
		/* proceed */
		worker_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* now test again, just stop and restart timers at random after init */
	for (i = 0; i < NB_STRESS2_TIMERS; i++)
		rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
				timer_stress2_cb, NULL);

	/* pick random timers to reset, stopping them first half of the time */
	for (i = 0; i < 100000; i++) {
		int r = rand() % NB_STRESS2_TIMERS;
		if (i % 2)
			rte_timer_stop(&timers[r]);
		rte_timer_reset(&timers[r], delay, SINGLE, main_lcore,
				timer_stress2_cb, NULL);
	}

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* now check that we get the right number of callbacks */
	if (lcore_id == main_lcore) {
		main_wait_for_workers();

		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 2 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
			       cb_count);
			test_failed = 1;
		} else {
			printf("Test OK\n");
		}
	}

cleanup:
	if (lcore_id == main_lcore) {
		main_wait_for_workers();
		if (timers != NULL) {
			rte_free(timers);
			timers = NULL;
		}
	} else {
		worker_finish();
	}

	return 0;
}

/* timer callback for basic tests */
static void
timer_basic_cb(struct rte_timer *tim, void *arg)
{
	struct mytimerinfo *timinfo = arg;
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time = rte_get_timer_cycles();

	if (rte_timer_pending(tim))
		return;

	timinfo->count++;

	RTE_LOG(INFO, TESTTIMER,
		"%"PRIu64": callback id=%u count=%u on core %u\n",
		cur_time, timinfo->id, timinfo->count, lcore_id);

	/* reload timer 0 on same core */
	if (timinfo->id == 0 && timinfo->count < 20) {
		mytimer_reset(timinfo, hz, SINGLE, lcore_id, timer_basic_cb);
		return;
	}

	/* reload timer 1 on next core */
	if (timinfo->id == 1 && timinfo->count < 10) {
		mytimer_reset(timinfo, hz*2, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
		return;
	}

	/* Explicitly stop timer 0. Once stop() has been called, we can even
	 * erase the content of the structure: it is not referenced
	 * anymore by any code (in the case of a dynamic structure, it
	 * could be freed). */
	if (timinfo->id == 0 && timinfo->count == 20) {

		/* stop_sync() is not needed, because we know that the
		 * status of the timer is only modified by this core */
		rte_timer_stop(tim);
		memset(tim, 0xAA, sizeof(struct rte_timer));
		return;
	}

	/* stop timer3, and restart a new timer0 (it was removed 5
	 * seconds ago) for a single shot */
	if (timinfo->id == 2 && timinfo->count == 25) {
		rte_timer_stop_sync(&mytiminfo[3].tim);

		/* need to reinit because structure was erased with 0xAA */
		rte_timer_init(&mytiminfo[0].tim);
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_basic_cb);
	}
}

static int
timer_basic_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;

	/* launch all timers from the main lcore */
	if (lcore_id == rte_get_main_lcore()) {
		mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[2], hz/4, PERIODICAL, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[3], hz/4, PERIODICAL,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
	}

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (3 us = 6000 cycles at 2 GHz) */
		rte_delay_us(3);

		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

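/*
 * When HPET support is compiled in and the HPET is the selected EAL timer
 * source, cross-check the elapsed time it reports against the TSC over a
 * 100 ms delay; the two clocks must agree to within 0.1%. Otherwise the
 * check is skipped.
 */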
static int
timer_sanity_check(void)
{
#ifdef RTE_LIBEAL_USE_HPET
	if (eal_timer_source != EAL_TIMER_HPET) {
		printf("Not using HPET, can't sanity check timer sources\n");
		return 0;
	}

	const uint64_t t_hz = rte_get_tsc_hz();
	const uint64_t h_hz = rte_get_hpet_hz();
	printf("Hertz values: TSC = %"PRIu64", HPET = %"PRIu64"\n", t_hz, h_hz);

	const uint64_t tsc_start = rte_get_tsc_cycles();
	const uint64_t hpet_start = rte_get_hpet_cycles();
	rte_delay_ms(100); /* delay 1/10 second */
	const uint64_t tsc_end = rte_get_tsc_cycles();
	const uint64_t hpet_end = rte_get_hpet_cycles();
	printf("Measured cycles: TSC = %"PRIu64", HPET = %"PRIu64"\n",
	       tsc_end-tsc_start, hpet_end-hpet_start);

	const double tsc_time = (double)(tsc_end - tsc_start)/t_hz;
	const double hpet_time = (double)(hpet_end - hpet_start)/h_hz;
	/* get the percentage that the times differ by */
	const double time_diff = fabs(tsc_time - hpet_time)*100/tsc_time;
	printf("Measured time: TSC = %.4f, HPET = %.4f\n", tsc_time, hpet_time);

	printf("Elapsed time measured by TSC and HPET differ by %f%%\n",
	       time_diff);
	if (time_diff > 0.1) {
		printf("Error: times differ by >0.1%%\n");
		return -1;
	}
#endif
	return 0;
}

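/*
 * Test entry point: runs the two stress tests and then the basic test,
 * failing fast if any stage reports an error.
 */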
static int
test_timer(void)
{
	unsigned i;
	uint64_t cur_time;
	uint64_t hz;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for timer_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	/* sanity check our timer sources and timer config values */
	if (timer_sanity_check() < 0) {
		printf("Timer sanity checks failed\n");
		return TEST_FAILED;
	}

	/* init timer */
	for (i = 0; i < NB_TIMER; i++) {
		memset(&mytiminfo[i], 0, sizeof(struct mytimerinfo));
		mytiminfo[i].id = i;
		rte_timer_init(&mytiminfo[i].tim);
	}

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("Start timer stress tests\n");
	rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();

	/* stop timer 0 used for stress test */
	rte_timer_stop_sync(&mytiminfo[0].tim);

	/* run a second, slightly different set of stress tests */
	printf("\nStart timer stress tests 2\n");
	test_failed = 0;
	rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();
	if (test_failed)
		return TEST_FAILED;

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("\nStart timer basic tests\n");
	rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();

	/* stop all timers */
	for (i = 0; i < NB_TIMER; i++) {
		rte_timer_stop_sync(&mytiminfo[i].tim);
	}

	rte_timer_dump_stats(stdout);

	return TEST_SUCCESS;
}

REGISTER_TEST_COMMAND(timer_autotest, test_timer);