1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 * Copyright(c) 2017-2018 Intel Corporation.
4 */
5
6 #include "test.h"
7
8 #include <math.h>
9
10 #include <rte_common.h>
11 #include <rte_cycles.h>
12 #include <rte_debug.h>
13 #include <rte_eal.h>
14 #include <rte_ethdev.h>
15
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
test_event_timer_adapter_func(void)18 test_event_timer_adapter_func(void)
19 {
20 printf("event_timer_adapter not supported on Windows, skipping test\n");
21 return TEST_SKIPPED;
22 }
23
24 #else
25
26 #include <rte_eventdev.h>
27 #include <rte_event_timer_adapter.h>
28 #include <rte_mempool.h>
29 #include <rte_launch.h>
30 #include <rte_lcore.h>
31 #include <rte_per_lcore.h>
32 #include <rte_random.h>
33 #include <rte_bus_vdev.h>
34 #include <rte_service.h>
35 #include <stdbool.h>
36
/* 4K timers corresponds to sw evdev max inflight events */
#define MAX_TIMERS (4 * 1024)
/* NOTE(review): empty, apparently unused macro -- candidate for removal */
#define BKT_TCK_NSEC

#define NSECPERSEC 1E9
#define BATCH_SIZE 16
/* Both the app lcore and adapter ports are linked to this queue */
#define TEST_QUEUE_ID 0
/* Port the application dequeues from */
#define TEST_PORT_ID 0
#define TEST_ADAPTER_ID 0

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)

/* Event device under test */
static int evdev;
/* Timer adapter under test, created by _timdev_setup() */
static struct rte_event_timer_adapter *timdev;
/* Pool from which rte_event_timer objects are allocated */
static struct rte_mempool *eventdev_test_mempool;
/* Ring used to pass armed timers from producer lcores to cancel lcores */
static struct rte_ring *timer_producer_ring;
/* Timer resolution requested of the adapter, in ns */
static uint64_t global_bkt_tck_ns;
/* Timer resolution the adapter actually reports, in ns */
static uint64_t global_info_bkt_tck_ns;
/* Set to 1 once producer lcores finish arming; cancel lcores then drain */
static volatile uint8_t arm_done;
/*
 * Convert a tick count expressed in the requested resolution
 * (global_bkt_tck_ns) into ticks of the resolution the adapter actually
 * reports (global_info_bkt_tck_ns), rounding up.  The argument is
 * parenthesized so expression arguments bind correctly.
 */
#define CALC_TICKS(tks) \
	ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)
62
63
/* True when the eventdev and/or timer adapter run as service cores */
static bool using_services;
/* Worker lcores reserved by testsuite_setup() for multicore tests */
static uint32_t test_lcore1;
static uint32_t test_lcore2;
static uint32_t test_lcore3;
/* Service lcores for the sw eventdev and the sw timer adapter */
static uint32_t sw_evdev_slcore;
static uint32_t sw_adptr_slcore;
70
71 static inline void
devconf_set_default_sane_values(struct rte_event_dev_config * dev_conf,struct rte_event_dev_info * info)72 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
73 struct rte_event_dev_info *info)
74 {
75 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
76 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
77 dev_conf->nb_event_ports = 1;
78 dev_conf->nb_event_queues = 1;
79 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
80 dev_conf->nb_event_port_dequeue_depth =
81 info->max_event_port_dequeue_depth;
82 dev_conf->nb_event_port_enqueue_depth =
83 info->max_event_port_enqueue_depth;
84 dev_conf->nb_event_port_enqueue_depth =
85 info->max_event_port_enqueue_depth;
86 dev_conf->nb_events_limit =
87 info->max_num_events;
88 }
89
90 static inline int
eventdev_setup(void)91 eventdev_setup(void)
92 {
93 int ret;
94 struct rte_event_dev_config dev_conf;
95 struct rte_event_dev_info info;
96 uint32_t service_id;
97
98 ret = rte_event_dev_info_get(evdev, &info);
99 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
100 TEST_ASSERT(info.max_num_events < 0 ||
101 info.max_num_events >= (int32_t)MAX_TIMERS,
102 "ERROR max_num_events=%d < max_events=%d",
103 info.max_num_events, MAX_TIMERS);
104
105 devconf_set_default_sane_values(&dev_conf, &info);
106 ret = rte_event_dev_configure(evdev, &dev_conf);
107 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
108
109 ret = rte_event_queue_setup(evdev, 0, NULL);
110 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);
111
112 /* Configure event port */
113 ret = rte_event_port_setup(evdev, 0, NULL);
114 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
115 ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
116 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);
117
118 /* If this is a software event device, map and start its service */
119 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
120 TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
121 "Failed to add service core");
122 TEST_ASSERT_SUCCESS(rte_service_lcore_start(
123 sw_evdev_slcore),
124 "Failed to start service core");
125 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
126 service_id, sw_evdev_slcore, 1),
127 "Failed to map evdev service");
128 TEST_ASSERT_SUCCESS(rte_service_runstate_set(
129 service_id, 1),
130 "Failed to start evdev service");
131 }
132
133 ret = rte_event_dev_start(evdev);
134 TEST_ASSERT_SUCCESS(ret, "Failed to start device");
135
136 return TEST_SUCCESS;
137 }
138
139 static int
testsuite_setup(void)140 testsuite_setup(void)
141 {
142 /* Some of the multithreaded tests require 3 other lcores to run */
143 unsigned int required_lcore_count = 4;
144 uint32_t service_id;
145
146 /* To make it easier to map services later if needed, just reset
147 * service core state.
148 */
149 (void) rte_service_lcore_reset_all();
150
151 if (!rte_event_dev_count()) {
152 /* If there is no hardware eventdev, or no software vdev was
153 * specified on the command line, create an instance of
154 * event_sw.
155 */
156 LOG_DBG("Failed to find a valid event device... testing with"
157 " event_sw device\n");
158 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
159 "Error creating eventdev");
160 evdev = rte_event_dev_get_dev_id("event_sw0");
161 }
162
163 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
164 /* A software event device will use a software event timer
165 * adapter as well. 2 more cores required to convert to
166 * service cores.
167 */
168 required_lcore_count += 2;
169 using_services = true;
170 }
171
172 if (rte_lcore_count() < required_lcore_count) {
173 printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
174 required_lcore_count);
175 return TEST_SKIPPED;
176 }
177
178 /* Assign lcores for various tasks */
179 test_lcore1 = rte_get_next_lcore(-1, 1, 0);
180 test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
181 test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
182 if (using_services) {
183 sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
184 sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
185 }
186
187 return eventdev_setup();
188 }
189
190 static void
testsuite_teardown(void)191 testsuite_teardown(void)
192 {
193 rte_event_dev_stop(evdev);
194 rte_event_dev_close(evdev);
195 }
196
197 static int
setup_adapter_service(struct rte_event_timer_adapter * adptr)198 setup_adapter_service(struct rte_event_timer_adapter *adptr)
199 {
200 uint32_t adapter_service_id;
201 int ret;
202
203 /* retrieve service ids */
204 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
205 &adapter_service_id), "Failed to get event timer "
206 "adapter service id");
207 /* add a service core and start it */
208 ret = rte_service_lcore_add(sw_adptr_slcore);
209 TEST_ASSERT(ret == 0 || ret == -EALREADY,
210 "Failed to add service core");
211 ret = rte_service_lcore_start(sw_adptr_slcore);
212 TEST_ASSERT(ret == 0 || ret == -EALREADY,
213 "Failed to start service core");
214
215 /* map services to it */
216 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
217 sw_adptr_slcore, 1),
218 "Failed to map adapter service");
219
220 /* set services to running */
221 TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
222 "Failed to start event timer adapter service");
223
224 return TEST_SUCCESS;
225 }
226
227 static int
test_port_conf_cb(uint16_t id,uint8_t event_dev_id,uint8_t * event_port_id,void * conf_arg)228 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
229 void *conf_arg)
230 {
231 struct rte_event_dev_config dev_conf;
232 struct rte_event_dev_info info;
233 struct rte_event_port_conf *port_conf, def_port_conf = {0};
234 uint32_t started;
235 static int port_allocated;
236 static uint8_t port_id;
237 int ret;
238
239 if (port_allocated) {
240 *event_port_id = port_id;
241 return 0;
242 }
243
244 RTE_SET_USED(id);
245
246 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
247 &started);
248 if (ret < 0)
249 return ret;
250
251 if (started)
252 rte_event_dev_stop(event_dev_id);
253
254 ret = rte_event_dev_info_get(evdev, &info);
255 if (ret < 0)
256 return ret;
257
258 devconf_set_default_sane_values(&dev_conf, &info);
259
260 port_id = dev_conf.nb_event_ports;
261 dev_conf.nb_event_ports++;
262
263 ret = rte_event_dev_configure(event_dev_id, &dev_conf);
264 if (ret < 0) {
265 if (started)
266 rte_event_dev_start(event_dev_id);
267 return ret;
268 }
269
270 if (conf_arg != NULL)
271 port_conf = conf_arg;
272 else {
273 port_conf = &def_port_conf;
274 ret = rte_event_port_default_conf_get(event_dev_id, port_id,
275 port_conf);
276 if (ret < 0)
277 return ret;
278 }
279
280 ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
281 if (ret < 0)
282 return ret;
283
284 *event_port_id = port_id;
285
286 if (started)
287 rte_event_dev_start(event_dev_id);
288
289 /* Reuse this port number next time this is called */
290 port_allocated = 1;
291
292 return 0;
293 }
294
295 static int
_timdev_setup(uint64_t max_tmo_ns,uint64_t bkt_tck_ns,uint64_t flags)296 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags)
297 {
298 struct rte_event_timer_adapter_info info;
299 struct rte_event_timer_adapter_conf config = {
300 .event_dev_id = evdev,
301 .timer_adapter_id = TEST_ADAPTER_ID,
302 .timer_tick_ns = bkt_tck_ns,
303 .max_tmo_ns = max_tmo_ns,
304 .nb_timers = MAX_TIMERS * 10,
305 .flags = flags,
306 };
307 uint32_t caps = 0;
308 const char *pool_name = "timdev_test_pool";
309
310 global_bkt_tck_ns = bkt_tck_ns;
311
312 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
313 "failed to get adapter capabilities");
314
315 if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC &&
316 !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC))
317 return -ENOTSUP;
318
319 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
320 timdev = rte_event_timer_adapter_create_ext(&config,
321 test_port_conf_cb,
322 NULL);
323 setup_adapter_service(timdev);
324 using_services = true;
325 } else
326 timdev = rte_event_timer_adapter_create(&config);
327
328 TEST_ASSERT_NOT_NULL(timdev,
329 "failed to create event timer ring");
330
331 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
332 "failed to Start event timer adapter");
333
334 /* Create event timer mempool */
335 eventdev_test_mempool = rte_mempool_create(pool_name,
336 MAX_TIMERS * 2,
337 sizeof(struct rte_event_timer), /* element size*/
338 0, /* cache size*/
339 0, NULL, NULL, NULL, NULL,
340 rte_socket_id(), 0);
341 if (!eventdev_test_mempool) {
342 printf("ERROR creating mempool\n");
343 return TEST_FAILED;
344 }
345
346 rte_event_timer_adapter_get_info(timdev, &info);
347
348 global_info_bkt_tck_ns = info.min_resolution_ns;
349
350 return TEST_SUCCESS;
351 }
352
353 static int
timdev_setup_usec(void)354 timdev_setup_usec(void)
355 {
356 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
357
358 return using_services ?
359 /* Max timeout is 10,000us and bucket interval is 100us */
360 _timdev_setup(1E7, 1E5, flags) :
361 /* Max timeout is 100us and bucket interval is 1us */
362 _timdev_setup(1E5, 1E3, flags);
363 }
364
365 static int
timdev_setup_usec_multicore(void)366 timdev_setup_usec_multicore(void)
367 {
368 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
369
370 return using_services ?
371 /* Max timeout is 10,000us and bucket interval is 100us */
372 _timdev_setup(1E7, 1E5, flags) :
373 /* Max timeout is 100us and bucket interval is 1us */
374 _timdev_setup(1E5, 1E3, flags);
375 }
376
377 static int
timdev_setup_msec(void)378 timdev_setup_msec(void)
379 {
380 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
381
382 /* Max timeout is 3 mins, and bucket interval is 100 ms */
383 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags);
384 }
385
386 static int
timdev_setup_msec_periodic(void)387 timdev_setup_msec_periodic(void)
388 {
389 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
390 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;
391
392 /* Periodic mode with 100 ms resolution */
393 return _timdev_setup(0, NSECPERSEC / 10, flags);
394 }
395
396 static int
timdev_setup_sec(void)397 timdev_setup_sec(void)
398 {
399 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
400
401 /* Max timeout is 100sec and bucket interval is 1sec */
402 return _timdev_setup(1E11, 1E9, flags);
403 }
404
405 static int
timdev_setup_sec_periodic(void)406 timdev_setup_sec_periodic(void)
407 {
408 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
409 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;
410
411 /* Periodic mode with 1 sec resolution */
412 return _timdev_setup(0, NSECPERSEC, flags);
413 }
414
415 static int
timdev_setup_sec_multicore(void)416 timdev_setup_sec_multicore(void)
417 {
418 uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
419
420 /* Max timeout is 100sec and bucket interval is 1sec */
421 return _timdev_setup(1E11, 1E9, flags);
422 }
423
424 static void
timdev_teardown(void)425 timdev_teardown(void)
426 {
427 rte_event_timer_adapter_stop(timdev);
428 rte_event_timer_adapter_free(timdev);
429
430 rte_mempool_free(eventdev_test_mempool);
431 }
432
433 static inline int
test_timer_state(void)434 test_timer_state(void)
435 {
436 struct rte_event_timer *ev_tim;
437 struct rte_event ev;
438 const struct rte_event_timer tim = {
439 .ev.op = RTE_EVENT_OP_NEW,
440 .ev.queue_id = 0,
441 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
442 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
443 .ev.event_type = RTE_EVENT_TYPE_TIMER,
444 .state = RTE_EVENT_TIMER_NOT_ARMED,
445 };
446
447
448 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
449 *ev_tim = tim;
450 ev_tim->ev.event_ptr = ev_tim;
451 ev_tim->timeout_ticks = CALC_TICKS(120);
452
453 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
454 "Armed timer exceeding max_timeout.");
455 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
456 "Improper timer state set expected %d returned %d",
457 RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
458
459 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
460 ev_tim->timeout_ticks = CALC_TICKS(10);
461
462 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
463 "Failed to arm timer with proper timeout.");
464 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
465 "Improper timer state set expected %d returned %d",
466 RTE_EVENT_TIMER_ARMED, ev_tim->state);
467
468 if (!using_services)
469 rte_delay_us(20);
470 else
471 rte_delay_us(1000 + 200);
472 TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
473 "Armed timer failed to trigger.");
474
475 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
476 ev_tim->timeout_ticks = CALC_TICKS(90);
477 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
478 "Failed to arm timer with proper timeout.");
479 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
480 1, "Failed to cancel armed timer");
481 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
482 "Improper timer state set expected %d returned %d",
483 RTE_EVENT_TIMER_CANCELED, ev_tim->state);
484
485 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
486
487 return TEST_SUCCESS;
488 }
489
490 static inline int
_arm_timers(uint64_t timeout_tcks,uint64_t timers)491 _arm_timers(uint64_t timeout_tcks, uint64_t timers)
492 {
493 uint64_t i;
494 struct rte_event_timer *ev_tim;
495 const struct rte_event_timer tim = {
496 .ev.op = RTE_EVENT_OP_NEW,
497 .ev.queue_id = 0,
498 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
499 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
500 .ev.event_type = RTE_EVENT_TYPE_TIMER,
501 .state = RTE_EVENT_TIMER_NOT_ARMED,
502 .timeout_ticks = CALC_TICKS(timeout_tcks),
503 };
504
505 for (i = 0; i < timers; i++) {
506
507 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
508 (void **)&ev_tim),
509 "mempool alloc failed");
510 *ev_tim = tim;
511 ev_tim->ev.event_ptr = ev_tim;
512
513 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
514 1), 1, "Failed to arm timer %d",
515 rte_errno);
516 }
517
518 return TEST_SUCCESS;
519 }
520
521 static inline int
_wait_timer_triggers(uint64_t wait_sec,uint64_t arm_count,uint64_t cancel_count)522 _wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
523 uint64_t cancel_count)
524 {
525 uint8_t valid_event;
526 uint64_t events = 0;
527 uint64_t wait_start, max_wait;
528 struct rte_event ev;
529
530 max_wait = rte_get_timer_hz() * wait_sec;
531 wait_start = rte_get_timer_cycles();
532 while (1) {
533 if (rte_get_timer_cycles() - wait_start > max_wait) {
534 if (events + cancel_count != arm_count)
535 TEST_ASSERT_SUCCESS(max_wait,
536 "Max time limit for timers exceeded.");
537 break;
538 }
539
540 valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
541 if (!valid_event)
542 continue;
543
544 rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
545 events++;
546 }
547
548 return TEST_SUCCESS;
549 }
550
551 static inline int
test_timer_arm(void)552 test_timer_arm(void)
553 {
554 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
555 "Failed to arm timers");
556 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
557 "Timer triggered count doesn't match arm count");
558 return TEST_SUCCESS;
559 }
560
561 static inline int
test_timer_arm_periodic(void)562 test_timer_arm_periodic(void)
563 {
564 TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS),
565 "Failed to arm timers");
566 /* With a resolution of 100ms and wait time of 1sec,
567 * there will be 10 * MAX_TIMERS periodic timer triggers.
568 */
569 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
570 "Timer triggered count doesn't match arm count");
571 return TEST_SUCCESS;
572 }
573
574 static int
_arm_wrapper(void * arg)575 _arm_wrapper(void *arg)
576 {
577 RTE_SET_USED(arg);
578
579 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
580 "Failed to arm timers");
581
582 return TEST_SUCCESS;
583 }
584
585 static inline int
test_timer_arm_multicore(void)586 test_timer_arm_multicore(void)
587 {
588
589 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
590 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);
591
592 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
593 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);
594
595 rte_eal_mp_wait_lcore();
596 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
597 "Timer triggered count doesn't match arm count");
598
599 return TEST_SUCCESS;
600 }
601
602 #define MAX_BURST 16
603 static inline int
_arm_timers_burst(uint64_t timeout_tcks,uint64_t timers)604 _arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
605 {
606 uint64_t i;
607 int j;
608 struct rte_event_timer *ev_tim[MAX_BURST];
609 const struct rte_event_timer tim = {
610 .ev.op = RTE_EVENT_OP_NEW,
611 .ev.queue_id = 0,
612 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
613 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
614 .ev.event_type = RTE_EVENT_TYPE_TIMER,
615 .state = RTE_EVENT_TIMER_NOT_ARMED,
616 .timeout_ticks = CALC_TICKS(timeout_tcks),
617 };
618
619 for (i = 0; i < timers / MAX_BURST; i++) {
620 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
621 eventdev_test_mempool,
622 (void **)ev_tim, MAX_BURST),
623 "mempool alloc failed");
624
625 for (j = 0; j < MAX_BURST; j++) {
626 *ev_tim[j] = tim;
627 ev_tim[j]->ev.event_ptr = ev_tim[j];
628 }
629
630 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
631 ev_tim, tim.timeout_ticks, MAX_BURST),
632 MAX_BURST, "Failed to arm timer %d", rte_errno);
633 }
634
635 return TEST_SUCCESS;
636 }
637
638 static inline int
test_timer_arm_burst(void)639 test_timer_arm_burst(void)
640 {
641 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
642 "Failed to arm timers");
643 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
644 "Timer triggered count doesn't match arm count");
645
646 return TEST_SUCCESS;
647 }
648
649 static inline int
test_timer_arm_burst_periodic(void)650 test_timer_arm_burst_periodic(void)
651 {
652 TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS),
653 "Failed to arm timers");
654 /* With a resolution of 100ms and wait time of 1sec,
655 * there will be 10 * MAX_TIMERS periodic timer triggers.
656 */
657 TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
658 "Timer triggered count doesn't match arm count");
659
660 return TEST_SUCCESS;
661 }
662
663 static int
_arm_wrapper_burst(void * arg)664 _arm_wrapper_burst(void *arg)
665 {
666 RTE_SET_USED(arg);
667
668 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
669 "Failed to arm timers");
670
671 return TEST_SUCCESS;
672 }
673
674 static inline int
test_timer_arm_burst_multicore(void)675 test_timer_arm_burst_multicore(void)
676 {
677 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
678 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);
679
680 rte_eal_mp_wait_lcore();
681 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
682 "Timer triggered count doesn't match arm count");
683
684 return TEST_SUCCESS;
685 }
686
687 static inline int
test_timer_cancel_periodic(void)688 test_timer_cancel_periodic(void)
689 {
690 uint64_t i;
691 struct rte_event_timer *ev_tim;
692 const struct rte_event_timer tim = {
693 .ev.op = RTE_EVENT_OP_NEW,
694 .ev.queue_id = 0,
695 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
696 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
697 .ev.event_type = RTE_EVENT_TYPE_TIMER,
698 .state = RTE_EVENT_TIMER_NOT_ARMED,
699 .timeout_ticks = CALC_TICKS(1),
700 };
701
702 for (i = 0; i < MAX_TIMERS; i++) {
703 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
704 (void **)&ev_tim),
705 "mempool alloc failed");
706 *ev_tim = tim;
707 ev_tim->ev.event_ptr = ev_tim;
708
709 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
710 1), 1, "Failed to arm timer %d",
711 rte_errno);
712
713 rte_delay_us(100 + (i % 5000));
714
715 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
716 &ev_tim, 1), 1,
717 "Failed to cancel event timer %d", rte_errno);
718 rte_mempool_put(eventdev_test_mempool, ev_tim);
719 }
720
721
722 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
723 MAX_TIMERS),
724 "Timer triggered count doesn't match arm, cancel count");
725
726 return TEST_SUCCESS;
727 }
728
729 static inline int
test_timer_cancel(void)730 test_timer_cancel(void)
731 {
732 uint64_t i;
733 struct rte_event_timer *ev_tim;
734 const struct rte_event_timer tim = {
735 .ev.op = RTE_EVENT_OP_NEW,
736 .ev.queue_id = 0,
737 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
738 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
739 .ev.event_type = RTE_EVENT_TYPE_TIMER,
740 .state = RTE_EVENT_TIMER_NOT_ARMED,
741 .timeout_ticks = CALC_TICKS(20),
742 };
743
744 for (i = 0; i < MAX_TIMERS; i++) {
745 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
746 (void **)&ev_tim),
747 "mempool alloc failed");
748 *ev_tim = tim;
749 ev_tim->ev.event_ptr = ev_tim;
750
751 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
752 1), 1, "Failed to arm timer %d",
753 rte_errno);
754
755 rte_delay_us(100 + (i % 5000));
756
757 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
758 &ev_tim, 1), 1,
759 "Failed to cancel event timer %d", rte_errno);
760 rte_mempool_put(eventdev_test_mempool, ev_tim);
761 }
762
763
764 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
765 MAX_TIMERS),
766 "Timer triggered count doesn't match arm, cancel count");
767
768 return TEST_SUCCESS;
769 }
770
771 static int
_cancel_producer(uint64_t timeout_tcks,uint64_t timers)772 _cancel_producer(uint64_t timeout_tcks, uint64_t timers)
773 {
774 uint64_t i;
775 struct rte_event_timer *ev_tim;
776 const struct rte_event_timer tim = {
777 .ev.op = RTE_EVENT_OP_NEW,
778 .ev.queue_id = 0,
779 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
780 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
781 .ev.event_type = RTE_EVENT_TYPE_TIMER,
782 .state = RTE_EVENT_TIMER_NOT_ARMED,
783 .timeout_ticks = CALC_TICKS(timeout_tcks),
784 };
785
786 for (i = 0; i < timers; i++) {
787 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
788 (void **)&ev_tim),
789 "mempool alloc failed");
790
791 *ev_tim = tim;
792 ev_tim->ev.event_ptr = ev_tim;
793
794 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
795 1), 1, "Failed to arm timer %d",
796 rte_errno);
797
798 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
799 "Failed to arm event timer");
800
801 while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
802 ;
803 }
804
805 return TEST_SUCCESS;
806 }
807
808 static int
_cancel_producer_burst(uint64_t timeout_tcks,uint64_t timers)809 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
810 {
811
812 uint64_t i;
813 int j, ret;
814 struct rte_event_timer *ev_tim[MAX_BURST];
815 const struct rte_event_timer tim = {
816 .ev.op = RTE_EVENT_OP_NEW,
817 .ev.queue_id = 0,
818 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
819 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
820 .ev.event_type = RTE_EVENT_TYPE_TIMER,
821 .state = RTE_EVENT_TIMER_NOT_ARMED,
822 .timeout_ticks = CALC_TICKS(timeout_tcks),
823 };
824 int arm_count = 0;
825
826 for (i = 0; i < timers / MAX_BURST; i++) {
827 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
828 eventdev_test_mempool,
829 (void **)ev_tim, MAX_BURST),
830 "mempool alloc failed");
831
832 for (j = 0; j < MAX_BURST; j++) {
833 *ev_tim[j] = tim;
834 ev_tim[j]->ev.event_ptr = ev_tim[j];
835 }
836
837 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
838 ev_tim, tim.timeout_ticks, MAX_BURST),
839 MAX_BURST, "Failed to arm timer %d", rte_errno);
840
841 for (j = 0; j < MAX_BURST; j++)
842 TEST_ASSERT_EQUAL(ev_tim[j]->state,
843 RTE_EVENT_TIMER_ARMED,
844 "Event timer not armed, state = %d",
845 ev_tim[j]->state);
846
847 ret = rte_ring_enqueue_bulk(timer_producer_ring,
848 (void **)ev_tim, MAX_BURST, NULL);
849 TEST_ASSERT_EQUAL(ret, MAX_BURST,
850 "Failed to enqueue event timers to ring");
851 arm_count += ret;
852 }
853
854 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
855 "Failed to arm expected number of event timers");
856
857 return TEST_SUCCESS;
858 }
859
860 static int
_cancel_producer_wrapper(void * args)861 _cancel_producer_wrapper(void *args)
862 {
863 RTE_SET_USED(args);
864
865 return _cancel_producer(20, MAX_TIMERS);
866 }
867
868 static int
_cancel_producer_burst_wrapper(void * args)869 _cancel_producer_burst_wrapper(void *args)
870 {
871 RTE_SET_USED(args);
872
873 return _cancel_producer_burst(100, MAX_TIMERS);
874 }
875
876 static int
_cancel_thread(void * args)877 _cancel_thread(void *args)
878 {
879 RTE_SET_USED(args);
880 struct rte_event_timer *ev_tim = NULL;
881 uint64_t cancel_count = 0;
882 uint16_t ret;
883
884 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
885 if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
886 continue;
887
888 ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
889 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
890 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
891 cancel_count++;
892 }
893
894 return TEST_SUCCESS;
895 }
896
897 static int
_cancel_burst_thread(void * args)898 _cancel_burst_thread(void *args)
899 {
900 RTE_SET_USED(args);
901
902 int ret, i, n;
903 struct rte_event_timer *ev_tim[MAX_BURST];
904 uint64_t cancel_count = 0;
905 uint64_t dequeue_count = 0;
906
907 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
908 n = rte_ring_dequeue_burst(timer_producer_ring,
909 (void **)ev_tim, MAX_BURST, NULL);
910 if (!n)
911 continue;
912
913 dequeue_count += n;
914
915 for (i = 0; i < n; i++)
916 TEST_ASSERT_EQUAL(ev_tim[i]->state,
917 RTE_EVENT_TIMER_ARMED,
918 "Event timer not armed, state = %d",
919 ev_tim[i]->state);
920
921 ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
922 TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
923 "event timers");
924 rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
925 RTE_MIN(ret, MAX_BURST));
926
927 cancel_count += ret;
928 }
929
930 TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
931 "Failed to cancel expected number of timers: "
932 "expected = %d, cancel_count = %"PRIu64", "
933 "dequeue_count = %"PRIu64"\n", MAX_TIMERS,
934 cancel_count, dequeue_count);
935
936 return TEST_SUCCESS;
937 }
938
939 static inline int
test_timer_cancel_multicore(void)940 test_timer_cancel_multicore(void)
941 {
942 arm_done = 0;
943 timer_producer_ring = rte_ring_create("timer_cancel_queue",
944 MAX_TIMERS * 2, rte_socket_id(), 0);
945 TEST_ASSERT_NOT_NULL(timer_producer_ring,
946 "Unable to reserve memory for ring");
947
948 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
949 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
950 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);
951
952 rte_eal_wait_lcore(test_lcore1);
953 rte_eal_wait_lcore(test_lcore2);
954 arm_done = 1;
955 rte_eal_wait_lcore(test_lcore3);
956 rte_ring_free(timer_producer_ring);
957
958 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
959 MAX_TIMERS * 2),
960 "Timer triggered count doesn't match arm count");
961
962 return TEST_SUCCESS;
963 }
964
965 static inline int
test_timer_cancel_burst_multicore(void)966 test_timer_cancel_burst_multicore(void)
967 {
968 arm_done = 0;
969 timer_producer_ring = rte_ring_create("timer_cancel_queue",
970 MAX_TIMERS * 2, rte_socket_id(), 0);
971 TEST_ASSERT_NOT_NULL(timer_producer_ring,
972 "Unable to reserve memory for ring");
973
974 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
975 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
976 test_lcore1);
977
978 rte_eal_wait_lcore(test_lcore1);
979 arm_done = 1;
980 rte_eal_wait_lcore(test_lcore2);
981 rte_ring_free(timer_producer_ring);
982
983 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
984 MAX_TIMERS),
985 "Timer triggered count doesn't match arm count");
986
987 return TEST_SUCCESS;
988 }
989
990 static inline int
test_timer_cancel_random(void)991 test_timer_cancel_random(void)
992 {
993 uint64_t i;
994 uint64_t events_canceled = 0;
995 struct rte_event_timer *ev_tim;
996 const struct rte_event_timer tim = {
997 .ev.op = RTE_EVENT_OP_NEW,
998 .ev.queue_id = 0,
999 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1000 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1001 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1002 .state = RTE_EVENT_TIMER_NOT_ARMED,
1003 .timeout_ticks = CALC_TICKS(20),
1004 };
1005
1006 for (i = 0; i < MAX_TIMERS; i++) {
1007
1008 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
1009 (void **)&ev_tim),
1010 "mempool alloc failed");
1011 *ev_tim = tim;
1012 ev_tim->ev.event_ptr = ev_tim;
1013
1014 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
1015 1), 1, "Failed to arm timer %d",
1016 rte_errno);
1017
1018 if (rte_rand() & 1) {
1019 rte_delay_us(100 + (i % 5000));
1020 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
1021 timdev,
1022 &ev_tim, 1), 1,
1023 "Failed to cancel event timer %d", rte_errno);
1024 rte_mempool_put(eventdev_test_mempool, ev_tim);
1025 events_canceled++;
1026 }
1027 }
1028
1029 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
1030 events_canceled),
1031 "Timer triggered count doesn't match arm, cancel count");
1032
1033 return TEST_SUCCESS;
1034 }
1035
1036 /* Check that the adapter can be created correctly */
1037 static int
adapter_create(void)1038 adapter_create(void)
1039 {
1040 int adapter_id = 0;
1041 struct rte_event_timer_adapter *adapter, *adapter2;
1042
1043 struct rte_event_timer_adapter_conf conf = {
1044 .event_dev_id = evdev + 1, // invalid event dev id
1045 .timer_adapter_id = adapter_id,
1046 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
1047 .timer_tick_ns = NSECPERSEC / 10,
1048 .max_tmo_ns = 180 * NSECPERSEC,
1049 .nb_timers = MAX_TIMERS,
1050 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
1051 };
1052 uint32_t caps = 0;
1053
1054 /* Test invalid conf */
1055 adapter = rte_event_timer_adapter_create(&conf);
1056 TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
1057 "event device id");
1058 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
1059 "invalid event device id");
1060
1061 /* Test valid conf */
1062 conf.event_dev_id = evdev;
1063 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
1064 "failed to get adapter capabilities");
1065 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
1066 adapter = rte_event_timer_adapter_create_ext(&conf,
1067 test_port_conf_cb,
1068 NULL);
1069 else
1070 adapter = rte_event_timer_adapter_create(&conf);
1071 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
1072 "configuration");
1073
1074 /* Test existing id */
1075 adapter2 = rte_event_timer_adapter_create(&conf);
1076 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
1077 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
1078 "id");
1079
1080 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
1081 "Failed to free adapter");
1082
1083 return TEST_SUCCESS;
1084 }
1085
1086
/* Test that adapter can be freed correctly. */
static int
adapter_free(void)
{
	/* The adapter must be stopped before freeing is legal */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	/* This case runs without the common teardown (see the test suite
	 * table), so release the timer-object mempool here.
	 */
	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}
1110
/* Test that adapter info can be retrieved and is correct. */
static int
adapter_get_info(void)
{
	struct rte_event_timer_adapter_info info;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
			"Failed to get adapter info");

	/* NOTE(review): in the service-core (SW driver) case the adapter is
	 * expected to own event port 1 — presumably because the application
	 * port 0 is created first during setup; confirm against
	 * testsuite_setup/_timdev_setup.
	 */
	if (using_services)
		TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
				"Expected port id = 1, got port id = %d",
				info.event_dev_port_id);

	return TEST_SUCCESS;
}
1127
1128 /* Test adapter lookup via adapter ID. */
1129 static int
adapter_lookup(void)1130 adapter_lookup(void)
1131 {
1132 struct rte_event_timer_adapter *adapter;
1133
1134 adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
1135 TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");
1136
1137 return TEST_SUCCESS;
1138 }
1139
/* Test that starting an already-started adapter is reported as such. */
static int
adapter_start(void)
{
	/* The setup helper is expected to leave the adapter running, so the
	 * explicit start below must return -EALREADY.
	 */
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10,
			RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES),
			"Failed to start adapter");
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}
1151
/* Test that adapter stops correctly. */
static int
adapter_stop(void)
{
	struct rte_event_timer_adapter *l_adapter = NULL;

	/* Test adapter stop */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop event adapter");

	/* Stopping a NULL adapter must be rejected */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
			"Erroneously stopped null event adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free adapter");

	/* This case runs without the common teardown, so release the
	 * timer-object mempool here.
	 */
	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}
1172
/* Test increment and reset of ev_enq_count stat */
static int
stat_inc_reset_ev_enq(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	struct rte_event_timer_adapter_stats stats;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	/* Initialize every timer from the template; each expiry event
	 * carries a pointer back to its own timer object.
	 */
	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* The enqueue counter must start at zero */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
			"startup");

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	/* Let all .5 sec timeouts elapse */
	rte_delay_ms(1000);

	/* Drain all expiry events; bound the polling so the loop cannot
	 * spin forever if events are lost.
	 */
#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	/* Check stats again: one enqueue per expired timer is expected */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
			"Expected enqueue stat = %d; got %d", num_evtims,
			(int)stats.ev_enq_count);

	/* Reset and check again */
	ret = rte_event_timer_adapter_stats_reset(timdev);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
			"Expected enqueue stat = %d; got %d", 0,
			(int)stats.ev_enq_count);

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}
1265
1266 /* Test various cases in arming timers */
1267 static int
event_timer_arm(void)1268 event_timer_arm(void)
1269 {
1270 uint16_t n;
1271 int ret;
1272 struct rte_event_timer_adapter *adapter = timdev;
1273 struct rte_event_timer *evtim = NULL;
1274 struct rte_event evs[BATCH_SIZE];
1275 const struct rte_event_timer init_tim = {
1276 .ev.op = RTE_EVENT_OP_NEW,
1277 .ev.queue_id = TEST_QUEUE_ID,
1278 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1279 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1280 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1281 .state = RTE_EVENT_TIMER_NOT_ARMED,
1282 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1283 };
1284
1285 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1286 if (evtim == NULL) {
1287 /* Failed to get an event timer object */
1288 return TEST_FAILED;
1289 }
1290
1291 /* Set up a timer */
1292 *evtim = init_tim;
1293 evtim->ev.event_ptr = evtim;
1294
1295 /* Test single timer arm succeeds */
1296 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1297 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1298 rte_strerror(rte_errno));
1299 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
1300 "in incorrect state");
1301
1302 /* Test arm of armed timer fails */
1303 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1304 TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
1305 "rte_event_timer_arm_burst: 0, got: %d", ret);
1306 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
1307 "after arming already armed timer");
1308
1309 /* Let timer expire */
1310 rte_delay_ms(1000);
1311
1312 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1313 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
1314 "events from event device");
1315
1316 rte_mempool_put(eventdev_test_mempool, evtim);
1317
1318 return TEST_SUCCESS;
1319 }
1320
/* This test checks that repeated references to the same event timer in the
 * arm request work as expected; only the first one through should succeed.
 */
static int
event_timer_arm_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Arm the same timer twice in one burst: only the first entry should
	 * be accepted, and the duplicate should set rte_errno to EALREADY.
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
	TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
			"rte_event_timer_arm_burst");
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-arm");

	/* Let timer expire */
	rte_delay_ms(600);

	/* Exactly one expiry event should have been generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
			"expected: 1, actual: %d", n);

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1370
1371 /* Test the timer expiry event is generated at the expected time. */
1372 static int
event_timer_arm_expiry(void)1373 event_timer_arm_expiry(void)
1374 {
1375 uint16_t n;
1376 int ret;
1377 struct rte_event_timer_adapter *adapter = timdev;
1378 struct rte_event_timer *evtim = NULL;
1379 struct rte_event_timer *evtim2 = NULL;
1380 struct rte_event evs[BATCH_SIZE];
1381 const struct rte_event_timer init_tim = {
1382 .ev.op = RTE_EVENT_OP_NEW,
1383 .ev.queue_id = TEST_QUEUE_ID,
1384 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1385 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1386 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1387 .state = RTE_EVENT_TIMER_NOT_ARMED,
1388 };
1389
1390 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1391 if (evtim == NULL) {
1392 /* Failed to get an event timer object */
1393 return TEST_FAILED;
1394 }
1395
1396 /* Set up an event timer */
1397 *evtim = init_tim;
1398 evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs
1399 evtim->ev.event_ptr = evtim;
1400
1401 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1402 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
1403 rte_strerror(rte_errno));
1404 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
1405 "timer in incorrect state");
1406
1407 rte_delay_ms(2999);
1408
1409 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1410 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");
1411
1412 /* Delay 100 ms to account for the adapter tick window - should let us
1413 * dequeue one event
1414 */
1415 rte_delay_ms(100);
1416
1417 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1418 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
1419 "expiry events", n);
1420 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
1421 "Dequeued unexpected type of event");
1422
1423 /* Check that we recover the original event timer and then free it */
1424 evtim2 = evs[0].event_ptr;
1425 TEST_ASSERT_EQUAL(evtim, evtim2,
1426 "Failed to recover pointer to original event timer");
1427 rte_mempool_put(eventdev_test_mempool, evtim2);
1428
1429 return TEST_SUCCESS;
1430 }
1431
/* Check that rearming a timer works as expected. */
static int
event_timer_arm_rearm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec
	evtim->ev.event_ptr = evtim;

	/* Arm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	/* The first expiry event should now be available */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Recover the timer through the event that was dequeued. */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");

	/* Need to reset state in case implementation can't do it */
	evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;

	/* Rearm it */
	ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));

	/* Add 100ms to account for the adapter tick window */
	rte_delay_ms(100 + 100);

	/* The rearmed timer should produce a second expiry event */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	/* Free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
			"Failed to recover pointer to original event timer");
	rte_mempool_put(eventdev_test_mempool, evtim2);

	return TEST_SUCCESS;
}
1501
/* Check that the adapter handles the max specified number of timers as
 * expected.
 */
static int
event_timer_arm_max(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	/* Initialize every timer from the template; each expiry event
	 * carries a pointer back to its own timer object.
	 */
	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	/* Let all .5 sec timeouts elapse */
	rte_delay_ms(1000);

	/* Drain all expiry events; bound the polling so the loop cannot
	 * spin forever if events are lost.
	 */
#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}
1573
1574 /* Check that creating an event timer with incorrect event sched type fails. */
1575 static int
event_timer_arm_invalid_sched_type(void)1576 event_timer_arm_invalid_sched_type(void)
1577 {
1578 int ret;
1579 struct rte_event_timer *evtim = NULL;
1580 const struct rte_event_timer init_tim = {
1581 .ev.op = RTE_EVENT_OP_NEW,
1582 .ev.queue_id = TEST_QUEUE_ID,
1583 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1584 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1585 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1586 .state = RTE_EVENT_TIMER_NOT_ARMED,
1587 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1588 };
1589
1590 if (!using_services)
1591 return -ENOTSUP;
1592
1593 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1594 if (evtim == NULL) {
1595 /* Failed to get an event timer object */
1596 return TEST_FAILED;
1597 }
1598
1599 *evtim = init_tim;
1600 evtim->ev.event_ptr = evtim;
1601 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type
1602
1603 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1604 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
1605 "sched type, but didn't");
1606 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
1607 " arm fail with invalid queue");
1608
1609 rte_mempool_put(eventdev_test_mempool, &evtim);
1610
1611 return TEST_SUCCESS;
1612 }
1613
/* Check that creating an event timer with a timeout value that is too small or
 * too big fails.
 */
static int
event_timer_arm_invalid_timeout(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* A zero-tick timeout must be rejected and leave the timer in the
	 * TOOEARLY error state.
	 */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = 0; // timeout too small

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
			"Unexpected event timer state");

	/* A timeout beyond the adapter's maximum (presumably 1800 ticks for
	 * this setup — confirm against _timdev_setup) must be rejected and
	 * leave the timer in the TOOLATE error state.
	 */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Unexpected event timer state");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1666
/* Test canceling timers: canceling uninitialized or unarmed timers must
 * fail, and canceling an armed timer must suppress its expiry event.
 */
static int
event_timer_cancel(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Check that cancelling an uninited timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"uninited timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling uninited timer");

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	/* Check that cancelling an inited but unarmed timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"unarmed timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling unarmed timer");

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"evtim in incorrect state");

	/* Delay 1 sec */
	rte_delay_ms(1000);

	/* Cancel the armed timer well before its 3 sec timeout */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
			"evtim in incorrect state");

	/* Wait past the original timeout */
	rte_delay_ms(3000);

	/* Make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1734
/* Test that referencing the same timer twice in one cancel burst only
 * succeeds for the first reference, with EALREADY for the duplicate.
 */
static int
event_timer_cancel_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"timer in unexpected state");

	/* Now, test that referencing the same timer twice in the same call
	 * fails
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
			RTE_DIM(evtim_arr));

	/* Two requests to cancel same timer, only one should succeed */
	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
			"twice");

	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-cancel: rte_errno = %d", rte_errno);

	/* Wait past the original timeout */
	rte_delay_ms(3000);

	/* Still make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}
1794
/* Check that event timer adapter tick resolution works as expected by testing
 * the number of adapter ticks that occur within a particular time interval.
 */
static int
adapter_tick_resolution(void)
{
	struct rte_event_timer_adapter_stats stats;
	uint64_t adapter_tick_count;

	/* Only run this test in the software driver case */
	if (!using_services)
		return -ENOTSUP;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
			"Failed to reset stats");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");
	TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
			"not zeroed out");

	/* Delay 1 second; should let at least 10 ticks occur with the default
	 * adapter configuration used by this test.
	 */
	rte_delay_ms(1000);

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");

	/* Allow a small upper margin (10-12 ticks) for scheduling jitter */
	adapter_tick_count = stats.adapter_tick_count;
	TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
			"Expected 10-12 adapter ticks, got %"PRIu64"\n",
			adapter_tick_count);

	return TEST_SUCCESS;
}
1831
/* Test that the maximum number of adapters can be created and freed, and
 * that each service-backed adapter registers (and releases) one service.
 */
static int
adapter_create_max(void)
{
	int i;
	uint32_t svc_start_count, svc_end_count;
	struct rte_event_timer_adapter *adapters[
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev,
		// timer_adapter_id set in loop
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};

	if (!using_services)
		return -ENOTSUP;

	svc_start_count = rte_service_get_count();

	/* This test expects that there are sufficient service IDs available
	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
	 * (the SW event device, for example).
	 */
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
		conf.timer_adapter_id = i;
		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb, NULL);
		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
				"%d", i);
	}

	/* One more than the maximum must be rejected */
	conf.timer_adapter_id = i;
	adapters[i] = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");

	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
	 * have been created
	 */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			"Failed to create expected number of services");

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
				"Failed to free adapter %d", i);

	/* Check that service count is back to where it was at start */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
			"correct number of services");

	return TEST_SUCCESS;
}
1891
/* Functional test suite table: each entry pairs a per-case setup/teardown
 * (adapter + mempool lifecycle) with one test function.
 */
static struct unit_test_suite event_timer_adptr_functional_testsuite  = {
	.suite_name = "event timer functional test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_state),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_periodic),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm_burst),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_burst_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel),
		TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown,
				test_timer_cancel_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel_random),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_multicore),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_burst_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_burst_multicore),
		TEST_CASE(adapter_create),
		/* adapter_free and adapter_stop free the adapter/mempool
		 * themselves, hence the NULL teardown; adapter_start creates
		 * its own adapter, hence the NULL setup.
		 */
		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_get_info),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_lookup),
		TEST_CASE_ST(NULL, timdev_teardown,
				adapter_start),
		TEST_CASE_ST(timdev_setup_msec, NULL,
				adapter_stop),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				stat_inc_reset_ev_enq),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_expiry),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_rearm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_sched_type),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_timeout),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_tick_resolution),
		TEST_CASE(adapter_create_max),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
1957
/* Entry point: run the event timer adapter functional test suite. */
static int
test_event_timer_adapter_func(void)
{
	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
}
1963
1964 #endif /* !RTE_EXEC_ENV_WINDOWS */
1965
1966 REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
1967