1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 * Copyright(c) 2017-2018 Intel Corporation.
4 */
5
6 #include <rte_atomic.h>
7 #include <rte_common.h>
8 #include <rte_cycles.h>
9 #include <rte_debug.h>
10 #include <rte_eal.h>
11 #include <rte_ethdev.h>
12 #include <rte_eventdev.h>
13 #include <rte_event_timer_adapter.h>
14 #include <rte_mempool.h>
15 #include <rte_launch.h>
16 #include <rte_lcore.h>
17 #include <rte_per_lcore.h>
18 #include <rte_random.h>
19 #include <rte_bus_vdev.h>
20 #include <rte_service.h>
21 #include <stdbool.h>
22
23 #include "test.h"
24
25 /* 4K timers correspond to the sw evdev max inflight events */
26 #define MAX_TIMERS (4 * 1024)
27 #define BKT_TCK_NSEC
28
29 #define NSECPERSEC 1E9
30 #define BATCH_SIZE 16
31 /* Both the app lcore and adapter ports are linked to this queue */
32 #define TEST_QUEUE_ID 0
33 /* Port the application dequeues from */
34 #define TEST_PORT_ID 0
35 #define TEST_ADAPTER_ID 0
36
37 /* Handle log statements in same manner as test macros */
38 #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
39
40 static int evdev;
41 static struct rte_event_timer_adapter *timdev;
42 static struct rte_mempool *eventdev_test_mempool;
43 static struct rte_ring *timer_producer_ring;
44 static uint64_t global_bkt_tck_ns;
45 static uint64_t global_info_bkt_tck_ns;
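/* Flag set by the producer lcores to tell the cancel thread that arming is done */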
46 static volatile uint8_t arm_done;
47
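/*
 * Scale a tick count expressed in units of the requested bucket tick
 * (global_bkt_tck_ns) into the adapter's actual resolution as reported by
 * rte_event_timer_adapter_get_info() (global_info_bkt_tck_ns).
 */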
48 #define CALC_TICKS(tks) \
49 ((tks * global_bkt_tck_ns) / global_info_bkt_tck_ns)
50
51
52 static bool using_services;
53 static uint32_t test_lcore1;
54 static uint32_t test_lcore2;
55 static uint32_t test_lcore3;
56 static uint32_t sw_evdev_slcore;
57 static uint32_t sw_adptr_slcore;
58
59 static inline void
60 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
61 struct rte_event_dev_info *info)
62 {
63 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
64 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
65 dev_conf->nb_event_ports = 1;
66 dev_conf->nb_event_queues = 1;
67 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
68 dev_conf->nb_event_port_dequeue_depth =
69 info->max_event_port_dequeue_depth;
70 dev_conf->nb_event_port_enqueue_depth =
71 info->max_event_port_enqueue_depth;
74 dev_conf->nb_events_limit =
75 info->max_num_events;
76 }
77
78 static inline int
79 eventdev_setup(void)
80 {
81 int ret;
82 struct rte_event_dev_config dev_conf;
83 struct rte_event_dev_info info;
84 uint32_t service_id;
85
86 ret = rte_event_dev_info_get(evdev, &info);
87 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
88 TEST_ASSERT(info.max_num_events < 0 ||
89 info.max_num_events >= (int32_t)MAX_TIMERS,
90 "ERROR max_num_events=%d < max_events=%d",
91 info.max_num_events, MAX_TIMERS);
92
93 devconf_set_default_sane_values(&dev_conf, &info);
94 ret = rte_event_dev_configure(evdev, &dev_conf);
95 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
96
97 ret = rte_event_queue_setup(evdev, 0, NULL);
98 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);
99
100 /* Configure event port */
101 ret = rte_event_port_setup(evdev, 0, NULL);
102 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
103 ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
104 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);
105
106 /* If this is a software event device, map and start its service */
107 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
108 TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
109 "Failed to add service core");
110 TEST_ASSERT_SUCCESS(rte_service_lcore_start(
111 sw_evdev_slcore),
112 "Failed to start service core");
113 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
114 service_id, sw_evdev_slcore, 1),
115 "Failed to map evdev service");
116 TEST_ASSERT_SUCCESS(rte_service_runstate_set(
117 service_id, 1),
118 "Failed to start evdev service");
119 }
120
121 ret = rte_event_dev_start(evdev);
122 TEST_ASSERT_SUCCESS(ret, "Failed to start device");
123
124 return TEST_SUCCESS;
125 }
126
127 static int
128 testsuite_setup(void)
129 {
130 /* Some of the multithreaded tests require 3 other lcores to run */
131 unsigned int required_lcore_count = 4;
132 uint32_t service_id;
133
134 /* To make it easier to map services later if needed, just reset
135 * service core state.
136 */
137 (void) rte_service_lcore_reset_all();
138
139 if (!rte_event_dev_count()) {
140 /* If there is no hardware eventdev, or no software vdev was
141 * specified on the command line, create an instance of
142 * event_sw.
143 */
144 LOG_DBG("Failed to find a valid event device... testing with"
145 " event_sw device\n");
146 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
147 "Error creating eventdev");
148 evdev = rte_event_dev_get_dev_id("event_sw0");
149 }
150
151 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
152 /* A software event device will use a software event timer
153 * adapter as well, so two more lcores are needed to serve
154 * as service cores.
155 */
156 required_lcore_count += 2;
157 using_services = true;
158 }
159
160 if (rte_lcore_count() < required_lcore_count) {
161 printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
162 required_lcore_count);
163 return TEST_SKIPPED;
164 }
165
166 /* Assign lcores for various tasks */
167 test_lcore1 = rte_get_next_lcore(-1, 1, 0);
168 test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
169 test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
170 if (using_services) {
171 sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
172 sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
173 }
174
175 return eventdev_setup();
176 }
177
178 static void
179 testsuite_teardown(void)
180 {
181 rte_event_dev_stop(evdev);
182 rte_event_dev_close(evdev);
183 }
184
185 static int
186 setup_adapter_service(struct rte_event_timer_adapter *adptr)
187 {
188 uint32_t adapter_service_id;
189 int ret;
190
191 /* retrieve service ids */
192 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
193 &adapter_service_id), "Failed to get event timer "
194 "adapter service id");
195 /* add a service core and start it */
196 ret = rte_service_lcore_add(sw_adptr_slcore);
197 TEST_ASSERT(ret == 0 || ret == -EALREADY,
198 "Failed to add service core");
199 ret = rte_service_lcore_start(sw_adptr_slcore);
200 TEST_ASSERT(ret == 0 || ret == -EALREADY,
201 "Failed to start service core");
202
203 /* map services to it */
204 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
205 sw_adptr_slcore, 1),
206 "Failed to map adapter service");
207
208 /* set services to running */
209 TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
210 "Failed to start event timer adapter service");
211
212 return TEST_SUCCESS;
213 }
214
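/*
 * Port configuration callback used when the adapter needs its own event
 * port: reconfigure the device with one extra port, set it up, and cache
 * the port id so subsequent calls reuse the same port.
 */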
215 static int
216 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
217 void *conf_arg)
218 {
219 struct rte_event_dev_config dev_conf;
220 struct rte_event_dev_info info;
221 struct rte_event_port_conf *port_conf, def_port_conf = {0};
222 uint32_t started;
223 static int port_allocated;
224 static uint8_t port_id;
225 int ret;
226
227 if (port_allocated) {
228 *event_port_id = port_id;
229 return 0;
230 }
231
232 RTE_SET_USED(id);
233
234 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
235 &started);
236 if (ret < 0)
237 return ret;
238
239 if (started)
240 rte_event_dev_stop(event_dev_id);
241
242 ret = rte_event_dev_info_get(evdev, &info);
243 if (ret < 0)
244 return ret;
245
246 devconf_set_default_sane_values(&dev_conf, &info);
247
248 port_id = dev_conf.nb_event_ports;
249 dev_conf.nb_event_ports++;
250
251 ret = rte_event_dev_configure(event_dev_id, &dev_conf);
252 if (ret < 0) {
253 if (started)
254 rte_event_dev_start(event_dev_id);
255 return ret;
256 }
257
258 if (conf_arg != NULL)
259 port_conf = conf_arg;
260 else {
261 port_conf = &def_port_conf;
262 ret = rte_event_port_default_conf_get(event_dev_id, port_id,
263 port_conf);
264 if (ret < 0)
265 return ret;
266 }
267
268 ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
269 if (ret < 0)
270 return ret;
271
272 *event_port_id = port_id;
273
274 if (started)
275 rte_event_dev_start(event_dev_id);
276
277 /* Reuse this port number next time this is called */
278 port_allocated = 1;
279
280 return 0;
281 }
282
283 static int
284 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns)
285 {
286 struct rte_event_timer_adapter_info info;
287 struct rte_event_timer_adapter_conf config = {
288 .event_dev_id = evdev,
289 .timer_adapter_id = TEST_ADAPTER_ID,
290 .timer_tick_ns = bkt_tck_ns,
291 .max_tmo_ns = max_tmo_ns,
292 .nb_timers = MAX_TIMERS * 10,
293 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
294 };
295 uint32_t caps = 0;
296 const char *pool_name = "timdev_test_pool";
297
298 global_bkt_tck_ns = bkt_tck_ns;
299
300 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
301 "failed to get adapter capabilities");
302 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
303 timdev = rte_event_timer_adapter_create_ext(&config,
304 test_port_conf_cb,
305 NULL);
306 setup_adapter_service(timdev);
307 using_services = true;
308 } else
309 timdev = rte_event_timer_adapter_create(&config);
310
311 TEST_ASSERT_NOT_NULL(timdev,
312 "failed to create event timer adapter");
313
314 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
315 "failed to start event timer adapter");
316
317 /* Create event timer mempool */
318 eventdev_test_mempool = rte_mempool_create(pool_name,
319 MAX_TIMERS * 2,
320 sizeof(struct rte_event_timer), /* element size*/
321 0, /* cache size*/
322 0, NULL, NULL, NULL, NULL,
323 rte_socket_id(), 0);
324 if (!eventdev_test_mempool) {
325 printf("ERROR creating mempool\n");
326 return TEST_FAILED;
327 }
328
329 rte_event_timer_adapter_get_info(timdev, &info);
330
331 global_info_bkt_tck_ns = info.min_resolution_ns;
332
333 return TEST_SUCCESS;
334 }
335
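/* When the adapter runs as a software service, use a coarser tick and a
 * longer max timeout; otherwise use finer microsecond-level resolution.
 */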
336 static int
337 timdev_setup_usec(void)
338 {
339 return using_services ?
340 /* Max timeout is 10,000us and bucket interval is 100us */
341 _timdev_setup(1E7, 1E5) :
342 /* Max timeout is 100us and bucket interval is 1us */
343 _timdev_setup(1E5, 1E3);
344 }
345
346 static int
347 timdev_setup_usec_multicore(void)
348 {
349 return using_services ?
350 /* Max timeout is 10,000us and bucket interval is 100us */
351 _timdev_setup(1E7, 1E5) :
352 /* Max timeout is 100us and bucket interval is 1us */
353 _timdev_setup(1E5, 1E3);
354 }
355
356 static int
357 timdev_setup_msec(void)
358 {
359 /* Max timeout is 3 minutes, and bucket interval is 100 ms */
360 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10);
361 }
362
363 static int
364 timdev_setup_sec(void)
365 {
366 /* Max timeout is 100sec and bucket interval is 1sec */
367 return _timdev_setup(1E11, 1E9);
368 }
369
370 static int
371 timdev_setup_sec_multicore(void)
372 {
373 /* Max timeout is 100sec and bucket interval is 1sec */
374 return _timdev_setup(1E11, 1E9);
375 }
376
377 static void
378 timdev_teardown(void)
379 {
380 rte_event_timer_adapter_stop(timdev);
381 rte_event_timer_adapter_free(timdev);
382
383 rte_mempool_free(eventdev_test_mempool);
384 }
385
386 static inline int
387 test_timer_state(void)
388 {
389 struct rte_event_timer *ev_tim;
390 struct rte_event ev;
391 const struct rte_event_timer tim = {
392 .ev.op = RTE_EVENT_OP_NEW,
393 .ev.queue_id = 0,
394 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
395 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
396 .ev.event_type = RTE_EVENT_TYPE_TIMER,
397 .state = RTE_EVENT_TIMER_NOT_ARMED,
398 };
399
400
401 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
402 *ev_tim = tim;
403 ev_tim->ev.event_ptr = ev_tim;
404 ev_tim->timeout_ticks = CALC_TICKS(120);
405
406 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
407 "Armed timer exceeding max_timeout.");
408 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
409 "Improper timer state set expected %d returned %d",
410 RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
411
412 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
413 ev_tim->timeout_ticks = CALC_TICKS(10);
414
415 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
416 "Failed to arm timer with proper timeout.");
417 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
418 "Improper timer state set expected %d returned %d",
419 RTE_EVENT_TIMER_ARMED, ev_tim->state);
420
421 if (!using_services)
422 rte_delay_us(20);
423 else
424 rte_delay_us(1000 + 200);
425 TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
426 "Armed timer failed to trigger.");
427
428 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
429 ev_tim->timeout_ticks = CALC_TICKS(90);
430 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
431 "Failed to arm timer with proper timeout.");
432 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
433 1, "Failed to cancel armed timer");
434 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
435 "Improper timer state set expected %d returned %d",
436 RTE_EVENT_TIMER_CANCELED, ev_tim->state);
437
438 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
439
440 return TEST_SUCCESS;
441 }
442
443 static inline int
444 _arm_timers(uint64_t timeout_tcks, uint64_t timers)
445 {
446 uint64_t i;
447 struct rte_event_timer *ev_tim;
448 const struct rte_event_timer tim = {
449 .ev.op = RTE_EVENT_OP_NEW,
450 .ev.queue_id = 0,
451 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
452 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
453 .ev.event_type = RTE_EVENT_TYPE_TIMER,
454 .state = RTE_EVENT_TIMER_NOT_ARMED,
455 .timeout_ticks = CALC_TICKS(timeout_tcks),
456 };
457
458 for (i = 0; i < timers; i++) {
459
460 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
461 (void **)&ev_tim),
462 "mempool alloc failed");
463 *ev_tim = tim;
464 ev_tim->ev.event_ptr = ev_tim;
465
466 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
467 1), 1, "Failed to arm timer %d",
468 rte_errno);
469 }
470
471 return TEST_SUCCESS;
472 }
473
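/* Dequeue expiry events until all armed timers (minus those canceled) have
 * fired, failing if the wait_sec budget is exceeded first.
 */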
474 static inline int
475 _wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
476 uint64_t cancel_count)
477 {
478 uint8_t valid_event;
479 uint64_t events = 0;
480 uint64_t wait_start, max_wait;
481 struct rte_event ev;
482
483 max_wait = rte_get_timer_hz() * wait_sec;
484 wait_start = rte_get_timer_cycles();
485 while (1) {
486 if (rte_get_timer_cycles() - wait_start > max_wait) {
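/* Wait budget exhausted: if triggered + canceled events don't add up
 * to the arm count, force a failure by asserting on the nonzero
 * max_wait value.
 */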
487 if (events + cancel_count != arm_count)
488 TEST_ASSERT_SUCCESS(max_wait,
489 "Max time limit for timers exceeded.");
490 break;
491 }
492
493 valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
494 if (!valid_event)
495 continue;
496
497 rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
498 events++;
499 }
500
501 return TEST_SUCCESS;
502 }
503
504 static inline int
505 test_timer_arm(void)
506 {
507 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
508 "Failed to arm timers");
509 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
510 "Timer triggered count doesn't match arm count");
511 return TEST_SUCCESS;
512 }
513
514 static int
515 _arm_wrapper(void *arg)
516 {
517 RTE_SET_USED(arg);
518
519 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
520 "Failed to arm timers");
521
522 return TEST_SUCCESS;
523 }
524
525 static inline int
526 test_timer_arm_multicore(void)
527 {
528
529 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
530 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);
531
532 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
533 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);
534
535 rte_eal_mp_wait_lcore();
536 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
537 "Timer triggered count doesn't match arm count");
538
539 return TEST_SUCCESS;
540 }
541
542 #define MAX_BURST 16
543 static inline int
544 _arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
545 {
546 uint64_t i;
547 int j;
548 struct rte_event_timer *ev_tim[MAX_BURST];
549 const struct rte_event_timer tim = {
550 .ev.op = RTE_EVENT_OP_NEW,
551 .ev.queue_id = 0,
552 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
553 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
554 .ev.event_type = RTE_EVENT_TYPE_TIMER,
555 .state = RTE_EVENT_TIMER_NOT_ARMED,
556 .timeout_ticks = CALC_TICKS(timeout_tcks),
557 };
558
559 for (i = 0; i < timers / MAX_BURST; i++) {
560 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
561 eventdev_test_mempool,
562 (void **)ev_tim, MAX_BURST),
563 "mempool alloc failed");
564
565 for (j = 0; j < MAX_BURST; j++) {
566 *ev_tim[j] = tim;
567 ev_tim[j]->ev.event_ptr = ev_tim[j];
568 }
569
570 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
571 ev_tim, tim.timeout_ticks, MAX_BURST),
572 MAX_BURST, "Failed to arm timer %d", rte_errno);
573 }
574
575 return TEST_SUCCESS;
576 }
577
578 static inline int
579 test_timer_arm_burst(void)
580 {
581 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
582 "Failed to arm timers");
583 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
584 "Timer triggered count doesn't match arm count");
585
586 return TEST_SUCCESS;
587 }
588
589 static int
590 _arm_wrapper_burst(void *arg)
591 {
592 RTE_SET_USED(arg);
593
594 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
595 "Failed to arm timers");
596
597 return TEST_SUCCESS;
598 }
599
600 static inline int
601 test_timer_arm_burst_multicore(void)
602 {
603 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
604 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);
605
606 rte_eal_mp_wait_lcore();
607 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
608 "Timer triggered count doesn't match arm count");
609
610 return TEST_SUCCESS;
611 }
612
613 static inline int
614 test_timer_cancel(void)
615 {
616 uint64_t i;
617 struct rte_event_timer *ev_tim;
618 const struct rte_event_timer tim = {
619 .ev.op = RTE_EVENT_OP_NEW,
620 .ev.queue_id = 0,
621 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
622 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
623 .ev.event_type = RTE_EVENT_TYPE_TIMER,
624 .state = RTE_EVENT_TIMER_NOT_ARMED,
625 .timeout_ticks = CALC_TICKS(20),
626 };
627
628 for (i = 0; i < MAX_TIMERS; i++) {
629 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
630 (void **)&ev_tim),
631 "mempool alloc failed");
632 *ev_tim = tim;
633 ev_tim->ev.event_ptr = ev_tim;
634
635 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
636 1), 1, "Failed to arm timer %d",
637 rte_errno);
638
639 rte_delay_us(100 + (i % 5000));
640
641 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
642 &ev_tim, 1), 1,
643 "Failed to cancel event timer %d", rte_errno);
644 rte_mempool_put(eventdev_test_mempool, ev_tim);
645 }
646
647
648 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
649 MAX_TIMERS),
650 "Timer triggered count doesn't match arm, cancel count");
651
652 return TEST_SUCCESS;
653 }
654
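/* Arm timers one at a time and pass each armed timer to the cancel thread
 * through timer_producer_ring.
 */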
655 static int
656 _cancel_producer(uint64_t timeout_tcks, uint64_t timers)
657 {
658 uint64_t i;
659 struct rte_event_timer *ev_tim;
660 const struct rte_event_timer tim = {
661 .ev.op = RTE_EVENT_OP_NEW,
662 .ev.queue_id = 0,
663 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
664 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
665 .ev.event_type = RTE_EVENT_TYPE_TIMER,
666 .state = RTE_EVENT_TIMER_NOT_ARMED,
667 .timeout_ticks = CALC_TICKS(timeout_tcks),
668 };
669
670 for (i = 0; i < timers; i++) {
671 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
672 (void **)&ev_tim),
673 "mempool alloc failed");
674
675 *ev_tim = tim;
676 ev_tim->ev.event_ptr = ev_tim;
677
678 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
679 1), 1, "Failed to arm timer %d",
680 rte_errno);
681
682 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
683 "Failed to arm event timer");
684
685 while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
686 ;
687 }
688
689 return TEST_SUCCESS;
690 }
691
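/* Burst variant of _cancel_producer(): arm MAX_BURST timers at a time and
 * enqueue them to timer_producer_ring in bulk for the cancel thread.
 */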
692 static int
693 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
694 {
695
696 uint64_t i;
697 int j, ret;
698 struct rte_event_timer *ev_tim[MAX_BURST];
699 const struct rte_event_timer tim = {
700 .ev.op = RTE_EVENT_OP_NEW,
701 .ev.queue_id = 0,
702 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
703 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
704 .ev.event_type = RTE_EVENT_TYPE_TIMER,
705 .state = RTE_EVENT_TIMER_NOT_ARMED,
706 .timeout_ticks = CALC_TICKS(timeout_tcks),
707 };
708 int arm_count = 0;
709
710 for (i = 0; i < timers / MAX_BURST; i++) {
711 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
712 eventdev_test_mempool,
713 (void **)ev_tim, MAX_BURST),
714 "mempool alloc failed");
715
716 for (j = 0; j < MAX_BURST; j++) {
717 *ev_tim[j] = tim;
718 ev_tim[j]->ev.event_ptr = ev_tim[j];
719 }
720
721 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
722 ev_tim, tim.timeout_ticks, MAX_BURST),
723 MAX_BURST, "Failed to arm timer %d", rte_errno);
724
725 for (j = 0; j < MAX_BURST; j++)
726 TEST_ASSERT_EQUAL(ev_tim[j]->state,
727 RTE_EVENT_TIMER_ARMED,
728 "Event timer not armed, state = %d",
729 ev_tim[j]->state);
730
731 ret = rte_ring_enqueue_bulk(timer_producer_ring,
732 (void **)ev_tim, MAX_BURST, NULL);
733 TEST_ASSERT_EQUAL(ret, MAX_BURST,
734 "Failed to enqueue event timers to ring");
735 arm_count += ret;
736 }
737
738 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
739 "Failed to arm expected number of event timers");
740
741 return TEST_SUCCESS;
742 }
743
744 static int
745 _cancel_producer_wrapper(void *args)
746 {
747 RTE_SET_USED(args);
748
749 return _cancel_producer(20, MAX_TIMERS);
750 }
751
752 static int
753 _cancel_producer_burst_wrapper(void *args)
754 {
755 RTE_SET_USED(args);
756
757 return _cancel_producer_burst(100, MAX_TIMERS);
758 }
759
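/* Consumer: keep dequeuing armed timers from timer_producer_ring and
 * canceling them until the producers signal arm_done and the ring is empty.
 */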
760 static int
761 _cancel_thread(void *args)
762 {
763 RTE_SET_USED(args);
764 struct rte_event_timer *ev_tim = NULL;
765 uint64_t cancel_count = 0;
766 uint16_t ret;
767
768 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
769 if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
770 continue;
771
772 ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
773 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
774 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
775 cancel_count++;
776 }
777
778 return TEST_SUCCESS;
779 }
780
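/* Burst variant of _cancel_thread(): dequeue and cancel timers in bursts,
 * verifying that each dequeued timer is still in the armed state.
 */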
781 static int
782 _cancel_burst_thread(void *args)
783 {
784 RTE_SET_USED(args);
785
786 int ret, i, n;
787 struct rte_event_timer *ev_tim[MAX_BURST];
788 uint64_t cancel_count = 0;
789 uint64_t dequeue_count = 0;
790
791 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
792 n = rte_ring_dequeue_burst(timer_producer_ring,
793 (void **)ev_tim, MAX_BURST, NULL);
794 if (!n)
795 continue;
796
797 dequeue_count += n;
798
799 for (i = 0; i < n; i++)
800 TEST_ASSERT_EQUAL(ev_tim[i]->state,
801 RTE_EVENT_TIMER_ARMED,
802 "Event timer not armed, state = %d",
803 ev_tim[i]->state);
804
805 ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
806 TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
807 "event timers");
808 rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
809 RTE_MIN(ret, MAX_BURST));
810
811 cancel_count += ret;
812 }
813
814 TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
815 "Failed to cancel expected number of timers: "
816 "expected = %d, cancel_count = %"PRIu64", "
817 "dequeue_count = %"PRIu64"\n", MAX_TIMERS,
818 cancel_count, dequeue_count);
819
820 return TEST_SUCCESS;
821 }
822
823 static inline int
824 test_timer_cancel_multicore(void)
825 {
826 arm_done = 0;
827 timer_producer_ring = rte_ring_create("timer_cancel_queue",
828 MAX_TIMERS * 2, rte_socket_id(), 0);
829 TEST_ASSERT_NOT_NULL(timer_producer_ring,
830 "Unable to reserve memory for ring");
831
832 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
833 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
834 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);
835
836 rte_eal_wait_lcore(test_lcore1);
837 rte_eal_wait_lcore(test_lcore2);
838 arm_done = 1;
839 rte_eal_wait_lcore(test_lcore3);
840 rte_ring_free(timer_producer_ring);
841
842 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
843 MAX_TIMERS * 2),
844 "Timer triggered count doesn't match arm, cancel count");
845
846 return TEST_SUCCESS;
847 }
848
849 static inline int
850 test_timer_cancel_burst_multicore(void)
851 {
852 arm_done = 0;
853 timer_producer_ring = rte_ring_create("timer_cancel_queue",
854 MAX_TIMERS * 2, rte_socket_id(), 0);
855 TEST_ASSERT_NOT_NULL(timer_producer_ring,
856 "Unable to reserve memory for ring");
857
858 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
859 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
860 test_lcore1);
861
862 rte_eal_wait_lcore(test_lcore1);
863 arm_done = 1;
864 rte_eal_wait_lcore(test_lcore2);
865 rte_ring_free(timer_producer_ring);
866
867 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
868 MAX_TIMERS),
869 "Timer triggered count doesn't match arm, cancel count");
870
871 return TEST_SUCCESS;
872 }
873
874 static inline int
875 test_timer_cancel_random(void)
876 {
877 uint64_t i;
878 uint64_t events_canceled = 0;
879 struct rte_event_timer *ev_tim;
880 const struct rte_event_timer tim = {
881 .ev.op = RTE_EVENT_OP_NEW,
882 .ev.queue_id = 0,
883 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
884 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
885 .ev.event_type = RTE_EVENT_TYPE_TIMER,
886 .state = RTE_EVENT_TIMER_NOT_ARMED,
887 .timeout_ticks = CALC_TICKS(20),
888 };
889
890 for (i = 0; i < MAX_TIMERS; i++) {
891
892 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
893 (void **)&ev_tim),
894 "mempool alloc failed");
895 *ev_tim = tim;
896 ev_tim->ev.event_ptr = ev_tim;
897
898 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
899 1), 1, "Failed to arm timer %d",
900 rte_errno);
901
902 if (rte_rand() & 1) {
903 rte_delay_us(100 + (i % 5000));
904 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
905 timdev,
906 &ev_tim, 1), 1,
907 "Failed to cancel event timer %d", rte_errno);
908 rte_mempool_put(eventdev_test_mempool, ev_tim);
909 events_canceled++;
910 }
911 }
912
913 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
914 events_canceled),
915 "Timer triggered count doesn't match arm, cancel count");
916
917 return TEST_SUCCESS;
918 }
919
920 /* Check that the adapter can be created correctly */
921 static int
922 adapter_create(void)
923 {
924 int adapter_id = 0;
925 struct rte_event_timer_adapter *adapter, *adapter2;
926
927 struct rte_event_timer_adapter_conf conf = {
928 .event_dev_id = evdev + 1, // invalid event dev id
929 .timer_adapter_id = adapter_id,
930 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
931 .timer_tick_ns = NSECPERSEC / 10,
932 .max_tmo_ns = 180 * NSECPERSEC,
933 .nb_timers = MAX_TIMERS,
934 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
935 };
936 uint32_t caps = 0;
937
938 /* Test invalid conf */
939 adapter = rte_event_timer_adapter_create(&conf);
940 TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
941 "event device id");
942 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
943 "invalid event device id");
944
945 /* Test valid conf */
946 conf.event_dev_id = evdev;
947 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
948 "failed to get adapter capabilities");
949 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
950 adapter = rte_event_timer_adapter_create_ext(&conf,
951 test_port_conf_cb,
952 NULL);
953 else
954 adapter = rte_event_timer_adapter_create(&conf);
955 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
956 "configuration");
957
958 /* Test existing id */
959 adapter2 = rte_event_timer_adapter_create(&conf);
960 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
961 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
962 "id");
963
964 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
965 "Failed to free adapter");
966
967 rte_mempool_free(eventdev_test_mempool);
968
969 return TEST_SUCCESS;
970 }
971
972
973 /* Test that adapter can be freed correctly. */
974 static int
975 adapter_free(void)
976 {
977 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
978 "Failed to stop adapter");
979
980 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
981 "Failed to free valid adapter");
982
983 /* Test free of already freed adapter */
984 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
985 "Freed adapter that was already freed");
986
987 /* Test free of null adapter */
988 timdev = NULL;
989 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
990 "Freed null adapter");
991
992 rte_mempool_free(eventdev_test_mempool);
993
994 return TEST_SUCCESS;
995 }
996
997 /* Test that adapter info can be retrieved and is correct. */
998 static int
999 adapter_get_info(void)
1000 {
1001 struct rte_event_timer_adapter_info info;
1002
1003 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
1004 "Failed to get adapter info");
1005
1006 if (using_services)
1007 TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
1008 "Expected port id = 1, got port id = %d",
1009 info.event_dev_port_id);
1010
1011 return TEST_SUCCESS;
1012 }
1013
1014 /* Test adapter lookup via adapter ID. */
1015 static int
1016 adapter_lookup(void)
1017 {
1018 struct rte_event_timer_adapter *adapter;
1019
1020 adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
1021 TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");
1022
1023 return TEST_SUCCESS;
1024 }
1025
1026 static int
1027 adapter_start(void)
1028 {
1029 TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC,
1030 NSECPERSEC / 10),
1031 "Failed to start adapter");
1032 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
1033 "Timer adapter started without call to stop.");
1034
1035 return TEST_SUCCESS;
1036 }
1037
1038 /* Test that adapter stops correctly. */
1039 static int
1040 adapter_stop(void)
1041 {
1042 struct rte_event_timer_adapter *l_adapter = NULL;
1043
1044 /* Test adapter stop */
1045 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
1046 "Failed to stop event adapter");
1047
1048 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
1049 "Erroneously stopped null event adapter");
1050
1051 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
1052 "Failed to free adapter");
1053
1054 rte_mempool_free(eventdev_test_mempool);
1055
1056 return TEST_SUCCESS;
1057 }
1058
1059 /* Test increment and reset of ev_enq_count stat */
1060 static int
1061 stat_inc_reset_ev_enq(void)
1062 {
1063 int ret, i, n;
1064 int num_evtims = MAX_TIMERS;
1065 struct rte_event_timer *evtims[num_evtims];
1066 struct rte_event evs[BATCH_SIZE];
1067 struct rte_event_timer_adapter_stats stats;
1068 const struct rte_event_timer init_tim = {
1069 .ev.op = RTE_EVENT_OP_NEW,
1070 .ev.queue_id = TEST_QUEUE_ID,
1071 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1072 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1073 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1074 .state = RTE_EVENT_TIMER_NOT_ARMED,
1075 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1076 };
1077
1078 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
1079 num_evtims);
1080 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
1081 ret);
1082
1083 for (i = 0; i < num_evtims; i++) {
1084 *evtims[i] = init_tim;
1085 evtims[i]->ev.event_ptr = evtims[i];
1086 }
1087
1088 ret = rte_event_timer_adapter_stats_get(timdev, &stats);
1089 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
1090 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
1091 "startup");
1092
1093 /* Test with the max value for the adapter */
1094 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
1095 TEST_ASSERT_EQUAL(ret, num_evtims,
1096 "Failed to arm all event timers: attempted = %d, "
1097 "succeeded = %d, rte_errno = %s",
1098 num_evtims, ret, rte_strerror(rte_errno));
1099
1100 rte_delay_ms(1000);
1101
1102 #define MAX_TRIES num_evtims
1103 int sum = 0;
1104 int tries = 0;
1105 bool done = false;
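/* Poll for the expiry events, bounding the number of dequeue attempts
 * by MAX_TRIES so the loop cannot spin forever.
 */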
1106 while (!done) {
1107 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
1108 RTE_DIM(evs), 10);
1109 if (sum >= num_evtims || ++tries >= MAX_TRIES)
1110 done = true;
1111
1112 rte_delay_ms(10);
1113 }
1114
1115 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
1116 "got %d", num_evtims, sum);
1117
1118 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
1119
1120 rte_delay_ms(100);
1121
1122 /* Make sure the eventdev is still empty */
1123 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
1124 10);
1125
1126 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
1127 "events from event device");
1128
1129 /* Check stats again */
1130 ret = rte_event_timer_adapter_stats_get(timdev, &stats);
1131 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
1132 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
1133 "Expected enqueue stat = %d; got %d", num_evtims,
1134 (int)stats.ev_enq_count);
1135
1136 /* Reset and check again */
1137 ret = rte_event_timer_adapter_stats_reset(timdev);
1138 TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");
1139
1140 ret = rte_event_timer_adapter_stats_get(timdev, &stats);
1141 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
1142 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
1143 "Expected enqueue stat = %d; got %d", 0,
1144 (int)stats.ev_enq_count);
1145
1146 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
1147 num_evtims);
1148
1149 return TEST_SUCCESS;
1150 }
1151
1152 /* Test various cases in arming timers */
1153 static int
1154 event_timer_arm(void)
1155 {
1156 uint16_t n;
1157 int ret;
1158 struct rte_event_timer_adapter *adapter = timdev;
1159 struct rte_event_timer *evtim = NULL;
1160 struct rte_event evs[BATCH_SIZE];
1161 const struct rte_event_timer init_tim = {
1162 .ev.op = RTE_EVENT_OP_NEW,
1163 .ev.queue_id = TEST_QUEUE_ID,
1164 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1165 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1166 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1167 .state = RTE_EVENT_TIMER_NOT_ARMED,
1168 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1169 };
1170
1171 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1172 if (evtim == NULL) {
1173 /* Failed to get an event timer object */
1174 return TEST_FAILED;
1175 }
1176
1177 /* Set up a timer */
1178 *evtim = init_tim;
1179 evtim->ev.event_ptr = evtim;
1180
1181 /* Test single timer arm succeeds */
1182 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1183 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1184 rte_strerror(rte_errno));
1185 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
1186 "in incorrect state");
1187
1188 /* Test arm of armed timer fails */
1189 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1190 TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
1191 "rte_event_timer_arm_burst: 0, got: %d", ret);
1192 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
1193 "after arming already armed timer");
1194
1195 /* Let timer expire */
1196 rte_delay_ms(1000);
1197
1198 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1199 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
1200 "events from event device");
1201
1202 rte_mempool_put(eventdev_test_mempool, evtim);
1203
1204 return TEST_SUCCESS;
1205 }
1206
1207 /* This test checks that repeated references to the same event timer in the
1208 * arm request work as expected; only the first one through should succeed.
1209 */
1210 static int
1211 event_timer_arm_double(void)
1212 {
1213 uint16_t n;
1214 int ret;
1215 struct rte_event_timer_adapter *adapter = timdev;
1216 struct rte_event_timer *evtim = NULL;
1217 struct rte_event evs[BATCH_SIZE];
1218 const struct rte_event_timer init_tim = {
1219 .ev.op = RTE_EVENT_OP_NEW,
1220 .ev.queue_id = TEST_QUEUE_ID,
1221 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1222 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1223 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1224 .state = RTE_EVENT_TIMER_NOT_ARMED,
1225 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1226 };
1227
1228 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1229 if (evtim == NULL) {
1230 /* Failed to get an event timer object */
1231 return TEST_FAILED;
1232 }
1233
1234 /* Set up a timer */
1235 *evtim = init_tim;
1236 evtim->ev.event_ptr = evtim;
1237
1238 struct rte_event_timer *evtim_arr[] = {evtim, evtim};
1239 ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
1240 TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
1241 "rte_event_timer_arm_burst");
1242 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
1243 "after double-arm");
1244
1245 /* Let timer expire */
1246 rte_delay_ms(600);
1247
1248 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1249 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
1250 "expected: 1, actual: %d", n);
1251
1252 rte_mempool_put(eventdev_test_mempool, evtim);
1253
1254 return TEST_SUCCESS;
1255 }
1256
1257 /* Test the timer expiry event is generated at the expected time. */
1258 static int
1259 event_timer_arm_expiry(void)
1260 {
1261 uint16_t n;
1262 int ret;
1263 struct rte_event_timer_adapter *adapter = timdev;
1264 struct rte_event_timer *evtim = NULL;
1265 struct rte_event_timer *evtim2 = NULL;
1266 struct rte_event evs[BATCH_SIZE];
1267 const struct rte_event_timer init_tim = {
1268 .ev.op = RTE_EVENT_OP_NEW,
1269 .ev.queue_id = TEST_QUEUE_ID,
1270 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1271 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1272 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1273 .state = RTE_EVENT_TIMER_NOT_ARMED,
1274 };
1275
1276 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1277 if (evtim == NULL) {
1278 /* Failed to get an event timer object */
1279 return TEST_FAILED;
1280 }
1281
1282 /* Set up an event timer */
1283 *evtim = init_tim;
1284 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 secs
1285 evtim->ev.event_ptr = evtim;
1286
1287 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1288 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
1289 rte_strerror(rte_errno));
1290 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
1291 "timer in incorrect state");
1292
1293 rte_delay_ms(2999);
1294
1295 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1296 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");
1297
1298 /* Delay 100 ms to account for the adapter tick window - should let us
1299 * dequeue one event
1300 */
1301 rte_delay_ms(100);
1302
1303 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1304 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
1305 "expiry events", n);
1306 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
1307 "Dequeued unexpected type of event");
1308
1309 /* Check that we recover the original event timer and then free it */
1310 evtim2 = evs[0].event_ptr;
1311 TEST_ASSERT_EQUAL(evtim, evtim2,
1312 "Failed to recover pointer to original event timer");
1313 rte_mempool_put(eventdev_test_mempool, evtim2);
1314
1315 return TEST_SUCCESS;
1316 }
1317
1318 /* Check that rearming a timer works as expected. */
1319 static int
1320 event_timer_arm_rearm(void)
1321 {
1322 uint16_t n;
1323 int ret;
1324 struct rte_event_timer *evtim = NULL;
1325 struct rte_event_timer *evtim2 = NULL;
1326 struct rte_event evs[BATCH_SIZE];
1327 const struct rte_event_timer init_tim = {
1328 .ev.op = RTE_EVENT_OP_NEW,
1329 .ev.queue_id = TEST_QUEUE_ID,
1330 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1331 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1332 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1333 .state = RTE_EVENT_TIMER_NOT_ARMED,
1334 };
1335
1336 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1337 if (evtim == NULL) {
1338 /* Failed to get an event timer object */
1339 return TEST_FAILED;
1340 }
1341
1342 /* Set up a timer */
1343 *evtim = init_tim;
1344 evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec
1345 evtim->ev.event_ptr = evtim;
1346
1347 /* Arm it */
1348 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1349 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1350 rte_strerror(rte_errno));
1351
1352 /* Add 100ms to account for the adapter tick window */
1353 rte_delay_ms(100 + 100);
1354
1355 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1356 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
1357 "events from event device");
1358
1359 /* Recover the timer through the event that was dequeued. */
1360 evtim2 = evs[0].event_ptr;
1361 TEST_ASSERT_EQUAL(evtim, evtim2,
1362 "Failed to recover pointer to original event timer");
1363
1364 /* Need to reset state in case implementation can't do it */
1365 evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;
1366
1367 /* Rearm it */
1368 ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
1369 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1370 rte_strerror(rte_errno));
1371
1372 /* Add 100ms to account for the adapter tick window */
1373 rte_delay_ms(100 + 100);
1374
1375 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1376 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
1377 "events from event device");
1378
1379 /* Free it */
1380 evtim2 = evs[0].event_ptr;
1381 TEST_ASSERT_EQUAL(evtim, evtim2,
1382 "Failed to recover pointer to original event timer");
1383 rte_mempool_put(eventdev_test_mempool, evtim2);
1384
1385 return TEST_SUCCESS;
1386 }
1387
1388 /* Check that the adapter handles the max specified number of timers as
1389 * expected.
1390 */
1391 static int
1392 event_timer_arm_max(void)
1393 {
1394 int ret, i, n;
1395 int num_evtims = MAX_TIMERS;
1396 struct rte_event_timer *evtims[num_evtims];
1397 struct rte_event evs[BATCH_SIZE];
1398 const struct rte_event_timer init_tim = {
1399 .ev.op = RTE_EVENT_OP_NEW,
1400 .ev.queue_id = TEST_QUEUE_ID,
1401 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1402 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1403 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1404 .state = RTE_EVENT_TIMER_NOT_ARMED,
1405 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1406 };
1407
1408 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
1409 num_evtims);
1410 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
1411 ret);
1412
1413 for (i = 0; i < num_evtims; i++) {
1414 *evtims[i] = init_tim;
1415 evtims[i]->ev.event_ptr = evtims[i];
1416 }
1417
1418 /* Test with the max value for the adapter */
1419 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
1420 TEST_ASSERT_EQUAL(ret, num_evtims,
1421 "Failed to arm all event timers: attempted = %d, "
1422 "succeeded = %d, rte_errno = %s",
1423 num_evtims, ret, rte_strerror(rte_errno));
1424
1425 rte_delay_ms(1000);
1426
1427 #define MAX_TRIES num_evtims
1428 int sum = 0;
1429 int tries = 0;
1430 bool done = false;
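/* Poll for the expiry events, bounding the number of dequeue attempts
 * by MAX_TRIES so the loop cannot spin forever.
 */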
1431 while (!done) {
1432 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
1433 RTE_DIM(evs), 10);
1434 if (sum >= num_evtims || ++tries >= MAX_TRIES)
1435 done = true;
1436
1437 rte_delay_ms(10);
1438 }
1439
1440 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
1441 "got %d", num_evtims, sum);
1442
1443 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
1444
1445 rte_delay_ms(100);
1446
1447 /* Make sure the eventdev is still empty */
1448 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
1449 10);
1450
1451 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
1452 "events from event device");
1453
1454 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
1455 num_evtims);
1456
1457 return TEST_SUCCESS;
1458 }
1459
1460 /* Check that creating an event timer with incorrect event sched type fails. */
1461 static int
1462 event_timer_arm_invalid_sched_type(void)
1463 {
1464 int ret;
1465 struct rte_event_timer *evtim = NULL;
1466 const struct rte_event_timer init_tim = {
1467 .ev.op = RTE_EVENT_OP_NEW,
1468 .ev.queue_id = TEST_QUEUE_ID,
1469 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1470 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1471 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1472 .state = RTE_EVENT_TIMER_NOT_ARMED,
1473 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1474 };
1475
1476 if (!using_services)
1477 return -ENOTSUP;
1478
1479 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1480 if (evtim == NULL) {
1481 /* Failed to get an event timer object */
1482 return TEST_FAILED;
1483 }
1484
1485 *evtim = init_tim;
1486 evtim->ev.event_ptr = evtim;
1487 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type
1488
1489 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1490 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
1491 "sched type, but didn't");
1492 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
1493 " arm fail with invalid sched type");
1494
1495 rte_mempool_put(eventdev_test_mempool, evtim);
1496
1497 return TEST_SUCCESS;
1498 }
1499
1500 /* Check that creating an event timer with a timeout value that is too small or
1501 * too big fails.
1502 */
1503 static int
1504 event_timer_arm_invalid_timeout(void)
1505 {
1506 int ret;
1507 struct rte_event_timer *evtim = NULL;
1508 const struct rte_event_timer init_tim = {
1509 .ev.op = RTE_EVENT_OP_NEW,
1510 .ev.queue_id = TEST_QUEUE_ID,
1511 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1512 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1513 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1514 .state = RTE_EVENT_TIMER_NOT_ARMED,
1515 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1516 };
1517
1518 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1519 if (evtim == NULL) {
1520 /* Failed to get an event timer object */
1521 return TEST_FAILED;
1522 }
1523
1524 *evtim = init_tim;
1525 evtim->ev.event_ptr = evtim;
1526 evtim->timeout_ticks = 0; // timeout too small
1527
1528 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1529 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
1530 "timeout, but didn't");
1531 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
1532 " arm fail with invalid timeout");
1533 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
1534 "Unexpected event timer state");
1535
1536 *evtim = init_tim;
1537 evtim->ev.event_ptr = evtim;
1538 evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big
1539
1540 ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
1541 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
1542 "timeout, but didn't");
1543 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
1544 " arm fail with invalid timeout");
1545 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
1546 "Unexpected event timer state");
1547
1548 rte_mempool_put(eventdev_test_mempool, evtim);
1549
1550 return TEST_SUCCESS;
1551 }
1552
1553 static int
1554 event_timer_cancel(void)
1555 {
1556 uint16_t n;
1557 int ret;
1558 struct rte_event_timer_adapter *adapter = timdev;
1559 struct rte_event_timer *evtim = NULL;
1560 struct rte_event evs[BATCH_SIZE];
1561 const struct rte_event_timer init_tim = {
1562 .ev.op = RTE_EVENT_OP_NEW,
1563 .ev.queue_id = TEST_QUEUE_ID,
1564 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1565 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1566 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1567 .state = RTE_EVENT_TIMER_NOT_ARMED,
1568 };
1569
1570 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1571 if (evtim == NULL) {
1572 /* Failed to get an event timer object */
1573 return TEST_FAILED;
1574 }
1575
1576 /* Check that cancelling an uninited timer fails */
1577 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
1578 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
1579 "uninited timer");
1580 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
1581 "cancelling uninited timer");
1582
1583 /* Set up a timer */
1584 *evtim = init_tim;
1585 evtim->ev.event_ptr = evtim;
1586 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec
1587
1588 /* Check that cancelling an inited but unarmed timer fails */
1589 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
1590 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
1591 "unarmed timer");
1592 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
1593 "cancelling unarmed timer");
1594
1595 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1596 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1597 rte_strerror(rte_errno));
1598 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
1599 "evtim in incorrect state");
1600
1601 /* Delay 1 sec */
1602 rte_delay_ms(1000);
1603
1604 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
1605 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
1606 rte_strerror(rte_errno));
1607 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
1608 "evtim in incorrect state");
1609
1610 rte_delay_ms(3000);
1611
1612 /* Make sure that no expiry event was generated */
1613 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1614 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
1615
1616 rte_mempool_put(eventdev_test_mempool, evtim);
1617
1618 return TEST_SUCCESS;
1619 }
1620
1621 static int
1622 event_timer_cancel_double(void)
1623 {
1624 uint16_t n;
1625 int ret;
1626 struct rte_event_timer_adapter *adapter = timdev;
1627 struct rte_event_timer *evtim = NULL;
1628 struct rte_event evs[BATCH_SIZE];
1629 const struct rte_event_timer init_tim = {
1630 .ev.op = RTE_EVENT_OP_NEW,
1631 .ev.queue_id = TEST_QUEUE_ID,
1632 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
1633 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1634 .ev.event_type = RTE_EVENT_TYPE_TIMER,
1635 .state = RTE_EVENT_TIMER_NOT_ARMED,
1636 .timeout_ticks = CALC_TICKS(5), // expire in .5 sec
1637 };
1638
1639 rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
1640 if (evtim == NULL) {
1641 /* Failed to get an event timer object */
1642 return TEST_FAILED;
1643 }
1644
1645 /* Set up a timer */
1646 *evtim = init_tim;
1647 evtim->ev.event_ptr = evtim;
1648 evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec
1649
1650 ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
1651 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
1652 rte_strerror(rte_errno));
1653 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
1654 "timer in unexpected state");
1655
1656 /* Now, test that referencing the same timer twice in the same call
1657 * fails
1658 */
1659 struct rte_event_timer *evtim_arr[] = {evtim, evtim};
1660 ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
1661 RTE_DIM(evtim_arr));
1662
1663 /* Two requests to cancel same timer, only one should succeed */
1664 TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
1665 "twice");
1666
1667 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
1668 "after double-cancel: rte_errno = %d", rte_errno);
1669
1670 rte_delay_ms(3000);
1671
1672 /* Still make sure that no expiry event was generated */
1673 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
1674 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
1675
1676 rte_mempool_put(eventdev_test_mempool, evtim);
1677
1678 return TEST_SUCCESS;
1679 }
1680
1681 /* Check that event timer adapter tick resolution works as expected by testing
1682 * the number of adapter ticks that occur within a particular time interval.
1683 */
1684 static int
1685 adapter_tick_resolution(void)
1686 {
1687 struct rte_event_timer_adapter_stats stats;
1688 uint64_t adapter_tick_count;
1689
1690 /* Only run this test in the software driver case */
1691 if (!using_services)
1692 return -ENOTSUP;
1693
1694 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
1695 "Failed to reset stats");
1696
1697 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
1698 &stats), "Failed to get adapter stats");
1699 TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
1700 "not zeroed out");
1701
1702 /* Delay 1 second; should let at least 10 ticks occur with the default
1703 * adapter configuration used by this test.
1704 */
1705 rte_delay_ms(1000);
1706
1707 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
1708 &stats), "Failed to get adapter stats");
1709
1710 adapter_tick_count = stats.adapter_tick_count;
1711 TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
1712 "Expected 10-12 adapter ticks, got %"PRIu64"\n",
1713 adapter_tick_count);
1714
1715 return TEST_SUCCESS;
1716 }
1717
1718 static int
1719 adapter_create_max(void)
1720 {
1721 int i;
1722 uint32_t svc_start_count, svc_end_count;
1723 struct rte_event_timer_adapter *adapters[
1724 RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];
1725
1726 struct rte_event_timer_adapter_conf conf = {
1727 .event_dev_id = evdev,
1728 // timer_adapter_id set in loop
1729 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
1730 .timer_tick_ns = NSECPERSEC / 10,
1731 .max_tmo_ns = 180 * NSECPERSEC,
1732 .nb_timers = MAX_TIMERS,
1733 .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
1734 };
1735
1736 if (!using_services)
1737 return -ENOTSUP;
1738
1739 svc_start_count = rte_service_get_count();
1740
1741 /* This test expects that there are sufficient service IDs available
1742 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
1743 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
1744 * (the SW event device, for example).
1745 */
1746 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
1747 conf.timer_adapter_id = i;
1748 adapters[i] = rte_event_timer_adapter_create_ext(&conf,
1749 test_port_conf_cb, NULL);
1750 TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
1751 "%d", i);
1752 }
1753
1754 conf.timer_adapter_id = i;
1755 adapters[i] = rte_event_timer_adapter_create(&conf);
1756 TEST_ASSERT_NULL(adapters[i], "Created too many adapters");
1757
1758 /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
1759 * have been created
1760 */
1761 svc_end_count = rte_service_get_count();
1762 TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
1763 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
1764 "Failed to create expected number of services");
1765
1766 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
1767 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
1768 "Failed to free adapter %d", i);
1769
1770 /* Check that service count is back to where it was at start */
1771 svc_end_count = rte_service_get_count();
1772 TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
1773 "correct number of services");
1774
1775 return TEST_SUCCESS;
1776 }
1777
1778 static struct unit_test_suite event_timer_adptr_functional_testsuite = {
1779 .suite_name = "event timer functional test suite",
1780 .setup = testsuite_setup,
1781 .teardown = testsuite_teardown,
1782 .unit_test_cases = {
1783 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1784 test_timer_state),
1785 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1786 test_timer_arm),
1787 TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
1788 test_timer_arm_burst),
1789 TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
1790 test_timer_cancel),
1791 TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
1792 test_timer_cancel_random),
1793 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
1794 test_timer_arm_multicore),
1795 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
1796 test_timer_arm_burst_multicore),
1797 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
1798 test_timer_cancel_multicore),
1799 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
1800 test_timer_cancel_burst_multicore),
1801 TEST_CASE(adapter_create),
1802 TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
1803 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1804 adapter_get_info),
1805 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1806 adapter_lookup),
1807 TEST_CASE_ST(NULL, timdev_teardown,
1808 adapter_start),
1809 TEST_CASE_ST(timdev_setup_msec, NULL,
1810 adapter_stop),
1811 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1812 stat_inc_reset_ev_enq),
1813 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1814 event_timer_arm),
1815 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1816 event_timer_arm_double),
1817 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1818 event_timer_arm_expiry),
1819 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1820 event_timer_arm_rearm),
1821 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1822 event_timer_arm_max),
1823 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1824 event_timer_arm_invalid_sched_type),
1825 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1826 event_timer_arm_invalid_timeout),
1827 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1828 event_timer_cancel),
1829 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1830 event_timer_cancel_double),
1831 TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
1832 adapter_tick_resolution),
1833 TEST_CASE(adapter_create_max),
1834 TEST_CASES_END() /**< NULL terminate unit test array */
1835 }
1836 };
1837
1838 static int
1839 test_event_timer_adapter_func(void)
1840 {
1841 return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
1842 }
1843
1844 REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
1845