1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 * Copyright(c) 2017-2018 Intel Corporation. 4 */ 5 6 #include <rte_atomic.h> 7 #include <rte_common.h> 8 #include <rte_cycles.h> 9 #include <rte_debug.h> 10 #include <rte_eal.h> 11 #include <rte_ethdev.h> 12 #include <rte_eventdev.h> 13 #include <rte_event_timer_adapter.h> 14 #include <rte_mempool.h> 15 #include <rte_launch.h> 16 #include <rte_lcore.h> 17 #include <rte_per_lcore.h> 18 #include <rte_random.h> 19 #include <rte_bus_vdev.h> 20 #include <rte_service.h> 21 #include <stdbool.h> 22 23 #include "test.h" 24 25 /* 4K timers corresponds to sw evdev max inflight events */ 26 #define MAX_TIMERS (4 * 1024) 27 #define BKT_TCK_NSEC 28 29 #define NSECPERSEC 1E9 30 #define BATCH_SIZE 16 31 /* Both the app lcore and adapter ports are linked to this queue */ 32 #define TEST_QUEUE_ID 0 33 /* Port the application dequeues from */ 34 #define TEST_PORT_ID 0 35 #define TEST_ADAPTER_ID 0 36 37 /* Handle log statements in same manner as test macros */ 38 #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__) 39 40 static int evdev; 41 static struct rte_event_timer_adapter *timdev; 42 static struct rte_mempool *eventdev_test_mempool; 43 static struct rte_ring *timer_producer_ring; 44 static uint64_t global_bkt_tck_ns; 45 static volatile uint8_t arm_done; 46 47 static bool using_services; 48 static uint32_t test_lcore1; 49 static uint32_t test_lcore2; 50 static uint32_t test_lcore3; 51 static uint32_t sw_evdev_slcore; 52 static uint32_t sw_adptr_slcore; 53 54 static inline void 55 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, 56 struct rte_event_dev_info *info) 57 { 58 memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); 59 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; 60 dev_conf->nb_event_ports = 1; 61 dev_conf->nb_event_queues = 1; 62 dev_conf->nb_event_queue_flows = info->max_event_queue_flows; 63 dev_conf->nb_event_port_dequeue_depth = 64 info->max_event_port_dequeue_depth; 65 dev_conf->nb_event_port_enqueue_depth = 66 info->max_event_port_enqueue_depth; 67 dev_conf->nb_event_port_enqueue_depth = 68 info->max_event_port_enqueue_depth; 69 dev_conf->nb_events_limit = 70 info->max_num_events; 71 } 72 73 static inline int 74 eventdev_setup(void) 75 { 76 int ret; 77 struct rte_event_dev_config dev_conf; 78 struct rte_event_dev_info info; 79 uint32_t service_id; 80 81 ret = rte_event_dev_info_get(evdev, &info); 82 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info"); 83 TEST_ASSERT(info.max_num_events >= (int32_t)MAX_TIMERS, 84 "ERROR max_num_events=%d < max_events=%d", 85 info.max_num_events, MAX_TIMERS); 86 87 devconf_set_default_sane_values(&dev_conf, &info); 88 ret = rte_event_dev_configure(evdev, &dev_conf); 89 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev"); 90 91 ret = rte_event_queue_setup(evdev, 0, NULL); 92 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0); 93 94 /* Configure event port */ 95 ret = rte_event_port_setup(evdev, 0, NULL); 96 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0); 97 ret = rte_event_port_link(evdev, 0, NULL, NULL, 0); 98 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0); 99 100 /* If this is a software event device, map and start its service */ 101 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 102 TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore), 103 "Failed to add service core"); 104 TEST_ASSERT_SUCCESS(rte_service_lcore_start( 105 sw_evdev_slcore), 106 "Failed to start service core"); 107 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( 108 service_id, sw_evdev_slcore, 1), 109 "Failed to map evdev service"); 110 TEST_ASSERT_SUCCESS(rte_service_runstate_set( 111 service_id, 1), 112 "Failed to start evdev service"); 113 } 114 115 ret = rte_event_dev_start(evdev); 116 TEST_ASSERT_SUCCESS(ret, "Failed to start device"); 117 118 return TEST_SUCCESS; 119 } 120 121 static int 122 testsuite_setup(void) 123 { 124 /* Some of the multithreaded tests require 3 other lcores to run */ 125 unsigned int required_lcore_count = 4; 126 uint32_t service_id; 127 128 /* To make it easier to map services later if needed, just reset 129 * service core state. 130 */ 131 (void) rte_service_lcore_reset_all(); 132 133 if (!rte_event_dev_count()) { 134 /* If there is no hardware eventdev, or no software vdev was 135 * specified on the command line, create an instance of 136 * event_sw. 137 */ 138 LOG_DBG("Failed to find a valid event device... testing with" 139 " event_sw device\n"); 140 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), 141 "Error creating eventdev"); 142 evdev = rte_event_dev_get_dev_id("event_sw0"); 143 } 144 145 if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { 146 /* A software event device will use a software event timer 147 * adapter as well. 2 more cores required to convert to 148 * service cores. 149 */ 150 required_lcore_count += 2; 151 using_services = true; 152 } 153 154 if (rte_lcore_count() < required_lcore_count) { 155 printf("%d lcores needed to run tests", required_lcore_count); 156 return TEST_FAILED; 157 } 158 159 /* Assign lcores for various tasks */ 160 test_lcore1 = rte_get_next_lcore(-1, 1, 0); 161 test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0); 162 test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0); 163 if (using_services) { 164 sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0); 165 sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0); 166 } 167 168 return eventdev_setup(); 169 } 170 171 static void 172 testsuite_teardown(void) 173 { 174 rte_event_dev_stop(evdev); 175 rte_event_dev_close(evdev); 176 } 177 178 static int 179 setup_adapter_service(struct rte_event_timer_adapter *adptr) 180 { 181 uint32_t adapter_service_id; 182 int ret; 183 184 /* retrieve service ids */ 185 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr, 186 &adapter_service_id), "Failed to get event timer " 187 "adapter service id"); 188 /* add a service core and start it */ 189 ret = rte_service_lcore_add(sw_adptr_slcore); 190 TEST_ASSERT(ret == 0 || ret == -EALREADY, 191 "Failed to add service core"); 192 ret = rte_service_lcore_start(sw_adptr_slcore); 193 TEST_ASSERT(ret == 0 || ret == -EALREADY, 194 "Failed to start service core"); 195 196 /* map services to it */ 197 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id, 198 sw_adptr_slcore, 1), 199 "Failed to map adapter service"); 200 201 /* set services to running */ 202 TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1), 203 "Failed to start event timer adapter service"); 204 205 return TEST_SUCCESS; 206 } 207 208 static int 209 test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, 210 void *conf_arg) 211 { 212 struct rte_event_dev_config dev_conf; 213 struct rte_event_dev_info info; 214 struct rte_event_port_conf *port_conf, def_port_conf = {0}; 215 uint32_t started; 216 static int port_allocated; 217 static uint8_t port_id; 218 int ret; 219 220 if (port_allocated) { 221 *event_port_id = port_id; 222 return 0; 223 } 224 225 RTE_SET_USED(id); 226 227 ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED, 228 &started); 229 if (ret < 0) 230 return ret; 231 232 if (started) 233 rte_event_dev_stop(event_dev_id); 234 235 ret = rte_event_dev_info_get(evdev, &info); 236 if (ret < 0) 237 return ret; 238 239 devconf_set_default_sane_values(&dev_conf, &info); 240 241 port_id = dev_conf.nb_event_ports; 242 dev_conf.nb_event_ports++; 243 244 ret = rte_event_dev_configure(event_dev_id, &dev_conf); 245 if (ret < 0) { 246 if (started) 247 rte_event_dev_start(event_dev_id); 248 return ret; 249 } 250 251 if (conf_arg != NULL) 252 port_conf = conf_arg; 253 else { 254 port_conf = &def_port_conf; 255 ret = rte_event_port_default_conf_get(event_dev_id, port_id, 256 port_conf); 257 if (ret < 0) 258 return ret; 259 } 260 261 ret = rte_event_port_setup(event_dev_id, port_id, port_conf); 262 if (ret < 0) 263 return ret; 264 265 *event_port_id = port_id; 266 267 if (started) 268 rte_event_dev_start(event_dev_id); 269 270 /* Reuse this port number next time this is called */ 271 port_allocated = 1; 272 273 return 0; 274 } 275 276 static int 277 _timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns) 278 { 279 struct rte_event_timer_adapter_conf config = { 280 .event_dev_id = evdev, 281 .timer_adapter_id = TEST_ADAPTER_ID, 282 .timer_tick_ns = bkt_tck_ns, 283 .max_tmo_ns = max_tmo_ns, 284 .nb_timers = MAX_TIMERS * 10, 285 }; 286 uint32_t caps = 0; 287 const char *pool_name = "timdev_test_pool"; 288 289 global_bkt_tck_ns = bkt_tck_ns; 290 291 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 292 "failed to get adapter capabilities"); 293 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { 294 timdev = rte_event_timer_adapter_create_ext(&config, 295 test_port_conf_cb, 296 NULL); 297 setup_adapter_service(timdev); 298 using_services = true; 299 } else 300 timdev = rte_event_timer_adapter_create(&config); 301 302 TEST_ASSERT_NOT_NULL(timdev, 303 "failed to create event timer ring"); 304 305 TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0, 306 "failed to Start event timer adapter"); 307 308 /* Create event timer mempool */ 309 eventdev_test_mempool = rte_mempool_create(pool_name, 310 MAX_TIMERS * 2, 311 sizeof(struct rte_event_timer), /* element size*/ 312 0, /* cache size*/ 313 0, NULL, NULL, NULL, NULL, 314 rte_socket_id(), 0); 315 if (!eventdev_test_mempool) { 316 printf("ERROR creating mempool\n"); 317 return TEST_FAILED; 318 } 319 320 return TEST_SUCCESS; 321 } 322 323 static int 324 timdev_setup_usec(void) 325 { 326 return using_services ? 327 /* Max timeout is 10,000us and bucket interval is 100us */ 328 _timdev_setup(1E7, 1E5) : 329 /* Max timeout is 100us and bucket interval is 1us */ 330 _timdev_setup(1E5, 1E3); 331 } 332 333 static int 334 timdev_setup_usec_multicore(void) 335 { 336 return using_services ? 337 /* Max timeout is 10,000us and bucket interval is 100us */ 338 _timdev_setup(1E7, 1E5) : 339 /* Max timeout is 100us and bucket interval is 1us */ 340 _timdev_setup(1E5, 1E3); 341 } 342 343 static int 344 timdev_setup_msec(void) 345 { 346 /* Max timeout is 2 mins, and bucket interval is 100 ms */ 347 return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10); 348 } 349 350 static int 351 timdev_setup_sec(void) 352 { 353 /* Max timeout is 100sec and bucket interval is 1sec */ 354 return _timdev_setup(1E11, 1E9); 355 } 356 357 static int 358 timdev_setup_sec_multicore(void) 359 { 360 /* Max timeout is 100sec and bucket interval is 1sec */ 361 return _timdev_setup(1E11, 1E9); 362 } 363 364 static void 365 timdev_teardown(void) 366 { 367 rte_event_timer_adapter_stop(timdev); 368 rte_event_timer_adapter_free(timdev); 369 370 rte_mempool_free(eventdev_test_mempool); 371 } 372 373 static inline int 374 test_timer_state(void) 375 { 376 struct rte_event_timer *ev_tim; 377 struct rte_event ev; 378 const struct rte_event_timer tim = { 379 .ev.op = RTE_EVENT_OP_NEW, 380 .ev.queue_id = 0, 381 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 382 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 383 .ev.event_type = RTE_EVENT_TYPE_TIMER, 384 .state = RTE_EVENT_TIMER_NOT_ARMED, 385 }; 386 387 rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim); 388 *ev_tim = tim; 389 ev_tim->ev.event_ptr = ev_tim; 390 ev_tim->timeout_ticks = 120; 391 392 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0, 393 "Armed timer exceeding max_timeout."); 394 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 395 "Improper timer state set expected %d returned %d", 396 RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state); 397 398 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 399 ev_tim->timeout_ticks = 10; 400 401 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 402 "Failed to arm timer with proper timeout."); 403 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, 404 "Improper timer state set expected %d returned %d", 405 RTE_EVENT_TIMER_ARMED, ev_tim->state); 406 407 if (!using_services) 408 rte_delay_us(20); 409 else 410 rte_delay_us(1000 + 200); 411 TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, 412 "Armed timer failed to trigger."); 413 414 ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; 415 ev_tim->timeout_ticks = 90; 416 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, 417 "Failed to arm timer with proper timeout."); 418 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), 419 1, "Failed to cancel armed timer"); 420 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED, 421 "Improper timer state set expected %d returned %d", 422 RTE_EVENT_TIMER_CANCELED, ev_tim->state); 423 424 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 425 426 return TEST_SUCCESS; 427 } 428 429 static inline int 430 _arm_timers(uint64_t timeout_tcks, uint64_t timers) 431 { 432 uint64_t i; 433 struct rte_event_timer *ev_tim; 434 const struct rte_event_timer tim = { 435 .ev.op = RTE_EVENT_OP_NEW, 436 .ev.queue_id = 0, 437 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 438 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 439 .ev.event_type = RTE_EVENT_TYPE_TIMER, 440 .state = RTE_EVENT_TIMER_NOT_ARMED, 441 .timeout_ticks = timeout_tcks, 442 }; 443 444 for (i = 0; i < timers; i++) { 445 446 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 447 (void **)&ev_tim), 448 "mempool alloc failed"); 449 *ev_tim = tim; 450 ev_tim->ev.event_ptr = ev_tim; 451 452 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 453 1), 1, "Failed to arm timer %d", 454 rte_errno); 455 } 456 457 return TEST_SUCCESS; 458 } 459 460 static inline int 461 _wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count, 462 uint64_t cancel_count) 463 { 464 uint8_t valid_event; 465 uint64_t events = 0; 466 uint64_t wait_start, max_wait; 467 struct rte_event ev; 468 469 max_wait = rte_get_timer_hz() * wait_sec; 470 wait_start = rte_get_timer_cycles(); 471 while (1) { 472 if (rte_get_timer_cycles() - wait_start > max_wait) { 473 if (events + cancel_count != arm_count) 474 TEST_ASSERT_SUCCESS(max_wait, 475 "Max time limit for timers exceeded."); 476 break; 477 } 478 479 valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); 480 if (!valid_event) 481 continue; 482 483 rte_mempool_put(eventdev_test_mempool, ev.event_ptr); 484 events++; 485 } 486 487 return TEST_SUCCESS; 488 } 489 490 static inline int 491 test_timer_arm(void) 492 { 493 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 494 "Failed to arm timers"); 495 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 496 "Timer triggered count doesn't match arm count"); 497 return TEST_SUCCESS; 498 } 499 500 static int 501 _arm_wrapper(void *arg) 502 { 503 RTE_SET_USED(arg); 504 505 TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), 506 "Failed to arm timers"); 507 508 return TEST_SUCCESS; 509 } 510 511 static inline int 512 test_timer_arm_multicore(void) 513 { 514 515 uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0); 516 uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0); 517 518 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1); 519 rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2); 520 521 rte_eal_mp_wait_lcore(); 522 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 523 "Timer triggered count doesn't match arm count"); 524 525 return TEST_SUCCESS; 526 } 527 528 #define MAX_BURST 16 529 static inline int 530 _arm_timers_burst(uint64_t timeout_tcks, uint64_t timers) 531 { 532 uint64_t i; 533 int j; 534 struct rte_event_timer *ev_tim[MAX_BURST]; 535 const struct rte_event_timer tim = { 536 .ev.op = RTE_EVENT_OP_NEW, 537 .ev.queue_id = 0, 538 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 539 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 540 .ev.event_type = RTE_EVENT_TYPE_TIMER, 541 .state = RTE_EVENT_TIMER_NOT_ARMED, 542 .timeout_ticks = timeout_tcks, 543 }; 544 545 for (i = 0; i < timers / MAX_BURST; i++) { 546 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 547 eventdev_test_mempool, 548 (void **)ev_tim, MAX_BURST), 549 "mempool alloc failed"); 550 551 for (j = 0; j < MAX_BURST; j++) { 552 *ev_tim[j] = tim; 553 ev_tim[j]->ev.event_ptr = ev_tim[j]; 554 } 555 556 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 557 ev_tim, tim.timeout_ticks, MAX_BURST), 558 MAX_BURST, "Failed to arm timer %d", rte_errno); 559 } 560 561 return TEST_SUCCESS; 562 } 563 564 static inline int 565 test_timer_arm_burst(void) 566 { 567 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 568 "Failed to arm timers"); 569 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), 570 "Timer triggered count doesn't match arm count"); 571 572 return TEST_SUCCESS; 573 } 574 575 static int 576 _arm_wrapper_burst(void *arg) 577 { 578 RTE_SET_USED(arg); 579 580 TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), 581 "Failed to arm timers"); 582 583 return TEST_SUCCESS; 584 } 585 586 static inline int 587 test_timer_arm_burst_multicore(void) 588 { 589 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1); 590 rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2); 591 592 rte_eal_mp_wait_lcore(); 593 TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), 594 "Timer triggered count doesn't match arm count"); 595 596 return TEST_SUCCESS; 597 } 598 599 static inline int 600 test_timer_cancel(void) 601 { 602 uint64_t i; 603 struct rte_event_timer *ev_tim; 604 const struct rte_event_timer tim = { 605 .ev.op = RTE_EVENT_OP_NEW, 606 .ev.queue_id = 0, 607 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 608 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 609 .ev.event_type = RTE_EVENT_TYPE_TIMER, 610 .state = RTE_EVENT_TIMER_NOT_ARMED, 611 .timeout_ticks = 20, 612 }; 613 614 for (i = 0; i < MAX_TIMERS; i++) { 615 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 616 (void **)&ev_tim), 617 "mempool alloc failed"); 618 *ev_tim = tim; 619 ev_tim->ev.event_ptr = ev_tim; 620 621 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 622 1), 1, "Failed to arm timer %d", 623 rte_errno); 624 625 rte_delay_us(100 + (i % 5000)); 626 627 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, 628 &ev_tim, 1), 1, 629 "Failed to cancel event timer %d", rte_errno); 630 rte_mempool_put(eventdev_test_mempool, ev_tim); 631 } 632 633 634 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 635 MAX_TIMERS), 636 "Timer triggered count doesn't match arm, cancel count"); 637 638 return TEST_SUCCESS; 639 } 640 641 static int 642 _cancel_producer(uint64_t timeout_tcks, uint64_t timers) 643 { 644 uint64_t i; 645 struct rte_event_timer *ev_tim; 646 const struct rte_event_timer tim = { 647 .ev.op = RTE_EVENT_OP_NEW, 648 .ev.queue_id = 0, 649 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 650 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 651 .ev.event_type = RTE_EVENT_TYPE_TIMER, 652 .state = RTE_EVENT_TIMER_NOT_ARMED, 653 .timeout_ticks = timeout_tcks, 654 }; 655 656 for (i = 0; i < timers; i++) { 657 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 658 (void **)&ev_tim), 659 "mempool alloc failed"); 660 661 *ev_tim = tim; 662 ev_tim->ev.event_ptr = ev_tim; 663 664 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 665 1), 1, "Failed to arm timer %d", 666 rte_errno); 667 668 TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, 669 "Failed to arm event timer"); 670 671 while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0) 672 ; 673 } 674 675 return TEST_SUCCESS; 676 } 677 678 static int 679 _cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers) 680 { 681 682 uint64_t i; 683 int j, ret; 684 struct rte_event_timer *ev_tim[MAX_BURST]; 685 const struct rte_event_timer tim = { 686 .ev.op = RTE_EVENT_OP_NEW, 687 .ev.queue_id = 0, 688 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 689 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 690 .ev.event_type = RTE_EVENT_TYPE_TIMER, 691 .state = RTE_EVENT_TIMER_NOT_ARMED, 692 .timeout_ticks = timeout_tcks, 693 }; 694 int arm_count = 0; 695 696 for (i = 0; i < timers / MAX_BURST; i++) { 697 TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 698 eventdev_test_mempool, 699 (void **)ev_tim, MAX_BURST), 700 "mempool alloc failed"); 701 702 for (j = 0; j < MAX_BURST; j++) { 703 *ev_tim[j] = tim; 704 ev_tim[j]->ev.event_ptr = ev_tim[j]; 705 } 706 707 TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, 708 ev_tim, tim.timeout_ticks, MAX_BURST), 709 MAX_BURST, "Failed to arm timer %d", rte_errno); 710 711 for (j = 0; j < MAX_BURST; j++) 712 TEST_ASSERT_EQUAL(ev_tim[j]->state, 713 RTE_EVENT_TIMER_ARMED, 714 "Event timer not armed, state = %d", 715 ev_tim[j]->state); 716 717 ret = rte_ring_enqueue_bulk(timer_producer_ring, 718 (void **)ev_tim, MAX_BURST, NULL); 719 TEST_ASSERT_EQUAL(ret, MAX_BURST, 720 "Failed to enqueue event timers to ring"); 721 arm_count += ret; 722 } 723 724 TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS, 725 "Failed to arm expected number of event timers"); 726 727 return TEST_SUCCESS; 728 } 729 730 static int 731 _cancel_producer_wrapper(void *args) 732 { 733 RTE_SET_USED(args); 734 735 return _cancel_producer(20, MAX_TIMERS); 736 } 737 738 static int 739 _cancel_producer_burst_wrapper(void *args) 740 { 741 RTE_SET_USED(args); 742 743 return _cancel_producer_burst(100, MAX_TIMERS); 744 } 745 746 static int 747 _cancel_thread(void *args) 748 { 749 RTE_SET_USED(args); 750 struct rte_event_timer *ev_tim = NULL; 751 uint64_t cancel_count = 0; 752 uint16_t ret; 753 754 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 755 if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim)) 756 continue; 757 758 ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); 759 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); 760 rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); 761 cancel_count++; 762 } 763 764 return TEST_SUCCESS; 765 } 766 767 static int 768 _cancel_burst_thread(void *args) 769 { 770 RTE_SET_USED(args); 771 772 int ret, i, n; 773 struct rte_event_timer *ev_tim[MAX_BURST]; 774 uint64_t cancel_count = 0; 775 uint64_t dequeue_count = 0; 776 777 while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { 778 n = rte_ring_dequeue_burst(timer_producer_ring, 779 (void **)ev_tim, MAX_BURST, NULL); 780 if (!n) 781 continue; 782 783 dequeue_count += n; 784 785 for (i = 0; i < n; i++) 786 TEST_ASSERT_EQUAL(ev_tim[i]->state, 787 RTE_EVENT_TIMER_ARMED, 788 "Event timer not armed, state = %d", 789 ev_tim[i]->state); 790 791 ret = rte_event_timer_cancel_burst(timdev, ev_tim, n); 792 TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of " 793 "event timers"); 794 rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim, 795 RTE_MIN(ret, MAX_BURST)); 796 797 cancel_count += ret; 798 } 799 800 TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS, 801 "Failed to cancel expected number of timers: " 802 "expected = %d, cancel_count = %"PRIu64", " 803 "dequeue_count = %"PRIu64"\n", MAX_TIMERS, 804 cancel_count, dequeue_count); 805 806 return TEST_SUCCESS; 807 } 808 809 static inline int 810 test_timer_cancel_multicore(void) 811 { 812 arm_done = 0; 813 timer_producer_ring = rte_ring_create("timer_cancel_queue", 814 MAX_TIMERS * 2, rte_socket_id(), 0); 815 TEST_ASSERT_NOT_NULL(timer_producer_ring, 816 "Unable to reserve memory for ring"); 817 818 rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3); 819 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1); 820 rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2); 821 822 rte_eal_wait_lcore(test_lcore1); 823 rte_eal_wait_lcore(test_lcore2); 824 arm_done = 1; 825 rte_eal_wait_lcore(test_lcore3); 826 rte_ring_free(timer_producer_ring); 827 828 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2, 829 MAX_TIMERS * 2), 830 "Timer triggered count doesn't match arm count"); 831 832 return TEST_SUCCESS; 833 } 834 835 static inline int 836 test_timer_cancel_burst_multicore(void) 837 { 838 arm_done = 0; 839 timer_producer_ring = rte_ring_create("timer_cancel_queue", 840 MAX_TIMERS * 2, rte_socket_id(), 0); 841 TEST_ASSERT_NOT_NULL(timer_producer_ring, 842 "Unable to reserve memory for ring"); 843 844 rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2); 845 rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL, 846 test_lcore1); 847 848 rte_eal_wait_lcore(test_lcore1); 849 arm_done = 1; 850 rte_eal_wait_lcore(test_lcore2); 851 rte_ring_free(timer_producer_ring); 852 853 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 854 MAX_TIMERS), 855 "Timer triggered count doesn't match arm count"); 856 857 return TEST_SUCCESS; 858 } 859 860 static inline int 861 test_timer_cancel_random(void) 862 { 863 uint64_t i; 864 uint64_t events_canceled = 0; 865 struct rte_event_timer *ev_tim; 866 const struct rte_event_timer tim = { 867 .ev.op = RTE_EVENT_OP_NEW, 868 .ev.queue_id = 0, 869 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 870 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 871 .ev.event_type = RTE_EVENT_TYPE_TIMER, 872 .state = RTE_EVENT_TIMER_NOT_ARMED, 873 .timeout_ticks = 20, 874 }; 875 876 for (i = 0; i < MAX_TIMERS; i++) { 877 878 TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, 879 (void **)&ev_tim), 880 "mempool alloc failed"); 881 *ev_tim = tim; 882 ev_tim->ev.event_ptr = ev_tim; 883 884 TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 885 1), 1, "Failed to arm timer %d", 886 rte_errno); 887 888 if (rte_rand() & 1) { 889 rte_delay_us(100 + (i % 5000)); 890 TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst( 891 timdev, 892 &ev_tim, 1), 1, 893 "Failed to cancel event timer %d", rte_errno); 894 rte_mempool_put(eventdev_test_mempool, ev_tim); 895 events_canceled++; 896 } 897 } 898 899 TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, 900 events_canceled), 901 "Timer triggered count doesn't match arm, cancel count"); 902 903 return TEST_SUCCESS; 904 } 905 906 /* Check that the adapter can be created correctly */ 907 static int 908 adapter_create(void) 909 { 910 int adapter_id = 0; 911 struct rte_event_timer_adapter *adapter, *adapter2; 912 913 struct rte_event_timer_adapter_conf conf = { 914 .event_dev_id = evdev + 1, // invalid event dev id 915 .timer_adapter_id = adapter_id, 916 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 917 .timer_tick_ns = NSECPERSEC / 10, 918 .max_tmo_ns = 180 * NSECPERSEC, 919 .nb_timers = MAX_TIMERS, 920 .flags = 0, 921 }; 922 uint32_t caps = 0; 923 924 /* Test invalid conf */ 925 adapter = rte_event_timer_adapter_create(&conf); 926 TEST_ASSERT_NULL(adapter, "Created adapter with invalid " 927 "event device id"); 928 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for " 929 "invalid event device id"); 930 931 /* Test valid conf */ 932 conf.event_dev_id = evdev; 933 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), 934 "failed to get adapter capabilities"); 935 if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) 936 adapter = rte_event_timer_adapter_create_ext(&conf, 937 test_port_conf_cb, 938 NULL); 939 else 940 adapter = rte_event_timer_adapter_create(&conf); 941 TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid " 942 "configuration"); 943 944 /* Test existing id */ 945 adapter2 = rte_event_timer_adapter_create(&conf); 946 TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id"); 947 TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing " 948 "id"); 949 950 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter), 951 "Failed to free adapter"); 952 953 rte_mempool_free(eventdev_test_mempool); 954 955 return TEST_SUCCESS; 956 } 957 958 959 /* Test that adapter can be freed correctly. */ 960 static int 961 adapter_free(void) 962 { 963 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 964 "Failed to stop adapter"); 965 966 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 967 "Failed to free valid adapter"); 968 969 /* Test free of already freed adapter */ 970 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), 971 "Freed adapter that was already freed"); 972 973 /* Test free of null adapter */ 974 timdev = NULL; 975 TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), 976 "Freed null adapter"); 977 978 rte_mempool_free(eventdev_test_mempool); 979 980 return TEST_SUCCESS; 981 } 982 983 /* Test that adapter info can be retrieved and is correct. */ 984 static int 985 adapter_get_info(void) 986 { 987 struct rte_event_timer_adapter_info info; 988 989 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info), 990 "Failed to get adapter info"); 991 992 if (using_services) 993 TEST_ASSERT_EQUAL(info.event_dev_port_id, 1, 994 "Expected port id = 1, got port id = %d", 995 info.event_dev_port_id); 996 997 return TEST_SUCCESS; 998 } 999 1000 /* Test adapter lookup via adapter ID. */ 1001 static int 1002 adapter_lookup(void) 1003 { 1004 struct rte_event_timer_adapter *adapter; 1005 1006 adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID); 1007 TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter"); 1008 1009 return TEST_SUCCESS; 1010 } 1011 1012 static int 1013 adapter_start(void) 1014 { 1015 TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, 1016 NSECPERSEC / 10), 1017 "Failed to start adapter"); 1018 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_start(timdev), 1019 "Failed to repeatedly start adapter"); 1020 1021 return TEST_SUCCESS; 1022 } 1023 1024 /* Test that adapter stops correctly. */ 1025 static int 1026 adapter_stop(void) 1027 { 1028 struct rte_event_timer_adapter *l_adapter = NULL; 1029 1030 /* Test adapter stop */ 1031 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), 1032 "Failed to stop event adapter"); 1033 1034 TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter), 1035 "Erroneously stopped null event adapter"); 1036 1037 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), 1038 "Failed to free adapter"); 1039 1040 rte_mempool_free(eventdev_test_mempool); 1041 1042 return TEST_SUCCESS; 1043 } 1044 1045 /* Test increment and reset of ev_enq_count stat */ 1046 static int 1047 stat_inc_reset_ev_enq(void) 1048 { 1049 int ret, i, n; 1050 int num_evtims = MAX_TIMERS; 1051 struct rte_event_timer *evtims[num_evtims]; 1052 struct rte_event evs[BATCH_SIZE]; 1053 struct rte_event_timer_adapter_stats stats; 1054 const struct rte_event_timer init_tim = { 1055 .ev.op = RTE_EVENT_OP_NEW, 1056 .ev.queue_id = TEST_QUEUE_ID, 1057 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1058 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1059 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1060 .state = RTE_EVENT_TIMER_NOT_ARMED, 1061 .timeout_ticks = 5, // expire in .5 sec 1062 }; 1063 1064 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1065 num_evtims); 1066 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1067 ret); 1068 1069 for (i = 0; i < num_evtims; i++) { 1070 *evtims[i] = init_tim; 1071 evtims[i]->ev.event_ptr = evtims[i]; 1072 } 1073 1074 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1075 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1076 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at " 1077 "startup"); 1078 1079 /* Test with the max value for the adapter */ 1080 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1081 TEST_ASSERT_EQUAL(ret, num_evtims, 1082 "Failed to arm all event timers: attempted = %d, " 1083 "succeeded = %d, rte_errno = %s", 1084 num_evtims, ret, rte_strerror(rte_errno)); 1085 1086 rte_delay_ms(1000); 1087 1088 #define MAX_TRIES num_evtims 1089 int sum = 0; 1090 int tries = 0; 1091 bool done = false; 1092 while (!done) { 1093 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1094 RTE_DIM(evs), 10); 1095 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1096 done = true; 1097 1098 rte_delay_ms(10); 1099 } 1100 1101 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1102 "got %d", num_evtims, sum); 1103 1104 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1105 1106 rte_delay_ms(100); 1107 1108 /* Make sure the eventdev is still empty */ 1109 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1110 10); 1111 1112 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1113 "events from event device"); 1114 1115 /* Check stats again */ 1116 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1117 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1118 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims, 1119 "Expected enqueue stat = %d; got %d", num_evtims, 1120 (int)stats.ev_enq_count); 1121 1122 /* Reset and check again */ 1123 ret = rte_event_timer_adapter_stats_reset(timdev); 1124 TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats"); 1125 1126 ret = rte_event_timer_adapter_stats_get(timdev, &stats); 1127 TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); 1128 TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, 1129 "Expected enqueue stat = %d; got %d", 0, 1130 (int)stats.ev_enq_count); 1131 1132 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1133 num_evtims); 1134 1135 return TEST_SUCCESS; 1136 } 1137 1138 /* Test various cases in arming timers */ 1139 static int 1140 event_timer_arm(void) 1141 { 1142 uint16_t n; 1143 int ret; 1144 struct rte_event_timer_adapter *adapter = timdev; 1145 struct rte_event_timer *evtim = NULL; 1146 struct rte_event evs[BATCH_SIZE]; 1147 const struct rte_event_timer init_tim = { 1148 .ev.op = RTE_EVENT_OP_NEW, 1149 .ev.queue_id = TEST_QUEUE_ID, 1150 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1151 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1152 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1153 .state = RTE_EVENT_TIMER_NOT_ARMED, 1154 .timeout_ticks = 5, // expire in .5 sec 1155 }; 1156 1157 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1158 if (evtim == NULL) { 1159 /* Failed to get an event timer object */ 1160 return TEST_FAILED; 1161 } 1162 1163 /* Set up a timer */ 1164 *evtim = init_tim; 1165 evtim->ev.event_ptr = evtim; 1166 1167 /* Test single timer arm succeeds */ 1168 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1169 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1170 rte_strerror(rte_errno)); 1171 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer " 1172 "in incorrect state"); 1173 1174 /* Test arm of armed timer fails */ 1175 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1176 TEST_ASSERT_EQUAL(ret, 0, "expected return value from " 1177 "rte_event_timer_arm_burst: 0, got: %d", ret); 1178 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1179 "after arming already armed timer"); 1180 1181 /* Let timer expire */ 1182 rte_delay_ms(1000); 1183 1184 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1185 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1186 "events from event device"); 1187 1188 rte_mempool_put(eventdev_test_mempool, evtim); 1189 1190 return TEST_SUCCESS; 1191 } 1192 1193 /* This test checks that repeated references to the same event timer in the 1194 * arm request work as expected; only the first one through should succeed. 1195 */ 1196 static int 1197 event_timer_arm_double(void) 1198 { 1199 uint16_t n; 1200 int ret; 1201 struct rte_event_timer_adapter *adapter = timdev; 1202 struct rte_event_timer *evtim = NULL; 1203 struct rte_event evs[BATCH_SIZE]; 1204 const struct rte_event_timer init_tim = { 1205 .ev.op = RTE_EVENT_OP_NEW, 1206 .ev.queue_id = TEST_QUEUE_ID, 1207 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1208 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1209 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1210 .state = RTE_EVENT_TIMER_NOT_ARMED, 1211 .timeout_ticks = 5, // expire in .5 sec 1212 }; 1213 1214 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1215 if (evtim == NULL) { 1216 /* Failed to get an event timer object */ 1217 return TEST_FAILED; 1218 } 1219 1220 /* Set up a timer */ 1221 *evtim = init_tim; 1222 evtim->ev.event_ptr = evtim; 1223 1224 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1225 ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr)); 1226 TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from " 1227 "rte_event_timer_arm_burst"); 1228 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1229 "after double-arm"); 1230 1231 /* Let timer expire */ 1232 rte_delay_ms(600); 1233 1234 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1235 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " 1236 "expected: 1, actual: %d", n); 1237 1238 rte_mempool_put(eventdev_test_mempool, evtim); 1239 1240 return TEST_SUCCESS; 1241 } 1242 1243 /* Test the timer expiry event is generated at the expected time. */ 1244 static int 1245 event_timer_arm_expiry(void) 1246 { 1247 uint16_t n; 1248 int ret; 1249 struct rte_event_timer_adapter *adapter = timdev; 1250 struct rte_event_timer *evtim = NULL; 1251 struct rte_event_timer *evtim2 = NULL; 1252 struct rte_event evs[BATCH_SIZE]; 1253 const struct rte_event_timer init_tim = { 1254 .ev.op = RTE_EVENT_OP_NEW, 1255 .ev.queue_id = TEST_QUEUE_ID, 1256 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1257 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1258 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1259 .state = RTE_EVENT_TIMER_NOT_ARMED, 1260 }; 1261 1262 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1263 if (evtim == NULL) { 1264 /* Failed to get an event timer object */ 1265 return TEST_FAILED; 1266 } 1267 1268 /* Set up an event timer */ 1269 *evtim = init_tim; 1270 evtim->timeout_ticks = 30, // expire in 3 secs 1271 evtim->ev.event_ptr = evtim; 1272 1273 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1274 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s", 1275 rte_strerror(rte_errno)); 1276 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " 1277 "timer in incorrect state"); 1278 1279 rte_delay_ms(2999); 1280 1281 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1282 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); 1283 1284 /* Delay 100 ms to account for the adapter tick window - should let us 1285 * dequeue one event 1286 */ 1287 rte_delay_ms(100); 1288 1289 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1290 TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " 1291 "expiry events", n); 1292 TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, 1293 "Dequeued unexpected type of event"); 1294 1295 /* Check that we recover the original event timer and then free it */ 1296 evtim2 = evs[0].event_ptr; 1297 TEST_ASSERT_EQUAL(evtim, evtim2, 1298 "Failed to recover pointer to original event timer"); 1299 rte_mempool_put(eventdev_test_mempool, evtim2); 1300 1301 return TEST_SUCCESS; 1302 } 1303 1304 /* Check that rearming a timer works as expected. */ 1305 static int 1306 event_timer_arm_rearm(void) 1307 { 1308 uint16_t n; 1309 int ret; 1310 struct rte_event_timer *evtim = NULL; 1311 struct rte_event_timer *evtim2 = NULL; 1312 struct rte_event evs[BATCH_SIZE]; 1313 const struct rte_event_timer init_tim = { 1314 .ev.op = RTE_EVENT_OP_NEW, 1315 .ev.queue_id = TEST_QUEUE_ID, 1316 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1317 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1318 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1319 .state = RTE_EVENT_TIMER_NOT_ARMED, 1320 }; 1321 1322 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1323 if (evtim == NULL) { 1324 /* Failed to get an event timer object */ 1325 return TEST_FAILED; 1326 } 1327 1328 /* Set up a timer */ 1329 *evtim = init_tim; 1330 evtim->timeout_ticks = 1; // expire in 0.1 sec 1331 evtim->ev.event_ptr = evtim; 1332 1333 /* Arm it */ 1334 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1335 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1336 rte_strerror(rte_errno)); 1337 1338 /* Add 100ms to account for the adapter tick window */ 1339 rte_delay_ms(100 + 100); 1340 1341 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1342 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1343 "events from event device"); 1344 1345 /* Recover the timer through the event that was dequeued. */ 1346 evtim2 = evs[0].event_ptr; 1347 TEST_ASSERT_EQUAL(evtim, evtim2, 1348 "Failed to recover pointer to original event timer"); 1349 1350 /* Need to reset state in case implementation can't do it */ 1351 evtim2->state = RTE_EVENT_TIMER_NOT_ARMED; 1352 1353 /* Rearm it */ 1354 ret = rte_event_timer_arm_burst(timdev, &evtim2, 1); 1355 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1356 rte_strerror(rte_errno)); 1357 1358 /* Add 100ms to account for the adapter tick window */ 1359 rte_delay_ms(100 + 100); 1360 1361 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1362 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1363 "events from event device"); 1364 1365 /* Free it */ 1366 evtim2 = evs[0].event_ptr; 1367 TEST_ASSERT_EQUAL(evtim, evtim2, 1368 "Failed to recover pointer to original event timer"); 1369 rte_mempool_put(eventdev_test_mempool, evtim2); 1370 1371 return TEST_SUCCESS; 1372 } 1373 1374 /* Check that the adapter handles the max specified number of timers as 1375 * expected. 1376 */ 1377 static int 1378 event_timer_arm_max(void) 1379 { 1380 int ret, i, n; 1381 int num_evtims = MAX_TIMERS; 1382 struct rte_event_timer *evtims[num_evtims]; 1383 struct rte_event evs[BATCH_SIZE]; 1384 const struct rte_event_timer init_tim = { 1385 .ev.op = RTE_EVENT_OP_NEW, 1386 .ev.queue_id = TEST_QUEUE_ID, 1387 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1388 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1389 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1390 .state = RTE_EVENT_TIMER_NOT_ARMED, 1391 .timeout_ticks = 5, // expire in .5 sec 1392 }; 1393 1394 ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, 1395 num_evtims); 1396 TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", 1397 ret); 1398 1399 for (i = 0; i < num_evtims; i++) { 1400 *evtims[i] = init_tim; 1401 evtims[i]->ev.event_ptr = evtims[i]; 1402 } 1403 1404 /* Test with the max value for the adapter */ 1405 ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); 1406 TEST_ASSERT_EQUAL(ret, num_evtims, 1407 "Failed to arm all event timers: attempted = %d, " 1408 "succeeded = %d, rte_errno = %s", 1409 num_evtims, ret, rte_strerror(rte_errno)); 1410 1411 rte_delay_ms(1000); 1412 1413 #define MAX_TRIES num_evtims 1414 int sum = 0; 1415 int tries = 0; 1416 bool done = false; 1417 while (!done) { 1418 sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, 1419 RTE_DIM(evs), 10); 1420 if (sum >= num_evtims || ++tries >= MAX_TRIES) 1421 done = true; 1422 1423 rte_delay_ms(10); 1424 } 1425 1426 TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " 1427 "got %d", num_evtims, sum); 1428 1429 TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); 1430 1431 rte_delay_ms(100); 1432 1433 /* Make sure the eventdev is still empty */ 1434 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 1435 10); 1436 1437 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " 1438 "events from event device"); 1439 1440 rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, 1441 num_evtims); 1442 1443 return TEST_SUCCESS; 1444 } 1445 1446 /* Check that creating an event timer with incorrect event sched type fails. */ 1447 static int 1448 event_timer_arm_invalid_sched_type(void) 1449 { 1450 int ret; 1451 struct rte_event_timer *evtim = NULL; 1452 const struct rte_event_timer init_tim = { 1453 .ev.op = RTE_EVENT_OP_NEW, 1454 .ev.queue_id = TEST_QUEUE_ID, 1455 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1456 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1457 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1458 .state = RTE_EVENT_TIMER_NOT_ARMED, 1459 .timeout_ticks = 5, // expire in .5 sec 1460 }; 1461 1462 if (!using_services) 1463 return -ENOTSUP; 1464 1465 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1466 if (evtim == NULL) { 1467 /* Failed to get an event timer object */ 1468 return TEST_FAILED; 1469 } 1470 1471 *evtim = init_tim; 1472 evtim->ev.event_ptr = evtim; 1473 evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type 1474 1475 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1476 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1477 "sched type, but didn't"); 1478 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1479 " arm fail with invalid queue"); 1480 1481 rte_mempool_put(eventdev_test_mempool, &evtim); 1482 1483 return TEST_SUCCESS; 1484 } 1485 1486 /* Check that creating an event timer with a timeout value that is too small or 1487 * too big fails. 1488 */ 1489 static int 1490 event_timer_arm_invalid_timeout(void) 1491 { 1492 int ret; 1493 struct rte_event_timer *evtim = NULL; 1494 const struct rte_event_timer init_tim = { 1495 .ev.op = RTE_EVENT_OP_NEW, 1496 .ev.queue_id = TEST_QUEUE_ID, 1497 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1498 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1499 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1500 .state = RTE_EVENT_TIMER_NOT_ARMED, 1501 .timeout_ticks = 5, // expire in .5 sec 1502 }; 1503 1504 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1505 if (evtim == NULL) { 1506 /* Failed to get an event timer object */ 1507 return TEST_FAILED; 1508 } 1509 1510 *evtim = init_tim; 1511 evtim->ev.event_ptr = evtim; 1512 evtim->timeout_ticks = 0; // timeout too small 1513 1514 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1515 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1516 "timeout, but didn't"); 1517 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1518 " arm fail with invalid timeout"); 1519 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY, 1520 "Unexpected event timer state"); 1521 1522 *evtim = init_tim; 1523 evtim->ev.event_ptr = evtim; 1524 evtim->timeout_ticks = 1801; // timeout too big 1525 1526 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1527 TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " 1528 "timeout, but didn't"); 1529 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" 1530 " arm fail with invalid timeout"); 1531 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, 1532 "Unexpected event timer state"); 1533 1534 rte_mempool_put(eventdev_test_mempool, evtim); 1535 1536 return TEST_SUCCESS; 1537 } 1538 1539 static int 1540 event_timer_cancel(void) 1541 { 1542 uint16_t n; 1543 int ret; 1544 struct rte_event_timer_adapter *adapter = timdev; 1545 struct rte_event_timer *evtim = NULL; 1546 struct rte_event evs[BATCH_SIZE]; 1547 const struct rte_event_timer init_tim = { 1548 .ev.op = RTE_EVENT_OP_NEW, 1549 .ev.queue_id = TEST_QUEUE_ID, 1550 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1551 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1552 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1553 .state = RTE_EVENT_TIMER_NOT_ARMED, 1554 }; 1555 1556 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1557 if (evtim == NULL) { 1558 /* Failed to get an event timer object */ 1559 return TEST_FAILED; 1560 } 1561 1562 /* Check that cancelling an uninited timer fails */ 1563 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1564 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1565 "uninited timer"); 1566 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1567 "cancelling uninited timer"); 1568 1569 /* Set up a timer */ 1570 *evtim = init_tim; 1571 evtim->ev.event_ptr = evtim; 1572 evtim->timeout_ticks = 30; // expire in 3 sec 1573 1574 /* Check that cancelling an inited but unarmed timer fails */ 1575 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1576 TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " 1577 "unarmed timer"); 1578 TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " 1579 "cancelling unarmed timer"); 1580 1581 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1582 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1583 rte_strerror(rte_errno)); 1584 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1585 "evtim in incorrect state"); 1586 1587 /* Delay 1 sec */ 1588 rte_delay_ms(1000); 1589 1590 ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); 1591 TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n", 1592 rte_strerror(rte_errno)); 1593 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, 1594 "evtim in incorrect state"); 1595 1596 rte_delay_ms(3000); 1597 1598 /* Make sure that no expiry event was generated */ 1599 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1600 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1601 1602 rte_mempool_put(eventdev_test_mempool, evtim); 1603 1604 return TEST_SUCCESS; 1605 } 1606 1607 static int 1608 event_timer_cancel_double(void) 1609 { 1610 uint16_t n; 1611 int ret; 1612 struct rte_event_timer_adapter *adapter = timdev; 1613 struct rte_event_timer *evtim = NULL; 1614 struct rte_event evs[BATCH_SIZE]; 1615 const struct rte_event_timer init_tim = { 1616 .ev.op = RTE_EVENT_OP_NEW, 1617 .ev.queue_id = TEST_QUEUE_ID, 1618 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1619 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1620 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1621 .state = RTE_EVENT_TIMER_NOT_ARMED, 1622 .timeout_ticks = 5, // expire in .5 sec 1623 }; 1624 1625 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1626 if (evtim == NULL) { 1627 /* Failed to get an event timer object */ 1628 return TEST_FAILED; 1629 } 1630 1631 /* Set up a timer */ 1632 *evtim = init_tim; 1633 evtim->ev.event_ptr = evtim; 1634 evtim->timeout_ticks = 30; // expire in 3 sec 1635 1636 ret = rte_event_timer_arm_burst(adapter, &evtim, 1); 1637 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1638 rte_strerror(rte_errno)); 1639 TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, 1640 "timer in unexpected state"); 1641 1642 /* Now, test that referencing the same timer twice in the same call 1643 * fails 1644 */ 1645 struct rte_event_timer *evtim_arr[] = {evtim, evtim}; 1646 ret = rte_event_timer_cancel_burst(adapter, evtim_arr, 1647 RTE_DIM(evtim_arr)); 1648 1649 /* Two requests to cancel same timer, only one should succeed */ 1650 TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer " 1651 "twice"); 1652 1653 TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " 1654 "after double-cancel: rte_errno = %d", rte_errno); 1655 1656 rte_delay_ms(3000); 1657 1658 /* Still make sure that no expiry event was generated */ 1659 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1660 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); 1661 1662 rte_mempool_put(eventdev_test_mempool, evtim); 1663 1664 return TEST_SUCCESS; 1665 } 1666 1667 /* Check that event timer adapter tick resolution works as expected by testing 1668 * the number of adapter ticks that occur within a particular time interval. 1669 */ 1670 static int 1671 adapter_tick_resolution(void) 1672 { 1673 struct rte_event_timer_adapter_stats stats; 1674 uint64_t adapter_tick_count; 1675 1676 /* Only run this test in the software driver case */ 1677 if (!using_services) 1678 return -ENOTSUP; 1679 1680 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev), 1681 "Failed to reset stats"); 1682 1683 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1684 &stats), "Failed to get adapter stats"); 1685 TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count " 1686 "not zeroed out"); 1687 1688 /* Delay 1 second; should let at least 10 ticks occur with the default 1689 * adapter configuration used by this test. 1690 */ 1691 rte_delay_ms(1000); 1692 1693 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, 1694 &stats), "Failed to get adapter stats"); 1695 1696 adapter_tick_count = stats.adapter_tick_count; 1697 TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12, 1698 "Expected 10-12 adapter ticks, got %"PRIu64"\n", 1699 adapter_tick_count); 1700 1701 return TEST_SUCCESS; 1702 } 1703 1704 static int 1705 adapter_create_max(void) 1706 { 1707 int i; 1708 uint32_t svc_start_count, svc_end_count; 1709 struct rte_event_timer_adapter *adapters[ 1710 RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1]; 1711 1712 struct rte_event_timer_adapter_conf conf = { 1713 .event_dev_id = evdev, 1714 // timer_adapter_id set in loop 1715 .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, 1716 .timer_tick_ns = NSECPERSEC / 10, 1717 .max_tmo_ns = 180 * NSECPERSEC, 1718 .nb_timers = MAX_TIMERS, 1719 .flags = 0, 1720 }; 1721 1722 if (!using_services) 1723 return -ENOTSUP; 1724 1725 svc_start_count = rte_service_get_count(); 1726 1727 /* This test expects that there are sufficient service IDs available 1728 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to 1729 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service 1730 * (the SW event device, for example). 1731 */ 1732 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) { 1733 conf.timer_adapter_id = i; 1734 adapters[i] = rte_event_timer_adapter_create_ext(&conf, 1735 test_port_conf_cb, NULL); 1736 TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter " 1737 "%d", i); 1738 } 1739 1740 conf.timer_adapter_id = i; 1741 adapters[i] = rte_event_timer_adapter_create(&conf); 1742 TEST_ASSERT_NULL(adapters[i], "Created too many adapters"); 1743 1744 /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services 1745 * have been created 1746 */ 1747 svc_end_count = rte_service_get_count(); 1748 TEST_ASSERT_EQUAL(svc_end_count - svc_start_count, 1749 RTE_EVENT_TIMER_ADAPTER_NUM_MAX, 1750 "Failed to create expected number of services"); 1751 1752 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) 1753 TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]), 1754 "Failed to free adapter %d", i); 1755 1756 /* Check that service count is back to where it was at start */ 1757 svc_end_count = rte_service_get_count(); 1758 TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release " 1759 "correct number of services"); 1760 1761 return TEST_SUCCESS; 1762 } 1763 1764 static struct unit_test_suite event_timer_adptr_functional_testsuite = { 1765 .suite_name = "event timer functional test suite", 1766 .setup = testsuite_setup, 1767 .teardown = testsuite_teardown, 1768 .unit_test_cases = { 1769 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1770 test_timer_state), 1771 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1772 test_timer_arm), 1773 TEST_CASE_ST(timdev_setup_usec, timdev_teardown, 1774 test_timer_arm_burst), 1775 TEST_CASE_ST(timdev_setup_sec, timdev_teardown, 1776 test_timer_cancel), 1777 TEST_CASE_ST(timdev_setup_sec, timdev_teardown, 1778 test_timer_cancel_random), 1779 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, 1780 test_timer_arm_multicore), 1781 TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, 1782 test_timer_arm_burst_multicore), 1783 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, 1784 test_timer_cancel_multicore), 1785 TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, 1786 test_timer_cancel_burst_multicore), 1787 TEST_CASE(adapter_create), 1788 TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free), 1789 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1790 adapter_get_info), 1791 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1792 adapter_lookup), 1793 TEST_CASE_ST(NULL, timdev_teardown, 1794 adapter_start), 1795 TEST_CASE_ST(timdev_setup_msec, NULL, 1796 adapter_stop), 1797 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1798 stat_inc_reset_ev_enq), 1799 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1800 event_timer_arm), 1801 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1802 event_timer_arm_double), 1803 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1804 event_timer_arm_expiry), 1805 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1806 event_timer_arm_rearm), 1807 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1808 event_timer_arm_max), 1809 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1810 event_timer_arm_invalid_sched_type), 1811 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1812 event_timer_arm_invalid_timeout), 1813 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1814 event_timer_cancel), 1815 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1816 event_timer_cancel_double), 1817 TEST_CASE_ST(timdev_setup_msec, timdev_teardown, 1818 adapter_tick_resolution), 1819 TEST_CASE(adapter_create_max), 1820 TEST_CASES_END() /**< NULL terminate unit test array */ 1821 } 1822 }; 1823 1824 static int 1825 test_event_timer_adapter_func(void) 1826 { 1827 return unit_test_suite_runner(&event_timer_adptr_functional_testsuite); 1828 } 1829 1830 REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func); 1831