/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 * Copyright(c) 2017-2018 Intel Corporation.
 */

#include <math.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>
#include <rte_mempool.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_service.h>
#include <stdbool.h>

#include "test.h"

/* 4K timers correspond to the sw evdev max inflight events */
#define MAX_TIMERS (4 * 1024)
#define BKT_TCK_NSEC

#define NSECPERSEC 1E9
#define BATCH_SIZE 16
/* Both the app lcore and adapter ports are linked to this queue */
#define TEST_QUEUE_ID 0
/* Port the application dequeues from */
#define TEST_PORT_ID 0
#define TEST_ADAPTER_ID 0

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)

static int evdev;
static struct rte_event_timer_adapter *timdev;
static struct rte_mempool *eventdev_test_mempool;
static struct rte_ring *timer_producer_ring;
static uint64_t global_bkt_tck_ns;
static uint64_t global_info_bkt_tck_ns;
static volatile uint8_t arm_done;

/* Convert a tick count expressed in the resolution requested by the test
 * (global_bkt_tck_ns) into the resolution the adapter actually reports
 * (global_info_bkt_tck_ns).
 */
#define CALC_TICKS(tks)					\
	ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)


static bool using_services;
static uint32_t test_lcore1;
static uint32_t test_lcore2;
static uint32_t test_lcore3;
static uint32_t sw_evdev_slcore;
static uint32_t sw_adptr_slcore;

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = 1;
	dev_conf->nb_event_queues = 1;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

static inline int
eventdev_setup(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	uint32_t service_id;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events < 0 ||
			info.max_num_events >= (int32_t)MAX_TIMERS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_TIMERS);

	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	ret = rte_event_queue_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);

	/* Configure event port */
	ret = rte_event_port_setup(evdev, 0, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);

	/* If this is a software event device, map and start its service */
	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
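		/* The sw eventdev has no internal scheduler thread; its
		 * scheduling work is exposed as a DPDK service, so map that
		 * service to a dedicated service core and set it running
		 * before the device is started.
		 */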
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(sw_evdev_slcore),
				"Failed to start service core");
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
				service_id, sw_evdev_slcore, 1),
				"Failed to map evdev service");
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
				service_id, 1),
				"Failed to start evdev service");
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
	/* Some of the multithreaded tests require 3 other lcores to run */
	unsigned int required_lcore_count = 4;
	uint32_t service_id;

	/* To make it easier to map services later if needed, just reset
	 * service core state.
	 */
	(void) rte_service_lcore_reset_all();

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... testing with"
			" event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				"Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
		/* A software event device will use a software event timer
		 * adapter as well; two more lcores are required to use as
		 * service cores.
		 */
		required_lcore_count += 2;
		using_services = true;
	}

	if (rte_lcore_count() < required_lcore_count) {
		printf("Not enough cores for event_timer_adapter_test, expecting at least %u\n",
				required_lcore_count);
		return TEST_SKIPPED;
	}

	/* Assign lcores for various tasks */
	test_lcore1 = rte_get_next_lcore(-1, 1, 0);
	test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
	test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
	if (using_services) {
		sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
		sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
	}

	return eventdev_setup();
}

static void
testsuite_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
}

static int
setup_adapter_service(struct rte_event_timer_adapter *adptr)
{
	uint32_t adapter_service_id;
	int ret;

	/* retrieve service ids */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
			&adapter_service_id), "Failed to get event timer "
			"adapter service id");
	/* add a service core and start it */
	ret = rte_service_lcore_add(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to add service core");
	ret = rte_service_lcore_start(sw_adptr_slcore);
	TEST_ASSERT(ret == 0 || ret == -EALREADY,
			"Failed to start service core");

	/* map services to it */
	TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
			sw_adptr_slcore, 1),
			"Failed to map adapter service");

	/* set services to running */
	TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
			"Failed to start event timer adapter service");

	return TEST_SUCCESS;
}

/* Port configuration callback passed to rte_event_timer_adapter_create_ext()
 * when the event device lacks the internal port capability: it reconfigures
 * the device with one additional event port for the adapter to produce
 * events on.
 */
static int
test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		void *conf_arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	uint32_t started;
	static int port_allocated;
	static uint8_t port_id;
	int ret;

	if (port_allocated) {
		*event_port_id = port_id;
		return 0;
	}

	RTE_SET_USED(id);

	ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
			&started);
	if (ret < 0)
		return ret;

	if (started)
		rte_event_dev_stop(event_dev_id);

	ret = rte_event_dev_info_get(evdev, &info);
	if (ret < 0)
		return ret;

	devconf_set_default_sane_values(&dev_conf, &info);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports++;

	ret = rte_event_dev_configure(event_dev_id, &dev_conf);
	if (ret < 0) {
		if (started)
			rte_event_dev_start(event_dev_id);
		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(event_dev_id, port_id,
				port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
	if (ret < 0)
		return ret;

	*event_port_id = port_id;

	if (started)
		rte_event_dev_start(event_dev_id);

	/* Reuse this port number next time this is called */
	port_allocated = 1;

	return 0;
}

static int
_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns, uint64_t flags)
{
	struct rte_event_timer_adapter_info info;
	struct rte_event_timer_adapter_conf config = {
		.event_dev_id = evdev,
		.timer_adapter_id = TEST_ADAPTER_ID,
		.timer_tick_ns = bkt_tck_ns,
		.max_tmo_ns = max_tmo_ns,
		.nb_timers = MAX_TIMERS * 10,
		.flags = flags,
	};
	uint32_t caps = 0;
	const char *pool_name = "timdev_test_pool";

	global_bkt_tck_ns = bkt_tck_ns;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
			"failed to get adapter capabilities");

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC &&
	    !(caps & RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC)) {
		printf("Adapter does not support periodic timers\n");
		return TEST_SKIPPED;
	}

	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		timdev = rte_event_timer_adapter_create_ext(&config,
				test_port_conf_cb,
				NULL);
		setup_adapter_service(timdev);
		using_services = true;
	} else
		timdev = rte_event_timer_adapter_create(&config);

	TEST_ASSERT_NOT_NULL(timdev,
			"failed to create event timer adapter");

	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
			"failed to start event timer adapter");

	/* Create event timer mempool */
	eventdev_test_mempool = rte_mempool_create(pool_name,
			MAX_TIMERS * 2,
			sizeof(struct rte_event_timer), /* element size */
			0, /* cache size */
			0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	rte_event_timer_adapter_get_info(timdev, &info);

	global_info_bkt_tck_ns = info.min_resolution_ns;

	return TEST_SUCCESS;
}

static int
timdev_setup_usec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	return using_services ?
		/* Max timeout is 10,000us and bucket interval is 100us */
		_timdev_setup(1E7, 1E5, flags) :
		/* Max timeout is 100us and bucket interval is 1us */
		_timdev_setup(1E5, 1E3, flags);
}

static int
timdev_setup_usec_multicore(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	return using_services ?
		/* Max timeout is 10,000us and bucket interval is 100us */
		_timdev_setup(1E7, 1E5, flags) :
		/* Max timeout is 100us and bucket interval is 1us */
		_timdev_setup(1E5, 1E3, flags);
}

static int
timdev_setup_msec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 3 mins, and bucket interval is 100 ms */
	return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10, flags);
}

static int
timdev_setup_msec_periodic(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
			 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;

	/* Periodic mode with 100 ms resolution */
	return _timdev_setup(0, NSECPERSEC / 10, flags);
}

static int
timdev_setup_sec(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 100sec and bucket interval is 1sec */
	return _timdev_setup(1E11, 1E9, flags);
}

static int
timdev_setup_sec_periodic(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
			 RTE_EVENT_TIMER_ADAPTER_F_PERIODIC;

	/* Periodic mode with 1 sec resolution */
	return _timdev_setup(0, NSECPERSEC, flags);
}

static int
timdev_setup_sec_multicore(void)
{
	uint64_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Max timeout is 100sec and bucket interval is 1sec */
	return _timdev_setup(1E11, 1E9, flags);
}

static void
timdev_teardown(void)
{
	rte_event_timer_adapter_stop(timdev);
	rte_event_timer_adapter_free(timdev);

	rte_mempool_free(eventdev_test_mempool);
}

static inline int
test_timer_state(void)
{
	struct rte_event_timer *ev_tim;
	struct rte_event ev;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};


	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
	*ev_tim = tim;
	ev_tim->ev.event_ptr = ev_tim;
	ev_tim->timeout_ticks = CALC_TICKS(120);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
			"Armed timer exceeding max_timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(10);

	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_ARMED, ev_tim->state);

	if (!using_services)
		rte_delay_us(20);
	else
		rte_delay_us(1000 + 200);
	TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
			"Armed timer failed to trigger.");

	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	ev_tim->timeout_ticks = CALC_TICKS(90);
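	/* Re-arm with a 90-tick timeout (well under the maximum configured
	 * for this setup) so there is ample time to cancel it before it
	 * fires.
	 */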
	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
			"Failed to arm timer with proper timeout.");
	TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
			1, "Failed to cancel armed timer");
	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
			"Improper timer state set expected %d returned %d",
			RTE_EVENT_TIMER_CANCELED, ev_tim->state);

	rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);

	return TEST_SUCCESS;
}

static inline int
_arm_timers(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);
	}

	return TEST_SUCCESS;
}

static inline int
_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
		uint64_t cancel_count)
{
	uint8_t valid_event;
	uint64_t events = 0;
	uint64_t wait_start, max_wait;
	struct rte_event ev;

	max_wait = rte_get_timer_hz() * wait_sec;
	wait_start = rte_get_timer_cycles();
	while (1) {
		if (rte_get_timer_cycles() - wait_start > max_wait) {
			if (events + cancel_count != arm_count)
				TEST_ASSERT_SUCCESS(max_wait,
						"Max time limit for timers exceeded.");
			break;
		}

		valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (!valid_event)
			continue;

		rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
		events++;
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

static inline int
test_timer_arm_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
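	 * For example, with MAX_TIMERS = 4 * 1024 timers each firing every
	 * 100 ms, roughly 10 * 4096 = 40960 expiry events are expected over
	 * the 1 sec window (an estimate; the exact count depends on when the
	 * adapter ticks relative to the wait loop).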
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");
	return TEST_SUCCESS;
}

static int
_arm_wrapper(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_multicore(void)
{

	uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
	uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);

	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
	rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

#define MAX_BURST 16
static inline int
_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	int j;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);
	}

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_periodic(void)
{
	TEST_ASSERT_SUCCESS(_arm_timers_burst(1, MAX_TIMERS),
			"Failed to arm timers");
	/* With a resolution of 100ms and wait time of 1sec,
	 * there will be 10 * MAX_TIMERS periodic timer triggers.
	 */
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(1, 10 * MAX_TIMERS, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static int
_arm_wrapper_burst(void *arg)
{
	RTE_SET_USED(arg);

	TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
			"Failed to arm timers");

	return TEST_SUCCESS;
}

static inline int
test_timer_arm_burst_multicore(void)
{
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
	rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);

	rte_eal_mp_wait_lcore();
	TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_periodic(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(1),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel(void)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		rte_delay_us(100 + (i % 5000));

		TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
				&ev_tim, 1), 1,
				"Failed to cancel event timer %d", rte_errno);
		rte_mempool_put(eventdev_test_mempool, ev_tim);
	}


	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

static int
_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
{
	uint64_t i;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};

	for (i = 0; i < timers; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");

		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
				"Failed to arm event timer");

		while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
			;
	}

	return TEST_SUCCESS;
}

static int
_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
{

	uint64_t i;
	int j, ret;
	struct rte_event_timer *ev_tim[MAX_BURST];
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(timeout_tcks),
	};
	int arm_count = 0;

	for (i = 0; i < timers / MAX_BURST; i++) {
		TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
				eventdev_test_mempool,
				(void **)ev_tim, MAX_BURST),
				"mempool alloc failed");

		for (j = 0; j < MAX_BURST; j++) {
			*ev_tim[j] = tim;
			ev_tim[j]->ev.event_ptr = ev_tim[j];
		}

		TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
				ev_tim, tim.timeout_ticks, MAX_BURST),
				MAX_BURST, "Failed to arm timer %d", rte_errno);

		for (j = 0; j < MAX_BURST; j++)
			TEST_ASSERT_EQUAL(ev_tim[j]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[j]->state);

		ret = rte_ring_enqueue_bulk(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		TEST_ASSERT_EQUAL(ret, MAX_BURST,
				"Failed to enqueue event timers to ring");
		arm_count += ret;
	}

	TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
			"Failed to arm expected number of event timers");

	return TEST_SUCCESS;
}

static int
_cancel_producer_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer(20, MAX_TIMERS);
}

static int
_cancel_producer_burst_wrapper(void *args)
{
	RTE_SET_USED(args);

	return _cancel_producer_burst(100, MAX_TIMERS);
}

static int
_cancel_thread(void *args)
{
	RTE_SET_USED(args);
	struct rte_event_timer *ev_tim = NULL;
	uint64_t cancel_count = 0;
	uint16_t ret;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
			continue;

		ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
		TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
		rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
		cancel_count++;
	}

	return TEST_SUCCESS;
}

static int
_cancel_burst_thread(void *args)
{
	RTE_SET_USED(args);

	int ret, i, n;
	struct rte_event_timer *ev_tim[MAX_BURST];
	uint64_t cancel_count = 0;
	uint64_t dequeue_count = 0;

	while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
		n = rte_ring_dequeue_burst(timer_producer_ring,
				(void **)ev_tim, MAX_BURST, NULL);
		if (!n)
			continue;

		dequeue_count += n;

		for (i = 0; i < n; i++)
			TEST_ASSERT_EQUAL(ev_tim[i]->state,
					RTE_EVENT_TIMER_ARMED,
					"Event timer not armed, state = %d",
					ev_tim[i]->state);

		ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
		TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
				"event timers");
		rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
				RTE_MIN(ret, MAX_BURST));

		cancel_count += ret;
	}

	TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
			"Failed to cancel expected number of timers: "
			"expected = %d, cancel_count = %"PRIu64", "
			"dequeue_count = %"PRIu64"\n", MAX_TIMERS,
			cancel_count, dequeue_count);

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
	rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);

	rte_eal_wait_lcore(test_lcore1);
	rte_eal_wait_lcore(test_lcore2);
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore3);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
			MAX_TIMERS * 2),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_burst_multicore(void)
{
	arm_done = 0;
	timer_producer_ring = rte_ring_create("timer_cancel_queue",
			MAX_TIMERS * 2, rte_socket_id(), 0);
	TEST_ASSERT_NOT_NULL(timer_producer_ring,
			"Unable to reserve memory for ring");

	rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
	rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
			test_lcore1);

	rte_eal_wait_lcore(test_lcore1);
	arm_done = 1;
	rte_eal_wait_lcore(test_lcore2);
	rte_ring_free(timer_producer_ring);

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			MAX_TIMERS),
			"Timer triggered count doesn't match arm count");

	return TEST_SUCCESS;
}

static inline int
test_timer_cancel_random(void)
{
	uint64_t i;
	uint64_t events_canceled = 0;
	struct rte_event_timer *ev_tim;
	const struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(20),
	};

	for (i = 0; i < MAX_TIMERS; i++) {

		TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
				(void **)&ev_tim),
				"mempool alloc failed");
		*ev_tim = tim;
		ev_tim->ev.event_ptr = ev_tim;

		TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
				1), 1, "Failed to arm timer %d",
				rte_errno);

		if (rte_rand() & 1) {
			rte_delay_us(100 + (i % 5000));
			TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
					timdev,
					&ev_tim, 1), 1,
					"Failed to cancel event timer %d",
					rte_errno);
			rte_mempool_put(eventdev_test_mempool, ev_tim);
			events_canceled++;
		}
	}

	TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
			events_canceled),
			"Timer triggered count doesn't match arm, cancel count");

	return TEST_SUCCESS;
}

/* Check that the adapter can be created correctly */
static int
adapter_create(void)
{
	int adapter_id = 0;
	struct rte_event_timer_adapter *adapter, *adapter2;

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev + 1, // invalid event dev id
		.timer_adapter_id = adapter_id,
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	uint32_t caps = 0;

	/* Test invalid conf */
	adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
			"event device id");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
			"invalid event device id");

	/* Test valid conf */
	conf.event_dev_id = evdev;
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
			"failed to get adapter capabilities");
	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
		adapter = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb,
				NULL);
	else
		adapter = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
			"configuration");

	/* Test existing id */
	adapter2 = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
	TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
			"id");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
			"Failed to free adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}


/* Test that adapter can be freed correctly. */
static int
adapter_free(void)
{
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free valid adapter");

	/* Test free of already freed adapter */
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed adapter that was already freed");

	/* Test free of null adapter */
	timdev = NULL;
	TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
			"Freed null adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test that adapter info can be retrieved and is correct. */
static int
adapter_get_info(void)
{
	struct rte_event_timer_adapter_info info;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
			"Failed to get adapter info");

	if (using_services)
		TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
				"Expected port id = 1, got port id = %d",
				info.event_dev_port_id);

	return TEST_SUCCESS;
}

/* Test adapter lookup via adapter ID.
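 * The adapter created by this case's timdev_setup_msec() fixture uses
 * TEST_ADAPTER_ID, so the lookup below is expected to return it.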
 */
static int
adapter_lookup(void)
{
	struct rte_event_timer_adapter *adapter;

	adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
	TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");

	return TEST_SUCCESS;
}

static int
adapter_start(void)
{
	TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10,
			RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES),
			"Failed to start adapter");
	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), -EALREADY,
			"Timer adapter started without call to stop.");

	return TEST_SUCCESS;
}

/* Test that adapter stops correctly. */
static int
adapter_stop(void)
{
	struct rte_event_timer_adapter *l_adapter = NULL;

	/* Test adapter stop */
	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
			"Failed to stop event adapter");

	TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
			"Erroneously stopped null event adapter");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
			"Failed to free adapter");

	rte_mempool_free(eventdev_test_mempool);

	return TEST_SUCCESS;
}

/* Test increment and reset of ev_enq_count stat */
static int
stat_inc_reset_ev_enq(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	struct rte_event_timer_adapter_stats stats;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
			"startup");

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	rte_delay_ms(1000);

#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	/* Check stats again
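	 * now that every expiry event has been dequeued: ev_enq_count is
	 * expected to have reached num_evtims.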
	 */
	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
			"Expected enqueue stat = %d; got %d", num_evtims,
			(int)stats.ev_enq_count);

	/* Reset and check again */
	ret = rte_event_timer_adapter_stats_reset(timdev);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");

	ret = rte_event_timer_adapter_stats_get(timdev, &stats);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
	TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
			"Expected enqueue stat = %d; got %d", 0,
			(int)stats.ev_enq_count);

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}

/* Test various cases in arming timers */
static int
event_timer_arm(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	/* Test single timer arm succeeds */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
			"in incorrect state");

	/* Test arm of armed timer fails */
	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
			"rte_event_timer_arm_burst: 0, got: %d", ret);
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after arming already armed timer");

	/* Let timer expire */
	rte_delay_ms(1000);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
			"events from event device");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* This test checks that repeated references to the same event timer in the
 * arm request work as expected; only the first one through should succeed.
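 * The duplicate reference should be reported via rte_errno = EALREADY,
 * while the timer itself still expires exactly once.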
 */
static int
event_timer_arm_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;

	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
	TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
			"rte_event_timer_arm_burst");
	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-arm");

	/* Let timer expire */
	rte_delay_ms(600);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
			"expected: 1, actual: %d", n);

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Test the timer expiry event is generated at the expected time. */
static int
event_timer_arm_expiry(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event_timer *evtim2 = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up an event timer */
	*evtim = init_tim;
	evtim->timeout_ticks = CALC_TICKS(30);	// expire in 3 secs
	evtim->ev.event_ptr = evtim;

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
			"timer in incorrect state");

	rte_delay_ms(2999);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");

	/* Delay 100 ms to account for the adapter tick window - should let us
	 * dequeue one event
	 */
	rte_delay_ms(100);

	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
			"expiry events", n);
	TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
			"Dequeued unexpected type of event");

	/* Check that we recover the original event timer and then free it */
	evtim2 = evs[0].event_ptr;
	TEST_ASSERT_EQUAL(evtim, evtim2,
"Failed to recover pointer to original event timer"); 1421 rte_mempool_put(eventdev_test_mempool, evtim2); 1422 1423 return TEST_SUCCESS; 1424 } 1425 1426 /* Check that rearming a timer works as expected. */ 1427 static int 1428 event_timer_arm_rearm(void) 1429 { 1430 uint16_t n; 1431 int ret; 1432 struct rte_event_timer *evtim = NULL; 1433 struct rte_event_timer *evtim2 = NULL; 1434 struct rte_event evs[BATCH_SIZE]; 1435 const struct rte_event_timer init_tim = { 1436 .ev.op = RTE_EVENT_OP_NEW, 1437 .ev.queue_id = TEST_QUEUE_ID, 1438 .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, 1439 .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1440 .ev.event_type = RTE_EVENT_TYPE_TIMER, 1441 .state = RTE_EVENT_TIMER_NOT_ARMED, 1442 }; 1443 1444 rte_mempool_get(eventdev_test_mempool, (void **)&evtim); 1445 if (evtim == NULL) { 1446 /* Failed to get an event timer object */ 1447 return TEST_FAILED; 1448 } 1449 1450 /* Set up a timer */ 1451 *evtim = init_tim; 1452 evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec 1453 evtim->ev.event_ptr = evtim; 1454 1455 /* Arm it */ 1456 ret = rte_event_timer_arm_burst(timdev, &evtim, 1); 1457 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1458 rte_strerror(rte_errno)); 1459 1460 /* Add 100ms to account for the adapter tick window */ 1461 rte_delay_ms(100 + 100); 1462 1463 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1464 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1465 "events from event device"); 1466 1467 /* Recover the timer through the event that was dequeued. */ 1468 evtim2 = evs[0].event_ptr; 1469 TEST_ASSERT_EQUAL(evtim, evtim2, 1470 "Failed to recover pointer to original event timer"); 1471 1472 /* Need to reset state in case implementation can't do it */ 1473 evtim2->state = RTE_EVENT_TIMER_NOT_ARMED; 1474 1475 /* Rearm it */ 1476 ret = rte_event_timer_arm_burst(timdev, &evtim2, 1); 1477 TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", 1478 rte_strerror(rte_errno)); 1479 1480 /* Add 100ms to account for the adapter tick window */ 1481 rte_delay_ms(100 + 100); 1482 1483 n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); 1484 TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " 1485 "events from event device"); 1486 1487 /* Free it */ 1488 evtim2 = evs[0].event_ptr; 1489 TEST_ASSERT_EQUAL(evtim, evtim2, 1490 "Failed to recover pointer to original event timer"); 1491 rte_mempool_put(eventdev_test_mempool, evtim2); 1492 1493 return TEST_SUCCESS; 1494 } 1495 1496 /* Check that the adapter handles the max specified number of timers as 1497 * expected. 
 */
static int
event_timer_arm_max(void)
{
	int ret, i, n;
	int num_evtims = MAX_TIMERS;
	struct rte_event_timer *evtims[num_evtims];
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);
	TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
			ret);

	for (i = 0; i < num_evtims; i++) {
		*evtims[i] = init_tim;
		evtims[i]->ev.event_ptr = evtims[i];
	}

	/* Test with the max value for the adapter */
	ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
	TEST_ASSERT_EQUAL(ret, num_evtims,
			"Failed to arm all event timers: attempted = %d, "
			"succeeded = %d, rte_errno = %s",
			num_evtims, ret, rte_strerror(rte_errno));

	rte_delay_ms(1000);

#define MAX_TRIES num_evtims
	int sum = 0;
	int tries = 0;
	bool done = false;
	while (!done) {
		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
				RTE_DIM(evs), 10);
		if (sum >= num_evtims || ++tries >= MAX_TRIES)
			done = true;

		rte_delay_ms(10);
	}

	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
			"got %d", num_evtims, sum);

	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");

	rte_delay_ms(100);

	/* Make sure the eventdev is still empty */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
			10);

	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
			"events from event device");

	rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
			num_evtims);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with incorrect event sched type fails. */
static int
event_timer_arm_invalid_sched_type(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	if (!using_services)
		return -ENOTSUP;

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"sched type, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid sched type");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that creating an event timer with a timeout value that is too small or
 * too big fails.
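 * With the msec setup used for this case (100 ms tick, 3 min max timeout),
 * the valid range of timeout_ticks is 1..1800, so 0 should be rejected as
 * TOOEARLY and CALC_TICKS(1801) as TOOLATE.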
 */
static int
event_timer_arm_invalid_timeout(void)
{
	int ret;
	struct rte_event_timer *evtim = NULL;
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = 0; // timeout too small

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
			"Unexpected event timer state");

	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(1801); // timeout too big

	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
			"timeout, but didn't");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
			" arm fail with invalid timeout");
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
			"Unexpected event timer state");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

static int
event_timer_cancel(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Check that cancelling an uninited timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"uninited timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling uninited timer");

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	/* Check that cancelling an inited but unarmed timer fails */
	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
			"unarmed timer");
	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
			"cancelling unarmed timer");

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"evtim in incorrect state");

	/* Delay 1 sec */
	rte_delay_ms(1000);

	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
			"evtim in incorrect state");

	rte_delay_ms(3000);

	/* Make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

static int
event_timer_cancel_double(void)
{
	uint16_t n;
	int ret;
	struct rte_event_timer_adapter *adapter = timdev;
	struct rte_event_timer *evtim = NULL;
	struct rte_event evs[BATCH_SIZE];
	const struct rte_event_timer init_tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = TEST_QUEUE_ID,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
	};

	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
	if (evtim == NULL) {
		/* Failed to get an event timer object */
		return TEST_FAILED;
	}

	/* Set up a timer */
	*evtim = init_tim;
	evtim->ev.event_ptr = evtim;
	evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec

	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
			rte_strerror(rte_errno));
	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
			"timer in unexpected state");

	/* Now, test that referencing the same timer twice in the same call
	 * fails
	 */
	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
			RTE_DIM(evtim_arr));

	/* Two requests to cancel same timer, only one should succeed */
	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
			"twice");

	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
			"after double-cancel: rte_errno = %d", rte_errno);

	rte_delay_ms(3000);

	/* Still make sure that no expiry event was generated */
	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");

	rte_mempool_put(eventdev_test_mempool, evtim);

	return TEST_SUCCESS;
}

/* Check that event timer adapter tick resolution works as expected by testing
 * the number of adapter ticks that occur within a particular time interval.
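 * With the 100 ms tick used by the msec setup, a 1 second delay should
 * produce about 10 ticks; 10-12 are accepted to allow for service-core
 * scheduling jitter.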
 */
static int
adapter_tick_resolution(void)
{
	struct rte_event_timer_adapter_stats stats;
	uint64_t adapter_tick_count;

	/* Only run this test in the software driver case */
	if (!using_services)
		return -ENOTSUP;

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
			"Failed to reset stats");

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");
	TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
			"not zeroed out");

	/* Delay 1 second; should let at least 10 ticks occur with the default
	 * adapter configuration used by this test.
	 */
	rte_delay_ms(1000);

	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
			&stats), "Failed to get adapter stats");

	adapter_tick_count = stats.adapter_tick_count;
	TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
			"Expected 10-12 adapter ticks, got %"PRIu64"\n",
			adapter_tick_count);

	return TEST_SUCCESS;
}

static int
adapter_create_max(void)
{
	int i;
	uint32_t svc_start_count, svc_end_count;
	struct rte_event_timer_adapter *adapters[
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];

	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev,
		// timer_adapter_id set in loop
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = NSECPERSEC / 10,
		.max_tmo_ns = 180 * NSECPERSEC,
		.nb_timers = MAX_TIMERS,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};

	if (!using_services)
		return -ENOTSUP;

	svc_start_count = rte_service_get_count();

	/* This test expects that there are sufficient service IDs available
	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
	 * (the SW event device, for example).
	 */
	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
		conf.timer_adapter_id = i;
		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
				test_port_conf_cb, NULL);
		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
				"%d", i);
	}

	conf.timer_adapter_id = i;
	adapters[i] = rte_event_timer_adapter_create(&conf);
	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");

	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
	 * have been created
	 */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
			"Failed to create expected number of services");

	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
				"Failed to free adapter %d", i);

	/* Check that service count is back to where it was at start */
	svc_end_count = rte_service_get_count();
	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
			"correct number of services");

	return TEST_SUCCESS;
}

static struct unit_test_suite event_timer_adptr_functional_testsuite = {
	.suite_name = "event timer functional test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_state),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_periodic),
		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
				test_timer_arm_burst),
		TEST_CASE_ST(timdev_setup_msec_periodic, timdev_teardown,
				test_timer_arm_burst_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel),
		TEST_CASE_ST(timdev_setup_sec_periodic, timdev_teardown,
				test_timer_cancel_periodic),
		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
				test_timer_cancel_random),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_multicore),
		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
				test_timer_arm_burst_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_multicore),
		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
				test_timer_cancel_burst_multicore),
		TEST_CASE(adapter_create),
		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_get_info),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_lookup),
		TEST_CASE_ST(NULL, timdev_teardown,
				adapter_start),
		TEST_CASE_ST(timdev_setup_msec, NULL,
				adapter_stop),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				stat_inc_reset_ev_enq),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_expiry),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_rearm),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_max),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_sched_type),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_arm_invalid_timeout),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				event_timer_cancel_double),
		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
				adapter_tick_resolution),
		TEST_CASE(adapter_create_max),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_timer_adapter_func(void)
{
	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
}

REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
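
/*
 * Usage note (illustrative only, not part of the test logic): the suite
 * registers itself as the "event_timer_adapter_test" command of the
 * dpdk-test application, e.g.
 *
 *   dpdk-test -l 0-5 --vdev=event_sw0
 *   RTE>> event_timer_adapter_test
 *
 * At least 4 lcores are required (6 when the sw eventdev and sw timer
 * adapter services are used); see testsuite_setup(). The exact command
 * line above is an assumption and depends on the platform and build.
 */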