/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 Intel Corporation.
 * Copyright 2012 Hasan Alayli <[email protected]>
 */
/**
 * @file lthread_api.h
 *
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * This file contains the public API for the L-thread subsystem.
 *
 * The L-thread subsystem provides a simple cooperative scheduler to
 * enable arbitrary functions to run as cooperative threads within a
 * single P-thread.
 *
 * The subsystem provides a P-thread like API that is intended to assist in
 * reuse of legacy code written for POSIX pthreads.
 *
 * The L-thread subsystem relies on cooperative multitasking, as such
 * an L-thread must possess frequent rescheduling points. Often these
 * rescheduling points are provided transparently when the application
 * invokes an L-thread API.
 *
 * In some applications a program may enter a loop whose exit condition
 * depends on the action of another thread or a response from hardware.
 * In such a case it is necessary to yield the thread periodically in the
 * loop body, to allow other threads an opportunity to run. This can be
 * done by inserting a call to lthread_yield() or lthread_sleep(n) in the
 * body of the loop.
 *
 * If the application makes expensive / blocking system calls or does other
 * work that would take an inordinate amount of time to complete, this will
 * stall the cooperative scheduler resulting in very poor performance.
 *
 * In such cases an L-thread can be migrated temporarily to another scheduler
 * running in a different P-thread on another core. When the expensive or
 * blocking operation is completed it can be migrated back to the original
 * scheduler. In this way other threads can continue to run on the original
 * scheduler and will be completely unaffected by the blocking behaviour.
 * To migrate an L-thread to another scheduler the API lthread_set_affinity()
 * is provided.
 *
 * If L-threads that share data are running on the same core it is possible
 * to design programs where mutual exclusion mechanisms to protect shared data
 * can be avoided. This is due to the fact that the cooperative threads cannot
 * preempt each other.
 *
 * There are two cases where mutual exclusion mechanisms are necessary:
 *
 *  a) Where the L-threads sharing data are running on different cores.
 *  b) Where code must yield while updating data shared with another thread.
 *
 * The L-thread subsystem provides a set of mutex APIs to help with such
 * scenarios, however excessive reliance on these will impact performance
 * and is best avoided if possible.
 *
 * L-threads can synchronise using a fast condition variable implementation
 * that supports signal and broadcast. An L-thread running on any core can
 * wait on a condition.
 *
 * L-threads can have L-thread local storage with an API modelled on either
 * the P-thread get/set specific API or using PER_LTHREAD macros modelled on
 * the RTE_PER_LCORE macros. Alternatively a simple user data pointer may be
 * set and retrieved from a thread.
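 *
 * For example, the simple user data pointer could be used as follows. This
 * is an illustrative sketch only; struct app_ctx is a hypothetical
 * application structure, not part of this API:
 *
 * @code
 *   struct app_ctx *ctx = calloc(1, sizeof(*ctx));
 *
 *   lthread_set_data(ctx);                    // attach to current lthread
 *   // ... later, in the same lthread ...
 *   struct app_ctx *c = lthread_get_data();   // retrieve it
 *   free(c);                                  // application owns the memory
 * @endcode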
 */
#ifndef LTHREAD_H
#define LTHREAD_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_cycles.h>


struct lthread;
struct lthread_cond;
struct lthread_mutex;

struct lthread_condattr;
struct lthread_mutexattr;

typedef void *(*lthread_func_t) (void *);

/*
 * Define the size of stack for an lthread
 * This is the size that will be allocated on lthread creation
 * This is a fixed size and will not grow.
 */
#define LTHREAD_MAX_STACK_SIZE (1024*64)

/**
 * Define the maximum number of TLS keys that can be created
 *
 */
#define LTHREAD_MAX_KEYS 1024

/**
 * Define the maximum number of attempts to destroy an lthread's
 * TLS data on thread exit
 */
#define LTHREAD_DESTRUCTOR_ITERATIONS 4


/**
 * Define the maximum number of lcores that will support lthreads
 */
#define LTHREAD_MAX_LCORES RTE_MAX_LCORE

/**
 * How many lthread objects to pre-allocate as the system grows;
 * applies to lthreads + stacks, TLS, mutexes, cond vars.
 *
 * @see _lthread_alloc()
 * @see _cond_alloc()
 * @see _mutex_alloc()
 *
 */
#define LTHREAD_PREALLOC 100

/**
 * Set the number of schedulers in the system.
 *
 * This function may optionally be called before starting schedulers.
 *
 * If the number of schedulers is not set, or set to 0, then each scheduler
 * will begin scheduling lthreads immediately after it is started.
 *
 * If the number of schedulers is set to greater than 0, then each scheduler
 * will wait until all schedulers have started before beginning to schedule
 * lthreads.
 *
 * If an application wishes to have threads migrate between cores using
 * lthread_set_affinity(), or join threads running on other cores using
 * lthread_join(), then it is prudent to set the number of schedulers to
 * ensure that all schedulers are initialised beforehand.
 *
 * @param num
 *  the number of schedulers in the system
 * @return
 *  the number of schedulers in the system
 */
int lthread_num_schedulers_set(int num);

/**
 * Return the number of schedulers currently running
 * @return
 *  the number of schedulers in the system
 */
int lthread_active_schedulers(void);

/**
 * Shutdown the specified scheduler
 *
 * This function tells the specified scheduler to
 * exit if/when there is no more work to do.
 *
 * Note that although the scheduler will stop,
 * its resources are not freed.
 *
 * @param lcore
 *  The lcore of the scheduler to shutdown
 *
 * @return
 *  none
 */
void lthread_scheduler_shutdown(unsigned lcore);

/**
 * Shutdown all schedulers
 *
 * This function tells all schedulers including the current scheduler to
 * exit if/when there is no more work to do.
 *
 * Note that although the schedulers will stop,
 * their resources are not freed.
 *
 * @return
 *  none
 */
void lthread_scheduler_shutdown_all(void);

/**
 * Run the lthread scheduler
 *
 * Runs the lthread scheduler.
 * This function returns only if/when all lthreads have exited.
 * This function must be the main loop of an EAL thread.
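 *
 * A minimal launch sketch is shown below for illustration. hello() and
 * sched_main() are hypothetical application functions, not part of this API:
 *
 * @code
 *   static void *hello(void *arg)
 *   {
 *       printf("hello from an lthread\n");
 *       return arg;
 *   }
 *
 *   static int sched_main(void *arg)
 *   {
 *       struct lthread *lt;
 *
 *       (void)arg;
 *       lthread_create(&lt, -1, hello, NULL);  // seed the local scheduler
 *       lthread_run();                         // returns when all lthreads exit
 *       return 0;
 *   }
 * @endcode
 *
 * sched_main() would typically be launched on each participating lcore, for
 * example with rte_eal_remote_launch(), after optionally calling
 * lthread_num_schedulers_set().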
 *
 * @return
 *  none
 */

void lthread_run(void);

/**
 * Create an lthread
 *
 * Creates an lthread and places it in the ready queue on a particular
 * lcore.
 *
 * If no scheduler exists yet on the current lcore then one is created.
 *
 * @param new_lt
 *  Pointer to an lthread pointer that will be initialized
 * @param lcore
 *  The lcore the thread should be started on:
 *   -1 the current lcore
 *   0 to LTHREAD_MAX_LCORES any other lcore
 * @param func
 *  Pointer to the function for the thread to run
 * @param arg
 *  Pointer to args that will be passed to the thread
 *
 * @return
 *  0 success
 *  EAGAIN no resources available
 *  EINVAL NULL thread or function pointer, or lcore_id out of range
 */
int
lthread_create(struct lthread **new_lt,
	       int lcore, lthread_func_t func, void *arg);

/**
 * Cancel an lthread
 *
 * Cancels an lthread and causes it to be terminated.
 * If the lthread is detached it will be freed immediately,
 * otherwise its resources will not be released until it is joined.
 *
 * @param lt
 *  Pointer to an lthread that will be cancelled
 *
 * @return
 *  0 success
 *  EINVAL thread was NULL
 */
int lthread_cancel(struct lthread *lt);

/**
 * Join an lthread
 *
 * Joins the current thread with the specified lthread, and waits for that
 * thread to exit.
 * Passes an optional pointer to collect returned data.
 *
 * @param lt
 *  Pointer to the lthread to be joined
 * @param ptr
 *  Pointer to pointer to collect returned data
 *
 * @return
 *  0 success
 *  EINVAL lthread could not be joined.
 */
int lthread_join(struct lthread *lt, void **ptr);

/**
 * Detach an lthread
 *
 * Detaches the current thread.
 * On exit a detached lthread will be freed immediately and will not wait
 * to be joined. The default state for a thread is not detached.
 *
 * @return
 *  none
 */
void lthread_detach(void);

/**
 * Exit an lthread
 *
 * Terminate the current thread, optionally return data.
 * The data may be collected by lthread_join().
 *
 * After calling this function the lthread will be suspended until it is
 * joined. After it is joined, its resources will be freed.
 *
 * @param val
 *  Pointer to data to be returned
 *
 * @return
 *  none
 */
void lthread_exit(void *val);

/**
 * Cause the current lthread to sleep for n nanoseconds
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
 *
 * Execution will switch to the next lthread that is ready to run
 *
 * @param nsecs
 *  Number of nanoseconds to sleep
 *
 * @return
 *  none
 */
void lthread_sleep(uint64_t nsecs);

/**
 * Cause the current lthread to sleep for n cpu clock ticks
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
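 *
 * The tick count for a given wall-clock interval can be derived from the
 * TSC frequency reported by rte_get_tsc_hz(). An illustrative sketch,
 * assuming the TSC is the timer source in use:
 *
 * @code
 *   // sleep for roughly 10 microseconds
 *   uint64_t clks = (rte_get_tsc_hz() * 10) / 1000000;
 *
 *   lthread_sleep_clks(clks);
 * @endcode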
 *
 * Execution will switch to the next lthread that is ready to run
 *
 * @param clks
 *  Number of clock ticks to sleep
 *
 * @return
 *  none
 */
void lthread_sleep_clks(uint64_t clks);

/**
 * Yield the current lthread
 *
 * The current thread will yield and execution will switch to the
 * next lthread that is ready to run
 *
 * @return
 *  none
 */
void lthread_yield(void);

/**
 * Migrate the current thread to another scheduler
 *
 * This function migrates the current thread to another scheduler.
 * Execution will switch to the next lthread that is ready to run on the
 * current scheduler. The current thread will be resumed on the new scheduler.
 *
 * @param lcore
 *  The lcore to migrate to
 *
 * @return
 *  0 success, the thread is now running on the specified lcore
 *  EINVAL the destination lcore was not valid
 */
int lthread_set_affinity(unsigned lcore);

/**
 * Return the current lthread
 *
 * Returns the current lthread
 *
 * @return
 *  pointer to the current lthread
 */
struct lthread
*lthread_current(void);

/**
 * Associate user data with an lthread
 *
 * This function sets a user data pointer in the current lthread.
 * The pointer can be retrieved with lthread_get_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @param data
 *  pointer to user data
 *
 * @return
 *  none
 */
void lthread_set_data(void *data);

/**
 * Get user data for the current lthread
 *
 * This function returns a user data pointer for the current lthread.
 * The pointer must first be set with lthread_set_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @return
 *  pointer to user data
 */
void
*lthread_get_data(void);

struct lthread_key;
typedef void (*tls_destructor_func) (void *);

/**
 * Create a key for lthread TLS
 *
 * This function is modelled on pthread_key_create().
 * It creates a thread-specific data key visible to all lthreads on the
 * current scheduler.
 *
 * Key values may be used to locate thread-specific data.
 * The same key value may be used by different threads, the values bound
 * to the key by lthread_setspecific() are maintained on a per-thread
 * basis and persist for the life of the calling thread.
 *
 * An optional destructor function may be associated with each key value.
 * At thread exit, if a key value has a non-NULL destructor pointer, and the
 * thread has a non-NULL value associated with the key, the function pointed
 * to is called with the current associated value as its sole argument.
 *
 * @param key
 *  Pointer to the key to be created
 * @param destructor
 *  Pointer to destructor function
 *
 * @return
 *  0 success
 *  EINVAL the key ptr was NULL
 *  EAGAIN no resources available
 */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor);

/**
 * Delete key for lthread TLS
 *
 * This function is modelled on pthread_key_delete().
 * It deletes a thread-specific data key previously returned by
 * lthread_key_create().
 * The thread-specific data values associated with the key need not be NULL
 * at the time that lthread_key_delete is called.
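 *
 * A typical key lifecycle is sketched below for illustration; free_ctx()
 * and struct foo are hypothetical application code, not part of this API:
 *
 * @code
 *   unsigned int key;
 *
 *   lthread_key_create(&key, free_ctx);     // destructor runs at thread exit
 *
 *   // inside an lthread:
 *   lthread_setspecific(key, malloc(sizeof(struct foo)));
 *   struct foo *f = lthread_getspecific(key);
 *
 *   // once no thread needs the key any more:
 *   lthread_key_delete(key);
 * @endcode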
 * It is the responsibility of the application to free any application
 * storage or perform any cleanup actions for data structures related to the
 * deleted key. This cleanup can be done either before or after
 * lthread_key_delete is called.
 *
 * @param key
 *  The key to be deleted
 *
 * @return
 *  0 Success
 *  EINVAL the key was invalid
 */
int lthread_key_delete(unsigned int key);

/**
 * Get lthread TLS
 *
 * This function is modelled on pthread_getspecific().
 * It returns the value currently bound to the specified key on behalf of the
 * calling thread. Calling lthread_getspecific() with a key value not
 * obtained from lthread_key_create() or after the key has been deleted with
 * lthread_key_delete() will result in undefined behaviour.
 * lthread_getspecific() may be called from a thread-specific data destructor
 * function.
 *
 * @param key
 *  The key for which data is requested
 *
 * @return
 *  Pointer to the thread specific data associated with that key
 *  or NULL if no data has been set.
 */
void
*lthread_getspecific(unsigned int key);

/**
 * Set lthread TLS
 *
 * This function is modelled on pthread_setspecific().
 * It associates a thread-specific value with a key obtained via a previous
 * call to lthread_key_create().
 * Different threads may bind different values to the same key. These values
 * are typically pointers to dynamically allocated memory that have been
 * reserved by the calling thread. Calling lthread_setspecific() with a key
 * value not obtained from lthread_key_create() or after the key has been
 * deleted with lthread_key_delete() will result in undefined behaviour.
 *
 * @param key
 *  The key for which data is to be set
 * @param value
 *  Pointer to the user data
 *
 * @return
 *  0 success
 *  EINVAL the key was invalid
 */

int lthread_setspecific(unsigned int key, const void *value);

/**
 * The macros below provide an alternative mechanism to access lthread local
 * storage.
 *
 * The macros can be used to declare, define and access per lthread local
 * storage in a similar way to the RTE_PER_LCORE macros which control storage
 * local to an lcore.
 *
 * Memory for per lthread variables declared in this way is allocated when the
 * lthread is created and a pointer to this memory is stored in the lthread.
 * The per lthread variables are accessed via the pointer + the offset of the
 * particular variable.
 *
 * The total size of per lthread storage, and the variable offsets are found
 * by defining the variables in a unique global memory section, the start and
 * end of which is known. This global memory section is used only in the
 * computation of the addresses of the lthread variables, and is never
 * actually used to store any data.
 *
 * Due to the fact that variables declared this way may be scattered across
 * many files, the start and end of the section and variable offsets are only
 * known after linking, thus the computation of section size and variable
 * addresses is performed at run time.
 *
 * These macros are primarily provided to aid porting of code that makes use
 * of the existing RTE_PER_LCORE macros.
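 *
 * For example, a per lthread counter could be defined and used as follows.
 * This is an illustrative sketch; pkt_count is a hypothetical variable, and
 * note that RTE_PER_LTHREAD() evaluates to a pointer to the variable:
 *
 * @code
 *   RTE_DEFINE_PER_LTHREAD(uint64_t, pkt_count);
 *
 *   // inside an lthread:
 *   (*RTE_PER_LTHREAD(pkt_count))++;
 * @endcode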
 * In principle it would be more efficient
 * to gather all lthread local variables into a single structure and
 * set/retrieve a pointer to that struct using the alternative
 * lthread_set_data()/lthread_get_data() APIs.
 *
 * These macros are mutually exclusive with the lthread_set_data()/
 * lthread_get_data() APIs.
 * If you define storage using these macros then lthread_set_data() and
 * lthread_get_data() will not perform as expected: lthread_set_data() does
 * nothing, and lthread_get_data() returns the start of the global section.
 *
 */
/* start and end of per lthread section */
extern char __start_per_lt;
extern char __stop_per_lt;


/**
 * Macro to define a per lthread variable "name" of type "type"
 */
#define RTE_DEFINE_PER_LTHREAD(type, name) \
__typeof__(type) __attribute((section("per_lt"))) per_lt_##name

/**
 * Macro to declare an extern per lthread variable "name" of type "type"
 */
#define RTE_DECLARE_PER_LTHREAD(type, name) \
extern __typeof__(type) __attribute((section("per_lt"))) per_lt_##name

/**
 * Read/write the per-lthread variable value
 */
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
((char *)lthread_get_data() +\
((char *) &per_lt_##name - &__start_per_lt)))

/**
 * Initialize a mutex
 *
 * This function provides a mutual exclusion device, the need for which
 * can normally be avoided in a cooperative multitasking environment.
 * It is provided to aid porting of legacy code originally written for
 * preemptive multitasking environments such as pthreads.
 *
 * A mutex may be unlocked (not owned by any thread), or locked (owned by
 * one thread).
 *
 * A mutex can never be owned by more than one thread simultaneously.
 * A thread attempting to lock a mutex that is already locked by another
 * thread is suspended until the owning thread unlocks the mutex.
 *
 * lthread_mutex_init() initializes the mutex object pointed to by mutex.
 * Optional mutex attributes specified in mutexattr are reserved for future
 * use and are currently ignored.
 *
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock suspends the calling
 * thread until the mutex is unlocked.
 *
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
 * except that it does not block the calling thread if the mutex is already
 * locked by another thread.
 *
 * lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * lthread_mutex_destroy() destroys a mutex object, freeing its resources.
 * The mutex must be unlocked with nothing blocked on it before calling
 * lthread_mutex_destroy().
 *
 * @param name
 *  Optional pointer to string describing the mutex
 * @param mutex
 *  Pointer to pointer to the mutex to be initialized
 * @param attr
 *  Pointer to attribute - unused, reserved for future use
 *
 * @return
 *  0 success
 *  EINVAL mutex was not a valid pointer
 *  EAGAIN insufficient resources
 */

int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);

/**
 * Destroy a mutex
 *
 * This function destroys the specified mutex freeing its resources.
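 *
 * For reference, a complete mutex lifecycle is sketched below (illustrative
 * only; error handling is omitted):
 *
 * @code
 *   struct lthread_mutex *m;
 *
 *   lthread_mutex_init("counters", &m, NULL);
 *
 *   lthread_mutex_lock(m);      // may suspend the calling lthread
 *   // ... update data shared with lthreads on other cores ...
 *   lthread_mutex_unlock(m);
 *
 *   lthread_mutex_destroy(m);   // must be unlocked at this point
 * @endcode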
 * The mutex must be unlocked before calling lthread_mutex_destroy().
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be destroyed
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EBUSY mutex was still in use
 */
int lthread_mutex_destroy(struct lthread_mutex *mutex);

/**
 * Lock a mutex
 *
 * This function attempts to lock a mutex.
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock suspends the calling
 * thread until the mutex is unlocked.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be locked
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EDEADLOCK the mutex was already owned by the calling thread
 */

int lthread_mutex_lock(struct lthread_mutex *mutex);

/**
 * Try to lock a mutex
 *
 * This function attempts to lock a mutex.
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
 * except that it does not block the calling thread if the mutex is already
 * locked by another thread.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be locked
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EBUSY the mutex was already locked by another thread
 */
int lthread_mutex_trylock(struct lthread_mutex *mutex);

/**
 * Unlock a mutex
 *
 * This function attempts to unlock the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * The oldest of any threads blocked on the mutex is made ready and may
 * compete with any other running thread to gain the mutex; if it fails it
 * will be blocked again.
 *
 * @param mutex
 *  Pointer to the mutex to be unlocked
 *
 * @return
 *  0 mutex was unlocked
 *  EINVAL mutex was not an initialized mutex
 *  EPERM the mutex was not owned by the calling thread
 */

int lthread_mutex_unlock(struct lthread_mutex *mutex);

/**
 * Initialize a condition variable
 *
 * This function initializes a condition variable.
 *
 * Condition variables can be used to communicate changes in the state of data
 * shared between threads.
 *
 * @see lthread_cond_wait()
 *
 * @param name
 *  Pointer to optional string describing the condition variable
 * @param c
 *  Pointer to pointer to the condition variable to be initialized
 * @param attr
 *  Pointer to optional attribute reserved for future use, currently ignored
 *
 * @return
 *  0 success
 *  EINVAL cond was not a valid pointer
 *  EAGAIN insufficient resources
 */
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);

/**
 * Destroy a condition variable
 *
 * This function destroys a condition variable that was created with
 * lthread_cond_init() and releases its resources.
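 *
 * For reference, a typical wait/signal pattern is sketched below. This is
 * illustrative only; the "ready" flag is hypothetical application state
 * shared by lthreads on the same scheduler:
 *
 * @code
 *   struct lthread_cond *cond;
 *   int ready;                        // hypothetical shared state
 *
 *   lthread_cond_init(NULL, &cond, NULL);
 *
 *   // consumer lthread:
 *   while (!ready)
 *       lthread_cond_wait(cond, 0);   // yields until signalled
 *
 *   // producer lthread:
 *   ready = 1;
 *   lthread_cond_signal(cond);
 *
 *   lthread_cond_destroy(cond);       // once no thread is waiting
 * @endcode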
 *
 * @param cond
 *  Pointer to the condition variable to be destroyed
 *
 * @return
 *  0 Success
 *  EBUSY condition variable was still in use
 *  EINVAL was not an initialised condition variable
 */
int lthread_cond_destroy(struct lthread_cond *cond);

/**
 * Wait on a condition variable
 *
 * The function blocks the current thread waiting on the condition variable
 * specified by cond. The waiting thread unblocks only after another thread
 * calls lthread_cond_signal(), or lthread_cond_broadcast(), specifying the
 * same condition variable.
 *
 * @param c
 *  Pointer to the condition variable to be waited on
 *
 * @param reserved
 *  reserved for future use
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL was not an initialised condition variable
 */
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);

/**
 * Signal a condition variable
 *
 * The function unblocks one thread waiting for the condition variable cond.
 * If no threads are waiting on cond, the lthread_cond_signal() function
 * has no effect.
 *
 * @param c
 *  Pointer to the condition variable to be signalled
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL was not an initialised condition variable
 */
int lthread_cond_signal(struct lthread_cond *c);

/**
 * Broadcast a condition variable
 *
 * The function unblocks all threads waiting for the condition variable cond.
 * If no threads are waiting on cond, the lthread_cond_broadcast()
 * function has no effect.
 *
 * @param c
 *  Pointer to the condition variable to be signalled
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL was not an initialised condition variable
 */
int lthread_cond_broadcast(struct lthread_cond *c);

#ifdef __cplusplus
}
#endif

#endif /* LTHREAD_H */