/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 Intel Corporation.
 * Copyright 2012 Hasan Alayli <[email protected]>
 */
/**
 *  @file lthread_api.h
 *
 *  @warning
 *  @b EXPERIMENTAL: this API may change without prior notice
 *
 *  This file contains the public API for the L-thread subsystem.
 *
 *  The L-thread subsystem provides a simple cooperative scheduler to
 *  enable arbitrary functions to run as cooperative threads within a
 *  single P-thread.
 *
 * The subsystem provides a P-thread like API that is intended to assist in
 * the reuse of legacy code written for POSIX pthreads.
 *
 * The L-thread subsystem relies on cooperative multitasking, and as such
 * an L-thread must possess frequent rescheduling points. Often these
 * rescheduling points are provided transparently when the application
 * invokes an L-thread API.
 *
 * In some applications the program may enter a loop whose exit condition
 * depends on the action of another thread or a response from hardware. In
 * such a case it is necessary to yield the thread periodically in the loop
 * body, to allow other threads an opportunity to run. This can be done by
 * inserting a call to lthread_yield() or lthread_sleep(n) in the body of
 * the loop.
 *
 * If the application makes expensive / blocking system calls or does other
 * work that would take an inordinate amount of time to complete, this will
 * stall the cooperative scheduler, resulting in very poor performance.
 *
 * In such cases an L-thread can be migrated temporarily to another scheduler
 * running in a different P-thread on another core. When the expensive or
 * blocking operation is completed it can be migrated back to the original
 * scheduler. In this way other threads can continue to run on the original
 * scheduler and will be completely unaffected by the blocking behaviour.
 * To migrate an L-thread to another scheduler the API lthread_set_affinity()
 * is provided.
 *
 * If L-threads that share data are running on the same core it is possible
 * to design programs where mutual exclusion mechanisms to protect shared data
 * can be avoided. This is because cooperative threads cannot preempt each
 * other.
 *
 * There are two cases where mutual exclusion mechanisms are necessary:
 *
 *  a) Where the L-threads sharing data are running on different cores.
 *  b) Where code must yield while updating data shared with another thread.
 *
 * The L-thread subsystem provides a set of mutex APIs to help with such
 * scenarios, however excessive reliance on these will impact performance
 * and is best avoided if possible.
 *
 * L-threads can synchronise using a fast condition variable implementation
 * that supports signal and broadcast. An L-thread running on any core can
 * wait on a condition.
 *
 * L-threads can have L-thread local storage with an API modelled on either
 * the P-thread get/set specific API or using PER_LTHREAD macros modelled on
 * the RTE_PER_LCORE macros. Alternatively a simple user data pointer may be
 * set and retrieved from a thread.
 */
#ifndef LTHREAD_H
#define LTHREAD_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_cycles.h>


struct lthread;
struct lthread_cond;
struct lthread_mutex;

struct lthread_condattr;
struct lthread_mutexattr;

typedef void *(*lthread_func_t) (void *);

/**
 * Define the size of stack for an lthread
 * This is the size that will be allocated on lthread creation.
 * It is a fixed size and will not grow.
 */
#define LTHREAD_MAX_STACK_SIZE (1024*64)

/**
 * Define the maximum number of TLS keys that can be created
 */
#define LTHREAD_MAX_KEYS 1024

/**
 * Define the maximum number of attempts to destroy an lthread's
 * TLS data on thread exit
 */
#define LTHREAD_DESTRUCTOR_ITERATIONS 4


/**
 * Define the maximum number of lcores that will support lthreads
 */
#define LTHREAD_MAX_LCORES RTE_MAX_LCORE

/**
 * How many lthread objects to pre-allocate as the system grows.
 * This applies to lthreads + stacks, TLS, mutexes and condition variables.
 *
 * @see _lthread_alloc()
 * @see _cond_alloc()
 * @see _mutex_alloc()
 *
 */
#define LTHREAD_PREALLOC 100

/**
 * Set the number of schedulers in the system.
 *
 * This function may optionally be called before starting schedulers.
 *
 * If the number of schedulers is not set, or set to 0, then each scheduler
 * will begin scheduling lthreads as soon as it is started.
 *
 * If the number of schedulers is set to greater than 0, then each scheduler
 * will wait until all schedulers have started before beginning to schedule
 * lthreads.
 *
 * If an application wishes to have threads migrate between cores using
 * lthread_set_affinity(), or join threads running on other cores using
 * lthread_join(), then it is prudent to set the number of schedulers to ensure
 * that all schedulers are initialised beforehand.
 *
 * @param num
 *  the number of schedulers in the system
 * @return
 *  the number of schedulers in the system
 */
int lthread_num_schedulers_set(int num);

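/*
 * Illustrative sketch (not part of this API): start one scheduler per
 * participating lcore. The entry point name sched_main() and the count of
 * four schedulers are hypothetical; an application would typically launch
 * this entry point on each lcore, for example with rte_eal_remote_launch().
 *
 *   static int sched_main(void *arg)
 *   {
 *       (void)arg;
 *       lthread_run();   // returns when all lthreads on this core have exited
 *       return 0;
 *   }
 *
 *   // In the initial thread, before launching sched_main() on each lcore:
 *   //     lthread_num_schedulers_set(4);
 */
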
/**
 * Return the number of schedulers currently running
 * @return
 *  the number of schedulers in the system
 */
int lthread_active_schedulers(void);

/**
  * Shutdown the specified scheduler
  *
  *  This function tells the specified scheduler to
  *  exit if/when there is no more work to do.
  *
  *  Note that although the scheduler will stop,
  *  resources are not freed.
  *
  * @param lcore
  *  The lcore of the scheduler to shut down
  *
  * @return
  *  none
  */
void lthread_scheduler_shutdown(unsigned lcore);

/**
  * Shutdown all schedulers
  *
  *  This function tells all schedulers, including the current scheduler, to
  *  exit if/when there is no more work to do.
  *
  *  Note that although the schedulers will stop,
  *  resources are not freed.
  *
  * @return
  *  none
  */
void lthread_scheduler_shutdown_all(void);

/**
  * Run the lthread scheduler
  *
  *  Runs the lthread scheduler.
  *  This function returns only if/when all lthreads have exited.
  *  This function must be the main loop of an EAL thread.
  *
  * @return
  *  none
  */
void lthread_run(void);

/**
  * Create an lthread
  *
  *  Creates an lthread and places it in the ready queue on a particular
  *  lcore.
  *
  *  If no scheduler exists yet on the current lcore then one is created.
  *
  * @param new_lt
  *  Pointer to an lthread pointer that will be initialized
  * @param lcore
  *  The lcore the thread should be started on:
  *    -1 the current lcore
  *    0 - LTHREAD_MAX_LCORES any other lcore
  * @param func
  *  Pointer to the function for the thread to run
  * @param arg
  *  Pointer to args that will be passed to the thread
  *
  * @return
  *  0       success
  *  EAGAIN  no resources available
  *  EINVAL  NULL thread or function pointer, or lcore_id out of range
  */
int
lthread_create(struct lthread **new_lt,
		int lcore, lthread_func_t func, void *arg);

/**
  * Cancel an lthread
  *
  *  Cancels an lthread and causes it to be terminated.
  *  If the lthread is detached it will be freed immediately;
  *  otherwise its resources will not be released until it is joined.
  *
  * @param lt
  *  Pointer to an lthread that will be cancelled
  *
  * @return
  *  0       success
  *  EINVAL  thread was NULL
  */
int lthread_cancel(struct lthread *lt);

/**
  * Join an lthread
  *
  *  Joins the current thread with the specified lthread, and waits for that
  *  thread to exit.
  *  Passes an optional pointer to collect returned data.
  *
  * @param lt
  *  Pointer to the lthread to be joined
  * @param ptr
  *  Pointer to a pointer to collect returned data
  *
  * @return
  *  0      success
  *  EINVAL lthread could not be joined
  */
int lthread_join(struct lthread *lt, void **ptr);

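/*
 * Illustrative sketch (not part of this API): from within an lthread, spawn
 * a worker on the current lcore and join it. The worker function and its
 * argument are hypothetical.
 *
 *   static void *worker(void *arg)
 *   {
 *       // ... cooperative work, yielding as appropriate ...
 *       return arg;
 *   }
 *
 *   static void spawn_and_join(void)
 *   {
 *       struct lthread *lt;
 *       void *ret;
 *
 *       if (lthread_create(&lt, -1, worker, NULL) == 0)
 *           lthread_join(lt, &ret);
 *   }
 */
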
/**
  * Detach an lthread
  *
  * Detaches the current thread.
  * On exit a detached lthread will be freed immediately and will not wait
  * to be joined. The default state for a thread is not detached.
  *
  * @return
  *  none
  */
void lthread_detach(void);

/**
  *  Exit an lthread
  *
  * Terminate the current thread, optionally returning data.
  * The data may be collected by lthread_join().
  *
  * After calling this function the lthread will be suspended until it is
  * joined. After it is joined its resources will be freed.
  *
  * @param val
  *  Pointer to the data to be returned
  *
  * @return
  *  none
  */
void lthread_exit(void *val);

/**
  * Cause the current lthread to sleep for n nanoseconds
  *
  * The current thread will be suspended until the specified time has elapsed
  * or has been exceeded.
  *
  * Execution will switch to the next lthread that is ready to run
  *
  * @param nsecs
  *  Number of nanoseconds to sleep
  *
  * @return
  *  none
  */
void lthread_sleep(uint64_t nsecs);

/**
  * Cause the current lthread to sleep for n CPU clock ticks
  *
  *  The current thread will be suspended until the specified time has elapsed
  *  or has been exceeded.
  *
  *  Execution will switch to the next lthread that is ready to run
  *
  * @param clks
  *  Number of clock ticks to sleep
  *
  * @return
  *  none
  */
void lthread_sleep_clks(uint64_t clks);

/**
  * Yield the current lthread
  *
  *  The current thread will yield and execution will switch to the
  *  next lthread that is ready to run
  *
  * @return
  *  none
  */
void lthread_yield(void);

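/*
 * Illustrative sketch (not part of this API): a polling loop that yields on
 * every iteration so that other lthreads on the same scheduler can run. The
 * completion flag is hypothetical.
 *
 *   static void *poller(void *arg)
 *   {
 *       volatile int *done = arg;
 *
 *       while (!*done) {
 *           // Give other lthreads a chance to run; lthread_sleep() could
 *           // be used instead to poll less aggressively.
 *           lthread_yield();
 *       }
 *       return NULL;
 *   }
 */
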
/**
  * Migrate the current thread to another scheduler
  *
  *  This function migrates the current thread to another scheduler.
  *  Execution will switch to the next lthread that is ready to run on the
  *  current scheduler. The current thread will be resumed on the new scheduler.
  *
  * @param lcore
  *  The lcore to migrate to
  *
  * @return
  *  0      success, the thread is now running on the specified core
  *  EINVAL the destination lcore was not valid
  */
int lthread_set_affinity(unsigned lcore);

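/*
 * Illustrative sketch (not part of this API): temporarily migrate to another
 * scheduler to perform a blocking operation, then migrate back so that the
 * original scheduler is not stalled. The destination lcore and the blocking
 * helper do_blocking_syscall() are hypothetical; rte_lcore_id() is the usual
 * DPDK call for the current lcore.
 *
 *   static void *blocking_work(void *arg)
 *   {
 *       unsigned home = rte_lcore_id();
 *
 *       if (lthread_set_affinity(5) == 0) {    // run the slow part elsewhere
 *           do_blocking_syscall(arg);          // hypothetical blocking call
 *           lthread_set_affinity(home);        // return to the home scheduler
 *       }
 *       return NULL;
 *   }
 */
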
/**
  * Return the current lthread
  *
  *  Returns the current lthread
  *
  * @return
  *  pointer to the current lthread
  */
struct lthread
*lthread_current(void);

/**
  * Associate user data with an lthread
  *
  *  This function sets a user data pointer in the current lthread.
  *  The pointer can be retrieved with lthread_get_data().
  *  It is the user's responsibility to allocate and free any data referenced
  *  by the user pointer.
  *
  * @param data
  *  pointer to user data
  *
  * @return
  *  none
  */
void lthread_set_data(void *data);

/**
  * Get user data for the current lthread
  *
  *  This function returns a user data pointer for the current lthread.
  *  The pointer must first be set with lthread_set_data().
  *  It is the user's responsibility to allocate and free any data referenced
  *  by the user pointer.
  *
  * @return
  *  pointer to user data
  */
void
*lthread_get_data(void);

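/*
 * Illustrative sketch (not part of this API): attach a per-lthread context
 * structure using the simple user data pointer. The context type and field
 * are hypothetical. Note that this mechanism must not be mixed with the
 * RTE_PER_LTHREAD macros defined further below.
 *
 *   struct my_ctx {
 *       uint64_t requests;
 *   };
 *
 *   static void *handler(void *arg)
 *   {
 *       struct my_ctx *ctx = arg;    // allocated and freed by the caller
 *
 *       lthread_set_data(ctx);
 *       // ... later, possibly deep in a call chain ...
 *       ((struct my_ctx *)lthread_get_data())->requests++;
 *       return NULL;
 *   }
 */
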
struct lthread_key;
typedef void (*tls_destructor_func) (void *);

/**
  * Create a key for lthread TLS
  *
  *  This function is modelled on pthread_key_create().
  *  It creates a thread-specific data key visible to all lthreads on the
  *  current scheduler.
  *
  *  Key values may be used to locate thread-specific data.
  *  The same key value may be used by different threads; the values bound
  *  to the key by lthread_setspecific() are maintained on a per-thread
  *  basis and persist for the life of the calling thread.
  *
  *  An optional destructor function may be associated with each key value.
  *  At thread exit, if a key value has a non-NULL destructor pointer, and the
  *  thread has a non-NULL value associated with the key, the function pointed
  *  to is called with the current associated value as its sole argument.
  *
  * @param key
  *   Pointer to the key to be created
  * @param destructor
  *   Pointer to destructor function
  *
  * @return
  *  0 success
  *  EINVAL the key ptr was NULL
  *  EAGAIN no resources available
  */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor);

/**
  * Delete key for lthread TLS
  *
  *  This function is modelled on pthread_key_delete().
  *  It deletes a thread-specific data key previously returned by
  *  lthread_key_create().
  *  The thread-specific data values associated with the key need not be NULL
  *  at the time that lthread_key_delete() is called.
  *  It is the responsibility of the application to free any application
  *  storage or perform any cleanup actions for data structures related to the
  *  deleted key. This cleanup can be done either before or after
  *  lthread_key_delete() is called.
  *
  * @param key
  *  The key to be deleted
  *
  * @return
  *  0 Success
  *  EINVAL the key was invalid
  */
int lthread_key_delete(unsigned int key);

/**
  * Get lthread TLS
  *
  *  This function is modelled on pthread_getspecific().
  *  It returns the value currently bound to the specified key on behalf of the
  *  calling thread. Calling lthread_getspecific() with a key value not
  *  obtained from lthread_key_create() or after the key has been deleted with
  *  lthread_key_delete() will result in undefined behaviour.
  *  lthread_getspecific() may be called from a thread-specific data destructor
  *  function.
  *
  * @param key
  *  The key for which data is requested
  *
  * @return
  *  Pointer to the thread specific data associated with that key
  *  or NULL if no data has been set.
  */
void
*lthread_getspecific(unsigned int key);

/**
  * Set lthread TLS
  *
  *  This function is modelled on pthread_setspecific().
  *  It associates a thread-specific value with a key obtained via a previous
  *  call to lthread_key_create().
  *  Different threads may bind different values to the same key. These values
  *  are typically pointers to dynamically allocated memory that have been
  *  reserved by the calling thread. Calling lthread_setspecific() with a key
  *  value not obtained from lthread_key_create() or after the key has been
  *  deleted with lthread_key_delete() will result in undefined behaviour.
  *
  * @param key
  *  The key for which data is to be set
  * @param value
  *  Pointer to the user data
  *
  * @return
  *  0 success
  *  EINVAL the key was invalid
  */
int lthread_setspecific(unsigned int key, const void *value);

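/*
 * Illustrative sketch (not part of this API): per-lthread TLS using the
 * key-based API. The key variable, the stored object and the use of free()
 * as the destructor are hypothetical; malloc()/free() require <stdlib.h>.
 *
 *   static unsigned int conn_key;
 *
 *   static void tls_setup(void)
 *   {
 *       // free() is invoked with the stored value when an lthread exits
 *       lthread_key_create(&conn_key, free);
 *   }
 *
 *   static void *session(void *arg)
 *   {
 *       lthread_setspecific(conn_key, malloc(64));
 *       // ... anywhere in this lthread ...
 *       void *state = lthread_getspecific(conn_key);
 *       (void)state;
 *       (void)arg;
 *       return NULL;
 *   }
 */
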
/**
 * The macros below provide an alternative mechanism to access lthread local
 * storage.
 *
 * The macros can be used to declare, define and access per lthread local
 * storage in a similar way to the RTE_PER_LCORE macros which control storage
 * local to an lcore.
 *
 * Memory for per lthread variables declared in this way is allocated when the
 * lthread is created and a pointer to this memory is stored in the lthread.
 * The per lthread variables are accessed via the pointer + the offset of the
 * particular variable.
 *
 * The total size of per lthread storage, and the variable offsets, are found
 * by defining the variables in a unique global memory section, the start and
 * end of which is known. This global memory section is used only in the
 * computation of the addresses of the lthread variables, and is never actually
 * used to store any data.
 *
 * Due to the fact that variables declared this way may be scattered across
 * many files, the start and end of the section and variable offsets are only
 * known after linking, thus the computation of section size and variable
 * addresses is performed at run time.
 *
 * These macros are primarily provided to aid porting of code that makes use
 * of the existing RTE_PER_LCORE macros. In principle it would be more efficient
 * to gather all lthread local variables into a single structure and
 * set/retrieve a pointer to that struct using the alternative
 * lthread_set_data()/lthread_get_data() APIs.
 *
 * These macros are mutually exclusive with the lthread_set_data()/
 * lthread_get_data() APIs. If you define storage using these macros then
 * lthread_set_data() does nothing, and lthread_get_data() returns the start
 * of the global section.
 *
 */
/* start and end of per lthread section */
extern char __start_per_lt;
extern char __stop_per_lt;


/**
 * Macro to define a per lthread variable "name" of type "type"
 */
#define RTE_DEFINE_PER_LTHREAD(type, name)                      \
__typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Macro to declare an extern per lthread variable "name" of type "type"
 */
#define RTE_DECLARE_PER_LTHREAD(type, name)                     \
extern __typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Read/write the per-lthread variable value
 */
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
((char *)lthread_get_data() +\
((char *) &per_lt_##name - &__start_per_lt)))

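/*
 * Illustrative sketch (not part of this API): declaring and using a
 * per-lthread counter with the macros above. The variable name is
 * hypothetical. RTE_PER_LTHREAD() evaluates to a pointer to the current
 * lthread's copy of the variable.
 *
 *   // in one source file
 *   RTE_DEFINE_PER_LTHREAD(uint64_t, packets);
 *
 *   // in any other file that uses it
 *   RTE_DECLARE_PER_LTHREAD(uint64_t, packets);
 *
 *   static void count_packet(void)
 *   {
 *       (*RTE_PER_LTHREAD(packets))++;
 *   }
 */
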
/**
  * Initialize a mutex
  *
  *  This function provides a mutual exclusion device, the need for which
  *  can normally be avoided in a cooperative multitasking environment.
  *  It is provided to aid porting of legacy code originally written for
  *  preemptive multitasking environments such as pthreads.
  *
  *  A mutex may be unlocked (not owned by any thread), or locked (owned by
  *  one thread).
  *
  *  A mutex can never be owned by more than one thread simultaneously.
  *  A thread attempting to lock a mutex that is already locked by another
  *  thread is suspended until the owning thread unlocks the mutex.
  *
  *  lthread_mutex_init() initializes the mutex object pointed to by mutex.
  *  Optional mutex attributes specified in mutexattr are reserved for future
  *  use and are currently ignored.
  *
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock() returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock() suspends the
  *  calling thread until the mutex is unlocked.
  *
  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
  *  except that it does not block the calling thread if the mutex is already
  *  locked by another thread.
  *
  *  lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
  *  to be locked and owned by the calling thread.
  *
  *  lthread_mutex_destroy() destroys a mutex object, freeing its resources.
  *  The mutex must be unlocked with nothing blocked on it before calling
  *  lthread_mutex_destroy().
  *
  * @param name
  *  Optional pointer to string describing the mutex
  * @param mutex
  *  Pointer to a pointer to the mutex to be initialized
  * @param attr
  *  Pointer to attribute - unused, reserved for future use
  *
  * @return
  *  0 success
  *  EINVAL mutex was not a valid pointer
  *  EAGAIN insufficient resources
  */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);

/**
  * Destroy a mutex
  *
  *  This function destroys the specified mutex, freeing its resources.
  *  The mutex must be unlocked before calling lthread_mutex_destroy().
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be destroyed
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EBUSY mutex was still in use
  */
int lthread_mutex_destroy(struct lthread_mutex *mutex);

/**
  * Lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock() returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock() suspends the
  *  calling thread until the mutex is unlocked.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EDEADLOCK the mutex was already owned by the calling thread
  */
int lthread_mutex_lock(struct lthread_mutex *mutex);

/**
  * Try to lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
  *  except that it does not block the calling thread if the mutex is already
  *  locked by another thread.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EBUSY the mutex was already locked by another thread
  */
int lthread_mutex_trylock(struct lthread_mutex *mutex);

/**
  * Unlock a mutex
  *
  * This function attempts to unlock the specified mutex. The mutex is assumed
  * to be locked and owned by the calling thread.
  *
  * The oldest of any threads blocked on the mutex is made ready and may
  * compete with any other running thread to gain the mutex; if it fails it
  * will be blocked again.
  *
  * @param mutex
  *  Pointer to the mutex to be unlocked
  *
  * @return
  *  0 mutex was unlocked
  *  EINVAL mutex was not an initialized mutex
  *  EPERM the mutex was not owned by the calling thread
  */
int lthread_mutex_unlock(struct lthread_mutex *mutex);

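/*
 * Illustrative sketch (not part of this API): protecting a counter shared
 * with lthreads running on other cores. The mutex pointer and counter are
 * hypothetical; on a single scheduler the lock would normally be
 * unnecessary because lthreads cannot preempt each other.
 *
 *   static struct lthread_mutex *counter_lock;
 *   static uint64_t counter;
 *
 *   static void counter_setup(void)
 *   {
 *       lthread_mutex_init("counter", &counter_lock, NULL);
 *   }
 *
 *   static void counter_bump(void)
 *   {
 *       lthread_mutex_lock(counter_lock);
 *       counter++;
 *       lthread_mutex_unlock(counter_lock);
 *   }
 */
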
/**
  * Initialize a condition variable
  *
  *  This function initializes a condition variable.
  *
  *  Condition variables can be used to communicate changes in the state of
  *  data shared between threads.
  *
  * @see lthread_cond_wait()
  *
  * @param name
  *  Pointer to optional string describing the condition variable
  * @param c
  *  Pointer to pointer to the condition variable to be initialized
  * @param attr
  *  Pointer to optional attribute reserved for future use, currently ignored
  *
  * @return
  *  0 success
  *  EINVAL cond was not a valid pointer
  *  EAGAIN insufficient resources
  */
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);

/**
  * Destroy a condition variable
  *
  *  This function destroys a condition variable that was created with
  *  lthread_cond_init() and releases its resources.
  *
  * @param cond
  *  Pointer to the condition variable to be destroyed
  *
  * @return
  *  0 Success
  *  EBUSY condition variable was still in use
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_destroy(struct lthread_cond *cond);

/**
  * Wait on a condition variable
  *
  *  The function blocks the current thread waiting on the condition variable
  *  specified by cond. The waiting thread unblocks only after another thread
  *  calls lthread_cond_signal(), or lthread_cond_broadcast(), specifying the
  *  same condition variable.
  *
  * @param c
  *  Pointer to the condition variable to be waited on
  *
  * @param reserved
  *  reserved for future use
  *
  * @return
  *  0 The condition was signalled (success)
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);

/**
  * Signal a condition variable
  *
  *  The function unblocks one thread waiting for the condition variable cond.
  *  If no threads are waiting on cond, the lthread_cond_signal() function
  *  has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled (success)
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_signal(struct lthread_cond *c);

/**
  * Broadcast a condition variable
  *
  *  The function unblocks all threads waiting for the condition variable cond.
  *  If no threads are waiting on cond, the lthread_cond_broadcast()
  *  function has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled (success)
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_broadcast(struct lthread_cond *c);

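/*
 * Illustrative sketch (not part of this API): one lthread waits for a
 * condition that another lthread signals. The condition variable, flag and
 * function names are hypothetical, and ready_cond is assumed to have been
 * created with lthread_cond_init(). As with pthreads, the waiter re-checks
 * its predicate after waking.
 *
 *   static struct lthread_cond *ready_cond;
 *   static int ready;
 *
 *   static void *waiter(void *arg)
 *   {
 *       while (!ready)
 *           lthread_cond_wait(ready_cond, 0);
 *       // ... proceed now that readiness has been signalled ...
 *       (void)arg;
 *       return NULL;
 *   }
 *
 *   static void *producer(void *arg)
 *   {
 *       ready = 1;
 *       lthread_cond_signal(ready_cond);
 *       (void)arg;
 *       return NULL;
 *   }
 */
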
#ifdef __cplusplus
}
#endif

#endif				/* LTHREAD_H */