1a9643ea8Slogwang /*-
2a9643ea8Slogwang  *   BSD LICENSE
3a9643ea8Slogwang  *
4a9643ea8Slogwang  *   Copyright(c) 2015 Intel Corporation. All rights reserved.
5a9643ea8Slogwang  *   All rights reserved.
6a9643ea8Slogwang  *
7a9643ea8Slogwang  *   Redistribution and use in source and binary forms, with or without
8a9643ea8Slogwang  *   modification, are permitted provided that the following conditions
9a9643ea8Slogwang  *   are met:
10a9643ea8Slogwang  *
11a9643ea8Slogwang  *     * Redistributions of source code must retain the above copyright
12a9643ea8Slogwang  *       notice, this list of conditions and the following disclaimer.
13a9643ea8Slogwang  *     * Redistributions in binary form must reproduce the above copyright
14a9643ea8Slogwang  *       notice, this list of conditions and the following disclaimer in
15a9643ea8Slogwang  *       the documentation and/or other materials provided with the
16a9643ea8Slogwang  *       distribution.
17a9643ea8Slogwang  *     * Neither the name of Intel Corporation nor the names of its
18a9643ea8Slogwang  *       contributors may be used to endorse or promote products derived
19a9643ea8Slogwang  *       from this software without specific prior written permission.
20a9643ea8Slogwang  *
21a9643ea8Slogwang  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22a9643ea8Slogwang  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23a9643ea8Slogwang  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24a9643ea8Slogwang  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25a9643ea8Slogwang  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26a9643ea8Slogwang  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27a9643ea8Slogwang  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28a9643ea8Slogwang  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29a9643ea8Slogwang  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30a9643ea8Slogwang  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31a9643ea8Slogwang  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32a9643ea8Slogwang  */
33a9643ea8Slogwang 
34a9643ea8Slogwang /*
35a9643ea8Slogwang  * Some portions of this software may have been derived from the
36a9643ea8Slogwang  * https://github.com/halayli/lthread which carries the following license.
37a9643ea8Slogwang  *
38a9643ea8Slogwang  * Copyright (C) 2012, Hasan Alayli <[email protected]>
39a9643ea8Slogwang  *
40a9643ea8Slogwang  * Redistribution and use in source and binary forms, with or without
41a9643ea8Slogwang  * modification, are permitted provided that the following conditions
42a9643ea8Slogwang  * are met:
43a9643ea8Slogwang  * 1. Redistributions of source code must retain the above copyright
44a9643ea8Slogwang  *    notice, this list of conditions and the following disclaimer.
45a9643ea8Slogwang  * 2. Redistributions in binary form must reproduce the above copyright
46a9643ea8Slogwang  *    notice, this list of conditions and the following disclaimer in the
47a9643ea8Slogwang  *    documentation and/or other materials provided with the distribution.
48a9643ea8Slogwang  *
49a9643ea8Slogwang  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50a9643ea8Slogwang  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51a9643ea8Slogwang  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52a9643ea8Slogwang  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
53a9643ea8Slogwang  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54a9643ea8Slogwang  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55a9643ea8Slogwang  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56a9643ea8Slogwang  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57a9643ea8Slogwang  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58a9643ea8Slogwang  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59a9643ea8Slogwang  * SUCH DAMAGE.
60a9643ea8Slogwang  */
61a9643ea8Slogwang 
62a9643ea8Slogwang /**
63a9643ea8Slogwang  *  @file lthread_api.h
64a9643ea8Slogwang  *
65a9643ea8Slogwang  *  @warning
66a9643ea8Slogwang  *  @b EXPERIMENTAL: this API may change without prior notice
67a9643ea8Slogwang  *
68a9643ea8Slogwang  *  This file contains the public API for the L-thread subsystem
69a9643ea8Slogwang  *
70a9643ea8Slogwang  *  The L_thread subsystem provides a simple cooperative scheduler to
71a9643ea8Slogwang  *  enable arbitrary functions to run as cooperative threads within a
72a9643ea8Slogwang  * single P-thread.
73a9643ea8Slogwang  *
74a9643ea8Slogwang  * The subsystem provides a P-thread like API that is intended to assist in
75a9643ea8Slogwang  * reuse of legacy code written for POSIX p_threads.
76a9643ea8Slogwang  *
77a9643ea8Slogwang  * The L-thread subsystem relies on cooperative multitasking, as such
78a9643ea8Slogwang  * an L-thread must possess frequent rescheduling points. Often these
79a9643ea8Slogwang  * rescheduling points are provided transparently when the application
80a9643ea8Slogwang  * invokes an L-thread API.
81a9643ea8Slogwang  *
82a9643ea8Slogwang  * In some applications it is possible that the program may enter a loop the
83a9643ea8Slogwang  * exit condition for which depends on the action of another thread or a
84a9643ea8Slogwang  * response from hardware. In such a case it is necessary to yield the thread
85a9643ea8Slogwang  * periodically in the loop body, to allow other threads an opportunity to
86a9643ea8Slogwang  * run. This can be done by inserting a call to lthread_yield() or
87a9643ea8Slogwang  * lthread_sleep(n) in the body of the loop.
88a9643ea8Slogwang  *
89a9643ea8Slogwang  * If the application makes expensive / blocking system calls or does other
90a9643ea8Slogwang  * work that would take an inordinate amount of time to complete, this will
91a9643ea8Slogwang  * stall the cooperative scheduler resulting in very poor performance.
92a9643ea8Slogwang  *
93a9643ea8Slogwang  * In such cases an L-thread can be migrated temporarily to another scheduler
94a9643ea8Slogwang  * running in a different P-thread on another core. When the expensive or
95a9643ea8Slogwang  * blocking operation is completed it can be migrated back to the original
96a9643ea8Slogwang  * scheduler.  In this way other threads can continue to run on the original
97a9643ea8Slogwang  * scheduler and will be completely unaffected by the blocking behaviour.
98a9643ea8Slogwang  * To migrate an L-thread to another scheduler the API lthread_set_affinity()
99a9643ea8Slogwang  * is provided.
100a9643ea8Slogwang  *
101a9643ea8Slogwang  * If L-threads that share data are running on the same core it is possible
102a9643ea8Slogwang  * to design programs where mutual exclusion mechanisms to protect shared data
103a9643ea8Slogwang  * can be avoided. This is due to the fact that the cooperative threads cannot
104a9643ea8Slogwang  * preempt each other.
105a9643ea8Slogwang  *
106a9643ea8Slogwang  * There are two cases where mutual exclusion mechanisms are necessary.
107a9643ea8Slogwang  *
108a9643ea8Slogwang  *  a) Where the L-threads sharing data are running on different cores.
109a9643ea8Slogwang  *  b) Where code must yield while updating data shared with another thread.
110a9643ea8Slogwang  *
111a9643ea8Slogwang  * The L-thread subsystem provides a set of mutex APIs to help with such
112a9643ea8Slogwang  * scenarios, however excessive reliance on these will impact performance
113a9643ea8Slogwang  * and is best avoided if possible.
114a9643ea8Slogwang  *
115a9643ea8Slogwang  * L-threads can synchronise using a fast condition variable implementation
116a9643ea8Slogwang  * that supports signal and broadcast. An L-thread running on any core can
117a9643ea8Slogwang  * wait on a condition.
118a9643ea8Slogwang  *
119a9643ea8Slogwang  * L-threads can have L-thread local storage with an API modelled on either the
120a9643ea8Slogwang  * P-thread get/set specific API or using PER_LTHREAD macros modelled on the
121a9643ea8Slogwang  * RTE_PER_LCORE macros. Alternatively a simple user data pointer may be set
122a9643ea8Slogwang  * and retrieved from a thread.
123a9643ea8Slogwang  */
124a9643ea8Slogwang #ifndef LTHREAD_H
125a9643ea8Slogwang #define LTHREAD_H
126a9643ea8Slogwang 
127*2bfe3f2eSlogwang #ifdef __cplusplus
128*2bfe3f2eSlogwang extern "C" {
129*2bfe3f2eSlogwang #endif
130*2bfe3f2eSlogwang 
131a9643ea8Slogwang #include <stdint.h>
132a9643ea8Slogwang #include <sys/socket.h>
133a9643ea8Slogwang #include <fcntl.h>
134a9643ea8Slogwang #include <netinet/in.h>
135a9643ea8Slogwang 
136a9643ea8Slogwang #include <rte_cycles.h>
137a9643ea8Slogwang 
138a9643ea8Slogwang 
139a9643ea8Slogwang struct lthread;
140a9643ea8Slogwang struct lthread_cond;
141a9643ea8Slogwang struct lthread_mutex;
142a9643ea8Slogwang 
143a9643ea8Slogwang struct lthread_condattr;
144a9643ea8Slogwang struct lthread_mutexattr;
145a9643ea8Slogwang 
146a9643ea8Slogwang typedef void (*lthread_func_t) (void *);
147a9643ea8Slogwang 
148a9643ea8Slogwang /*
149a9643ea8Slogwang  * Define the size of stack for an lthread
150a9643ea8Slogwang  * Then this is the size that will be allocated on lthread creation
151a9643ea8Slogwang  * This is a fixed size and will not grow.
152a9643ea8Slogwang  */
153a9643ea8Slogwang #define LTHREAD_MAX_STACK_SIZE (1024*64)
154a9643ea8Slogwang 
155a9643ea8Slogwang /**
156a9643ea8Slogwang  * Define the maximum number of TLS keys that can be created
157a9643ea8Slogwang  *
158a9643ea8Slogwang  */
159a9643ea8Slogwang #define LTHREAD_MAX_KEYS 1024
160a9643ea8Slogwang 
161a9643ea8Slogwang /**
162a9643ea8Slogwang  * Define the maximum number of attempts to destroy an lthread's
163a9643ea8Slogwang  * TLS data on thread exit
164a9643ea8Slogwang  */
165a9643ea8Slogwang #define LTHREAD_DESTRUCTOR_ITERATIONS 4
166a9643ea8Slogwang 
167a9643ea8Slogwang 
168a9643ea8Slogwang /**
169a9643ea8Slogwang  * Define the maximum number of lcores that will support lthreads
170a9643ea8Slogwang  */
171a9643ea8Slogwang #define LTHREAD_MAX_LCORES RTE_MAX_LCORE
172a9643ea8Slogwang 
173a9643ea8Slogwang /**
174a9643ea8Slogwang  * How many lthread objects to pre-allocate as the system grows
175a9643ea8Slogwang  * applies to lthreads + stacks, TLS, mutexs, cond vars.
176a9643ea8Slogwang  *
177a9643ea8Slogwang  * @see _lthread_alloc()
178a9643ea8Slogwang  * @see _cond_alloc()
179a9643ea8Slogwang  * @see _mutex_alloc()
180a9643ea8Slogwang  *
181a9643ea8Slogwang  */
182a9643ea8Slogwang #define LTHREAD_PREALLOC 100
183a9643ea8Slogwang 
184a9643ea8Slogwang /**
185a9643ea8Slogwang  * Set the number of schedulers in the system.
186a9643ea8Slogwang  *
187a9643ea8Slogwang  * This function may optionally be called before starting schedulers.
188a9643ea8Slogwang  *
189a9643ea8Slogwang  * If the number of schedulers is not set, or set to 0 then each scheduler
190a9643ea8Slogwang  * will begin scheduling lthreads as soon as it is started.
191a9643ea8Slogwang  *
192a9643ea8Slogwang  * If the number of schedulers is set to greater than 0, then each scheduler
193a9643ea8Slogwang  * will wait until all schedulers have started before beginning to schedule
194a9643ea8Slogwang  * lthreads.
195a9643ea8Slogwang  *
196a9643ea8Slogwang  * If an application wishes to have threads migrate between cores using
197a9643ea8Slogwang  * lthread_set_affinity(), or join threads running on other cores using
198a9643ea8Slogwang  * lthread_join(), then it is prudent to set the number of schedulers to ensure
199a9643ea8Slogwang  * that all schedulers are initialised beforehand.
200a9643ea8Slogwang  *
201a9643ea8Slogwang  * @param num
202a9643ea8Slogwang  *  the number of schedulers in the system
203a9643ea8Slogwang  * @return
204a9643ea8Slogwang  * the number of schedulers in the system
205a9643ea8Slogwang  */
206a9643ea8Slogwang int lthread_num_schedulers_set(int num);
207a9643ea8Slogwang 
208a9643ea8Slogwang /**
209a9643ea8Slogwang  * Return the number of schedulers currently running
210a9643ea8Slogwang  * @return
211a9643ea8Slogwang  *  the number of schedulers in the system
212a9643ea8Slogwang  */
213a9643ea8Slogwang int lthread_active_schedulers(void);
214a9643ea8Slogwang 
215a9643ea8Slogwang /**
216a9643ea8Slogwang   * Shutdown the specified scheduler
217a9643ea8Slogwang   *
218a9643ea8Slogwang   *  This function tells the specified scheduler to
219a9643ea8Slogwang   *  exit if/when there is no more work to do.
220a9643ea8Slogwang   *
221a9643ea8Slogwang   *  Note that although the scheduler will stop
222a9643ea8Slogwang   *  resources are not freed.
223a9643ea8Slogwang   *
224a9643ea8Slogwang   * @param lcore
225a9643ea8Slogwang   *	The lcore of the scheduler to shutdown
226a9643ea8Slogwang   *
227a9643ea8Slogwang   * @return
228a9643ea8Slogwang   *  none
229a9643ea8Slogwang   */
230a9643ea8Slogwang void lthread_scheduler_shutdown(unsigned lcore);
231a9643ea8Slogwang 
232a9643ea8Slogwang /**
233a9643ea8Slogwang   * Shutdown all schedulers
234a9643ea8Slogwang   *
235a9643ea8Slogwang   *  This function tells all schedulers  including the current scheduler to
236a9643ea8Slogwang   *  exit if/when there is no more work to do.
237a9643ea8Slogwang   *
238a9643ea8Slogwang   *  Note that although the schedulers will stop
239a9643ea8Slogwang   *  resources are not freed.
240a9643ea8Slogwang   *
241a9643ea8Slogwang   * @return
242a9643ea8Slogwang   *  none
243a9643ea8Slogwang   */
244a9643ea8Slogwang void lthread_scheduler_shutdown_all(void);
245a9643ea8Slogwang 
246a9643ea8Slogwang /**
247a9643ea8Slogwang   * Run the lthread scheduler
248a9643ea8Slogwang   *
249a9643ea8Slogwang   *  Runs the lthread scheduler.
250a9643ea8Slogwang   *  This function returns only if/when all lthreads have exited.
251a9643ea8Slogwang   *  This function must be the main loop of an EAL thread.
252a9643ea8Slogwang   *
253a9643ea8Slogwang   * @return
254a9643ea8Slogwang   *	 none
255a9643ea8Slogwang   */
256a9643ea8Slogwang 
257a9643ea8Slogwang void lthread_run(void);
258a9643ea8Slogwang 
259a9643ea8Slogwang /**
260a9643ea8Slogwang   * Create an lthread
261a9643ea8Slogwang   *
262a9643ea8Slogwang   *  Creates an lthread and places it in the ready queue on a particular
263a9643ea8Slogwang   *  lcore.
264a9643ea8Slogwang   *
265a9643ea8Slogwang   *  If no scheduler exists yet on the current lcore then one is created.
266a9643ea8Slogwang   *
267a9643ea8Slogwang   * @param new_lt
268a9643ea8Slogwang   *  Pointer to an lthread pointer that will be initialized
269a9643ea8Slogwang   * @param lcore
270a9643ea8Slogwang   *  the lcore the thread should be started on or the current lcore
271a9643ea8Slogwang   *    -1 the current lcore
272a9643ea8Slogwang   *    0 - LTHREAD_MAX_LCORES any other lcore
273a9643ea8Slogwang   * @param lthread_func
274a9643ea8Slogwang   *  Pointer to the function the for the thread to run
275a9643ea8Slogwang   * @param arg
276a9643ea8Slogwang   *  Pointer to args that will be passed to the thread
277a9643ea8Slogwang   *
278a9643ea8Slogwang   * @return
279a9643ea8Slogwang   *	 0    success
280a9643ea8Slogwang   *	 EAGAIN  no resources available
281a9643ea8Slogwang   *	 EINVAL  NULL thread or function pointer, or lcore_id out of range
282a9643ea8Slogwang   */
283a9643ea8Slogwang int
284a9643ea8Slogwang lthread_create(struct lthread **new_lt,
285a9643ea8Slogwang 		int lcore, lthread_func_t func, void *arg);
286a9643ea8Slogwang 
287a9643ea8Slogwang /**
288a9643ea8Slogwang   * Cancel an lthread
289a9643ea8Slogwang   *
290a9643ea8Slogwang   *  Cancels an lthread and causes it to be terminated
291a9643ea8Slogwang   *  If the lthread is detached it will be freed immediately
292a9643ea8Slogwang   *  otherwise its resources will not be released until it is joined.
293a9643ea8Slogwang   *
294a9643ea8Slogwang   * @param lt
295a9643ea8Slogwang   *  Pointer to an lthread that will be cancelled
296a9643ea8Slogwang   *
297a9643ea8Slogwang   * @return
298a9643ea8Slogwang   *	 0    success
299a9643ea8Slogwang   *	 EINVAL  thread was NULL
300a9643ea8Slogwang   */
301a9643ea8Slogwang int lthread_cancel(struct lthread *lt);
302a9643ea8Slogwang 
303a9643ea8Slogwang /**
304a9643ea8Slogwang   * Join an lthread
305a9643ea8Slogwang   *
306a9643ea8Slogwang   *  Joins the current thread with the specified lthread, and waits for that
307a9643ea8Slogwang   *  thread to exit.
308a9643ea8Slogwang   *  Passes an optional pointer to collect returned data.
309a9643ea8Slogwang   *
310a9643ea8Slogwang   * @param lt
311a9643ea8Slogwang   *  Pointer to the lthread to be joined
312a9643ea8Slogwang   * @param ptr
313a9643ea8Slogwang   *  Pointer to pointer to collect returned data
314a9643ea8Slogwang   *
315a9643ea8Slogwang   * @return
316a9643ea8Slogwang   *  0    success
317a9643ea8Slogwang   *  EINVAL lthread could not be joined.
318a9643ea8Slogwang   */
319a9643ea8Slogwang int lthread_join(struct lthread *lt, void **ptr);
320a9643ea8Slogwang 
321a9643ea8Slogwang /**
322a9643ea8Slogwang   * Detach an lthread
323a9643ea8Slogwang   *
324a9643ea8Slogwang   * Detaches the current thread
325a9643ea8Slogwang   * On exit a detached lthread will be freed immediately and will not wait
326a9643ea8Slogwang   * to be joined. The default state for a thread is not detached.
327a9643ea8Slogwang   *
328a9643ea8Slogwang   * @return
329a9643ea8Slogwang   *  none
330a9643ea8Slogwang   */
331a9643ea8Slogwang void lthread_detach(void);
332a9643ea8Slogwang 
333a9643ea8Slogwang /**
334a9643ea8Slogwang   *  Exit an lthread
335a9643ea8Slogwang   *
336a9643ea8Slogwang   * Terminate the current thread, optionally return data.
337a9643ea8Slogwang   * The data may be collected by lthread_join()
338a9643ea8Slogwang   *
339a9643ea8Slogwang   * After calling this function the lthread will be suspended until it is
340a9643ea8Slogwang   * joined. After it is joined then its resources will be freed.
341a9643ea8Slogwang   *
342a9643ea8Slogwang   * @param val
343a9643ea8Slogwang   *  Pointer to data to be returned
344a9643ea8Slogwang   *
345a9643ea8Slogwang   * @return
346a9643ea8Slogwang   *  none
347a9643ea8Slogwang   */
348a9643ea8Slogwang void lthread_exit(void *val);
349a9643ea8Slogwang 
350a9643ea8Slogwang /**
351a9643ea8Slogwang   * Cause the current lthread to sleep for n nanoseconds
352a9643ea8Slogwang   *
353a9643ea8Slogwang   * The current thread will be suspended until the specified time has elapsed
354a9643ea8Slogwang   * or has been exceeded.
355a9643ea8Slogwang   *
356a9643ea8Slogwang   * Execution will switch to the next lthread that is ready to run
357a9643ea8Slogwang   *
358a9643ea8Slogwang   * @param nsecs
359a9643ea8Slogwang   *  Number of nanoseconds to sleep
360a9643ea8Slogwang   *
361a9643ea8Slogwang   * @return
362a9643ea8Slogwang   *  none
363a9643ea8Slogwang   */
364a9643ea8Slogwang void lthread_sleep(uint64_t nsecs);
365a9643ea8Slogwang 
366a9643ea8Slogwang /**
367a9643ea8Slogwang   * Cause the current lthread to sleep for n cpu clock ticks
368a9643ea8Slogwang   *
369a9643ea8Slogwang   *  The current thread will be suspended until the specified time has elapsed
370a9643ea8Slogwang   *  or has been exceeded.
371a9643ea8Slogwang   *
372a9643ea8Slogwang   *	 Execution will switch to the next lthread that is ready to run
373a9643ea8Slogwang   *
374a9643ea8Slogwang   * @param clks
375a9643ea8Slogwang   *  Number of clock ticks to sleep
376a9643ea8Slogwang   *
377a9643ea8Slogwang   * @return
378a9643ea8Slogwang   *  none
379a9643ea8Slogwang   */
380a9643ea8Slogwang void lthread_sleep_clks(uint64_t clks);
381a9643ea8Slogwang 
382a9643ea8Slogwang /**
383a9643ea8Slogwang   * Yield the current lthread
384a9643ea8Slogwang   *
385a9643ea8Slogwang   *  The current thread will yield and execution will switch to the
386a9643ea8Slogwang   *  next lthread that is ready to run
387a9643ea8Slogwang   *
388a9643ea8Slogwang   * @return
389a9643ea8Slogwang   *  none
390a9643ea8Slogwang   */
391a9643ea8Slogwang void lthread_yield(void);
392a9643ea8Slogwang 
393a9643ea8Slogwang /**
394a9643ea8Slogwang   * Migrate the current thread to another scheduler
395a9643ea8Slogwang   *
396a9643ea8Slogwang   *  This function migrates the current thread to another scheduler.
397a9643ea8Slogwang   *  Execution will switch to the next lthread that is ready to run on the
398a9643ea8Slogwang   *  current scheduler. The current thread will be resumed on the new scheduler.
399a9643ea8Slogwang   *
400a9643ea8Slogwang   * @param lcore
401a9643ea8Slogwang   *	The lcore to migrate to
402a9643ea8Slogwang   *
403a9643ea8Slogwang   * @return
404a9643ea8Slogwang   *  0   success we are now running on the specified core
405a9643ea8Slogwang   *  EINVAL the destination lcore was not valid
406a9643ea8Slogwang   */
407a9643ea8Slogwang int lthread_set_affinity(unsigned lcore);
408a9643ea8Slogwang 
409a9643ea8Slogwang /**
410a9643ea8Slogwang   * Return the current lthread
411a9643ea8Slogwang   *
412a9643ea8Slogwang   *  Returns the current lthread
413a9643ea8Slogwang   *
414a9643ea8Slogwang   * @return
415a9643ea8Slogwang   *  pointer to the current lthread
416a9643ea8Slogwang   */
417a9643ea8Slogwang struct lthread
418a9643ea8Slogwang *lthread_current(void);
419a9643ea8Slogwang 
420a9643ea8Slogwang /**
421a9643ea8Slogwang   * Associate user data with an lthread
422a9643ea8Slogwang   *
423a9643ea8Slogwang   *  This function sets a user data pointer in the current lthread
424a9643ea8Slogwang   *  The pointer can be retrieved with lthread_get_data()
425a9643ea8Slogwang   *  It is the user's responsibility to allocate and free any data referenced
426a9643ea8Slogwang   *  by the user pointer.
427a9643ea8Slogwang   *
428a9643ea8Slogwang   * @param data
429a9643ea8Slogwang   *  pointer to user data
430a9643ea8Slogwang   *
431a9643ea8Slogwang   * @return
432a9643ea8Slogwang   *  none
433a9643ea8Slogwang   */
434a9643ea8Slogwang void lthread_set_data(void *data);
435a9643ea8Slogwang 
436a9643ea8Slogwang /**
437a9643ea8Slogwang   * Get user data for the current lthread
438a9643ea8Slogwang   *
439a9643ea8Slogwang   *  This function returns a user data pointer for the current lthread
440a9643ea8Slogwang   *  The pointer must first be set with lthread_set_data()
441a9643ea8Slogwang   *  It is the user's responsibility to allocate and free any data referenced
442a9643ea8Slogwang   *  by the user pointer.
443a9643ea8Slogwang   *
444a9643ea8Slogwang   * @return
445a9643ea8Slogwang   *  pointer to user data
446a9643ea8Slogwang   */
447a9643ea8Slogwang void
448a9643ea8Slogwang *lthread_get_data(void);
449a9643ea8Slogwang 
450a9643ea8Slogwang struct lthread_key;
451a9643ea8Slogwang typedef void (*tls_destructor_func) (void *);
452a9643ea8Slogwang 
453a9643ea8Slogwang /**
454a9643ea8Slogwang   * Create a key for lthread TLS
455a9643ea8Slogwang   *
456a9643ea8Slogwang   *  This function is modelled on pthread_key_create
457a9643ea8Slogwang   *  It creates a thread-specific data key visible to all lthreads on the
458a9643ea8Slogwang   *  current scheduler.
459a9643ea8Slogwang   *
460a9643ea8Slogwang   *  Key values may be used to locate thread-specific data.
461a9643ea8Slogwang   *  The same key value	may be used by different threads, the values bound
462a9643ea8Slogwang   *  to the key by	lthread_setspecific() are maintained on	a per-thread
463a9643ea8Slogwang   *  basis and persist for the life of the calling thread.
464a9643ea8Slogwang   *
465a9643ea8Slogwang   *  An	optional destructor function may be associated with each key value.
466a9643ea8Slogwang   *  At	thread exit, if	a key value has	a non-NULL destructor pointer, and the
467a9643ea8Slogwang   *  thread has	a non-NULL value associated with the key, the function pointed
468a9643ea8Slogwang   *  to	is called with the current associated value as its sole	argument.
469a9643ea8Slogwang   *
470a9643ea8Slogwang   * @param key
471a9643ea8Slogwang   *   Pointer to the key to be created
472a9643ea8Slogwang   * @param destructor
473a9643ea8Slogwang   *   Pointer to destructor function
474a9643ea8Slogwang   *
475a9643ea8Slogwang   * @return
476a9643ea8Slogwang   *  0 success
477a9643ea8Slogwang   *  EINVAL the key ptr was NULL
478a9643ea8Slogwang   *  EAGAIN no resources available
479a9643ea8Slogwang   */
480a9643ea8Slogwang int lthread_key_create(unsigned int *key, tls_destructor_func destructor);
481a9643ea8Slogwang 
482a9643ea8Slogwang /**
483a9643ea8Slogwang   * Delete key for lthread TLS
484a9643ea8Slogwang   *
485a9643ea8Slogwang   *  This function is modelled on pthread_key_delete().
486a9643ea8Slogwang   *  It deletes a thread-specific data key previously returned by
487a9643ea8Slogwang   *  lthread_key_create().
488a9643ea8Slogwang   *  The thread-specific data values associated with the key need not be NULL
489a9643ea8Slogwang   *  at the time that lthread_key_delete is called.
490a9643ea8Slogwang   *  It is the responsibility of the application to free any application
491a9643ea8Slogwang   *  storage or perform any cleanup actions for data structures related to the
492a9643ea8Slogwang   *  deleted key. This cleanup can be done either before or after
493a9643ea8Slogwang   * lthread_key_delete is called.
494a9643ea8Slogwang   *
495a9643ea8Slogwang   * @param key
496a9643ea8Slogwang   *  The key to be deleted
497a9643ea8Slogwang   *
498a9643ea8Slogwang   * @return
499a9643ea8Slogwang   *  0 Success
500a9643ea8Slogwang   *  EINVAL the key was invalid
501a9643ea8Slogwang   */
502a9643ea8Slogwang int lthread_key_delete(unsigned int key);
503a9643ea8Slogwang 
504a9643ea8Slogwang /**
505a9643ea8Slogwang   * Get lthread TLS
506a9643ea8Slogwang   *
507a9643ea8Slogwang   *  This function is modelled on pthread_getspecific().
508a9643ea8Slogwang   *  It returns the value currently bound to the specified key on behalf of the
509a9643ea8Slogwang   *  calling thread. Calling lthread_getspecific() with a key value not
510a9643ea8Slogwang   *  obtained from lthread_key_create() or after key has been deleted with
511a9643ea8Slogwang   *  lthread_key_delete() will result in undefined behaviour.
512a9643ea8Slogwang   *  lthread_getspecific() may be called from a thread-specific data destructor
513a9643ea8Slogwang   *  function.
514a9643ea8Slogwang   *
515a9643ea8Slogwang   * @param key
516a9643ea8Slogwang   *  The key for which data is requested
517a9643ea8Slogwang   *
518a9643ea8Slogwang   * @return
519a9643ea8Slogwang   *  Pointer to the thread specific data associated with that key
520a9643ea8Slogwang   *  or NULL if no data has been set.
521a9643ea8Slogwang   */
522a9643ea8Slogwang void
523a9643ea8Slogwang *lthread_getspecific(unsigned int key);
524a9643ea8Slogwang 
525a9643ea8Slogwang /**
526a9643ea8Slogwang   * Set lthread TLS
527a9643ea8Slogwang   *
528a9643ea8Slogwang   *  This function is modelled on pthread_setspecific()
529a9643ea8Slogwang   *  It associates a thread-specific value with a key obtained via a previous
530a9643ea8Slogwang   *  call to lthread_key_create().
531a9643ea8Slogwang   *  Different threads may bind different values to the same key. These values
532a9643ea8Slogwang   *  are typically pointers to dynamically allocated memory that have been
533a9643ea8Slogwang   *  reserved by the calling thread. Calling lthread_setspecific with a key
534a9643ea8Slogwang   *  value not obtained from lthread_key_create or after the key has been
535a9643ea8Slogwang   *  deleted with lthread_key_delete will result in undefined behaviour.
536a9643ea8Slogwang   *
537a9643ea8Slogwang   * @param key
538a9643ea8Slogwang   *  The key for which data is to be set
539a9643ea8Slogwang   * @param value
540a9643ea8Slogwang   *  Pointer to the user data
541a9643ea8Slogwang   *
542a9643ea8Slogwang   * @return
543a9643ea8Slogwang   *  0 success
544a9643ea8Slogwang   *  EINVAL the key was invalid
545a9643ea8Slogwang   */
546a9643ea8Slogwang 
547a9643ea8Slogwang int lthread_setspecific(unsigned int key, const void *value);
548a9643ea8Slogwang 
549a9643ea8Slogwang /**
550a9643ea8Slogwang  * The macros below provide an alternative mechanism to access lthread local
551a9643ea8Slogwang  *  storage.
552a9643ea8Slogwang  *
553a9643ea8Slogwang  * The macros can be used to declare define and access per lthread local
554a9643ea8Slogwang  * storage in a similar way to the RTE_PER_LCORE macros which control storage
555a9643ea8Slogwang  * local to an lcore.
556a9643ea8Slogwang  *
557a9643ea8Slogwang  * Memory for per lthread variables declared in this way is allocated when the
558a9643ea8Slogwang  * lthread is created and a pointer to this memory is stored in the lthread.
559a9643ea8Slogwang  * The per lthread variables are accessed via the pointer + the offset of the
560a9643ea8Slogwang  * particular variable.
561a9643ea8Slogwang  *
562a9643ea8Slogwang  * The total size of per lthread storage, and the variable offsets are found by
563a9643ea8Slogwang  * defining the variables in a unique global memory section, the start and end
564a9643ea8Slogwang  * of which is known. This global memory section is used only in the
565a9643ea8Slogwang  * computation of the addresses of the lthread variables, and is never actually
566a9643ea8Slogwang  * used to store any data.
567a9643ea8Slogwang  *
568a9643ea8Slogwang  * Due to the fact that variables declared this way may be scattered across
569a9643ea8Slogwang  * many files, the start and end of the section and variable offsets are only
570a9643ea8Slogwang  * known after linking, thus the computation of section size and variable
571a9643ea8Slogwang  * addresses is performed at run time.
572a9643ea8Slogwang  *
573a9643ea8Slogwang  * These macros are primarily provided to aid porting of code that makes use
574a9643ea8Slogwang  * of the existing RTE_PER_LCORE macros. In principle it would be more efficient
575a9643ea8Slogwang  * to gather all lthread local variables into a single structure and
576a9643ea8Slogwang  * set/retrieve a pointer to that struct using the alternative
577a9643ea8Slogwang  * lthread_data_set/get APIs.
578a9643ea8Slogwang  *
579a9643ea8Slogwang  * These macros are mutually exclusive with the lthread_data_set/get APIs.
580a9643ea8Slogwang  * If you define storage using these macros then the lthread_data_set/get APIs
581a9643ea8Slogwang  * will not perform as expected, the lthread_data_set API does nothing, and the
582a9643ea8Slogwang  * lthread_data_get API returns the start of global section.
583a9643ea8Slogwang  *
584a9643ea8Slogwang  */
585a9643ea8Slogwang /* start and end of per lthread section */
586a9643ea8Slogwang extern char __start_per_lt;
587a9643ea8Slogwang extern char __stop_per_lt;
588a9643ea8Slogwang 
589a9643ea8Slogwang 
590a9643ea8Slogwang #define RTE_DEFINE_PER_LTHREAD(type, name)                      \
591a9643ea8Slogwang __typeof__(type)__attribute((section("per_lt"))) per_lt_##name
592a9643ea8Slogwang 
593a9643ea8Slogwang /**
594a9643ea8Slogwang  * Macro to declare an extern per lthread variable "name" of type "type"
595a9643ea8Slogwang  */
596a9643ea8Slogwang #define RTE_DECLARE_PER_LTHREAD(type, name)                     \
597a9643ea8Slogwang extern __typeof__(type)__attribute((section("per_lt"))) per_lt_##name
598a9643ea8Slogwang 
599a9643ea8Slogwang /**
600a9643ea8Slogwang  * Read/write the per-lthread variable value
601a9643ea8Slogwang  */
602a9643ea8Slogwang #define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
603a9643ea8Slogwang ((char *)lthread_get_data() +\
604a9643ea8Slogwang ((char *) &per_lt_##name - &__start_per_lt)))
605a9643ea8Slogwang 
/**
  * Initialize a mutex
  *
  *  This function provides a mutual exclusion device, the need for which
  *  can normally be avoided in a cooperative multitasking environment.
  *  It is provided to aid porting of legacy code originally written for
  *  preemptive multitasking environments such as pthreads.
  *
  *  A mutex may be unlocked (not owned by any thread), or locked (owned by
  *  one thread).
  *
  *  A mutex can never be owned by more than one thread simultaneously.
  *  A thread attempting to lock a mutex that is already locked by another
  *  thread is suspended until the owning thread unlocks the mutex.
  *
  *  lthread_mutex_init() initializes the mutex object pointed to by mutex.
  *  Optional mutex attributes specified in attr are reserved for future
  *  use and are currently ignored.
  *
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock suspends the calling
  *  thread until the mutex is unlocked.
  *
  *  lthread_mutex_trylock behaves identically to lthread_mutex_lock, except
  *  that it does not block the calling thread if the mutex is already locked
  *  by another thread.
  *
  *  lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
  *  to be locked and owned by the calling thread.
  *
  *  lthread_mutex_destroy() destroys a mutex object, freeing its resources.
  *  The mutex must be unlocked with nothing blocked on it before calling
  *  lthread_mutex_destroy.
  *
  * @param name
  *  Optional pointer to string describing the mutex
  * @param mutex
  *  Pointer to pointer to the mutex to be initialized
  * @param attr
  *  Pointer to attribute - unused, reserved for future use
  *
  * @return
  *  0 success
  *  EINVAL mutex was not a valid pointer
  *  EAGAIN insufficient resources
  */

int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);
658a9643ea8Slogwang 
/**
  * Destroy a mutex
  *
  *  This function destroys the specified mutex freeing its resources.
  *  The mutex must be unlocked before calling lthread_mutex_destroy.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be destroyed
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EBUSY mutex was still in use
  */
int lthread_mutex_destroy(struct lthread_mutex *mutex);
676a9643ea8Slogwang 
/**
  * Lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock suspends the calling
  *  thread until the mutex is unlocked.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EDEADLOCK the mutex was already owned by the calling thread
  */

int lthread_mutex_lock(struct lthread_mutex *mutex);
699a9643ea8Slogwang 
/**
  * Try to lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  lthread_mutex_trylock behaves identically to lthread_mutex_lock, except
  *  that it does not block the calling thread if the mutex is already locked
  *  by another thread.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EBUSY the mutex was already locked by another thread
  */
int lthread_mutex_trylock(struct lthread_mutex *mutex);
720a9643ea8Slogwang 
/**
  * Unlock a mutex
  *
  * This function attempts to unlock the specified mutex. The mutex is assumed
  * to be locked and owned by the calling thread.
  *
  * The oldest of any threads blocked on the mutex is made ready and may
  * compete with any other running thread to gain the mutex; if it fails it
  * will be blocked again.
  *
  * @param mutex
  *  Pointer to the mutex to be unlocked
  *
  * @return
  *  0 mutex was unlocked
  *  EINVAL mutex was not an initialized mutex
  *  EPERM the mutex was not owned by the calling thread
  */

int lthread_mutex_unlock(struct lthread_mutex *mutex);
741a9643ea8Slogwang 
/**
  * Initialize a condition variable
  *
  *  This function initializes a condition variable.
  *
  *  Condition variables can be used to communicate changes in the state of
  *  data shared between threads.
  *
  * @see lthread_cond_wait()
  *
  * @param name
  *  Pointer to optional string describing the condition variable
  * @param c
  *  Pointer to pointer to the condition variable to be initialized
  * @param attr
  *  Pointer to optional attribute reserved for future use, currently ignored
  *
  * @return
  *  0 success
  *  EINVAL cond was not a valid pointer
  *  EAGAIN insufficient resources
  */
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);
767a9643ea8Slogwang 
/**
  * Destroy a condition variable
  *
  *  This function destroys a condition variable that was created with
  *  lthread_cond_init() and releases its resources.
  *
  * @param cond
  *  Pointer to the condition variable to be destroyed
  *
  * @return
  *  0 Success
  *  EBUSY condition variable was still in use
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_destroy(struct lthread_cond *cond);
783a9643ea8Slogwang 
/**
  * Wait on a condition variable
  *
  *  The function blocks the current thread waiting on the condition variable
  *  specified by cond. The waiting thread unblocks only after another thread
  *  calls lthread_cond_signal, or lthread_cond_broadcast, specifying the
  *  same condition variable.
  *
  * @param c
  *  Pointer to the condition variable to be waited on
  *
  * @param reserved
  *  reserved for future use
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);
803a9643ea8Slogwang 
/**
  * Signal a condition variable
  *
  *  The function unblocks one thread waiting for the condition variable cond.
  *  If no threads are waiting on cond, the lthread_cond_signal() function
  *  has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_signal(struct lthread_cond *c);
819a9643ea8Slogwang 
/**
  * Broadcast a condition variable
  *
  *  The function unblocks all threads waiting for the condition variable cond.
  *  If no threads are waiting on cond, the lthread_cond_broadcast()
  *  function has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_broadcast(struct lthread_cond *c);
835a9643ea8Slogwang 
836*2bfe3f2eSlogwang #ifdef __cplusplus
837*2bfe3f2eSlogwang }
838*2bfe3f2eSlogwang #endif
839*2bfe3f2eSlogwang 
840a9643ea8Slogwang #endif				/* LTHREAD_H */
841