/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Some portions of this software may have been derived from the
 * https://github.com/halayli/lthread which carries the following license.
 *
 * Copyright (C) 2012, Hasan Alayli <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 *  @file lthread_api.h
 *
 *  @warning
 *  @b EXPERIMENTAL: this API may change without prior notice
 *
 *  This file contains the public API for the L-thread subsystem.
 *
 *  The L-thread subsystem provides a simple cooperative scheduler that
 *  enables arbitrary functions to run as cooperative threads within a
 *  single P-thread.
 *
 *  The subsystem provides a P-thread like API that is intended to assist in
 *  the reuse of legacy code written for POSIX pthreads.
 *
 *  The L-thread subsystem relies on cooperative multitasking; as such,
 *  an L-thread must possess frequent rescheduling points. Often these
 *  rescheduling points are provided transparently when the application
 *  invokes an L-thread API.
 *
 *  In some applications a program may enter a loop whose exit condition
 *  depends on the action of another thread or a response from hardware.
 *  In such a case it is necessary to yield the thread periodically in the
 *  loop body, to allow other threads an opportunity to run. This can be done
 *  by inserting a call to lthread_yield() or lthread_sleep(n) in the body of
 *  the loop, as illustrated in the sketch that follows this comment.
 *
 *  If the application makes expensive / blocking system calls or does other
 *  work that would take an inordinate amount of time to complete, this will
 *  stall the cooperative scheduler, resulting in very poor performance.
 *
 *  In such cases an L-thread can be migrated temporarily to another scheduler
 *  running in a different P-thread on another core. When the expensive or
 *  blocking operation is completed it can be migrated back to the original
 *  scheduler. In this way other threads can continue to run on the original
 *  scheduler and will be completely unaffected by the blocking behaviour.
 *  To migrate an L-thread to another scheduler the API lthread_set_affinity()
 *  is provided.
 *
 *  If L-threads that share data are running on the same core it is possible
 *  to design programs where mutual exclusion mechanisms to protect shared
 *  data can be avoided. This is because the cooperative threads cannot
 *  preempt each other.
 *
 *  There are two cases where mutual exclusion mechanisms are necessary:
 *
 *   a) where the L-threads sharing data are running on different cores;
 *   b) where code must yield while updating data shared with another thread.
 *
 *  The L-thread subsystem provides a set of mutex APIs to help with such
 *  scenarios, however excessive reliance on these will impact performance
 *  and is best avoided if possible.
 *
 *  L-threads can synchronise using a fast condition variable implementation
 *  that supports signal and broadcast. An L-thread running on any core can
 *  wait on a condition.
 *
 *  L-threads can have L-thread local storage with an API modelled on either
 *  the P-thread get/set specific APIs or on PER_LTHREAD macros modelled on
 *  the RTE_PER_LCORE macros. Alternatively a simple user data pointer may be
 *  set and retrieved from a thread.
 */
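
/*
 * Illustrative sketch (not part of this API): yielding inside a polling loop
 * so that other L-threads on the same scheduler get a chance to run. The
 * variable work_ready is hypothetical shared state set by another thread or
 * by hardware.
 *
 *    while (!work_ready)
 *        lthread_yield();
 *
 * A call to lthread_sleep(n) may be used instead of lthread_yield() when a
 * fixed back-off between polls is acceptable.
 */
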
#ifndef LTHREAD_H
#define LTHREAD_H

#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_cycles.h>


struct lthread;
struct lthread_cond;
struct lthread_mutex;

struct lthread_condattr;
struct lthread_mutexattr;

typedef void (*lthread_func_t) (void *);

/*
 * Define the size of stack for an lthread.
 * This is the size that will be allocated on lthread creation.
 * It is a fixed size and will not grow.
 */
#define LTHREAD_MAX_STACK_SIZE (1024*64)

/**
 * Define the maximum number of TLS keys that can be created
 *
 */
#define LTHREAD_MAX_KEYS 1024

/**
 * Define the maximum number of attempts to destroy an lthread's
 * TLS data on thread exit
 */
#define LTHREAD_DESTRUCTOR_ITERATIONS 4


/**
 * Define the maximum number of lcores that will support lthreads
 */
#define LTHREAD_MAX_LCORES RTE_MAX_LCORE

/**
 * How many lthread objects to pre-allocate as the system grows;
 * applies to lthreads + stacks, TLS, mutexes and condition variables.
 *
 * @see _lthread_alloc()
 * @see _cond_alloc()
 * @see _mutex_alloc()
 *
 */
#define LTHREAD_PREALLOC 100

/**
 * Set the number of schedulers in the system.
 *
 * This function may optionally be called before starting schedulers.
 *
 * If the number of schedulers is not set, or is set to 0, then each scheduler
 * will begin scheduling lthreads as soon as it is started.
 *
 * If the number of schedulers is set to greater than 0, then each scheduler
 * will wait until all schedulers have started before beginning to schedule
 * lthreads.
 *
 * If an application wishes to have threads migrate between cores using
 * lthread_set_affinity(), or join threads running on other cores using
 * lthread_join(), then it is prudent to set the number of schedulers to ensure
 * that all schedulers are initialised beforehand.
 *
 * @param num
 *  the number of schedulers in the system
 * @return
 *  the number of schedulers in the system
 */
int lthread_num_schedulers_set(int num);

/**
 * Return the number of schedulers currently running
 * @return
 *  the number of schedulers in the system
 */
int lthread_active_schedulers(void);

/**
  * Shutdown the specified scheduler
  *
  *  This function tells the specified scheduler to
  *  exit if/when there is no more work to do.
  *
  *  Note that although the scheduler will stop,
  *  its resources are not freed.
  *
  * @param lcore
  *  The lcore of the scheduler to shutdown
  *
  * @return
  *  none
  */
void lthread_scheduler_shutdown(unsigned lcore);

/**
  * Shutdown all schedulers
  *
  *  This function tells all schedulers, including the current scheduler, to
  *  exit if/when there is no more work to do.
  *
  *  Note that although the schedulers will stop,
  *  their resources are not freed.
  *
  * @return
  *  none
  */
void lthread_scheduler_shutdown_all(void);

/**
  * Run the lthread scheduler
  *
  *  Runs the lthread scheduler.
  *  This function returns only if/when all lthreads have exited.
  *  This function must be the main loop of an EAL thread.
  *
  * @return
  *  none
  */

void lthread_run(void);
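
/*
 * Illustrative sketch (not part of this API): a typical start-up sequence in
 * which every EAL lcore runs an lthread scheduler. The names sched_main() and
 * first_lthread() are hypothetical helpers, and the EAL calls shown are
 * assumed to be available in the surrounding application.
 *
 *    static void first_lthread(void *arg);
 *
 *    static int sched_main(void *arg)
 *    {
 *        if (rte_lcore_id() == rte_get_master_lcore()) {
 *            struct lthread *lt;
 *            lthread_create(&lt, -1, first_lthread, arg);
 *        }
 *        lthread_run();
 *        return 0;
 *    }
 *
 *    lthread_num_schedulers_set(rte_lcore_count());
 *    rte_eal_mp_remote_launch(sched_main, NULL, CALL_MASTER);
 *    rte_eal_mp_wait_lcore();
 *
 * lthread_run() returns on each lcore once an lthread has requested shutdown
 * with lthread_scheduler_shutdown_all() and there is no more work to do.
 */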

/**
  * Create an lthread
  *
  *  Creates an lthread and places it in the ready queue on a particular
  *  lcore.
  *
  *  If no scheduler exists yet on the current lcore then one is created.
  *
  * @param new_lt
  *  Pointer to an lthread pointer that will be initialized
  * @param lcore
  *  The lcore the thread should be started on:
  *    -1 the current lcore
  *    0 - LTHREAD_MAX_LCORES any other lcore
  * @param func
  *  Pointer to the function for the thread to run
  * @param arg
  *  Pointer to args that will be passed to the thread
  *
  * @return
  *  0    success
  *  EAGAIN  no resources available
  *  EINVAL  NULL thread or function pointer, or lcore_id out of range
  */
int
lthread_create(struct lthread **new_lt,
		int lcore, lthread_func_t func, void *arg);
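
/*
 * Illustrative sketch (not part of this API): an lthread entry function and
 * its creation. The names worker(), do_work() and args are hypothetical.
 *
 *    static void worker(void *arg)
 *    {
 *        do_work(arg);
 *        lthread_exit(NULL);
 *    }
 *
 *    struct lthread *lt;
 *    int ret;
 *
 *    ret = lthread_create(&lt, -1, worker, &args);
 *    if (ret != 0)
 *        rte_exit(EXIT_FAILURE, "cannot create lthread\n");
 *
 * Passing -1 schedules the new lthread on the current lcore; passing an
 * explicit lcore number places it on that lcore's scheduler instead.
 */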

/**
  * Cancel an lthread
  *
  *  Cancels an lthread and causes it to be terminated.
  *  If the lthread is detached it will be freed immediately,
  *  otherwise its resources will not be released until it is joined.
  *
  * @param lt
  *  Pointer to an lthread that will be cancelled
  *
  * @return
  *  0    success
  *  EINVAL  thread was NULL
  */
int lthread_cancel(struct lthread *lt);

/**
  * Join an lthread
  *
  *  Joins the current thread with the specified lthread, and waits for that
  *  thread to exit.
  *  Passes an optional pointer to collect returned data.
  *
  * @param lt
  *  Pointer to the lthread to be joined
  * @param ptr
  *  Pointer to pointer to collect returned data
  *
  * @return
  *  0    success
  *  EINVAL lthread could not be joined.
  */
int lthread_join(struct lthread *lt, void **ptr);

/**
  * Detach an lthread
  *
  *  Detaches the current thread.
  *  On exit a detached lthread will be freed immediately and will not wait
  *  to be joined. The default state for a thread is not detached.
  *
  * @return
  *  none
  */
void lthread_detach(void);

/**
  *  Exit an lthread
  *
  *  Terminate the current thread, optionally returning data.
  *  The data may be collected by lthread_join().
  *
  *  After calling this function the lthread will be suspended until it is
  *  joined. After it is joined its resources will be freed.
  *
  * @param val
  *  Pointer to data to be returned
  *
  * @return
  *  none
  */
void lthread_exit(void *val);
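
/*
 * Illustrative sketch (not part of this API): returning a value from an
 * lthread and collecting it with lthread_join(). The names producer() and
 * compute() are hypothetical.
 *
 *    static void producer(void *arg)
 *    {
 *        static uint64_t result;
 *        result = compute(arg);
 *        lthread_exit(&result);
 *    }
 *
 *    struct lthread *lt;
 *    void *retval;
 *
 *    lthread_create(&lt, -1, producer, NULL);
 *    lthread_join(lt, &retval);
 *
 * The pointer passed to lthread_exit() must remain valid until the joining
 * thread has retrieved it, hence the static storage in this sketch.
 */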

/**
  * Cause the current lthread to sleep for n nanoseconds
  *
  * The current thread will be suspended until the specified time has elapsed
  * or has been exceeded.
  *
  * Execution will switch to the next lthread that is ready to run
  *
  * @param nsecs
  *  Number of nanoseconds to sleep
  *
  * @return
  *  none
  */
void lthread_sleep(uint64_t nsecs);

/**
  * Cause the current lthread to sleep for n cpu clock ticks
  *
  *  The current thread will be suspended until the specified time has elapsed
  *  or has been exceeded.
  *
  *  Execution will switch to the next lthread that is ready to run
  *
  * @param clks
  *  Number of clock ticks to sleep
  *
  * @return
  *  none
  */
void lthread_sleep_clks(uint64_t clks);

/**
  * Yield the current lthread
  *
  *  The current thread will yield and execution will switch to the
  *  next lthread that is ready to run
  *
  * @return
  *  none
  */
void lthread_yield(void);

/**
  * Migrate the current thread to another scheduler
  *
  *  This function migrates the current thread to another scheduler.
  *  Execution will switch to the next lthread that is ready to run on the
  *  current scheduler. The current thread will be resumed on the new scheduler.
  *
  * @param lcore
  *  The lcore to migrate to
  *
  * @return
  *  0   success we are now running on the specified core
  *  EINVAL the destination lcore was not valid
  */
int lthread_set_affinity(unsigned lcore);
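
/*
 * Illustrative sketch (not part of this API): temporarily migrating an
 * lthread to another scheduler to perform a blocking call, then migrating
 * back so the original scheduler is never stalled. IO_LCORE and
 * blocking_io() are hypothetical.
 *
 *    unsigned home_lcore = rte_lcore_id();
 *
 *    lthread_set_affinity(IO_LCORE);
 *    blocking_io();
 *    lthread_set_affinity(home_lcore);
 *
 * Other lthreads on the home scheduler continue to run while the blocking
 * call is in progress on IO_LCORE.
 */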

/**
  * Return the current lthread
  *
  *  Returns the current lthread
  *
  * @return
  *  pointer to the current lthread
  */
struct lthread
*lthread_current(void);

/**
  * Associate user data with an lthread
  *
  *  This function sets a user data pointer in the current lthread.
  *  The pointer can be retrieved with lthread_get_data().
  *  It is the user's responsibility to allocate and free any data referenced
  *  by the user pointer.
  *
  * @param data
  *  pointer to user data
  *
  * @return
  *  none
  */
void lthread_set_data(void *data);

/**
  * Get user data for the current lthread
  *
  *  This function returns a user data pointer for the current lthread.
  *  The pointer must first be set with lthread_set_data().
  *  It is the user's responsibility to allocate and free any data referenced
  *  by the user pointer.
  *
  * @return
  *  pointer to user data
  */
void
*lthread_get_data(void);
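
/*
 * Illustrative sketch (not part of this API): attaching a per-lthread context
 * structure via the user data pointer. struct conn_ctx is hypothetical.
 *
 *    struct conn_ctx *ctx = calloc(1, sizeof(*ctx));
 *
 *    lthread_set_data(ctx);
 *
 *    struct conn_ctx *cur = lthread_get_data();
 *
 *    free(ctx);
 *
 * Note that these calls are mutually exclusive with the RTE_PER_LTHREAD
 * macros defined later in this file.
 */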

struct lthread_key;
typedef void (*tls_destructor_func) (void *);

/**
  * Create a key for lthread TLS
  *
  *  This function is modelled on pthread_key_create().
  *  It creates a thread-specific data key visible to all lthreads on the
  *  current scheduler.
  *
  *  Key values may be used to locate thread-specific data.
  *  The same key value may be used by different threads, the values bound
  *  to the key by lthread_setspecific() are maintained on a per-thread
  *  basis and persist for the life of the calling thread.
  *
  *  An optional destructor function may be associated with each key value.
  *  At thread exit, if a key value has a non-NULL destructor pointer, and the
  *  thread has a non-NULL value associated with the key, the function pointed
  *  to is called with the current associated value as its sole argument.
  *
  * @param key
  *   Pointer to the key to be created
  * @param destructor
  *   Pointer to destructor function
  *
  * @return
  *  0 success
  *  EINVAL the key ptr was NULL
  *  EAGAIN no resources available
  */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor);

/**
  * Delete key for lthread TLS
  *
  *  This function is modelled on pthread_key_delete().
  *  It deletes a thread-specific data key previously returned by
  *  lthread_key_create().
  *  The thread-specific data values associated with the key need not be NULL
  *  at the time that lthread_key_delete is called.
  *  It is the responsibility of the application to free any application
  *  storage or perform any cleanup actions for data structures related to the
  *  deleted key. This cleanup can be done either before or after
  *  lthread_key_delete is called.
  *
  * @param key
  *  The key to be deleted
  *
  * @return
  *  0 Success
  *  EINVAL the key was invalid
  */
int lthread_key_delete(unsigned int key);

/**
  * Get lthread TLS
  *
  *  This function is modelled on pthread_getspecific().
  *  It returns the value currently bound to the specified key on behalf of the
  *  calling thread. Calling lthread_getspecific() with a key value not
  *  obtained from lthread_key_create() or after the key has been deleted with
  *  lthread_key_delete() will result in undefined behaviour.
  *  lthread_getspecific() may be called from a thread-specific data destructor
  *  function.
  *
  * @param key
  *  The key for which data is requested
  *
  * @return
  *  Pointer to the thread specific data associated with that key
  *  or NULL if no data has been set.
  */
void
*lthread_getspecific(unsigned int key);

/**
  * Set lthread TLS
  *
  *  This function is modelled on pthread_setspecific().
  *  It associates a thread-specific value with a key obtained via a previous
  *  call to lthread_key_create().
  *  Different threads may bind different values to the same key. These values
  *  are typically pointers to dynamically allocated memory that have been
  *  reserved by the calling thread. Calling lthread_setspecific() with a key
  *  value not obtained from lthread_key_create() or after the key has been
  *  deleted with lthread_key_delete() will result in undefined behaviour.
  *
  * @param key
  *  The key for which data is to be set
  * @param value
  *  Pointer to the user data
  *
  * @return
  *  0 success
  *  EINVAL the key was invalid
  */

int lthread_setspecific(unsigned int key, const void *value);
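
/*
 * Illustrative sketch (not part of this API): per-lthread TLS with a
 * destructor that frees the stored value on thread exit. The key variable
 * buf_key and the buffer size are hypothetical.
 *
 *    static unsigned int buf_key;
 *
 *    lthread_key_create(&buf_key, free);
 *
 * Then, inside each lthread:
 *
 *    char *buf = malloc(256);
 *    lthread_setspecific(buf_key, buf);
 *
 *    char *mine = lthread_getspecific(buf_key);
 *
 * When an lthread exits, the destructor (here free()) is called on its
 * stored value, provided that value is non-NULL.
 */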

/**
 * The macros below provide an alternative mechanism to access lthread local
 * storage.
 *
 * The macros can be used to declare, define and access per-lthread local
 * storage in a similar way to the RTE_PER_LCORE macros which control storage
 * local to an lcore.
 *
 * Memory for per-lthread variables declared in this way is allocated when the
 * lthread is created and a pointer to this memory is stored in the lthread.
 * The per-lthread variables are accessed via the pointer + the offset of the
 * particular variable.
 *
 * The total size of per-lthread storage, and the variable offsets, are found
 * by defining the variables in a unique global memory section, the start and
 * end of which is known. This global memory section is used only in the
 * computation of the addresses of the lthread variables, and is never actually
 * used to store any data.
 *
 * Due to the fact that variables declared this way may be scattered across
 * many files, the start and end of the section and variable offsets are only
 * known after linking, thus the computation of section size and variable
 * addresses is performed at run time.
 *
 * These macros are primarily provided to aid porting of code that makes use
 * of the existing RTE_PER_LCORE macros. In principle it would be more efficient
 * to gather all lthread local variables into a single structure and
 * set/retrieve a pointer to that struct using the alternative
 * lthread_set_data()/lthread_get_data() APIs.
 *
 * These macros are mutually exclusive with the lthread_set_data()/
 * lthread_get_data() APIs.
 * If you define storage using these macros then those APIs will not perform
 * as expected: lthread_set_data() does nothing, and lthread_get_data()
 * returns the start of the global section.
 *
 */
/* start and end of per lthread section */
extern char __start_per_lt;
extern char __stop_per_lt;


#define RTE_DEFINE_PER_LTHREAD(type, name)                      \
__typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Macro to declare an extern per lthread variable "name" of type "type"
 */
#define RTE_DECLARE_PER_LTHREAD(type, name)                     \
extern __typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Read/write the per-lthread variable value
 */
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
((char *)lthread_get_data() +\
((char *) &per_lt_##name - &__start_per_lt)))
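
/*
 * Illustrative sketch (not part of this API): defining and using a
 * per-lthread counter. Because RTE_PER_LTHREAD(name) evaluates to a pointer
 * into the current lthread's local storage, it is dereferenced on access.
 * The variable name "counter" is hypothetical.
 *
 *    RTE_DEFINE_PER_LTHREAD(uint64_t, counter);
 *
 * Then, inside an lthread:
 *
 *    *RTE_PER_LTHREAD(counter) += 1;
 *    uint64_t seen = *RTE_PER_LTHREAD(counter);
 *
 * Another compilation unit can reference the same variable with
 * RTE_DECLARE_PER_LTHREAD(uint64_t, counter).
 */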

/**
  * Initialize a mutex
  *
  *  This function provides a mutual exclusion device, the need for which
  *  can normally be avoided in a cooperative multitasking environment.
  *  It is provided to aid porting of legacy code originally written for
  *  preemptive multitasking environments such as pthreads.
  *
  *  A mutex may be unlocked (not owned by any thread), or locked (owned by
  *  one thread).
  *
  *  A mutex can never be owned by more than one thread simultaneously.
  *  A thread attempting to lock a mutex that is already locked by another
  *  thread is suspended until the owning thread unlocks the mutex.
  *
  *  lthread_mutex_init() initializes the mutex object pointed to by mutex.
  *  Optional mutex attributes specified in mutexattr are reserved for future
  *  use and are currently ignored.
  *
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock() returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock() suspends the
  *  calling thread until the mutex is unlocked.
  *
  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
  *  except that it does not block the calling thread if the mutex is already
  *  locked by another thread.
  *
  *  lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
  *  to be locked and owned by the calling thread.
  *
  *  lthread_mutex_destroy() destroys a mutex object, freeing its resources.
  *  The mutex must be unlocked with nothing blocked on it before calling
  *  lthread_mutex_destroy().
  *
  * @param name
  *  Optional pointer to string describing the mutex
  * @param mutex
  *  Pointer to pointer to the mutex to be initialized
  * @param attr
  *  Pointer to attribute - unused reserved
  *
  * @return
  *  0 success
  *  EINVAL mutex was not a valid pointer
  *  EAGAIN insufficient resources
  */

int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);
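
/*
 * Illustrative sketch (not part of this API): creating, using and destroying
 * a mutex to protect data shared between lthreads running on different
 * cores. The variable shared_count is hypothetical.
 *
 *    struct lthread_mutex *m;
 *
 *    lthread_mutex_init("count_lock", &m, NULL);
 *
 *    lthread_mutex_lock(m);
 *    shared_count++;
 *    lthread_mutex_unlock(m);
 *
 *    lthread_mutex_destroy(m);
 *
 * As noted above, when all sharers run on the same scheduler the mutex is
 * often unnecessary, since cooperative threads cannot preempt each other.
 */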

/**
  * Destroy a mutex
  *
  *  This function destroys the specified mutex, freeing its resources.
  *  The mutex must be unlocked before calling lthread_mutex_destroy().
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be destroyed
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EBUSY mutex was still in use
  */
int lthread_mutex_destroy(struct lthread_mutex *mutex);

/**
  * Lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
  *  is currently unlocked, it becomes locked and owned by the calling
  *  thread, and lthread_mutex_lock() returns immediately. If the mutex is
  *  already locked by another thread, lthread_mutex_lock() suspends the
  *  calling thread until the mutex is unlocked.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  *  0 success
  *  EINVAL mutex was not an initialized mutex
  *  EDEADLOCK the mutex was already owned by the calling thread
  */

int lthread_mutex_lock(struct lthread_mutex *mutex);

/**
  * Try to lock a mutex
  *
  *  This function attempts to lock a mutex.
  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),
  *  except that it does not block the calling thread if the mutex is already
  *  locked by another thread.
  *
  * @see lthread_mutex_init()
  *
  * @param mutex
  *  Pointer to the mutex to be locked
  *
  * @return
  * 0 success
  * EINVAL mutex was not an initialized mutex
  * EBUSY the mutex was already locked by another thread
  */
int lthread_mutex_trylock(struct lthread_mutex *mutex);
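
/*
 * Illustrative sketch (not part of this API): using lthread_mutex_trylock()
 * to avoid being suspended while the lock is contended. do_other_work() and
 * update_shared_state() are hypothetical; m is a mutex created with
 * lthread_mutex_init().
 *
 *    while (lthread_mutex_trylock(m) == EBUSY) {
 *        do_other_work();
 *        lthread_yield();
 *    }
 *    update_shared_state();
 *    lthread_mutex_unlock(m);
 */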

/**
  * Unlock a mutex
  *
  * This function attempts to unlock the specified mutex. The mutex is assumed
  * to be locked and owned by the calling thread.
  *
  * The oldest of any threads blocked on the mutex is made ready and may
  * compete with any other running thread to gain the mutex; if it fails it
  * will be blocked again.
  *
  * @param mutex
  *  Pointer to the mutex to be unlocked
  *
  * @return
  *  0 mutex was unlocked
  *  EINVAL mutex was not an initialized mutex
  *  EPERM the mutex was not owned by the calling thread
  */

int lthread_mutex_unlock(struct lthread_mutex *mutex);

/**
  * Initialize a condition variable
  *
  *  This function initializes a condition variable.
  *
  *  Condition variables can be used to communicate changes in the state of
  *  data shared between threads.
  *
  * @see lthread_cond_wait()
  *
  * @param name
  *  Pointer to optional string describing the condition variable
  * @param c
  *  Pointer to pointer to the condition variable to be initialized
  * @param attr
  *  Pointer to optional attribute reserved for future use, currently ignored
  *
  * @return
  *  0 success
  *  EINVAL cond was not a valid pointer
  *  EAGAIN insufficient resources
  */
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);

/**
  * Destroy a condition variable
  *
  *  This function destroys a condition variable that was created with
  *  lthread_cond_init() and releases its resources.
  *
  * @param cond
  *  Pointer to the condition variable to be destroyed
  *
  * @return
  *  0 Success
  *  EBUSY condition variable was still in use
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_destroy(struct lthread_cond *cond);

/**
  * Wait on a condition variable
  *
  *  The function blocks the current thread waiting on the condition variable
  *  specified by c. The waiting thread unblocks only after another thread
  *  calls lthread_cond_signal(), or lthread_cond_broadcast(), specifying the
  *  same condition variable.
  *
  * @param c
  *  Pointer to the condition variable to be waited on
  *
  * @param reserved
  *  reserved for future use
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);

/**
  * Signal a condition variable
  *
  *  The function unblocks one thread waiting for the condition variable c.
  *  If no threads are waiting on c, the lthread_cond_signal() function
  *  has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_signal(struct lthread_cond *c);
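
/*
 * Illustrative sketch (not part of this API): one lthread waiting for a
 * condition that another lthread signals. The shared flag data_ready, the
 * condition variable cv (created with lthread_cond_init()) and
 * consume_data() are hypothetical. The predicate is rechecked in a loop,
 * the usual defensive pattern when waiting on a condition variable.
 *
 * Waiter:
 *
 *    while (!data_ready)
 *        lthread_cond_wait(cv, 0);
 *    consume_data();
 *
 * Signaller:
 *
 *    data_ready = 1;
 *    lthread_cond_signal(cv);
 */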

/**
  * Broadcast a condition variable
  *
  *  The function unblocks all threads waiting for the condition variable c.
  *  If no threads are waiting on c, the lthread_cond_broadcast()
  *  function has no effect.
  *
  * @param c
  *  Pointer to the condition variable to be signalled
  *
  * @return
  *  0 The condition was signalled ( Success )
  *  EINVAL was not an initialised condition variable
  */
int lthread_cond_broadcast(struct lthread_cond *c);

#endif				/* LTHREAD_H */