#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status.  */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;


/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= SPIN_LOCK_UNLOCKED,				\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}

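/*
 * Usage sketch (not part of the original header): DECLARE_WAIT_QUEUE_HEAD()
 * suits a waitqueue head with static storage duration, while
 * init_waitqueue_head() initialises one embedded in another structure.
 * The names below (my_waitqueue, struct my_device, my_device_init) are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitqueue);
 *
 *	struct my_device {
 *		wait_queue_head_t wq;
 *		int ready;
 *	};
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		dev->ready = 0;
 *		init_waitqueue_head(&dev->wq);
 *	}
 */
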
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/*
 * Used to distinguish between sync and async I/O wait contexts:
 * sync I/O typically specifies a NULL wait queue entry or a wait
 * queue entry bound to a task (the current task) to wake up.
 * AIO specifies a wait queue entry with an async notification
 * callback routine that is not associated with any task.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->private))

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait));

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
{
	list_del(&old->task_list);
}

void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
void FASTCALL(wake_up_bit(void *, int));
int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));

#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

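/*
 * Usage sketch (not part of the original header), assuming a hypothetical
 * driver with a waitqueue head my_wq and a condition flag my_done:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_done;
 *
 *	The waiter sleeps uninterruptibly until my_done becomes non-zero:
 *
 *	static void my_wait_for_completion(void)
 *	{
 *		wait_event(my_wq, my_done);
 *	}
 *
 *	The waker must update the condition before calling wake_up():
 *
 *	static void my_signal_completion(void)
 *	{
 *		my_done = 1;
 *		wake_up(&my_wq);
 *	}
 */
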
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the @condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

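/*
 * Usage sketch (not part of the original header), reusing the hypothetical
 * my_wq/my_done pair from the wait_event() example.  A return value of 0
 * means the timeout expired with the condition still false; a non-zero
 * return is the number of jiffies that were left:
 *
 *	static int my_wait_up_to_one_second(void)
 *	{
 *		long left = wait_event_timeout(my_wq, my_done, HZ);
 *
 *		if (!left)
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 */
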
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro returns -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

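/*
 * Usage sketch (not part of the original header), again with the
 * hypothetical my_wq/my_done pair.  A typical read()-style path sleeps
 * interruptibly and propagates the -ERESTARTSYS it gets on a signal, so
 * the caller can handle the signal and restart the syscall:
 *
 *	static int my_wait_for_data(void)
 *	{
 *		int err = wait_event_interruptible(my_wq, my_done);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */
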
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * @condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

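/*
 * Usage sketch (not part of the original header): the three possible
 * outcomes (timed out, interrupted by a signal, condition became true)
 * map naturally onto an error code.  my_wq and my_done are the same
 * hypothetical names as above.
 *
 *	static int my_wait_interruptible_one_second(void)
 *	{
 *		long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *
 *		if (ret == 0)
 *			return -ETIMEDOUT;
 *		if (ret < 0)
 *			return ret;
 *		return 0;
 *	}
 */
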
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

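/*
 * Note (not part of the original header): exclusive waiters are queued at
 * the tail with WQ_FLAG_EXCLUSIVE set, so a plain wake_up() wakes at most
 * one of them (along with any non-exclusive waiters).  A hypothetical pool
 * of worker threads can therefore wait exclusively on a shared queue while
 * the producer wakes exactly one worker per queued item:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_pool_wq);
 *	static LIST_HEAD(my_pool_list);
 *
 *	Worker thread, sleeping as an exclusive waiter until work arrives:
 *
 *		err = wait_event_interruptible_exclusive(my_pool_wq,
 *					!list_empty(&my_pool_list));
 *
 *	Producer, after adding an item to my_pool_list:
 *
 *		wake_up(&my_pool_wq);
 */
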
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t *wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces during 2.7.
 */
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
				wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
				wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= autoremove_wake_function,		\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

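/*
 * Usage sketch (not part of the original header): the open-coded wait loop
 * that the wait_event*() macros are built on.  DEFINE_WAIT() uses
 * autoremove_wake_function(), so the entry removes itself from the queue
 * when it is woken.  my_wq and my_done are hypothetical, as above.
 *
 *	static void my_open_coded_wait(void)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (my_done)
 *				break;
 *			schedule();
 *		}
 *		finish_wait(&my_wq, &wait);
 *	}
 */
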
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that wait for the bit to clear but
 * have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

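/*
 * Usage sketch (not part of the original header): waiting for a hypothetical
 * MY_FLAG_BUSY bit in a driver's unsigned long flags word (my_dev->flags) to
 * clear.  The @action callback decides how to sleep; a minimal one just
 * schedules:
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	Waiter:
 *
 *		wait_on_bit(&my_dev->flags, MY_FLAG_BUSY, my_bit_wait,
 *			    TASK_UNINTERRUPTIBLE);
 *
 *	The side that clears the bit must then wake the hashed waitqueue:
 *
 *		clear_bit(MY_FLAG_BUSY, &my_dev->flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_dev->flags, MY_FLAG_BUSY);
 */
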
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance when using the bit as a
 * lock.  Use wait_on_bit_lock() in threads that wait for the bit to
 * clear with the intention of setting it, and of clearing it again
 * when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

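/*
 * Usage sketch (not part of the original header): using a hypothetical
 * MY_FLAG_LOCK bit as a sleeping lock, with the my_bit_wait() helper from
 * the wait_on_bit() example above:
 *
 *	Acquire (returns with the bit set):
 *
 *		wait_on_bit_lock(&my_dev->flags, MY_FLAG_LOCK, my_bit_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	Release (clear the bit, then wake the next waiter):
 *
 *		clear_bit(MY_FLAG_LOCK, &my_dev->flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_dev->flags, MY_FLAG_LOCK);
 */
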
#endif /* __KERNEL__ */

#endif