xref: /linux-6.15/include/linux/wait.h (revision 82d00a93)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10 
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13 
14 typedef struct wait_queue_entry wait_queue_entry_t;
15 
16 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
18 
19 /* wait_queue_entry::flags */
20 #define WQ_FLAG_EXCLUSIVE	0x01
21 #define WQ_FLAG_WOKEN		0x02
22 #define WQ_FLAG_BOOKMARK	0x04
23 #define WQ_FLAG_CUSTOM		0x08
24 
25 /*
26  * A single wait-queue entry structure:
27  */
28 struct wait_queue_entry {
29 	unsigned int		flags;
30 	void			*private;
31 	wait_queue_func_t	func;
32 	struct list_head	entry;
33 };
34 
35 struct wait_queue_head {
36 	spinlock_t		lock;
37 	struct list_head	head;
38 };
39 typedef struct wait_queue_head wait_queue_head_t;
40 
41 struct task_struct;
42 
43 /*
44  * Macros for declaration and initialisation of the datatypes
45  */
46 
47 #define __WAITQUEUE_INITIALIZER(name, tsk) {					\
48 	.private	= tsk,							\
49 	.func		= default_wake_function,				\
50 	.entry		= { NULL, NULL } }
51 
52 #define DECLARE_WAITQUEUE(name, tsk)						\
53 	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
54 
55 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
56 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
57 	.head		= { &(name).head, &(name).head } }
58 
59 #define DECLARE_WAIT_QUEUE_HEAD(name) \
60 	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
61 
62 extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
63 
64 #define init_waitqueue_head(wq_head)						\
65 	do {									\
66 		static struct lock_class_key __key;				\
67 										\
68 		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
69 	} while (0)
70 
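/*
 * Example (editor's sketch, not upstream documentation): a dynamically
 * allocated object that embeds a wait queue head initialises it with
 * init_waitqueue_head() before first use; struct foo_device and foo_alloc()
 * are hypothetical names used only for illustration::
 *
 *	struct foo_device {
 *		wait_queue_head_t	read_wait;
 *		bool			data_ready;
 *	};
 *
 *	static struct foo_device *foo_alloc(void)
 *	{
 *		struct foo_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return NULL;
 *		init_waitqueue_head(&dev->read_wait);
 *		return dev;
 *	}
 *
 * Statically allocated heads can instead be declared and initialised in one
 * step with DECLARE_WAIT_QUEUE_HEAD().
 */
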
71 #ifdef CONFIG_LOCKDEP
72 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
73 	({ init_waitqueue_head(&name); name; })
74 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
75 	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
76 #else
77 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
78 #endif
79 
80 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
81 {
82 	wq_entry->flags		= 0;
83 	wq_entry->private	= p;
84 	wq_entry->func		= default_wake_function;
85 }
86 
87 static inline void
88 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
89 {
90 	wq_entry->flags		= 0;
91 	wq_entry->private	= NULL;
92 	wq_entry->func		= func;
93 }
94 
95 /**
96  * waitqueue_active -- locklessly test for waiters on the queue
97  * @wq_head: the waitqueue to test for waiters
98  *
99  * returns true if the wait list is not empty
100  *
101  * NOTE: this function is lockless and requires care, incorrect usage _will_
102  * lead to sporadic and non-obvious failure.
103  *
104  * Use either while holding wait_queue_head::lock or when used for wakeups
105  * with an extra smp_mb() like::
106  *
107  *      CPU0 - waker                    CPU1 - waiter
108  *
109  *                                      for (;;) {
110  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
111  *      smp_mb();                         // smp_mb() from set_current_state()
112  *      if (waitqueue_active(wq_head))         if (@cond)
113  *        wake_up(wq_head);                      break;
114  *                                        schedule();
115  *                                      }
116  *                                      finish_wait(&wq_head, &wait);
117  *
118  * Because without the explicit smp_mb() it's possible for the
119  * waitqueue_active() load to get hoisted over the @cond store such that we'll
120  * observe an empty wait list while the waiter might not observe @cond.
121  *
122  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
123  * which (when the lock is uncontended) are of roughly equal cost.
124  */
125 static inline int waitqueue_active(struct wait_queue_head *wq_head)
126 {
127 	return !list_empty(&wq_head->head);
128 }
129 
130 /**
131  * wq_has_single_sleeper - check if there is only one sleeper
132  * @wq_head: wait queue head
133  *
134  * Returns true if wq_head has only one sleeper on the list.
135  *
136  * Please refer to the comment for waitqueue_active.
137  */
138 static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
139 {
140 	return list_is_singular(&wq_head->head);
141 }
142 
143 /**
144  * wq_has_sleeper - check if there are any waiting processes
145  * @wq_head: wait queue head
146  *
147  * Returns true if wq_head has waiting processes
148  *
149  * Please refer to the comment for waitqueue_active.
150  */
151 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
152 {
153 	/*
154 	 * We need to be sure we are in sync with the
155 	 * add_wait_queue modifications to the wait queue.
156 	 *
157 	 * This memory barrier should be paired with one on the
158 	 * waiting side.
159 	 */
160 	smp_mb();
161 	return waitqueue_active(wq_head);
162 }
163 
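/*
 * Example (editor's sketch): a waker may use wq_has_sleeper() to avoid the
 * wake_up() call when nobody is waiting. The smp_mb() inside
 * wq_has_sleeper() orders the condition store before the lockless list
 * check and pairs with the barrier implied by set_current_state() on the
 * waiting side, as described for waitqueue_active() above. The foo_device
 * names are hypothetical::
 *
 *	static void foo_data_arrived(struct foo_device *dev)
 *	{
 *		WRITE_ONCE(dev->data_ready, true);
 *		if (wq_has_sleeper(&dev->read_wait))
 *			wake_up_interruptible(&dev->read_wait);
 *	}
 */
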
164 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
165 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 
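/*
 * Example (editor's sketch): the classic open-coded wait loop built from
 * DECLARE_WAITQUEUE(), add_wait_queue() and remove_wait_queue(). The
 * wait_event*() macros further down are normally preferable; this sketch
 * only illustrates the primitives (dev and data_ready are hypothetical)::
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&dev->read_wait, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (READ_ONCE(dev->data_ready))
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&dev->read_wait, &wait);
 */
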
168 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
169 {
170 	list_add(&wq_entry->entry, &wq_head->head);
171 }
172 
173 /*
174  * Used for wake-one threads:
175  */
176 static inline void
177 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
178 {
179 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
180 	__add_wait_queue(wq_head, wq_entry);
181 }
182 
183 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
184 {
185 	list_add_tail(&wq_entry->entry, &wq_head->head);
186 }
187 
188 static inline void
189 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
190 {
191 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
192 	__add_wait_queue_entry_tail(wq_head, wq_entry);
193 }
194 
195 static inline void
196 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
197 {
198 	list_del(&wq_entry->entry);
199 }
200 
201 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
202 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
203 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
204 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
205 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
206 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
207 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
208 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
209 
210 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
211 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
212 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
213 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
214 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
215 
216 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
217 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
218 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
219 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
220 
221 /*
222  * Wakeup macros to be used to report events to the targets.
223  */
224 #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
225 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
226 #define wake_up_poll(x, m)							\
227 	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
228 #define wake_up_locked_poll(x, m)						\
229 	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
230 #define wake_up_interruptible_poll(x, m)					\
231 	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
232 #define wake_up_interruptible_sync_poll(x, m)					\
233 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
234 #define wake_up_interruptible_sync_poll_locked(x, m)				\
235 	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
236 
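/*
 * Example (editor's sketch): a driver's ->poll() method registers on the
 * wait queue with poll_wait() (declared in <linux/poll.h>), and the
 * producer reports readiness with wake_up_poll() so that the event mask is
 * passed to the woken waiters as the wakeup key. foo_poll(), foo_device and
 * data_ready are hypothetical::
 *
 *	static __poll_t foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_device *dev = file->private_data;
 *
 *		poll_wait(file, &dev->read_wait, wait);
 *		return READ_ONCE(dev->data_ready) ? EPOLLIN | EPOLLRDNORM : 0;
 *	}
 *
 *	// producer side, after making new data visible:
 *	wake_up_poll(&dev->read_wait, EPOLLIN);
 */
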
237 #define ___wait_cond_timeout(condition)						\
238 ({										\
239 	bool __cond = (condition);						\
240 	if (__cond && !__ret)							\
241 		__ret = 1;							\
242 	__cond || !__ret;							\
243 })
244 
245 #define ___wait_is_interruptible(state)						\
246 	(!__builtin_constant_p(state) ||					\
247 		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
248 
249 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
250 
251 /*
252  * The below macro ___wait_event() has an explicit shadow of the __ret
253  * variable when used from the wait_event_*() macros.
254  *
255  * This is so that both can use the ___wait_cond_timeout() construct
256  * to wrap the condition.
257  *
258  * The type inconsistency of the wait_event_*() __ret variable is also
259  * on purpose; we use long where we can return timeout values and int
260  * otherwise.
261  */
262 
263 #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
264 ({										\
265 	__label__ __out;							\
266 	struct wait_queue_entry __wq_entry;					\
267 	long __ret = ret;	/* explicit shadow */				\
268 										\
269 	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
270 	for (;;) {								\
271 		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
272 										\
273 		if (condition)							\
274 			break;							\
275 										\
276 		if (___wait_is_interruptible(state) && __int) {			\
277 			__ret = __int;						\
278 			goto __out;						\
279 		}								\
280 										\
281 		cmd;								\
282 	}									\
283 	finish_wait(&wq_head, &__wq_entry);					\
284 __out:	__ret;									\
285 })
286 
287 #define __wait_event(wq_head, condition)					\
288 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
289 			    schedule())
290 
291 /**
292  * wait_event - sleep until a condition gets true
293  * @wq_head: the waitqueue to wait on
294  * @condition: a C expression for the event to wait for
295  *
296  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
297  * @condition evaluates to true. The @condition is checked each time
298  * the waitqueue @wq_head is woken up.
299  *
300  * wake_up() has to be called after changing any variable that could
301  * change the result of the wait condition.
302  */
303 #define wait_event(wq_head, condition)						\
304 do {										\
305 	might_sleep();								\
306 	if (condition)								\
307 		break;								\
308 	__wait_event(wq_head, condition);					\
309 } while (0)
310 
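/*
 * Example (editor's sketch): the usual pairing of wait_event() with
 * wake_up(). The condition is an ordinary expression re-evaluated on every
 * wakeup; dev and data_ready are hypothetical::
 *
 *	// waiting side
 *	wait_event(dev->read_wait, READ_ONCE(dev->data_ready));
 *
 *	// waking side
 *	WRITE_ONCE(dev->data_ready, true);
 *	wake_up(&dev->read_wait);
 */
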
311 #define __io_wait_event(wq_head, condition)					\
312 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
313 			    io_schedule())
314 
315 /*
316  * io_wait_event() -- like wait_event() but with io_schedule()
317  */
318 #define io_wait_event(wq_head, condition)					\
319 do {										\
320 	might_sleep();								\
321 	if (condition)								\
322 		break;								\
323 	__io_wait_event(wq_head, condition);					\
324 } while (0)
325 
326 #define __wait_event_freezable(wq_head, condition)				\
327 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
328 			    freezable_schedule())
329 
330 /**
331  * wait_event_freezable - sleep (or freeze) until a condition gets true
332  * @wq_head: the waitqueue to wait on
333  * @condition: a C expression for the event to wait for
334  *
335  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
336  * to system load) until the @condition evaluates to true. The
337  * @condition is checked each time the waitqueue @wq_head is woken up.
338  *
339  * wake_up() has to be called after changing any variable that could
340  * change the result of the wait condition.
341  */
342 #define wait_event_freezable(wq_head, condition)				\
343 ({										\
344 	int __ret = 0;								\
345 	might_sleep();								\
346 	if (!(condition))							\
347 		__ret = __wait_event_freezable(wq_head, condition);		\
348 	__ret;									\
349 })
350 
351 #define __wait_event_timeout(wq_head, condition, timeout)			\
352 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
353 		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
354 		      __ret = schedule_timeout(__ret))
355 
356 /**
357  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
358  * @wq_head: the waitqueue to wait on
359  * @condition: a C expression for the event to wait for
360  * @timeout: timeout, in jiffies
361  *
362  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
363  * @condition evaluates to true. The @condition is checked each time
364  * the waitqueue @wq_head is woken up.
365  *
366  * wake_up() has to be called after changing any variable that could
367  * change the result of the wait condition.
368  *
369  * Returns:
370  * 0 if the @condition evaluated to %false after the @timeout elapsed,
371  * 1 if the @condition evaluated to %true after the @timeout elapsed,
372  * or the remaining jiffies (at least 1) if the @condition evaluated
373  * to %true before the @timeout elapsed.
374  */
375 #define wait_event_timeout(wq_head, condition, timeout)				\
376 ({										\
377 	long __ret = timeout;							\
378 	might_sleep();								\
379 	if (!___wait_cond_timeout(condition))					\
380 		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
381 	__ret;									\
382 })
383 
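/*
 * Example (editor's sketch): handling the possible outcomes of
 * wait_event_timeout(); the one-second timeout and the surrounding names
 * are illustrative only::
 *
 *	long ret = wait_event_timeout(dev->read_wait,
 *				      READ_ONCE(dev->data_ready), HZ);
 *	if (!ret)
 *		return -ETIMEDOUT;	// still false after one second
 *	// ret >= 1: condition became true, ret is the remaining jiffies
 */
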
384 #define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
385 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
386 		      TASK_INTERRUPTIBLE, 0, timeout,				\
387 		      __ret = freezable_schedule_timeout(__ret))
388 
389 /*
390  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
391  * increasing load and is freezable.
392  */
393 #define wait_event_freezable_timeout(wq_head, condition, timeout)		\
394 ({										\
395 	long __ret = timeout;							\
396 	might_sleep();								\
397 	if (!___wait_cond_timeout(condition))					\
398 		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
399 	__ret;									\
400 })
401 
402 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
403 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
404 			    cmd1; schedule(); cmd2)
405 /*
406  * Just like wait_event_cmd(), except it sets exclusive flag
407  */
408 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
409 do {										\
410 	if (condition)								\
411 		break;								\
412 	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
413 } while (0)
414 
415 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
416 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
417 			    cmd1; schedule(); cmd2)
418 
419 /**
420  * wait_event_cmd - sleep until a condition gets true
421  * @wq_head: the waitqueue to wait on
422  * @condition: a C expression for the event to wait for
423  * @cmd1: the command to be executed before sleep
424  * @cmd2: the command to be executed after sleep
425  *
426  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
427  * @condition evaluates to true. The @condition is checked each time
428  * the waitqueue @wq_head is woken up.
429  *
430  * wake_up() has to be called after changing any variable that could
431  * change the result of the wait condition.
432  */
433 #define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
434 do {										\
435 	if (condition)								\
436 		break;								\
437 	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
438 } while (0)
439 
440 #define __wait_event_interruptible(wq_head, condition)				\
441 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
442 		      schedule())
443 
444 /**
445  * wait_event_interruptible - sleep until a condition gets true
446  * @wq_head: the waitqueue to wait on
447  * @condition: a C expression for the event to wait for
448  *
449  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
450  * @condition evaluates to true or a signal is received.
451  * The @condition is checked each time the waitqueue @wq_head is woken up.
452  *
453  * wake_up() has to be called after changing any variable that could
454  * change the result of the wait condition.
455  *
456  * The function will return -ERESTARTSYS if it was interrupted by a
457  * signal and 0 if @condition evaluated to true.
458  */
459 #define wait_event_interruptible(wq_head, condition)				\
460 ({										\
461 	int __ret = 0;								\
462 	might_sleep();								\
463 	if (!(condition))							\
464 		__ret = __wait_event_interruptible(wq_head, condition);		\
465 	__ret;									\
466 })
467 
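/*
 * Example (editor's sketch): propagating the -ERESTARTSYS result to the
 * caller so signal delivery either restarts the syscall or surfaces as
 * -EINTR; names are illustrative::
 *
 *	int err = wait_event_interruptible(dev->read_wait,
 *					   READ_ONCE(dev->data_ready));
 *	if (err)
 *		return err;	// -ERESTARTSYS: interrupted by a signal
 */
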
468 #define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
469 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
470 		      TASK_INTERRUPTIBLE, 0, timeout,				\
471 		      __ret = schedule_timeout(__ret))
472 
473 /**
474  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
475  * @wq_head: the waitqueue to wait on
476  * @condition: a C expression for the event to wait for
477  * @timeout: timeout, in jiffies
478  *
479  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
480  * @condition evaluates to true or a signal is received.
481  * The @condition is checked each time the waitqueue @wq_head is woken up.
482  *
483  * wake_up() has to be called after changing any variable that could
484  * change the result of the wait condition.
485  *
486  * Returns:
487  * 0 if the @condition evaluated to %false after the @timeout elapsed,
488  * 1 if the @condition evaluated to %true after the @timeout elapsed,
489  * the remaining jiffies (at least 1) if the @condition evaluated
490  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
491  * interrupted by a signal.
492  */
493 #define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
494 ({										\
495 	long __ret = timeout;							\
496 	might_sleep();								\
497 	if (!___wait_cond_timeout(condition))					\
498 		__ret = __wait_event_interruptible_timeout(wq_head,		\
499 						condition, timeout);		\
500 	__ret;									\
501 })
502 
503 #define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
504 ({										\
505 	int __ret = 0;								\
506 	struct hrtimer_sleeper __t;						\
507 										\
508 	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
509 				      HRTIMER_MODE_REL);			\
510 	if ((timeout) != KTIME_MAX)						\
511 		hrtimer_start_range_ns(&__t.timer, timeout,			\
512 				       current->timer_slack_ns,			\
513 				       HRTIMER_MODE_REL);			\
514 										\
515 	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
516 		if (!__t.task) {						\
517 			__ret = -ETIME;						\
518 			break;							\
519 		}								\
520 		schedule());							\
521 										\
522 	hrtimer_cancel(&__t.timer);						\
523 	destroy_hrtimer_on_stack(&__t.timer);					\
524 	__ret;									\
525 })
526 
527 /**
528  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
529  * @wq_head: the waitqueue to wait on
530  * @condition: a C expression for the event to wait for
531  * @timeout: timeout, as a ktime_t
532  *
533  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
534  * @condition evaluates to true or the timeout elapses.
535  * The @condition is checked each time the waitqueue @wq_head is woken up.
536  *
537  * wake_up() has to be called after changing any variable that could
538  * change the result of the wait condition.
539  *
540  * The function returns 0 if @condition became true, or -ETIME if the timeout
541  * elapsed.
542  */
543 #define wait_event_hrtimeout(wq_head, condition, timeout)			\
544 ({										\
545 	int __ret = 0;								\
546 	might_sleep();								\
547 	if (!(condition))							\
548 		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
549 					       TASK_UNINTERRUPTIBLE);		\
550 	__ret;									\
551 })
552 
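/*
 * Example (editor's sketch): waiting with a high-resolution, relative
 * timeout; KTIME_MAX means "no timeout". The 5ms value and names are
 * illustrative::
 *
 *	if (wait_event_hrtimeout(dev->read_wait,
 *				 READ_ONCE(dev->data_ready),
 *				 ms_to_ktime(5)) == -ETIME)
 *		pr_debug("no data within 5ms\n");
 */
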
553 /**
554  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
555  * @wq: the waitqueue to wait on
556  * @condition: a C expression for the event to wait for
557  * @timeout: timeout, as a ktime_t
558  *
559  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
560  * @condition evaluates to true or a signal is received.
561  * The @condition is checked each time the waitqueue @wq is woken up.
562  *
563  * wake_up() has to be called after changing any variable that could
564  * change the result of the wait condition.
565  *
566  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
567  * interrupted by a signal, or -ETIME if the timeout elapsed.
568  */
569 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
570 ({										\
571 	long __ret = 0;								\
572 	might_sleep();								\
573 	if (!(condition))							\
574 		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
575 					       TASK_INTERRUPTIBLE);		\
576 	__ret;									\
577 })
578 
579 #define __wait_event_interruptible_exclusive(wq, condition)			\
580 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
581 		      schedule())
582 
583 #define wait_event_interruptible_exclusive(wq, condition)			\
584 ({										\
585 	int __ret = 0;								\
586 	might_sleep();								\
587 	if (!(condition))							\
588 		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
589 	__ret;									\
590 })
591 
592 #define __wait_event_killable_exclusive(wq, condition)				\
593 	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
594 		      schedule())
595 
596 #define wait_event_killable_exclusive(wq, condition)				\
597 ({										\
598 	int __ret = 0;								\
599 	might_sleep();								\
600 	if (!(condition))							\
601 		__ret = __wait_event_killable_exclusive(wq, condition);		\
602 	__ret;									\
603 })
604 
605 
606 #define __wait_event_freezable_exclusive(wq, condition)				\
607 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
608 			freezable_schedule())
609 
610 #define wait_event_freezable_exclusive(wq, condition)				\
611 ({										\
612 	int __ret = 0;								\
613 	might_sleep();								\
614 	if (!(condition))							\
615 		__ret = __wait_event_freezable_exclusive(wq, condition);	\
616 	__ret;									\
617 })
618 
619 /**
620  * wait_event_idle - wait for a condition without contributing to system load
621  * @wq_head: the waitqueue to wait on
622  * @condition: a C expression for the event to wait for
623  *
624  * The process is put to sleep (TASK_IDLE) until the
625  * @condition evaluates to true.
626  * The @condition is checked each time the waitqueue @wq_head is woken up.
627  *
628  * wake_up() has to be called after changing any variable that could
629  * change the result of the wait condition.
630  *
631  */
632 #define wait_event_idle(wq_head, condition)					\
633 do {										\
634 	might_sleep();								\
635 	if (!(condition))							\
636 		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
637 } while (0)
638 
639 /**
640  * wait_event_idle_exclusive - wait for a condition without contributing to system load
641  * @wq_head: the waitqueue to wait on
642  * @condition: a C expression for the event to wait for
643  *
644  * The process is put to sleep (TASK_IDLE) until the
645  * @condition evaluates to true.
646  * The @condition is checked each time the waitqueue @wq_head is woken up.
647  *
648  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
649  * set, so if other processes wait on the same list, no further
650  * processes are considered when this process is woken.
651  *
652  * wake_up() has to be called after changing any variable that could
653  * change the result of the wait condition.
654  *
655  */
656 #define wait_event_idle_exclusive(wq_head, condition)				\
657 do {										\
658 	might_sleep();								\
659 	if (!(condition))							\
660 		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
661 } while (0)
662 
663 #define __wait_event_idle_timeout(wq_head, condition, timeout)			\
664 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
665 		      TASK_IDLE, 0, timeout,					\
666 		      __ret = schedule_timeout(__ret))
667 
668 /**
669  * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
670  * @wq_head: the waitqueue to wait on
671  * @condition: a C expression for the event to wait for
672  * @timeout: timeout, in jiffies
673  *
674  * The process is put to sleep (TASK_IDLE) until the
675  * @condition evaluates to true. The @condition is checked each time
676  * the waitqueue @wq_head is woken up.
677  *
678  * wake_up() has to be called after changing any variable that could
679  * change the result of the wait condition.
680  *
681  * Returns:
682  * 0 if the @condition evaluated to %false after the @timeout elapsed,
683  * 1 if the @condition evaluated to %true after the @timeout elapsed,
684  * or the remaining jiffies (at least 1) if the @condition evaluated
685  * to %true before the @timeout elapsed.
686  */
687 #define wait_event_idle_timeout(wq_head, condition, timeout)			\
688 ({										\
689 	long __ret = timeout;							\
690 	might_sleep();								\
691 	if (!___wait_cond_timeout(condition))					\
692 		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
693 	__ret;									\
694 })
695 
696 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
697 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
698 		      TASK_IDLE, 1, timeout,					\
699 		      __ret = schedule_timeout(__ret))
700 
701 /**
702  * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
703  * @wq_head: the waitqueue to wait on
704  * @condition: a C expression for the event to wait for
705  * @timeout: timeout, in jiffies
706  *
707  * The process is put to sleep (TASK_IDLE) until the
708  * @condition evaluates to true. The @condition is checked each time
709  * the waitqueue @wq_head is woken up.
710  *
711  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
712  * set, so if other processes wait on the same list, no further
713  * processes are considered when this process is woken.
714  *
715  * wake_up() has to be called after changing any variable that could
716  * change the result of the wait condition.
717  *
718  * Returns:
719  * 0 if the @condition evaluated to %false after the @timeout elapsed,
720  * 1 if the @condition evaluated to %true after the @timeout elapsed,
721  * or the remaining jiffies (at least 1) if the @condition evaluated
722  * to %true before the @timeout elapsed.
723  */
724 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
725 ({										\
726 	long __ret = timeout;							\
727 	might_sleep();								\
728 	if (!___wait_cond_timeout(condition))					\
729 		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
730 	__ret;									\
731 })
732 
733 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
734 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
735 
736 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
737 ({										\
738 	int __ret;								\
739 	DEFINE_WAIT(__wait);							\
740 	if (exclusive)								\
741 		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
742 	do {									\
743 		__ret = fn(&(wq), &__wait);					\
744 		if (__ret)							\
745 			break;							\
746 	} while (!(condition));							\
747 	__remove_wait_queue(&(wq), &__wait);					\
748 	__set_current_state(TASK_RUNNING);					\
749 	__ret;									\
750 })
751 
752 
753 /**
754  * wait_event_interruptible_locked - sleep until a condition gets true
755  * @wq: the waitqueue to wait on
756  * @condition: a C expression for the event to wait for
757  *
758  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
759  * @condition evaluates to true or a signal is received.
760  * The @condition is checked each time the waitqueue @wq is woken up.
761  *
762  * It must be called with wq.lock held.  The spinlock is unlocked while
763  * sleeping, but @condition is tested with the lock held, and the lock is
764  * held again when this macro exits.
765  *
766  * The lock is locked/unlocked using spin_lock()/spin_unlock()
767  * functions which must match the way they are locked/unlocked outside
768  * of this macro.
769  *
770  * wake_up_locked() has to be called after changing any variable that could
771  * change the result of the wait condition.
772  *
773  * The function will return -ERESTARTSYS if it was interrupted by a
774  * signal and 0 if @condition evaluated to true.
775  */
776 #define wait_event_interruptible_locked(wq, condition)				\
777 	((condition)								\
778 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
779 
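/*
 * Example (editor's sketch): waiting under wq.lock itself. The wait queue
 * spinlock also protects the condition, and the waker uses the *_locked
 * wakeup while holding it. dev and data_ready are hypothetical::
 *
 *	// waiting side
 *	spin_lock(&dev->read_wait.lock);
 *	err = wait_event_interruptible_locked(dev->read_wait,
 *					      dev->data_ready);
 *	if (!err)
 *		dev->data_ready = false;	// consume under the lock
 *	spin_unlock(&dev->read_wait.lock);
 *
 *	// waking side
 *	spin_lock(&dev->read_wait.lock);
 *	dev->data_ready = true;
 *	wake_up_locked(&dev->read_wait);
 *	spin_unlock(&dev->read_wait.lock);
 */
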
780 /**
781  * wait_event_interruptible_locked_irq - sleep until a condition gets true
782  * @wq: the waitqueue to wait on
783  * @condition: a C expression for the event to wait for
784  *
785  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
786  * @condition evaluates to true or a signal is received.
787  * The @condition is checked each time the waitqueue @wq is woken up.
788  *
789  * It must be called with wq.lock held.  The spinlock is unlocked while
790  * sleeping, but @condition is tested with the lock held, and the lock is
791  * held again when this macro exits.
792  *
793  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
794  * functions which must match the way they are locked/unlocked outside
795  * of this macro.
796  *
797  * wake_up_locked() has to be called after changing any variable that could
798  * change the result of the wait condition.
799  *
800  * The function will return -ERESTARTSYS if it was interrupted by a
801  * signal and 0 if @condition evaluated to true.
802  */
803 #define wait_event_interruptible_locked_irq(wq, condition)			\
804 	((condition)								\
805 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
806 
807 /**
808  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
809  * @wq: the waitqueue to wait on
810  * @condition: a C expression for the event to wait for
811  *
812  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
813  * @condition evaluates to true or a signal is received.
814  * The @condition is checked each time the waitqueue @wq is woken up.
815  *
816  * It must be called with wq.lock held.  The spinlock is unlocked while
817  * sleeping, but @condition is tested with the lock held, and the lock is
818  * held again when this macro exits.
819  *
820  * The lock is locked/unlocked using spin_lock()/spin_unlock()
821  * functions which must match the way they are locked/unlocked outside
822  * of this macro.
823  *
824  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
825  * set, so if other processes wait on the same list, no further
826  * processes are considered when this process is woken.
827  *
828  * wake_up_locked() has to be called after changing any variable that could
829  * change the result of the wait condition.
830  *
831  * The function will return -ERESTARTSYS if it was interrupted by a
832  * signal and 0 if @condition evaluated to true.
833  */
834 #define wait_event_interruptible_exclusive_locked(wq, condition)		\
835 	((condition)								\
836 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
837 
838 /**
839  * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
840  * @wq: the waitqueue to wait on
841  * @condition: a C expression for the event to wait for
842  *
843  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
844  * @condition evaluates to true or a signal is received.
845  * The @condition is checked each time the waitqueue @wq is woken up.
846  *
847  * It must be called with wq.lock held.  The spinlock is unlocked while
848  * sleeping, but @condition is tested with the lock held, and the lock is
849  * held again when this macro exits.
850  *
851  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
852  * functions which must match the way they are locked/unlocked outside
853  * of this macro.
854  *
855  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
856  * set, so if other processes wait on the same list, no further
857  * processes are considered when this process is woken.
858  *
859  * wake_up_locked() has to be called after changing any variable that could
860  * change the result of the wait condition.
861  *
862  * The function will return -ERESTARTSYS if it was interrupted by a
863  * signal and 0 if @condition evaluated to true.
864  */
865 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
866 	((condition)								\
867 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
868 
869 
870 #define __wait_event_killable(wq, condition)					\
871 	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
872 
873 /**
874  * wait_event_killable - sleep until a condition gets true
875  * @wq_head: the waitqueue to wait on
876  * @condition: a C expression for the event to wait for
877  *
878  * The process is put to sleep (TASK_KILLABLE) until the
879  * @condition evaluates to true or a signal is received.
880  * The @condition is checked each time the waitqueue @wq_head is woken up.
881  *
882  * wake_up() has to be called after changing any variable that could
883  * change the result of the wait condition.
884  *
885  * The function will return -ERESTARTSYS if it was interrupted by a
886  * signal and 0 if @condition evaluated to true.
887  */
888 #define wait_event_killable(wq_head, condition)					\
889 ({										\
890 	int __ret = 0;								\
891 	might_sleep();								\
892 	if (!(condition))							\
893 		__ret = __wait_event_killable(wq_head, condition);		\
894 	__ret;									\
895 })
896 
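/*
 * Example (editor's sketch): a potentially long wait that should not be
 * interruptible by ordinary signals but must still respond to a fatal
 * signal; names are illustrative::
 *
 *	int err = wait_event_killable(dev->read_wait,
 *				      READ_ONCE(dev->data_ready));
 *	if (err)
 *		return err;	// -ERESTARTSYS: fatal signal pending
 */
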
897 #define __wait_event_killable_timeout(wq_head, condition, timeout)		\
898 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
899 		      TASK_KILLABLE, 0, timeout,				\
900 		      __ret = schedule_timeout(__ret))
901 
902 /**
903  * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
904  * @wq_head: the waitqueue to wait on
905  * @condition: a C expression for the event to wait for
906  * @timeout: timeout, in jiffies
907  *
908  * The process is put to sleep (TASK_KILLABLE) until the
909  * @condition evaluates to true or a kill signal is received.
910  * The @condition is checked each time the waitqueue @wq_head is woken up.
911  *
912  * wake_up() has to be called after changing any variable that could
913  * change the result of the wait condition.
914  *
915  * Returns:
916  * 0 if the @condition evaluated to %false after the @timeout elapsed,
917  * 1 if the @condition evaluated to %true after the @timeout elapsed,
918  * the remaining jiffies (at least 1) if the @condition evaluated
919  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
920  * interrupted by a kill signal.
921  *
922  * Only kill signals interrupt this process.
923  */
924 #define wait_event_killable_timeout(wq_head, condition, timeout)		\
925 ({										\
926 	long __ret = timeout;							\
927 	might_sleep();								\
928 	if (!___wait_cond_timeout(condition))					\
929 		__ret = __wait_event_killable_timeout(wq_head,			\
930 						condition, timeout);		\
931 	__ret;									\
932 })
933 
934 
935 #define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
936 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
937 			    spin_unlock_irq(&lock);				\
938 			    cmd;						\
939 			    schedule();						\
940 			    spin_lock_irq(&lock))
941 
942 /**
943  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
944  *			     condition is checked under the lock. This
945  *			     is expected to be called with the lock
946  *			     taken.
947  * @wq_head: the waitqueue to wait on
948  * @condition: a C expression for the event to wait for
949  * @lock: a locked spinlock_t, which will be released before cmd
950  *	  and schedule() and reacquired afterwards.
951  * @cmd: a command which is invoked outside the critical section before
952  *	 sleep
953  *
954  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
955  * @condition evaluates to true. The @condition is checked each time
956  * the waitqueue @wq_head is woken up.
957  *
958  * wake_up() has to be called after changing any variable that could
959  * change the result of the wait condition.
960  *
961  * This is supposed to be called while holding the lock. The lock is
962  * dropped before invoking the cmd and going to sleep and is reacquired
963  * afterwards.
964  */
965 #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
966 do {										\
967 	if (condition)								\
968 		break;								\
969 	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
970 } while (0)
971 
972 /**
973  * wait_event_lock_irq - sleep until a condition gets true. The
974  *			 condition is checked under the lock. This
975  *			 is expected to be called with the lock
976  *			 taken.
977  * @wq_head: the waitqueue to wait on
978  * @condition: a C expression for the event to wait for
979  * @lock: a locked spinlock_t, which will be released before schedule()
980  *	  and reacquired afterwards.
981  *
982  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
983  * @condition evaluates to true. The @condition is checked each time
984  * the waitqueue @wq_head is woken up.
985  *
986  * wake_up() has to be called after changing any variable that could
987  * change the result of the wait condition.
988  *
989  * This is supposed to be called while holding the lock. The lock is
990  * dropped before going to sleep and is reacquired afterwards.
991  */
992 #define wait_event_lock_irq(wq_head, condition, lock)				\
993 do {										\
994 	if (condition)								\
995 		break;								\
996 	__wait_event_lock_irq(wq_head, condition, lock, );			\
997 } while (0)
998 
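/*
 * Example (editor's sketch): waiting for a spinlock-protected list to
 * become non-empty. @lock protects both the list and the condition; the
 * macro drops it around schedule() and reacquires it before re-testing.
 * foo_req, dev->lock and dev->pending are hypothetical::
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->read_wait,
 *			    !list_empty(&dev->pending),
 *			    dev->lock);
 *	req = list_first_entry(&dev->pending, struct foo_req, node);
 *	list_del(&req->node);
 *	spin_unlock_irq(&dev->lock);
 */
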
999 
1000 #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
1001 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
1002 		      spin_unlock_irq(&lock);					\
1003 		      cmd;							\
1004 		      schedule();						\
1005 		      spin_lock_irq(&lock))
1006 
1007 /**
1008  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1009  *		The condition is checked under the lock. This is expected to
1010  *		be called with the lock taken.
1011  * @wq_head: the waitqueue to wait on
1012  * @condition: a C expression for the event to wait for
1013  * @lock: a locked spinlock_t, which will be released before cmd and
1014  *	  schedule() and reacquired afterwards.
1015  * @cmd: a command which is invoked outside the critical section before
1016  *	 sleep
1017  *
1018  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1019  * @condition evaluates to true or a signal is received. The @condition is
1020  * checked each time the waitqueue @wq_head is woken up.
1021  *
1022  * wake_up() has to be called after changing any variable that could
1023  * change the result of the wait condition.
1024  *
1025  * This is supposed to be called while holding the lock. The lock is
1026  * dropped before invoking the cmd and going to sleep and is reacquired
1027  * afterwards.
1028  *
1029  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1030  * and 0 if @condition evaluated to true.
1031  */
1032 #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
1033 ({										\
1034 	int __ret = 0;								\
1035 	if (!(condition))							\
1036 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1037 						condition, lock, cmd);		\
1038 	__ret;									\
1039 })
1040 
1041 /**
1042  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1043  *		The condition is checked under the lock. This is expected
1044  *		to be called with the lock taken.
1045  * @wq_head: the waitqueue to wait on
1046  * @condition: a C expression for the event to wait for
1047  * @lock: a locked spinlock_t, which will be released before schedule()
1048  *	  and reacquired afterwards.
1049  *
1050  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1051  * @condition evaluates to true or a signal is received. The @condition is
1052  * checked each time the waitqueue @wq_head is woken up.
1053  *
1054  * wake_up() has to be called after changing any variable that could
1055  * change the result of the wait condition.
1056  *
1057  * This is supposed to be called while holding the lock. The lock is
1058  * dropped before going to sleep and is reacquired afterwards.
1059  *
1060  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1061  * and 0 if @condition evaluated to true.
1062  */
1063 #define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
1064 ({										\
1065 	int __ret = 0;								\
1066 	if (!(condition))							\
1067 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1068 						condition, lock,);		\
1069 	__ret;									\
1070 })
1071 
1072 #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
1073 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
1074 		      state, 0, timeout,					\
1075 		      spin_unlock_irq(&lock);					\
1076 		      __ret = schedule_timeout(__ret);				\
1077 		      spin_lock_irq(&lock));
1078 
1079 /**
1080  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1081  *		true or a timeout elapses. The condition is checked under
1082  *		the lock. This is expected to be called with the lock taken.
1083  * @wq_head: the waitqueue to wait on
1084  * @condition: a C expression for the event to wait for
1085  * @lock: a locked spinlock_t, which will be released before schedule()
1086  *	  and reacquired afterwards.
1087  * @timeout: timeout, in jiffies
1088  *
1089  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1090  * @condition evaluates to true or a signal is received. The @condition is
1091  * checked each time the waitqueue @wq_head is woken up.
1092  *
1093  * wake_up() has to be called after changing any variable that could
1094  * change the result of the wait condition.
1095  *
1096  * This is supposed to be called while holding the lock. The lock is
1097  * dropped before going to sleep and is reacquired afterwards.
1098  *
1099  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1100  * was interrupted by a signal, or the remaining jiffies (at least 1) if
1101  * the @condition evaluated to true before the @timeout elapsed.
1102  */
1103 #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
1104 						  timeout)			\
1105 ({										\
1106 	long __ret = timeout;							\
1107 	if (!___wait_cond_timeout(condition))					\
1108 		__ret = __wait_event_lock_irq_timeout(				\
1109 					wq_head, condition, lock, timeout,	\
1110 					TASK_INTERRUPTIBLE);			\
1111 	__ret;									\
1112 })
1113 
1114 #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
1115 ({										\
1116 	long __ret = timeout;							\
1117 	if (!___wait_cond_timeout(condition))					\
1118 		__ret = __wait_event_lock_irq_timeout(				\
1119 					wq_head, condition, lock, timeout,	\
1120 					TASK_UNINTERRUPTIBLE);			\
1121 	__ret;									\
1122 })
1123 
1124 /*
1125  * Waitqueues which are removed from the waitqueue_head at wakeup time
1126  */
1127 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1128 void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1129 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1130 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1131 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1132 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1133 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1134 
1135 #define DEFINE_WAIT_FUNC(name, function)					\
1136 	struct wait_queue_entry name = {					\
1137 		.private	= current,					\
1138 		.func		= function,					\
1139 		.entry		= LIST_HEAD_INIT((name).entry),			\
1140 	}
1141 
1142 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1143 
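/*
 * Example (editor's sketch): the open-coded equivalent of
 * wait_event_interruptible(), built from DEFINE_WAIT() and
 * prepare_to_wait()/finish_wait(). autoremove_wake_function() removes the
 * entry from the list at wakeup and finish_wait() cleans up in every case;
 * dev and data_ready are hypothetical::
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->read_wait, &wait, TASK_INTERRUPTIBLE);
 *		if (READ_ONCE(dev->data_ready))
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&dev->read_wait, &wait);
 */
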
1144 #define init_wait(wait)								\
1145 	do {									\
1146 		(wait)->private = current;					\
1147 		(wait)->func = autoremove_wake_function;			\
1148 		INIT_LIST_HEAD(&(wait)->entry);					\
1149 		(wait)->flags = 0;						\
1150 	} while (0)
1151 
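/*
 * Example (editor's sketch): the wait_woken() pattern, using
 * DEFINE_WAIT_FUNC() with woken_wake_function(). wait_woken() uses
 * WQ_FLAG_WOKEN to close the race between testing the condition and going
 * to sleep, and returns the remaining timeout. dev and data_ready are
 * hypothetical::
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(100);
 *
 *	add_wait_queue(&dev->read_wait, &wait);
 *	while (!READ_ONCE(dev->data_ready)) {
 *		if (signal_pending(current))
 *			break;
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout)
 *			break;
 *	}
 *	remove_wait_queue(&dev->read_wait, &wait);
 */
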
1152 #endif /* _LINUX_WAIT_H */
1153