xref: /linux-6.15/include/linux/wait.h (revision e756bc56)
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11 
12 typedef struct __wait_queue wait_queue_t;
13 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15 
16 struct __wait_queue {
17 	unsigned int		flags;
18 #define WQ_FLAG_EXCLUSIVE	0x01
19 	void			*private;
20 	wait_queue_func_t	func;
21 	struct list_head	task_list;
22 };
23 
24 struct wait_bit_key {
25 	void			*flags;
26 	int			bit_nr;
27 #define WAIT_ATOMIC_T_BIT_NR	-1
28 };
29 
30 struct wait_bit_queue {
31 	struct wait_bit_key	key;
32 	wait_queue_t		wait;
33 };
34 
35 struct __wait_queue_head {
36 	spinlock_t		lock;
37 	struct list_head	task_list;
38 };
39 typedef struct __wait_queue_head wait_queue_head_t;
40 
41 struct task_struct;
42 
43 /*
44  * Macros for declaration and initialisation of the datatypes
45  */
46 
47 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
48 	.private	= tsk,						\
49 	.func		= default_wake_function,			\
50 	.task_list	= { NULL, NULL } }
51 
52 #define DECLARE_WAITQUEUE(name, tsk)					\
53 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
54 
55 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
56 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
57 	.task_list	= { &(name).task_list, &(name).task_list } }
58 
59 #define DECLARE_WAIT_QUEUE_HEAD(name) \
60 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
61 
62 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
63 	{ .flags = word, .bit_nr = bit, }
64 
65 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
66 	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
67 
68 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
69 
70 #define init_waitqueue_head(q)				\
71 	do {						\
72 		static struct lock_class_key __key;	\
73 							\
74 		__init_waitqueue_head((q), #q, &__key);	\
75 	} while (0)
76 
77 #ifdef CONFIG_LOCKDEP
78 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
79 	({ init_waitqueue_head(&name); name; })
80 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
81 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
82 #else
83 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
84 #endif
85 
86 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
87 {
88 	q->flags	= 0;
89 	q->private	= p;
90 	q->func		= default_wake_function;
91 }
92 
93 static inline void
94 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
95 {
96 	q->flags	= 0;
97 	q->private	= NULL;
98 	q->func		= func;
99 }
100 
101 static inline int waitqueue_active(wait_queue_head_t *q)
102 {
103 	return !list_empty(&q->task_list);
104 }
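/*
 * Illustrative sketch (not part of the original header): waitqueue_active()
 * is commonly used to skip the cost of wake_up() when nobody is waiting.
 * The names my_wq and my_cond are hypothetical. Note that this unlocked
 * test can race with a waiter that has not yet queued itself; the waiter
 * side must re-check the condition after setting its task state (the
 * wait_event*() macros below do exactly that).
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	my_cond = 1;			// publish the new state first
 *	if (waitqueue_active(&my_wq))	// cheap unlocked check
 *		wake_up(&my_wq);	// only takes wq.lock when needed
 */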
105 
106 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
107 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
108 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
109 
110 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
111 {
112 	list_add(&new->task_list, &head->task_list);
113 }
114 
115 /*
116  * Used for wake-one threads:
117  */
118 static inline void
119 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
120 {
121 	wait->flags |= WQ_FLAG_EXCLUSIVE;
122 	__add_wait_queue(q, wait);
123 }
124 
125 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
126 					 wait_queue_t *new)
127 {
128 	list_add_tail(&new->task_list, &head->task_list);
129 }
130 
131 static inline void
132 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
133 {
134 	wait->flags |= WQ_FLAG_EXCLUSIVE;
135 	__add_wait_queue_tail(q, wait);
136 }
137 
138 static inline void
139 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
140 {
141 	list_del(&old->task_list);
142 }
143 
144 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
145 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
146 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
147 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
148 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
149 void __wake_up_bit(wait_queue_head_t *, void *, int);
150 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
151 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
152 void wake_up_bit(void *, int);
153 void wake_up_atomic_t(atomic_t *);
154 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
155 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
156 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
157 wait_queue_head_t *bit_waitqueue(void *, int);
158 
159 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
160 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
161 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
162 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
163 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
164 
165 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
166 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
167 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
168 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
169 
170 /*
171  * Wakeup macros to be used to report events to the targets.
172  */
173 #define wake_up_poll(x, m)						\
174 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
175 #define wake_up_locked_poll(x, m)					\
176 	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
177 #define wake_up_interruptible_poll(x, m)				\
178 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
179 #define wake_up_interruptible_sync_poll(x, m)				\
180 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181 
182 #define ___wait_cond_timeout(condition)					\
183 ({									\
184 	bool __cond = (condition);					\
185 	if (__cond && !__ret)						\
186 		__ret = 1;						\
187 	__cond || !__ret;						\
188 })
189 
190 #define ___wait_is_interruptible(state)					\
191 	(!__builtin_constant_p(state) ||				\
192 		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\
193 
194 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
195 ({									\
196 	__label__ __out;						\
197 	wait_queue_t __wait;						\
198 	long __ret = ret;						\
199 									\
200 	INIT_LIST_HEAD(&__wait.task_list);				\
201 	if (exclusive)							\
202 		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
203 	else								\
204 		__wait.flags = 0;					\
205 									\
206 	for (;;) {							\
207 		long __int = prepare_to_wait_event(&wq, &__wait, state);\
208 									\
209 		if (condition)						\
210 			break;						\
211 									\
212 		if (___wait_is_interruptible(state) && __int) {		\
213 			__ret = __int;					\
214 			if (exclusive) {				\
215 				abort_exclusive_wait(&wq, &__wait,	\
216 						     state, NULL);	\
217 				goto __out;				\
218 			}						\
219 			break;						\
220 		}							\
221 									\
222 		cmd;							\
223 	}								\
224 	finish_wait(&wq, &__wait);					\
225 __out:	__ret;								\
226 })
227 
228 #define __wait_event(wq, condition)					\
229 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
230 			    schedule())
231 
232 /**
233  * wait_event - sleep until a condition gets true
234  * @wq: the waitqueue to wait on
235  * @condition: a C expression for the event to wait for
236  *
237  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
238  * @condition evaluates to true. The @condition is checked each time
239  * the waitqueue @wq is woken up.
240  *
241  * wake_up() has to be called after changing any variable that could
242  * change the result of the wait condition.
243  */
244 #define wait_event(wq, condition)					\
245 do {									\
246 	if (condition)							\
247 		break;							\
248 	__wait_event(wq, condition);					\
249 } while (0)
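/*
 * Illustrative sketch (not part of the original header): a minimal
 * sleeper/waker pair built on wait_event()/wake_up(). The names my_wq and
 * data_ready are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool data_ready;
 *
 *	// consumer: sleeps in TASK_UNINTERRUPTIBLE until data_ready is true
 *	static void consumer(void)
 *	{
 *		wait_event(my_wq, data_ready);
 *		// data_ready was observed true here
 *	}
 *
 *	// producer: change the condition first, then wake the queue
 *	static void producer(void)
 *	{
 *		data_ready = true;
 *		wake_up(&my_wq);
 *	}
 */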
250 
251 #define __wait_event_timeout(wq, condition, timeout)			\
252 	___wait_event(wq, ___wait_cond_timeout(condition),		\
253 		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
254 		      __ret = schedule_timeout(__ret))
255 
256 /**
257  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
258  * @wq: the waitqueue to wait on
259  * @condition: a C expression for the event to wait for
260  * @timeout: timeout, in jiffies
261  *
262  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
263  * @condition evaluates to true. The @condition is checked each time
264  * the waitqueue @wq is woken up.
265  *
266  * wake_up() has to be called after changing any variable that could
267  * change the result of the wait condition.
268  *
269  * The function returns 0 if the @timeout elapsed, or the remaining
270  * jiffies (at least 1) if the @condition evaluated to %true before
271  * the @timeout elapsed.
272  */
273 #define wait_event_timeout(wq, condition, timeout)			\
274 ({									\
275 	long __ret = timeout;						\
276 	if (!___wait_cond_timeout(condition))				\
277 		__ret = __wait_event_timeout(wq, condition, timeout);	\
278 	__ret;								\
279 })
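/*
 * Illustrative sketch (not part of the original header): interpreting the
 * return value of wait_event_timeout(). my_wq and data_ready are
 * hypothetical.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(my_wq, data_ready, HZ);	// up to ~1s
 *	if (!remaining)
 *		pr_debug("timed out, condition still false\n");
 *	else
 *		pr_debug("condition true, %ld jiffies to spare\n", remaining);
 */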
280 
281 #define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
282 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
283 			    cmd1; schedule(); cmd2)
284 
285 /**
286  * wait_event_cmd - sleep until a condition gets true
287  * @wq: the waitqueue to wait on
288  * @condition: a C expression for the event to wait for
289  * @cmd1: the command to be executed before sleeping
290  * @cmd2: the command to be executed after sleeping
291  *
292  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
293  * @condition evaluates to true. The @condition is checked each time
294  * the waitqueue @wq is woken up.
295  *
296  * wake_up() has to be called after changing any variable that could
297  * change the result of the wait condition.
298  */
299 #define wait_event_cmd(wq, condition, cmd1, cmd2)			\
300 do {									\
301 	if (condition)							\
302 		break;							\
303 	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
304 } while (0)
305 
306 #define __wait_event_interruptible(wq, condition)			\
307 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
308 		      schedule())
309 
310 /**
311  * wait_event_interruptible - sleep until a condition gets true
312  * @wq: the waitqueue to wait on
313  * @condition: a C expression for the event to wait for
314  *
315  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
316  * @condition evaluates to true or a signal is received.
317  * The @condition is checked each time the waitqueue @wq is woken up.
318  *
319  * wake_up() has to be called after changing any variable that could
320  * change the result of the wait condition.
321  *
322  * The function will return -ERESTARTSYS if it was interrupted by a
323  * signal and 0 if @condition evaluated to true.
324  */
325 #define wait_event_interruptible(wq, condition)				\
326 ({									\
327 	int __ret = 0;							\
328 	if (!(condition))						\
329 		__ret = __wait_event_interruptible(wq, condition);	\
330 	__ret;								\
331 })
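/*
 * Illustrative sketch (not part of the original header): a typical
 * syscall-path caller propagating -ERESTARTSYS so the call can be
 * restarted after signal handling. my_wq and data_ready are hypothetical.
 *
 *	int ret;
 *
 *	ret = wait_event_interruptible(my_wq, data_ready);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: interrupted by a signal
 *	// data_ready was observed true here
 */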
332 
333 #define __wait_event_interruptible_timeout(wq, condition, timeout)	\
334 	___wait_event(wq, ___wait_cond_timeout(condition),		\
335 		      TASK_INTERRUPTIBLE, 0, timeout,			\
336 		      __ret = schedule_timeout(__ret))
337 
338 /**
339  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
340  * @wq: the waitqueue to wait on
341  * @condition: a C expression for the event to wait for
342  * @timeout: timeout, in jiffies
343  *
344  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
345  * @condition evaluates to true or a signal is received.
346  * The @condition is checked each time the waitqueue @wq is woken up.
347  *
348  * wake_up() has to be called after changing any variable that could
349  * change the result of the wait condition.
350  *
351  * Returns:
352  * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
353  * a signal, or the remaining jiffies (at least 1) if the @condition
354  * evaluated to %true before the @timeout elapsed.
355  */
356 #define wait_event_interruptible_timeout(wq, condition, timeout)	\
357 ({									\
358 	long __ret = timeout;						\
359 	if (!___wait_cond_timeout(condition))				\
360 		__ret = __wait_event_interruptible_timeout(wq,		\
361 						condition, timeout);	\
362 	__ret;								\
363 })
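/*
 * Illustrative sketch (not part of the original header): the three possible
 * outcomes of wait_event_interruptible_timeout(). my_wq and data_ready are
 * hypothetical.
 *
 *	long ret;
 *
 *	ret = wait_event_interruptible_timeout(my_wq, data_ready, 5 * HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// timeout elapsed, condition false
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS: signal received
 *	// ret > 0: condition true with ret jiffies to spare
 */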
364 
365 #define __wait_event_hrtimeout(wq, condition, timeout, state)		\
366 ({									\
367 	int __ret = 0;							\
368 	struct hrtimer_sleeper __t;					\
369 									\
370 	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
371 			      HRTIMER_MODE_REL);			\
372 	hrtimer_init_sleeper(&__t, current);				\
373 	if ((timeout).tv64 != KTIME_MAX)				\
374 		hrtimer_start_range_ns(&__t.timer, timeout,		\
375 				       current->timer_slack_ns,		\
376 				       HRTIMER_MODE_REL);		\
377 									\
378 	__ret = ___wait_event(wq, condition, state, 0, 0,		\
379 		if (!__t.task) {					\
380 			__ret = -ETIME;					\
381 			break;						\
382 		}							\
383 		schedule());						\
384 									\
385 	hrtimer_cancel(&__t.timer);					\
386 	destroy_hrtimer_on_stack(&__t.timer);				\
387 	__ret;								\
388 })
389 
390 /**
391  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
392  * @wq: the waitqueue to wait on
393  * @condition: a C expression for the event to wait for
394  * @timeout: timeout, as a ktime_t
395  *
396  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
397  * @condition evaluates to true.
398  * The @condition is checked each time the waitqueue @wq is woken up.
399  *
400  * wake_up() has to be called after changing any variable that could
401  * change the result of the wait condition.
402  *
403  * The function returns 0 if @condition became true, or -ETIME if the timeout
404  * elapsed.
405  */
406 #define wait_event_hrtimeout(wq, condition, timeout)			\
407 ({									\
408 	int __ret = 0;							\
409 	if (!(condition))						\
410 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
411 					       TASK_UNINTERRUPTIBLE);	\
412 	__ret;								\
413 })
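/*
 * Illustrative sketch (not part of the original header): a sub-jiffy wait
 * using wait_event_hrtimeout(). my_wq and data_ready are hypothetical.
 *
 *	ktime_t to = ktime_set(0, 500 * NSEC_PER_USEC);	// 500us
 *	int ret;
 *
 *	ret = wait_event_hrtimeout(my_wq, data_ready, to);
 *	if (ret == -ETIME)
 *		pr_debug("timed out, condition still false\n");
 *	// ret == 0: condition became true before the timeout
 */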
414 
415 /**
416  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
417  * @wq: the waitqueue to wait on
418  * @condition: a C expression for the event to wait for
419  * @timeout: timeout, as a ktime_t
420  *
421  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
422  * @condition evaluates to true or a signal is received.
423  * The @condition is checked each time the waitqueue @wq is woken up.
424  *
425  * wake_up() has to be called after changing any variable that could
426  * change the result of the wait condition.
427  *
428  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
429  * interrupted by a signal, or -ETIME if the timeout elapsed.
430  */
431 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
432 ({									\
433 	long __ret = 0;							\
434 	if (!(condition))						\
435 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
436 					       TASK_INTERRUPTIBLE);	\
437 	__ret;								\
438 })
439 
440 #define __wait_event_interruptible_exclusive(wq, condition)		\
441 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
442 		      schedule())
443 
444 #define wait_event_interruptible_exclusive(wq, condition)		\
445 ({									\
446 	int __ret = 0;							\
447 	if (!(condition))						\
448 		__ret = __wait_event_interruptible_exclusive(wq, condition);\
449 	__ret;								\
450 })
451 
452 
453 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
454 ({									\
455 	int __ret = 0;							\
456 	DEFINE_WAIT(__wait);						\
457 	if (exclusive)							\
458 		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
459 	do {								\
460 		if (likely(list_empty(&__wait.task_list)))		\
461 			__add_wait_queue_tail(&(wq), &__wait);		\
462 		set_current_state(TASK_INTERRUPTIBLE);			\
463 		if (signal_pending(current)) {				\
464 			__ret = -ERESTARTSYS;				\
465 			break;						\
466 		}							\
467 		if (irq)						\
468 			spin_unlock_irq(&(wq).lock);			\
469 		else							\
470 			spin_unlock(&(wq).lock);			\
471 		schedule();						\
472 		if (irq)						\
473 			spin_lock_irq(&(wq).lock);			\
474 		else							\
475 			spin_lock(&(wq).lock);				\
476 	} while (!(condition));						\
477 	__remove_wait_queue(&(wq), &__wait);				\
478 	__set_current_state(TASK_RUNNING);				\
479 	__ret;								\
480 })
481 
482 
483 /**
484  * wait_event_interruptible_locked - sleep until a condition gets true
485  * @wq: the waitqueue to wait on
486  * @condition: a C expression for the event to wait for
487  *
488  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
489  * @condition evaluates to true or a signal is received.
490  * The @condition is checked each time the waitqueue @wq is woken up.
491  *
492  * It must be called with wq.lock held.  This spinlock is unlocked
493  * while sleeping, but @condition is tested while the lock is held, and
494  * the lock is held again when this macro exits.
495  *
496  * The lock is locked/unlocked using spin_lock()/spin_unlock()
497  * functions which must match the way they are locked/unlocked outside
498  * of this macro.
499  *
500  * wake_up_locked() has to be called after changing any variable that could
501  * change the result of the wait condition.
502  *
503  * The function will return -ERESTARTSYS if it was interrupted by a
504  * signal and 0 if @condition evaluated to true.
505  */
506 #define wait_event_interruptible_locked(wq, condition)			\
507 	((condition)							\
508 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
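/*
 * Illustrative sketch (not part of the original header): the _locked
 * variants are entered with wq.lock already held and re-test the condition
 * under that lock; the matching waker uses wake_up_locked(). my_wq and
 * my_count are hypothetical.
 *
 *	int ret;
 *
 *	// waiter
 *	spin_lock(&my_wq.lock);
 *	ret = wait_event_interruptible_locked(my_wq, my_count > 0);
 *	if (!ret)
 *		my_count--;	// condition seen true, still under wq.lock
 *	spin_unlock(&my_wq.lock);
 *
 *	// waker
 *	spin_lock(&my_wq.lock);
 *	my_count++;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */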
509 
510 /**
511  * wait_event_interruptible_locked_irq - sleep until a condition gets true
512  * @wq: the waitqueue to wait on
513  * @condition: a C expression for the event to wait for
514  *
515  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
516  * @condition evaluates to true or a signal is received.
517  * The @condition is checked each time the waitqueue @wq is woken up.
518  *
519  * It must be called with wq.lock held.  This spinlock is unlocked
520  * while sleeping, but @condition is tested while the lock is held, and
521  * the lock is held again when this macro exits.
522  *
523  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
524  * functions which must match the way they are locked/unlocked outside
525  * of this macro.
526  *
527  * wake_up_locked() has to be called after changing any variable that could
528  * change the result of the wait condition.
529  *
530  * The function will return -ERESTARTSYS if it was interrupted by a
531  * signal and 0 if @condition evaluated to true.
532  */
533 #define wait_event_interruptible_locked_irq(wq, condition)		\
534 	((condition)							\
535 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
536 
537 /**
538  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
539  * @wq: the waitqueue to wait on
540  * @condition: a C expression for the event to wait for
541  *
542  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
543  * @condition evaluates to true or a signal is received.
544  * The @condition is checked each time the waitqueue @wq is woken up.
545  *
546  * It must be called with wq.lock held.  This spinlock is unlocked
547  * while sleeping, but @condition is tested while the lock is held, and
548  * the lock is held again when this macro exits.
549  *
550  * The lock is locked/unlocked using spin_lock()/spin_unlock()
551  * functions which must match the way they are locked/unlocked outside
552  * of this macro.
553  *
554  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
555  * set, so when this process is woken up, other processes waiting on
556  * the list after it are not considered (wake-one semantics).
557  *
558  * wake_up_locked() has to be called after changing any variable that could
559  * change the result of the wait condition.
560  *
561  * The function will return -ERESTARTSYS if it was interrupted by a
562  * signal and 0 if @condition evaluated to true.
563  */
564 #define wait_event_interruptible_exclusive_locked(wq, condition)	\
565 	((condition)							\
566 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
567 
568 /**
569  * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
570  * @wq: the waitqueue to wait on
571  * @condition: a C expression for the event to wait for
572  *
573  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
574  * @condition evaluates to true or a signal is received.
575  * The @condition is checked each time the waitqueue @wq is woken up.
576  *
577  * It must be called with wq.lock held.  This spinlock is unlocked
578  * while sleeping, but @condition is tested while the lock is held, and
579  * the lock is held again when this macro exits.
580  *
581  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
582  * functions which must match the way they are locked/unlocked outside
583  * of this macro.
584  *
585  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
586  * set, so when this process is woken up, other processes waiting on
587  * the list after it are not considered (wake-one semantics).
588  *
589  * wake_up_locked() has to be called after changing any variable that could
590  * change the result of the wait condition.
591  *
592  * The function will return -ERESTARTSYS if it was interrupted by a
593  * signal and 0 if @condition evaluated to true.
594  */
595 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
596 	((condition)							\
597 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
598 
599 
600 #define __wait_event_killable(wq, condition)				\
601 	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
602 
603 /**
604  * wait_event_killable - sleep until a condition gets true
605  * @wq: the waitqueue to wait on
606  * @condition: a C expression for the event to wait for
607  *
608  * The process is put to sleep (TASK_KILLABLE) until the
609  * @condition evaluates to true or a fatal signal is received.
610  * The @condition is checked each time the waitqueue @wq is woken up.
611  *
612  * wake_up() has to be called after changing any variable that could
613  * change the result of the wait condition.
614  *
615  * The function will return -ERESTARTSYS if it was interrupted by a
616  * fatal signal and 0 if @condition evaluated to true.
617  */
618 #define wait_event_killable(wq, condition)				\
619 ({									\
620 	int __ret = 0;							\
621 	if (!(condition))						\
622 		__ret = __wait_event_killable(wq, condition);		\
623 	__ret;								\
624 })
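/*
 * Illustrative sketch (not part of the original header):
 * wait_event_killable() suits long waits in syscall paths where only a
 * fatal signal (e.g. SIGKILL) should abort the sleep. my_wq and data_ready
 * are hypothetical.
 *
 *	int ret;
 *
 *	ret = wait_event_killable(my_wq, data_ready);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: fatal signal pending
 *	// data_ready was observed true here
 */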
625 
626 
627 #define __wait_event_lock_irq(wq, condition, lock, cmd)			\
628 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
629 			    spin_unlock_irq(&lock);			\
630 			    cmd;					\
631 			    schedule();					\
632 			    spin_lock_irq(&lock))
633 
634 /**
635  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
636  *			     condition is checked under the lock. This
637  *			     is expected to be called with the lock
638  *			     taken.
639  * @wq: the waitqueue to wait on
640  * @condition: a C expression for the event to wait for
641  * @lock: a locked spinlock_t, which will be released before cmd
642  *	  and schedule() and reacquired afterwards.
643  * @cmd: a command which is invoked outside the critical section before
644  *	 sleep
645  *
646  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
647  * @condition evaluates to true. The @condition is checked each time
648  * the waitqueue @wq is woken up.
649  *
650  * wake_up() has to be called after changing any variable that could
651  * change the result of the wait condition.
652  *
653  * This is supposed to be called while holding the lock. The lock is
654  * dropped before invoking the cmd and going to sleep and is reacquired
655  * afterwards.
656  */
657 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
658 do {									\
659 	if (condition)							\
660 		break;							\
661 	__wait_event_lock_irq(wq, condition, lock, cmd);		\
662 } while (0)
663 
664 /**
665  * wait_event_lock_irq - sleep until a condition gets true. The
666  *			 condition is checked under the lock. This
667  *			 is expected to be called with the lock
668  *			 taken.
669  * @wq: the waitqueue to wait on
670  * @condition: a C expression for the event to wait for
671  * @lock: a locked spinlock_t, which will be released before schedule()
672  *	  and reacquired afterwards.
673  *
674  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
675  * @condition evaluates to true. The @condition is checked each time
676  * the waitqueue @wq is woken up.
677  *
678  * wake_up() has to be called after changing any variable that could
679  * change the result of the wait condition.
680  *
681  * This is supposed to be called while holding the lock. The lock is
682  * dropped before going to sleep and is reacquired afterwards.
683  */
684 #define wait_event_lock_irq(wq, condition, lock)			\
685 do {									\
686 	if (condition)							\
687 		break;							\
688 	__wait_event_lock_irq(wq, condition, lock, );			\
689 } while (0)
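/*
 * Illustrative sketch (not part of the original header): waiting for a
 * condition protected by a driver spinlock (not wq.lock). my_wq, my_lock
 * and my_queue_len are hypothetical; my_lock is held on entry and on exit,
 * and is dropped only around schedule().
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, my_queue_len > 0, my_lock);
 *	// my_lock is held again here; my_queue_len > 0 was seen under it
 *	spin_unlock_irq(&my_lock);
 */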
690 
691 
692 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
693 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
694 		      spin_unlock_irq(&lock);				\
695 		      cmd;						\
696 		      schedule();					\
697 		      spin_lock_irq(&lock))
698 
699 /**
700  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
701  *		The condition is checked under the lock. This is expected to
702  *		be called with the lock taken.
703  * @wq: the waitqueue to wait on
704  * @condition: a C expression for the event to wait for
705  * @lock: a locked spinlock_t, which will be released before cmd and
706  *	  schedule() and reacquired afterwards.
707  * @cmd: a command which is invoked outside the critical section before
708  *	 sleep
709  *
710  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
711  * @condition evaluates to true or a signal is received. The @condition is
712  * checked each time the waitqueue @wq is woken up.
713  *
714  * wake_up() has to be called after changing any variable that could
715  * change the result of the wait condition.
716  *
717  * This is supposed to be called while holding the lock. The lock is
718  * dropped before invoking the cmd and going to sleep and is reacquired
719  * afterwards.
720  *
721  * The macro will return -ERESTARTSYS if it was interrupted by a signal
722  * and 0 if @condition evaluated to true.
723  */
724 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
725 ({									\
726 	int __ret = 0;							\
727 	if (!(condition))						\
728 		__ret = __wait_event_interruptible_lock_irq(wq,		\
729 						condition, lock, cmd);	\
730 	__ret;								\
731 })
732 
733 /**
734  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
735  *		The condition is checked under the lock. This is expected
736  *		to be called with the lock taken.
737  * @wq: the waitqueue to wait on
738  * @condition: a C expression for the event to wait for
739  * @lock: a locked spinlock_t, which will be released before schedule()
740  *	  and reacquired afterwards.
741  *
742  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
743  * @condition evaluates to true or a signal is received. The @condition is
744  * checked each time the waitqueue @wq is woken up.
745  *
746  * wake_up() has to be called after changing any variable that could
747  * change the result of the wait condition.
748  *
749  * This is supposed to be called while holding the lock. The lock is
750  * dropped before going to sleep and is reacquired afterwards.
751  *
752  * The macro will return -ERESTARTSYS if it was interrupted by a signal
753  * and 0 if @condition evaluated to true.
754  */
755 #define wait_event_interruptible_lock_irq(wq, condition, lock)		\
756 ({									\
757 	int __ret = 0;							\
758 	if (!(condition))						\
759 		__ret = __wait_event_interruptible_lock_irq(wq,		\
760 						condition, lock,);	\
761 	__ret;								\
762 })
763 
764 #define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
765 						    lock, timeout)	\
766 	___wait_event(wq, ___wait_cond_timeout(condition),		\
767 		      TASK_INTERRUPTIBLE, 0, timeout,			\
768 		      spin_unlock_irq(&lock);				\
769 		      __ret = schedule_timeout(__ret);			\
770 		      spin_lock_irq(&lock))
771 
772 /**
773  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
774  *		true or a timeout elapses. The condition is checked under
775  *		the lock. This is expected to be called with the lock taken.
776  * @wq: the waitqueue to wait on
777  * @condition: a C expression for the event to wait for
778  * @lock: a locked spinlock_t, which will be released before schedule()
779  *	  and reacquired afterwards.
780  * @timeout: timeout, in jiffies
781  *
782  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
783  * @condition evaluates to true or a signal is received. The @condition is
784  * checked each time the waitqueue @wq is woken up.
785  *
786  * wake_up() has to be called after changing any variable that could
787  * change the result of the wait condition.
788  *
789  * This is supposed to be called while holding the lock. The lock is
790  * dropped before going to sleep and is reacquired afterwards.
791  *
792  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
793  * was interrupted by a signal, or the remaining jiffies (at least 1)
794  * if the @condition evaluated to true before the @timeout elapsed.
795  */
796 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
797 						  timeout)		\
798 ({									\
799 	long __ret = timeout;						\
800 	if (!___wait_cond_timeout(condition))				\
801 		__ret = __wait_event_interruptible_lock_irq_timeout(	\
802 					wq, condition, lock, timeout);	\
803 	__ret;								\
804 })
805 
806 
807 /*
808  * These are the old interfaces to sleep waiting for an event.
809  * They are racy.  DO NOT use them, use the wait_event* interfaces above.
810  * We plan to remove these interfaces.
811  */
812 extern void sleep_on(wait_queue_head_t *q);
813 extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
814 extern void interruptible_sleep_on(wait_queue_head_t *q);
815 extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
816 
817 /*
818  * Waitqueues which are removed from the waitqueue_head at wakeup time
819  */
820 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
821 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
822 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
823 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
824 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
825 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
826 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
827 
828 #define DEFINE_WAIT_FUNC(name, function)				\
829 	wait_queue_t name = {						\
830 		.private	= current,				\
831 		.func		= function,				\
832 		.task_list	= LIST_HEAD_INIT((name).task_list),	\
833 	}
834 
835 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
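/*
 * Illustrative sketch (not part of the original header): the open-coded
 * wait loop that the wait_event*() macros are built from, using
 * DEFINE_WAIT(), prepare_to_wait() and finish_wait(). my_wq and data_ready
 * are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (data_ready)			// test only after queueing
 *			break;
 *		if (signal_pending(current))	// interruptible: honour signals
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */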
836 
837 #define DEFINE_WAIT_BIT(name, word, bit)				\
838 	struct wait_bit_queue name = {					\
839 		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
840 		.wait	= {						\
841 			.private	= current,			\
842 			.func		= wake_bit_function,		\
843 			.task_list	=				\
844 				LIST_HEAD_INIT((name).wait.task_list),	\
845 		},							\
846 	}
847 
848 #define init_wait(wait)							\
849 	do {								\
850 		(wait)->private = current;				\
851 		(wait)->func = autoremove_wake_function;		\
852 		INIT_LIST_HEAD(&(wait)->task_list);			\
853 		(wait)->flags = 0;					\
854 	} while (0)
855 
856 /**
857  * wait_on_bit - wait for a bit to be cleared
858  * @word: the word being waited on, a kernel virtual address
859  * @bit: the bit of the word being waited on
860  * @action: the function used to sleep, which may take special actions
861  * @mode: the task state to sleep in
862  *
863  * There is a standard hashed waitqueue table for generic use. This
864  * is the part of the hashtable's accessor API that waits on a bit.
865  * For instance, if one were to have waiters on a bitflag, one would
866  * call wait_on_bit() in threads waiting for the bit to clear.
867  * One uses wait_on_bit() where one is waiting for the bit to clear,
868  * but has no intention of setting it.
869  */
870 static inline int
871 wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
872 {
873 	if (!test_bit(bit, word))
874 		return 0;
875 	return out_of_line_wait_on_bit(word, bit, action, mode);
876 }
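/*
 * Illustrative sketch (not part of the original header): waiting for bit 0
 * of a flags word to clear, and waking the hashed waitqueue after clearing
 * it. my_flags, MY_FLAG_BUSY and my_bit_wait() are hypothetical; the
 * @action callback decides how to sleep and returns non-zero to abort the
 * wait.
 *
 *	static unsigned long my_flags;
 *	#define MY_FLAG_BUSY	0
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter: sleeps until MY_FLAG_BUSY is clear
 *	wait_on_bit(&my_flags, MY_FLAG_BUSY, my_bit_wait, TASK_UNINTERRUPTIBLE);
 *
 *	// owner, when done with the resource
 *	clear_bit(MY_FLAG_BUSY, &my_flags);
 *	smp_mb();	// wake_up_bit() needs a barrier after clearing the bit
 *	wake_up_bit(&my_flags, MY_FLAG_BUSY);
 */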
877 
878 /**
879  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
880  * @word: the word being waited on, a kernel virtual address
881  * @bit: the bit of the word being waited on
882  * @action: the function used to sleep, which may take special actions
883  * @mode: the task state to sleep in
884  *
885  * There is a standard hashed waitqueue table for generic use. This
886  * is the part of the hashtable's accessor API that waits on a bit
887  * when one intends to set it, for instance, trying to lock bitflags.
888  * For instance, if one were to have waiters trying to set a bitflag
889  * and waiting for it to clear before setting it, one would call
890  * wait_on_bit_lock() in the threads waiting to be able to set the bit.
891  * One uses wait_on_bit_lock() where one is waiting for the bit to
892  * clear with the intention of setting it, and when done, clearing it.
893  */
894 static inline int
895 wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
896 {
897 	if (!test_and_set_bit(bit, word))
898 		return 0;
899 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
900 }
901 
902 /**
903  * wait_on_atomic_t - Wait for an atomic_t to become 0
904  * @val: The atomic value being waited on, a kernel virtual address
905  * @action: the function used to sleep, which may take special actions
906  * @mode: the task state to sleep in
907  *
908  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
909  * the purpose of getting a waitqueue, but we set the key to a bit number
910  * outside of the target 'word'.
911  */
912 static inline
913 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
914 {
915 	if (atomic_read(val) == 0)
916 		return 0;
917 	return out_of_line_wait_on_atomic_t(val, action, mode);
918 }
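/*
 * Illustrative sketch (not part of the original header): waiting for a
 * reference count held in an atomic_t to drop to zero. my_refs and
 * my_atomic_wait() are hypothetical; the releasing side must call
 * wake_up_atomic_t() once the count reaches zero.
 *
 *	static atomic_t my_refs = ATOMIC_INIT(0);
 *
 *	static int my_atomic_wait(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_atomic_t(&my_refs, my_atomic_wait, TASK_UNINTERRUPTIBLE);
 *
 *	// releasing side
 *	if (atomic_dec_and_test(&my_refs))
 *		wake_up_atomic_t(&my_refs);
 */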
919 
920 #endif /* _LINUX_WAIT_H */
921