xref: /linux-6.15/include/linux/wait.h (revision dec102aa)
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11 
12 typedef struct __wait_queue wait_queue_t;
13 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15 
16 struct __wait_queue {
17 	unsigned int		flags;
18 #define WQ_FLAG_EXCLUSIVE	0x01
19 	void			*private;
20 	wait_queue_func_t	func;
21 	struct list_head	task_list;
22 };
23 
24 struct wait_bit_key {
25 	void			*flags;
26 	int			bit_nr;
27 #define WAIT_ATOMIC_T_BIT_NR	-1
28 };
29 
30 struct wait_bit_queue {
31 	struct wait_bit_key	key;
32 	wait_queue_t		wait;
33 };
34 
35 struct __wait_queue_head {
36 	spinlock_t		lock;
37 	struct list_head	task_list;
38 };
39 typedef struct __wait_queue_head wait_queue_head_t;
40 
41 struct task_struct;
42 
43 /*
44  * Macros for declaration and initialisation of the datatypes
45  */
46 
47 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
48 	.private	= tsk,						\
49 	.func		= default_wake_function,			\
50 	.task_list	= { NULL, NULL } }
51 
52 #define DECLARE_WAITQUEUE(name, tsk)					\
53 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
54 
55 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
56 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
57 	.task_list	= { &(name).task_list, &(name).task_list } }
58 
59 #define DECLARE_WAIT_QUEUE_HEAD(name) \
60 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
61 
62 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
63 	{ .flags = word, .bit_nr = bit, }
64 
65 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
66 	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
67 
68 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
69 
70 #define init_waitqueue_head(q)				\
71 	do {						\
72 		static struct lock_class_key __key;	\
73 							\
74 		__init_waitqueue_head((q), #q, &__key);	\
75 	} while (0)
76 
77 #ifdef CONFIG_LOCKDEP
78 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
79 	({ init_waitqueue_head(&name); name; })
80 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
81 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
82 #else
83 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
84 #endif
85 
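/*
 * Illustrative sketch (not part of this header) of the two usual ways to
 * obtain an initialised wait_queue_head_t; my_device and my_dev_alloc() are
 * hypothetical names, and kzalloc() comes from <linux/slab.h>.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);		  (static definition)
 *
 *	struct my_device {
 *		wait_queue_head_t	wq;
 *		bool			data_ready;
 *	};
 *
 *	struct my_device *my_dev_alloc(void)
 *	{
 *		struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (dev)
 *			init_waitqueue_head(&dev->wq);	  (runtime init, lockdep key)
 *		return dev;
 *	}
 */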
86 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
87 {
88 	q->flags	= 0;
89 	q->private	= p;
90 	q->func		= default_wake_function;
91 }
92 
93 static inline void
94 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
95 {
96 	q->flags	= 0;
97 	q->private	= NULL;
98 	q->func		= func;
99 }
100 
101 static inline int waitqueue_active(wait_queue_head_t *q)
102 {
103 	return !list_empty(&q->task_list);
104 }
105 
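/*
 * Hedged sketch of the waitqueue_active() fast path (hypothetical fields):
 * a waker may skip the wake_up() call when nobody is queued, but only if a
 * full memory barrier orders the condition store against the emptiness
 * check; the waiter side of wait_event() provides the matching barrier.
 *
 *	dev->data_ready = true;
 *	smp_mb();
 *	if (waitqueue_active(&dev->wq))
 *		wake_up(&dev->wq);
 *
 * Without the barrier this check can race with a waiter that is about to
 * sleep, and the wakeup may be lost.
 */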
106 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
107 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
108 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
109 
110 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
111 {
112 	list_add(&new->task_list, &head->task_list);
113 }
114 
115 /*
116  * Used for wake-one threads:
117  */
118 static inline void
119 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
120 {
121 	wait->flags |= WQ_FLAG_EXCLUSIVE;
122 	__add_wait_queue(q, wait);
123 }
124 
125 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
126 					 wait_queue_t *new)
127 {
128 	list_add_tail(&new->task_list, &head->task_list);
129 }
130 
131 static inline void
132 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
133 {
134 	wait->flags |= WQ_FLAG_EXCLUSIVE;
135 	__add_wait_queue_tail(q, wait);
136 }
137 
138 static inline void
139 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
140 {
141 	list_del(&old->task_list);
142 }
143 
144 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
145 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
146 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
147 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
148 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
149 void __wake_up_bit(wait_queue_head_t *, void *, int);
150 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
151 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
152 void wake_up_bit(void *, int);
153 void wake_up_atomic_t(atomic_t *);
154 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
155 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
156 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
157 wait_queue_head_t *bit_waitqueue(void *, int);
158 
159 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
160 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
161 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
162 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
163 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
164 
165 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
166 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
167 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
168 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
169 
170 /*
171  * Wakeup macros to be used to report events to the targets.
172  */
173 #define wake_up_poll(x, m)						\
174 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
175 #define wake_up_locked_poll(x, m)					\
176 	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
177 #define wake_up_interruptible_poll(x, m)				\
178 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
179 #define wake_up_interruptible_sync_poll(x, m)				\
180 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181 
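/*
 * Illustrative use of the poll-mask wakeups (hypothetical driver fields;
 * POLLIN/POLLRDNORM come from <linux/poll.h>): a driver with new data for
 * readers wakes pollers with the events it is reporting, so wakeup callbacks
 * keyed on the mask can filter on it.
 *
 *	dev->data_ready = true;
 *	wake_up_interruptible_poll(&dev->read_wq, POLLIN | POLLRDNORM);
 */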
182 #define ___wait_cond_timeout(condition)					\
183 ({									\
184 	bool __cond = (condition);					\
185 	if (__cond && !__ret)						\
186 		__ret = 1;						\
187 	__cond || !__ret;						\
188 })
189 
190 #define ___wait_is_interruptible(state)					\
191 	(!__builtin_constant_p(state) ||				\
192 		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
193 
194 /*
195  * The below macro ___wait_event() has an explicit shadow of the __ret
196  * variable when used from the wait_event_*() macros.
197  *
198  * This is so that both can use the ___wait_cond_timeout() construct
199  * to wrap the condition.
200  *
201  * The type inconsistency of the wait_event_*() __ret variable is also
202  * on purpose; we use long where we can return timeout values and int
203  * otherwise.
204  */
205 
206 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
207 ({									\
208 	__label__ __out;						\
209 	wait_queue_t __wait;						\
210 	long __ret = ret;	/* explicit shadow */			\
211 									\
212 	INIT_LIST_HEAD(&__wait.task_list);				\
213 	if (exclusive)							\
214 		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
215 	else								\
216 		__wait.flags = 0;					\
217 									\
218 	for (;;) {							\
219 		long __int = prepare_to_wait_event(&wq, &__wait, state);\
220 									\
221 		if (condition)						\
222 			break;						\
223 									\
224 		if (___wait_is_interruptible(state) && __int) {		\
225 			__ret = __int;					\
226 			if (exclusive) {				\
227 				abort_exclusive_wait(&wq, &__wait,	\
228 						     state, NULL);	\
229 				goto __out;				\
230 			}						\
231 			break;						\
232 		}							\
233 									\
234 		cmd;							\
235 	}								\
236 	finish_wait(&wq, &__wait);					\
237 __out:	__ret;								\
238 })
239 
240 #define __wait_event(wq, condition)					\
241 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
242 			    schedule())
243 
244 /**
245  * wait_event - sleep until a condition gets true
246  * @wq: the waitqueue to wait on
247  * @condition: a C expression for the event to wait for
248  *
249  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
250  * @condition evaluates to true. The @condition is checked each time
251  * the waitqueue @wq is woken up.
252  *
253  * wake_up() has to be called after changing any variable that could
254  * change the result of the wait condition.
255  */
256 #define wait_event(wq, condition)					\
257 do {									\
258 	if (condition)							\
259 		break;							\
260 	__wait_event(wq, condition);					\
261 } while (0)
262 
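/*
 * Minimal sketch of the wait_event()/wake_up() pairing; my_dev and
 * data_ready are hypothetical names.
 *
 *	Waiter (sleeps in TASK_UNINTERRUPTIBLE until the condition holds):
 *
 *		wait_event(my_dev->wq, my_dev->data_ready);
 *
 *	Waker (update the condition first, then wake):
 *
 *		my_dev->data_ready = true;
 *		wake_up(&my_dev->wq);
 */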
263 #define __wait_event_timeout(wq, condition, timeout)			\
264 	___wait_event(wq, ___wait_cond_timeout(condition),		\
265 		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
266 		      __ret = schedule_timeout(__ret))
267 
268 /**
269  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
270  * @wq: the waitqueue to wait on
271  * @condition: a C expression for the event to wait for
272  * @timeout: timeout, in jiffies
273  *
274  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
275  * @condition evaluates to true. The @condition is checked each time
276  * the waitqueue @wq is woken up.
277  *
278  * wake_up() has to be called after changing any variable that could
279  * change the result of the wait condition.
280  *
281  * The function returns 0 if the @timeout elapsed, or the remaining
282  * jiffies (at least 1) if the @condition evaluated to %true before
283  * the @timeout elapsed.
284  */
285 #define wait_event_timeout(wq, condition, timeout)			\
286 ({									\
287 	long __ret = timeout;						\
288 	if (!___wait_cond_timeout(condition))				\
289 		__ret = __wait_event_timeout(wq, condition, timeout);	\
290 	__ret;								\
291 })
292 
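/*
 * Sketch of checking the wait_event_timeout() return value (hypothetical
 * names; msecs_to_jiffies() is from <linux/jiffies.h>):
 *
 *	long left = wait_event_timeout(dev->wq, dev->done,
 *				       msecs_to_jiffies(100));
 *	if (!left)
 *		return -ETIMEDOUT;	(timeout elapsed, condition still false)
 *	(otherwise the condition became true with 'left' jiffies remaining)
 */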
293 #define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
294 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
295 			    cmd1; schedule(); cmd2)
296 
297 /**
298  * wait_event_cmd - sleep until a condition gets true
299  * @wq: the waitqueue to wait on
300  * @condition: a C expression for the event to wait for
301  * @cmd1: the command to be executed before sleep
302  * @cmd2: the command to be executed after sleep
303  *
304  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
305  * @condition evaluates to true. The @condition is checked each time
306  * the waitqueue @wq is woken up.
307  *
308  * wake_up() has to be called after changing any variable that could
309  * change the result of the wait condition.
310  */
311 #define wait_event_cmd(wq, condition, cmd1, cmd2)			\
312 do {									\
313 	if (condition)							\
314 		break;							\
315 	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
316 } while (0)
317 
318 #define __wait_event_interruptible(wq, condition)			\
319 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
320 		      schedule())
321 
322 /**
323  * wait_event_interruptible - sleep until a condition gets true
324  * @wq: the waitqueue to wait on
325  * @condition: a C expression for the event to wait for
326  *
327  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
328  * @condition evaluates to true or a signal is received.
329  * The @condition is checked each time the waitqueue @wq is woken up.
330  *
331  * wake_up() has to be called after changing any variable that could
332  * change the result of the wait condition.
333  *
334  * The function will return -ERESTARTSYS if it was interrupted by a
335  * signal and 0 if @condition evaluated to true.
336  */
337 #define wait_event_interruptible(wq, condition)				\
338 ({									\
339 	int __ret = 0;							\
340 	if (!(condition))						\
341 		__ret = __wait_event_interruptible(wq, condition);	\
342 	__ret;								\
343 })
344 
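/*
 * Typical pattern in a syscall or read() path (hypothetical fields):
 * propagate the -ERESTARTSYS result so the signal machinery can restart or
 * abort the call.
 *
 *	ret = wait_event_interruptible(dev->wq, dev->data_ready);
 *	if (ret)
 *		return ret;	(interrupted by a signal)
 *	(the condition is true here; proceed)
 */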
345 #define __wait_event_interruptible_timeout(wq, condition, timeout)	\
346 	___wait_event(wq, ___wait_cond_timeout(condition),		\
347 		      TASK_INTERRUPTIBLE, 0, timeout,			\
348 		      __ret = schedule_timeout(__ret))
349 
350 /**
351  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
352  * @wq: the waitqueue to wait on
353  * @condition: a C expression for the event to wait for
354  * @timeout: timeout, in jiffies
355  *
356  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
357  * @condition evaluates to true or a signal is received.
358  * The @condition is checked each time the waitqueue @wq is woken up.
359  *
360  * wake_up() has to be called after changing any variable that could
361  * change the result of the wait condition.
362  *
363  * Returns:
364  * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
365  * a signal, or the remaining jiffies (at least 1) if the @condition
366  * evaluated to %true before the @timeout elapsed.
367  */
368 #define wait_event_interruptible_timeout(wq, condition, timeout)	\
369 ({									\
370 	long __ret = timeout;						\
371 	if (!___wait_cond_timeout(condition))				\
372 		__ret = __wait_event_interruptible_timeout(wq,		\
373 						condition, timeout);	\
374 	__ret;								\
375 })
376 
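/*
 * Sketch of the three possible results of wait_event_interruptible_timeout()
 * (hypothetical names):
 *
 *	long ret = wait_event_interruptible_timeout(dev->wq, dev->done, HZ);
 *
 *	if (ret < 0)
 *		return ret;		(signal: -ERESTARTSYS)
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(timeout elapsed)
 *	(ret > 0: condition became true with 'ret' jiffies remaining)
 */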
377 #define __wait_event_hrtimeout(wq, condition, timeout, state)		\
378 ({									\
379 	int __ret = 0;							\
380 	struct hrtimer_sleeper __t;					\
381 									\
382 	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
383 			      HRTIMER_MODE_REL);			\
384 	hrtimer_init_sleeper(&__t, current);				\
385 	if ((timeout).tv64 != KTIME_MAX)				\
386 		hrtimer_start_range_ns(&__t.timer, timeout,		\
387 				       current->timer_slack_ns,		\
388 				       HRTIMER_MODE_REL);		\
389 									\
390 	__ret = ___wait_event(wq, condition, state, 0, 0,		\
391 		if (!__t.task) {					\
392 			__ret = -ETIME;					\
393 			break;						\
394 		}							\
395 		schedule());						\
396 									\
397 	hrtimer_cancel(&__t.timer);					\
398 	destroy_hrtimer_on_stack(&__t.timer);				\
399 	__ret;								\
400 })
401 
402 /**
403  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
404  * @wq: the waitqueue to wait on
405  * @condition: a C expression for the event to wait for
406  * @timeout: timeout, as a ktime_t
407  *
408  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
409  * @condition evaluates to true.
410  * The @condition is checked each time the waitqueue @wq is woken up.
411  *
412  * wake_up() has to be called after changing any variable that could
413  * change the result of the wait condition.
414  *
415  * The function returns 0 if @condition became true, or -ETIME if the timeout
416  * elapsed.
417  */
418 #define wait_event_hrtimeout(wq, condition, timeout)			\
419 ({									\
420 	int __ret = 0;							\
421 	if (!(condition))						\
422 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
423 					       TASK_UNINTERRUPTIBLE);	\
424 	__ret;								\
425 })
426 
427 /**
428  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
429  * @wq: the waitqueue to wait on
430  * @condition: a C expression for the event to wait for
431  * @timeout: timeout, as a ktime_t
432  *
433  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
434  * @condition evaluates to true or a signal is received.
435  * The @condition is checked each time the waitqueue @wq is woken up.
436  *
437  * wake_up() has to be called after changing any variable that could
438  * change the result of the wait condition.
439  *
440  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
441  * interrupted by a signal, or -ETIME if the timeout elapsed.
442  */
443 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
444 ({									\
445 	long __ret = 0;							\
446 	if (!(condition))						\
447 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
448 					       TASK_INTERRUPTIBLE);	\
449 	__ret;								\
450 })
451 
452 #define __wait_event_interruptible_exclusive(wq, condition)		\
453 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
454 		      schedule())
455 
456 #define wait_event_interruptible_exclusive(wq, condition)		\
457 ({									\
458 	int __ret = 0;							\
459 	if (!(condition))						\
460 		__ret = __wait_event_interruptible_exclusive(wq, condition);\
461 	__ret;								\
462 })
463 
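/*
 * Hedged sketch of an exclusive wait (hypothetical names): each worker
 * queues with WQ_FLAG_EXCLUSIVE, so a plain wake_up() wakes at most one of
 * them instead of the whole pool.
 *
 *	Worker thread:
 *
 *		err = wait_event_interruptible_exclusive(pool->wq,
 *						!list_empty(&pool->work));
 *		if (err)
 *			return err;	(signal)
 *
 *	Work submitter:
 *
 *		list_add_tail(&item->node, &pool->work);
 *		wake_up(&pool->wq);	(wakes a single exclusive waiter)
 */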
464 
465 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
466 ({									\
467 	int __ret = 0;							\
468 	DEFINE_WAIT(__wait);						\
469 	if (exclusive)							\
470 		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
471 	do {								\
472 		if (likely(list_empty(&__wait.task_list)))		\
473 			__add_wait_queue_tail(&(wq), &__wait);		\
474 		set_current_state(TASK_INTERRUPTIBLE);			\
475 		if (signal_pending(current)) {				\
476 			__ret = -ERESTARTSYS;				\
477 			break;						\
478 		}							\
479 		if (irq)						\
480 			spin_unlock_irq(&(wq).lock);			\
481 		else							\
482 			spin_unlock(&(wq).lock);			\
483 		schedule();						\
484 		if (irq)						\
485 			spin_lock_irq(&(wq).lock);			\
486 		else							\
487 			spin_lock(&(wq).lock);				\
488 	} while (!(condition));						\
489 	__remove_wait_queue(&(wq), &__wait);				\
490 	__set_current_state(TASK_RUNNING);				\
491 	__ret;								\
492 })
493 
494 
495 /**
496  * wait_event_interruptible_locked - sleep until a condition gets true
497  * @wq: the waitqueue to wait on
498  * @condition: a C expression for the event to wait for
499  *
500  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
501  * @condition evaluates to true or a signal is received.
502  * The @condition is checked each time the waitqueue @wq is woken up.
503  *
504  * It must be called with wq.lock held.  The spinlock is released
505  * while sleeping, but @condition is tested with the lock held, and
506  * the lock is held again when this macro exits.
507  *
508  * The lock is locked/unlocked using spin_lock()/spin_unlock()
509  * functions which must match the way they are locked/unlocked outside
510  * of this macro.
511  *
512  * wake_up_locked() has to be called after changing any variable that could
513  * change the result of the wait condition.
514  *
515  * The function will return -ERESTARTSYS if it was interrupted by a
516  * signal and 0 if @condition evaluated to true.
517  */
518 #define wait_event_interruptible_locked(wq, condition)			\
519 	((condition)							\
520 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
521 
522 /**
523  * wait_event_interruptible_locked_irq - sleep until a condition gets true
524  * @wq: the waitqueue to wait on
525  * @condition: a C expression for the event to wait for
526  *
527  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
528  * @condition evaluates to true or a signal is received.
529  * The @condition is checked each time the waitqueue @wq is woken up.
530  *
531  * It must be called with wq.lock held.  The spinlock is released
532  * while sleeping, but @condition is tested with the lock held, and
533  * the lock is held again when this macro exits.
534  *
535  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
536  * functions which must match the way they are locked/unlocked outside
537  * of this macro.
538  *
539  * wake_up_locked() has to be called after changing any variable that could
540  * change the result of the wait condition.
541  *
542  * The function will return -ERESTARTSYS if it was interrupted by a
543  * signal and 0 if @condition evaluated to true.
544  */
545 #define wait_event_interruptible_locked_irq(wq, condition)		\
546 	((condition)							\
547 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
548 
549 /**
550  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
551  * @wq: the waitqueue to wait on
552  * @condition: a C expression for the event to wait for
553  *
554  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
555  * @condition evaluates to true or a signal is received.
556  * The @condition is checked each time the waitqueue @wq is woken up.
557  *
558  * It must be called with wq.lock held.  The spinlock is released
559  * while sleeping, but @condition is tested with the lock held, and
560  * the lock is held again when this macro exits.
561  *
562  * The lock is locked/unlocked using spin_lock()/spin_unlock()
563  * functions which must match the way they are locked/unlocked outside
564  * of this macro.
565  *
566  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
567  * set. Thus, when other processes also wait on the list and this
568  * process is woken, further processes are not considered.
569  *
570  * wake_up_locked() has to be called after changing any variable that could
571  * change the result of the wait condition.
572  *
573  * The function will return -ERESTARTSYS if it was interrupted by a
574  * signal and 0 if @condition evaluated to true.
575  */
576 #define wait_event_interruptible_exclusive_locked(wq, condition)	\
577 	((condition)							\
578 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
579 
580 /**
581  * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
582  * @wq: the waitqueue to wait on
583  * @condition: a C expression for the event to wait for
584  *
585  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
586  * @condition evaluates to true or a signal is received.
587  * The @condition is checked each time the waitqueue @wq is woken up.
588  *
589  * It must be called with wq.lock held.  The spinlock is released
590  * while sleeping, but @condition is tested with the lock held, and
591  * the lock is held again when this macro exits.
592  *
593  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
594  * functions which must match the way they are locked/unlocked outside
595  * of this macro.
596  *
597  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
598  * set. Thus, when other processes also wait on the list and this
599  * process is woken, further processes are not considered.
600  *
601  * wake_up_locked() has to be called after changing any variable that could
602  * change the result of the wait condition.
603  *
604  * The function will return -ERESTARTSYS if it was interrupted by a
605  * signal and 0 if @condition evaluated to true.
606  */
607 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
608 	((condition)							\
609 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
610 
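/*
 * Sketch of the *_locked variants (hypothetical fields): the caller owns
 * wq.lock around the wait, and the waker uses wake_up_locked() under the
 * same lock.
 *
 *	Waiter:
 *
 *		spin_lock(&dev->wq.lock);
 *		ret = wait_event_interruptible_locked(dev->wq, dev->data_ready);
 *		if (!ret)
 *			(consume the data; dev->wq.lock is still held)
 *		spin_unlock(&dev->wq.lock);
 *
 *	Waker:
 *
 *		spin_lock(&dev->wq.lock);
 *		dev->data_ready = true;
 *		wake_up_locked(&dev->wq);
 *		spin_unlock(&dev->wq.lock);
 */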
611 
612 #define __wait_event_killable(wq, condition)				\
613 	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
614 
615 /**
616  * wait_event_killable - sleep until a condition gets true
617  * @wq: the waitqueue to wait on
618  * @condition: a C expression for the event to wait for
619  *
620  * The process is put to sleep (TASK_KILLABLE) until the
621  * @condition evaluates to true or a fatal signal is received.
622  * The @condition is checked each time the waitqueue @wq is woken up.
623  *
624  * wake_up() has to be called after changing any variable that could
625  * change the result of the wait condition.
626  *
627  * The function will return -ERESTARTSYS if it was interrupted by a
628  * fatal signal and 0 if @condition evaluated to true.
629  */
630 #define wait_event_killable(wq, condition)				\
631 ({									\
632 	int __ret = 0;							\
633 	if (!(condition))						\
634 		__ret = __wait_event_killable(wq, condition);		\
635 	__ret;								\
636 })
637 
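/*
 * Sketch of wait_event_killable() (hypothetical fields): the sleep is only
 * interruptible by fatal signals, which suits long waits (e.g. for I/O
 * completion) where ordinary signals should not abort the operation.
 *
 *	ret = wait_event_killable(dev->wq, dev->io_done);
 *	if (ret)
 *		return ret;	(fatal signal: -ERESTARTSYS)
 */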
638 
639 #define __wait_event_lock_irq(wq, condition, lock, cmd)			\
640 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
641 			    spin_unlock_irq(&lock);			\
642 			    cmd;					\
643 			    schedule();					\
644 			    spin_lock_irq(&lock))
645 
646 /**
647  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
648  *			     condition is checked under the lock. This
649  *			     is expected to be called with the lock
650  *			     taken.
651  * @wq: the waitqueue to wait on
652  * @condition: a C expression for the event to wait for
653  * @lock: a locked spinlock_t, which will be released before cmd
654  *	  and schedule() and reacquired afterwards.
655  * @cmd: a command which is invoked outside the critical section before
656  *	 sleep
657  *
658  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
659  * @condition evaluates to true. The @condition is checked each time
660  * the waitqueue @wq is woken up.
661  *
662  * wake_up() has to be called after changing any variable that could
663  * change the result of the wait condition.
664  *
665  * This is supposed to be called while holding the lock. The lock is
666  * dropped before invoking the cmd and going to sleep and is reacquired
667  * afterwards.
668  */
669 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
670 do {									\
671 	if (condition)							\
672 		break;							\
673 	__wait_event_lock_irq(wq, condition, lock, cmd);		\
674 } while (0)
675 
676 /**
677  * wait_event_lock_irq - sleep until a condition gets true. The
678  *			 condition is checked under the lock. This
679  *			 is expected to be called with the lock
680  *			 taken.
681  * @wq: the waitqueue to wait on
682  * @condition: a C expression for the event to wait for
683  * @lock: a locked spinlock_t, which will be released before schedule()
684  *	  and reacquired afterwards.
685  *
686  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
687  * @condition evaluates to true. The @condition is checked each time
688  * the waitqueue @wq is woken up.
689  *
690  * wake_up() has to be called after changing any variable that could
691  * change the result of the wait condition.
692  *
693  * This is supposed to be called while holding the lock. The lock is
694  * dropped before going to sleep and is reacquired afterwards.
695  */
696 #define wait_event_lock_irq(wq, condition, lock)			\
697 do {									\
698 	if (condition)							\
699 		break;							\
700 	__wait_event_lock_irq(wq, condition, lock, );			\
701 } while (0)
702 
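/*
 * Sketch of wait_event_lock_irq() (hypothetical fields): the condition is
 * evaluated with dev->lock held; the macro drops it with spin_unlock_irq()
 * around schedule() and retakes it before returning.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, dev->pending == 0, dev->lock);
 *	(dev->pending is zero and dev->lock is held again here)
 *	spin_unlock_irq(&dev->lock);
 */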
703 
704 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
705 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
706 		      spin_unlock_irq(&lock);				\
707 		      cmd;						\
708 		      schedule();					\
709 		      spin_lock_irq(&lock))
710 
711 /**
712  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
713  *		The condition is checked under the lock. This is expected to
714  *		be called with the lock taken.
715  * @wq: the waitqueue to wait on
716  * @condition: a C expression for the event to wait for
717  * @lock: a locked spinlock_t, which will be released before cmd and
718  *	  schedule() and reacquired afterwards.
719  * @cmd: a command which is invoked outside the critical section before
720  *	 sleep
721  *
722  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
723  * @condition evaluates to true or a signal is received. The @condition is
724  * checked each time the waitqueue @wq is woken up.
725  *
726  * wake_up() has to be called after changing any variable that could
727  * change the result of the wait condition.
728  *
729  * This is supposed to be called while holding the lock. The lock is
730  * dropped before invoking the cmd and going to sleep and is reacquired
731  * afterwards.
732  *
733  * The macro will return -ERESTARTSYS if it was interrupted by a signal
734  * and 0 if @condition evaluated to true.
735  */
736 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
737 ({									\
738 	int __ret = 0;							\
739 	if (!(condition))						\
740 		__ret = __wait_event_interruptible_lock_irq(wq,		\
741 						condition, lock, cmd);	\
742 	__ret;								\
743 })
744 
745 /**
746  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
747  *		The condition is checked under the lock. This is expected
748  *		to be called with the lock taken.
749  * @wq: the waitqueue to wait on
750  * @condition: a C expression for the event to wait for
751  * @lock: a locked spinlock_t, which will be released before schedule()
752  *	  and reacquired afterwards.
753  *
754  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
755  * @condition evaluates to true or a signal is received. The @condition is
756  * checked each time the waitqueue @wq is woken up.
757  *
758  * wake_up() has to be called after changing any variable that could
759  * change the result of the wait condition.
760  *
761  * This is supposed to be called while holding the lock. The lock is
762  * dropped before going to sleep and is reacquired afterwards.
763  *
764  * The macro will return -ERESTARTSYS if it was interrupted by a signal
765  * and 0 if @condition evaluated to true.
766  */
767 #define wait_event_interruptible_lock_irq(wq, condition, lock)		\
768 ({									\
769 	int __ret = 0;							\
770 	if (!(condition))						\
771 		__ret = __wait_event_interruptible_lock_irq(wq,		\
772 						condition, lock,);	\
773 	__ret;								\
774 })
775 
776 #define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
777 						    lock, timeout)	\
778 	___wait_event(wq, ___wait_cond_timeout(condition),		\
779 		      TASK_INTERRUPTIBLE, 0, timeout,			\
780 		      spin_unlock_irq(&lock);				\
781 		      __ret = schedule_timeout(__ret);			\
782 		      spin_lock_irq(&lock))
783 
784 /**
785  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
786  *		true or a timeout elapses. The condition is checked under
787  *		the lock. This is expected to be called with the lock taken.
788  * @wq: the waitqueue to wait on
789  * @condition: a C expression for the event to wait for
790  * @lock: a locked spinlock_t, which will be released before schedule()
791  *	  and reacquired afterwards.
792  * @timeout: timeout, in jiffies
793  *
794  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
795  * @condition evaluates to true or a signal is received. The @condition is
796  * checked each time the waitqueue @wq is woken up.
797  *
798  * wake_up() has to be called after changing any variable that could
799  * change the result of the wait condition.
800  *
801  * This is supposed to be called while holding the lock. The lock is
802  * dropped before going to sleep and is reacquired afterwards.
803  *
804  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
805  * was interrupted by a signal, or the remaining jiffies (at least 1)
806  * if the @condition evaluated to %true before the @timeout elapsed.
807  */
808 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
809 						  timeout)		\
810 ({									\
811 	long __ret = timeout;						\
812 	if (!___wait_cond_timeout(condition))				\
813 		__ret = __wait_event_interruptible_lock_irq_timeout(	\
814 					wq, condition, lock, timeout);	\
815 	__ret;								\
816 })
817 
818 /*
819  * Waitqueues which are removed from the waitqueue_head at wakeup time
820  */
821 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
822 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
823 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
824 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
825 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
826 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
827 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
828 
829 #define DEFINE_WAIT_FUNC(name, function)				\
830 	wait_queue_t name = {						\
831 		.private	= current,				\
832 		.func		= function,				\
833 		.task_list	= LIST_HEAD_INIT((name).task_list),	\
834 	}
835 
836 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
837 
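/*
 * Sketch of the open-coded wait loop that DEFINE_WAIT()/prepare_to_wait()/
 * finish_wait() support, for cases the wait_event_*() macros do not cover
 * (hypothetical fields):
 *
 *	int ret = 0;
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 *		if (dev->data_ready)
 *			break;
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&dev->wq, &wait);
 */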
838 #define DEFINE_WAIT_BIT(name, word, bit)				\
839 	struct wait_bit_queue name = {					\
840 		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
841 		.wait	= {						\
842 			.private	= current,			\
843 			.func		= wake_bit_function,		\
844 			.task_list	=				\
845 				LIST_HEAD_INIT((name).wait.task_list),	\
846 		},							\
847 	}
848 
849 #define init_wait(wait)							\
850 	do {								\
851 		(wait)->private = current;				\
852 		(wait)->func = autoremove_wake_function;		\
853 		INIT_LIST_HEAD(&(wait)->task_list);			\
854 		(wait)->flags = 0;					\
855 	} while (0)
856 
857 /**
858  * wait_on_bit - wait for a bit to be cleared
859  * @word: the word being waited on, a kernel virtual address
860  * @bit: the bit of the word being waited on
861  * @action: the function used to sleep, which may take special actions
862  * @mode: the task state to sleep in
863  *
864  * There is a standard hashed waitqueue table for generic use. This
865  * is the part of the hashtable's accessor API that waits on a bit.
866  * For instance, if one had waiters on a bitflag, the threads waiting
867  * for that bit to clear would call wait_on_bit().
868  * Use wait_on_bit() where the caller waits for the bit to clear but
869  * has no intention of setting it.
870  */
871 static inline int
872 wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
873 {
874 	if (!test_bit(bit, word))
875 		return 0;
876 	return out_of_line_wait_on_bit(word, bit, action, mode);
877 }
878 
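/*
 * Sketch of wait_on_bit() with a minimal @action (hypothetical names; the
 * flags word is an unsigned long and MY_BUSY_BIT a bit number): the action
 * decides how to sleep and returns non-zero to abort the wait. The bit owner
 * clears the bit and calls wake_up_bit(); a memory barrier is needed between
 * the two (a full smp_mb() is used here for simplicity).
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	Waiter:
 *		wait_on_bit(&dev->flags, MY_BUSY_BIT, my_bit_wait,
 *			    TASK_UNINTERRUPTIBLE);
 *
 *	Bit owner, when done:
 *		clear_bit(MY_BUSY_BIT, &dev->flags);
 *		smp_mb();
 *		wake_up_bit(&dev->flags, MY_BUSY_BIT);
 */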
879 /**
880  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
881  * @word: the word being waited on, a kernel virtual address
882  * @bit: the bit of the word being waited on
883  * @action: the function used to sleep, which may take special actions
884  * @mode: the task state to sleep in
885  *
886  * There is a standard hashed waitqueue table for generic use. This
887  * is the part of the hashtable's accessor API that waits on a bit
888  * when one intends to set it, for instance, trying to lock bitflags.
889  * For instance, if one had waiters trying to set a bitflag while
890  * waiting for it to clear before setting it, those threads would
891  * call wait_on_bit_lock() to wait until they can set the bit.
892  * One uses wait_on_bit_lock() where one is waiting for the bit to
893  * clear with the intention of setting it, and when done, clearing it.
894  */
895 static inline int
896 wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
897 {
898 	if (!test_and_set_bit(bit, word))
899 		return 0;
900 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
901 }
902 
903 /**
904  * wait_on_atomic_t - Wait for an atomic_t to become 0
905  * @val: The atomic value being waited on, a kernel virtual address
906  * @action: the function used to sleep, which may take special actions
907  * @mode: the task state to sleep in
908  *
909  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
910  * the purpose of getting a waitqueue, but we set the key to a bit number
911  * outside of the target 'word'.
912  */
913 static inline
914 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
915 {
916 	if (atomic_read(val) == 0)
917 		return 0;
918 	return out_of_line_wait_on_atomic_t(val, action, mode);
919 }
920 
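/*
 * Sketch of wait_on_atomic_t() paired with wake_up_atomic_t() (hypothetical
 * names): wait until a usage count drops to zero, waking the waiter from the
 * path that releases the last reference.
 *
 *	static int my_atomic_wait(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	Waiter (e.g. teardown path):
 *		wait_on_atomic_t(&dev->users, my_atomic_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	Reference release:
 *		if (atomic_dec_and_test(&dev->users))
 *			wake_up_atomic_t(&dev->users);
 */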
921 #endif /* _LINUX_WAIT_H */
922