xref: /linux-6.15/include/linux/wait.h (revision dcd454af)
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 
4 
5 #include <linux/list.h>
6 #include <linux/stddef.h>
7 #include <linux/spinlock.h>
8 #include <asm/current.h>
9 #include <uapi/linux/wait.h>
10 
/*
 * A single wait-queue entry and the callback type invoked to wake it.
 * Entries are linked on a wait_queue_head_t and woken via ->func.
 */
11 typedef struct __wait_queue wait_queue_t;
12 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
13 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 
15 struct __wait_queue {
16 	unsigned int flags;		/* WQ_FLAG_* bits below */
17 #define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one: stop scanning after waking this entry */
18 	void *private;			/* normally the waiting task (see init_waitqueue_entry()) */
19 	wait_queue_func_t func;		/* wake-up callback */
20 	struct list_head task_list;	/* link on wait_queue_head_t::task_list */
21 };
22 
/*
 * Key for the hashed bit-waitqueue: identifies which bit of which word
 * a waiter is blocked on.  bit_nr == WAIT_ATOMIC_T_BIT_NR means the
 * waiter is waiting on an atomic_t reaching zero, not on a bit.
 */
23 struct wait_bit_key {
24 	void *flags;			/* address of the word being waited on */
25 	int bit_nr;			/* bit number within that word */
26 #define WAIT_ATOMIC_T_BIT_NR -1
27 };
28 
/* A wait-queue entry plus the bit key it is waiting for. */
29 struct wait_bit_queue {
30 	struct wait_bit_key key;
31 	wait_queue_t wait;
32 };
33 
/* Head of a wait queue: a lock protecting the list of waiting entries. */
34 struct __wait_queue_head {
35 	spinlock_t lock;		/* protects task_list */
36 	struct list_head task_list;	/* list of struct __wait_queue entries */
37 };
38 typedef struct __wait_queue_head wait_queue_head_t;
39 
40 struct task_struct;
41 
42 /*
43  * Macros for declaration and initialisation of the datatypes
44  */
45 
/*
 * Compile-time initializers.  A wait_queue_t starts with task_list
 * { NULL, NULL } because it is not linked anywhere until added to a
 * head; a head's task_list is initialized to an empty circular list.
 */
46 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
47 	.private	= tsk,						\
48 	.func		= default_wake_function,			\
49 	.task_list	= { NULL, NULL } }
50 
51 #define DECLARE_WAITQUEUE(name, tsk)					\
52 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
53 
54 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
55 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
56 	.task_list	= { &(name).task_list, &(name).task_list } }
57 
58 #define DECLARE_WAIT_QUEUE_HEAD(name) \
59 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
60 
61 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
62 	{ .flags = word, .bit_nr = bit, }
63 
64 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
65 	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
66 
/*
 * Runtime initialization of a wait-queue head.  The macro materialises
 * one static lock_class_key per call site so lockdep can distinguish
 * head instances; #q passes the head's name for lockdep reporting.
 */
67 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
68 
69 #define init_waitqueue_head(q)				\
70 	do {						\
71 		static struct lock_class_key __key;	\
72 							\
73 		__init_waitqueue_head((q), #q, &__key);	\
74 	} while (0)
75 
/*
 * On-stack heads: with lockdep enabled they must be initialized at
 * runtime (a fresh key per object); otherwise the static initializer
 * is sufficient.
 */
76 #ifdef CONFIG_LOCKDEP
77 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
78 	({ init_waitqueue_head(&name); name; })
79 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
80 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
81 #else
82 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
83 #endif
84 
/* Initialize @q as a normal (non-exclusive) entry waking task @p. */
85 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
86 {
87 	q->flags = 0;
88 	q->private = p;
89 	q->func = default_wake_function;
90 }
91 
/*
 * Initialize @q with a custom wake-up callback @func; no task is
 * associated (->private is NULL).
 */
92 static inline void init_waitqueue_func_entry(wait_queue_t *q,
93 					wait_queue_func_t func)
94 {
95 	q->flags = 0;
96 	q->private = NULL;
97 	q->func = func;
98 }
99 
/*
 * Return non-zero if any waiter is queued on @q.
 * NOTE(review): this is a lockless list_empty() check — the caller must
 * provide ordering against the sleeper (lock or memory barrier) or a
 * wake-up can be missed; confirm each call site.
 */
100 static inline int waitqueue_active(wait_queue_head_t *q)
101 {
102 	return !list_empty(&q->task_list);
103 }
104 
/* Locking variants: these take q->lock internally (kernel/wait.c). */
105 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
106 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
107 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
108 
/*
 * Add @new at the head of @head (woken first).  Double-underscore
 * variant: the caller is expected to serialize, normally by holding
 * @head->lock (use add_wait_queue() otherwise).
 */
109 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
110 {
111 	list_add(&new->task_list, &head->task_list);
112 }
113 
114 /*
115  * Used for wake-one threads:
116  */
/* Mark @wait exclusive, then add at the head; caller serializes. */
117 static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
118 					      wait_queue_t *wait)
119 {
120 	wait->flags |= WQ_FLAG_EXCLUSIVE;
121 	__add_wait_queue(q, wait);
122 }
123 
/* Add @new at the tail of @head (woken last); caller serializes. */
124 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
125 					 wait_queue_t *new)
126 {
127 	list_add_tail(&new->task_list, &head->task_list);
128 }
129 
/*
 * Mark @wait exclusive and queue it at the tail, so non-exclusive
 * waiters (at the head) are woken before it; caller serializes.
 */
130 static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
131 					      wait_queue_t *wait)
132 {
133 	wait->flags |= WQ_FLAG_EXCLUSIVE;
134 	__add_wait_queue_tail(q, wait);
135 }
136 
/* Unlink @old from @head; caller serializes (see remove_wait_queue()). */
137 static inline void __remove_wait_queue(wait_queue_head_t *head,
138 							wait_queue_t *old)
139 {
140 	list_del(&old->task_list);
141 }
142 
/*
 * Wake-up and bit-wait primitives implemented in kernel/wait.c and
 * kernel/sched/core.c.  "locked" variants expect q->lock already held;
 * "sync" variants avoid preempting the caller; "key" is passed through
 * to each entry's wake function (used e.g. for poll masks).
 */
143 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
144 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
145 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
146 			void *key);
147 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
148 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
149 void __wake_up_bit(wait_queue_head_t *, void *, int);
150 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
151 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
152 void wake_up_bit(void *, int);
153 void wake_up_atomic_t(atomic_t *);
154 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
155 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
156 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
157 wait_queue_head_t *bit_waitqueue(void *, int);
158 
/*
 * Wake-up front ends.  The nr_exclusive argument is 1 to wake a single
 * exclusive waiter, 0 (wake_up_all / _interruptible_all) to wake every
 * waiter; TASK_NORMAL wakes both interruptible and uninterruptible
 * sleepers, TASK_INTERRUPTIBLE only the latter kind.
 */
159 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
160 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
161 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
162 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
163 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
164 
165 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
166 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
167 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
168 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
169 
170 /*
171  * Wakeup macros to be used to report events to the targets.
172  * The mask @m is passed as the wake key, so entries using a key-aware
173  * wake function (e.g. poll waiters) can filter on the event bits.
174  */
173 #define wake_up_poll(x, m)				\
174 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
175 #define wake_up_locked_poll(x, m)				\
176 	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
177 #define wake_up_interruptible_poll(x, m)			\
178 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
179 #define wake_up_interruptible_sync_poll(x, m)				\
180 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181 
/*
 * Slow path of wait_event(): loop in TASK_UNINTERRUPTIBLE until
 * @condition is true.  prepare_to_wait() queues us and sets the task
 * state *before* @condition is re-checked, which is what makes the
 * check/sleep race-free against wake_up().
 */
182 #define __wait_event(wq, condition) 					\
183 do {									\
184 	DEFINE_WAIT(__wait);						\
185 									\
186 	for (;;) {							\
187 		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
188 		if (condition)						\
189 			break;						\
190 		schedule();						\
191 	}								\
192 	finish_wait(&wq, &__wait);					\
193 } while (0)
194 
195 /**
196  * wait_event - sleep until a condition gets true
197  * @wq: the waitqueue to wait on
198  * @condition: a C expression for the event to wait for
199  *
200  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
201  * @condition evaluates to true. The @condition is checked each time
202  * the waitqueue @wq is woken up.
203  *
204  * wake_up() has to be called after changing any variable that could
205  * change the result of the wait condition.
206  */
/* Fast path: the break leaves the do-while(0), skipping the slow path. */
207 #define wait_event(wq, condition) 					\
208 do {									\
209 	if (condition)	 						\
210 		break;							\
211 	__wait_event(wq, condition);					\
212 } while (0)
213 
/*
 * Slow path of wait_event_timeout().  @ret carries the remaining
 * jiffies in and out: schedule_timeout() returns what was left when we
 * woke.  If the timer expired (ret == 0) but @condition is now true,
 * report success as 1 jiffy remaining rather than a timeout.
 */
214 #define __wait_event_timeout(wq, condition, ret)			\
215 do {									\
216 	DEFINE_WAIT(__wait);						\
217 									\
218 	for (;;) {							\
219 		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
220 		if (condition)						\
221 			break;						\
222 		ret = schedule_timeout(ret);				\
223 		if (!ret)						\
224 			break;						\
225 	}								\
226 	if (!ret && (condition))					\
227 		ret = 1;						\
228 	finish_wait(&wq, &__wait);					\
229 } while (0)
230 
231 /**
232  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
233  * @wq: the waitqueue to wait on
234  * @condition: a C expression for the event to wait for
235  * @timeout: timeout, in jiffies
236  *
237  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
238  * @condition evaluates to true. The @condition is checked each time
239  * the waitqueue @wq is woken up.
240  *
241  * wake_up() has to be called after changing any variable that could
242  * change the result of the wait condition.
243  *
244  * The function returns 0 if the @timeout elapsed, or the remaining
245  * jiffies (at least 1) if the @condition evaluated to %true before
246  * the @timeout elapsed.
247  */
/* __ret is long: jiffies timeouts are signed long (schedule_timeout). */
248 #define wait_event_timeout(wq, condition, timeout)			\
249 ({									\
250 	long __ret = timeout;						\
251 	if (!(condition)) 						\
252 		__wait_event_timeout(wq, condition, __ret);		\
253 	__ret;								\
254 })
255 
/*
 * Slow path of wait_event_interruptible(): as __wait_event() but sleeps
 * in TASK_INTERRUPTIBLE and bails out with @ret = -ERESTARTSYS when a
 * signal is pending.
 */
256 #define __wait_event_interruptible(wq, condition, ret)			\
257 do {									\
258 	DEFINE_WAIT(__wait);						\
259 									\
260 	for (;;) {							\
261 		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
262 		if (condition)						\
263 			break;						\
264 		if (!signal_pending(current)) {				\
265 			schedule();					\
266 			continue;					\
267 		}							\
268 		ret = -ERESTARTSYS;					\
269 		break;							\
270 	}								\
271 	finish_wait(&wq, &__wait);					\
272 } while (0)
273 
274 /**
275  * wait_event_interruptible - sleep until a condition gets true
276  * @wq: the waitqueue to wait on
277  * @condition: a C expression for the event to wait for
278  *
279  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
280  * @condition evaluates to true or a signal is received.
281  * The @condition is checked each time the waitqueue @wq is woken up.
282  *
283  * wake_up() has to be called after changing any variable that could
284  * change the result of the wait condition.
285  *
286  * The function will return -ERESTARTSYS if it was interrupted by a
287  * signal and 0 if @condition evaluated to true.
288  */
/* Fast path wrapper: 0 if already true, else the slow path sets __ret. */
289 #define wait_event_interruptible(wq, condition)				\
290 ({									\
291 	int __ret = 0;							\
292 	if (!(condition))						\
293 		__wait_event_interruptible(wq, condition, __ret);	\
294 	__ret;								\
295 })
296 
/*
 * Slow path of wait_event_interruptible_timeout(): combines the
 * signal-abort of __wait_event_interruptible() with the remaining-
 * jiffies bookkeeping of __wait_event_timeout() (including the
 * "condition true exactly at timeout" -> 1 fixup).
 */
297 #define __wait_event_interruptible_timeout(wq, condition, ret)		\
298 do {									\
299 	DEFINE_WAIT(__wait);						\
300 									\
301 	for (;;) {							\
302 		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
303 		if (condition)						\
304 			break;						\
305 		if (!signal_pending(current)) {				\
306 			ret = schedule_timeout(ret);			\
307 			if (!ret)					\
308 				break;					\
309 			continue;					\
310 		}							\
311 		ret = -ERESTARTSYS;					\
312 		break;							\
313 	}								\
314 	if (!ret && (condition))					\
315 		ret = 1;						\
316 	finish_wait(&wq, &__wait);					\
317 } while (0)
318 
319 /**
320  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
321  * @wq: the waitqueue to wait on
322  * @condition: a C expression for the event to wait for
323  * @timeout: timeout, in jiffies
324  *
325  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
326  * @condition evaluates to true or a signal is received.
327  * The @condition is checked each time the waitqueue @wq is woken up.
328  *
329  * wake_up() has to be called after changing any variable that could
330  * change the result of the wait condition.
331  *
332  * Returns:
333  * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
334  * a signal, or the remaining jiffies (at least 1) if the @condition
335  * evaluated to %true before the @timeout elapsed.
336  */
/* __ret is long: holds jiffies or -ERESTARTSYS without truncation. */
337 #define wait_event_interruptible_timeout(wq, condition, timeout)	\
338 ({									\
339 	long __ret = timeout;						\
340 	if (!(condition))						\
341 		__wait_event_interruptible_timeout(wq, condition, __ret); \
342 	__ret;								\
343 })
344 
/*
 * Slow path for the hrtimer-based waits.  An on-stack hrtimer_sleeper
 * is armed (unless @timeout is KTIME_MAX, i.e. wait forever); when it
 * fires it clears __t.task, which we use below to detect expiry and
 * return -ETIME.  @state selects TASK_(UN)INTERRUPTIBLE; only the
 * interruptible case checks signals.
 */
345 #define __wait_event_hrtimeout(wq, condition, timeout, state)		\
346 ({									\
347 	int __ret = 0;							\
348 	DEFINE_WAIT(__wait);						\
349 	struct hrtimer_sleeper __t;					\
350 									\
351 	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
352 			      HRTIMER_MODE_REL);			\
353 	hrtimer_init_sleeper(&__t, current);				\
354 	if ((timeout).tv64 != KTIME_MAX)				\
355 		hrtimer_start_range_ns(&__t.timer, timeout,		\
356 				       current->timer_slack_ns,		\
357 				       HRTIMER_MODE_REL);		\
358 									\
359 	for (;;) {							\
360 		prepare_to_wait(&wq, &__wait, state);			\
361 		if (condition)						\
362 			break;						\
363 		if (state == TASK_INTERRUPTIBLE &&			\
364 		    signal_pending(current)) {				\
365 			__ret = -ERESTARTSYS;				\
366 			break;						\
367 		}							\
368 		if (!__t.task) {					\
369 			__ret = -ETIME;					\
370 			break;						\
371 		}							\
372 		schedule();						\
373 	}								\
374 									\
375 	hrtimer_cancel(&__t.timer);					\
376 	destroy_hrtimer_on_stack(&__t.timer);				\
377 	finish_wait(&wq, &__wait);					\
378 	__ret;								\
379 })
380 
381 /**
382  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
383  * @wq: the waitqueue to wait on
384  * @condition: a C expression for the event to wait for
385  * @timeout: timeout, as a ktime_t
386  *
387  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
388  * @condition evaluates to true (signals do not wake this sleep).
389  * The @condition is checked each time the waitqueue @wq is woken up.
390  *
391  * wake_up() has to be called after changing any variable that could
392  * change the result of the wait condition.
393  *
394  * The function returns 0 if @condition became true, or -ETIME if the timeout
395  * elapsed.
396  */
/* Uninterruptible hrtimer wait: 0 on success, -ETIME on expiry. */
397 #define wait_event_hrtimeout(wq, condition, timeout)			\
398 ({									\
399 	int __ret = 0;							\
400 	if (!(condition))						\
401 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
402 					       TASK_UNINTERRUPTIBLE);	\
403 	__ret;								\
404 })
405 
406 /**
407  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
408  * @wq: the waitqueue to wait on
409  * @condition: a C expression for the event to wait for
410  * @timeout: timeout, as a ktime_t
411  *
412  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
413  * @condition evaluates to true or a signal is received.
414  * The @condition is checked each time the waitqueue @wq is woken up.
415  *
416  * wake_up() has to be called after changing any variable that could
417  * change the result of the wait condition.
418  *
419  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
420  * interrupted by a signal, or -ETIME if the timeout elapsed.
421  */
/* Interruptible variant: may also return -ERESTARTSYS on a signal. */
422 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
423 ({									\
424 	long __ret = 0;							\
425 	if (!(condition))						\
426 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
427 					       TASK_INTERRUPTIBLE);	\
428 	__ret;								\
429 })
430 
/*
 * Exclusive (wake-one) interruptible wait.  On the signal path we must
 * use abort_exclusive_wait() rather than finish_wait(): if a wake-up
 * already consumed us as the one exclusive waiter, it re-targets the
 * wake-up at another waiter instead of losing it.  Note finish_wait()/
 * abort_exclusive_wait() are called inside the loop here, so the macro
 * always leaves the queue before exiting.
 */
431 #define __wait_event_interruptible_exclusive(wq, condition, ret)	\
432 do {									\
433 	DEFINE_WAIT(__wait);						\
434 									\
435 	for (;;) {							\
436 		prepare_to_wait_exclusive(&wq, &__wait,			\
437 					TASK_INTERRUPTIBLE);		\
438 		if (condition) {					\
439 			finish_wait(&wq, &__wait);			\
440 			break;						\
441 		}							\
442 		if (!signal_pending(current)) {				\
443 			schedule();					\
444 			continue;					\
445 		}							\
446 		ret = -ERESTARTSYS;					\
447 		abort_exclusive_wait(&wq, &__wait, 			\
448 				TASK_INTERRUPTIBLE, NULL);		\
449 		break;							\
450 	}								\
451 } while (0)
452 
/*
 * As wait_event_interruptible() but queues as an exclusive (wake-one)
 * waiter.  Returns 0 when @condition is true, -ERESTARTSYS on signal.
 */
453 #define wait_event_interruptible_exclusive(wq, condition)		\
454 ({									\
455 	int __ret = 0;							\
456 	if (!(condition))						\
457 		__wait_event_interruptible_exclusive(wq, condition, __ret);\
458 	__ret;								\
459 })
460 
461 
/*
 * Common body for the wait_event_interruptible*_locked* family.
 * Entered and exited with (wq).lock held by the caller; the lock is
 * dropped only around schedule(), using the irq or plain spin_lock
 * flavour selected by @irq.  @exclusive queues the entry with
 * WQ_FLAG_EXCLUSIVE (wake-one).  The entry is added lazily — only when
 * not already linked — because the loop may re-run after wake-ups that
 * left us on the list.
 */
462 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
463 ({									\
464 	int __ret = 0;							\
465 	DEFINE_WAIT(__wait);						\
466 	if (exclusive)							\
467 		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
468 	do {								\
469 		if (likely(list_empty(&__wait.task_list)))		\
470 			__add_wait_queue_tail(&(wq), &__wait);		\
471 		set_current_state(TASK_INTERRUPTIBLE);			\
472 		if (signal_pending(current)) {				\
473 			__ret = -ERESTARTSYS;				\
474 			break;						\
475 		}							\
476 		if (irq)						\
477 			spin_unlock_irq(&(wq).lock);			\
478 		else							\
479 			spin_unlock(&(wq).lock);			\
480 		schedule();						\
481 		if (irq)						\
482 			spin_lock_irq(&(wq).lock);			\
483 		else							\
484 			spin_lock(&(wq).lock);				\
485 	} while (!(condition));						\
486 	__remove_wait_queue(&(wq), &__wait);				\
487 	__set_current_state(TASK_RUNNING);				\
488 	__ret;								\
489 })
490 
491 
492 /**
493  * wait_event_interruptible_locked - sleep until a condition gets true
494  * @wq: the waitqueue to wait on
495  * @condition: a C expression for the event to wait for
496  *
497  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
498  * @condition evaluates to true or a signal is received.
499  * The @condition is checked each time the waitqueue @wq is woken up.
500  *
501  * It must be called with wq.lock being held.  This spinlock is
502  * unlocked while sleeping but @condition testing is done while lock
503  * is held and when this macro exits the lock is held.
504  *
505  * The lock is locked/unlocked using spin_lock()/spin_unlock()
506  * functions which must match the way they are locked/unlocked outside
507  * of this macro.
508  *
509  * wake_up_locked() has to be called after changing any variable that could
510  * change the result of the wait condition.
511  *
512  * The function will return -ERESTARTSYS if it was interrupted by a
513  * signal and 0 if @condition evaluated to true.
514  */
/* exclusive=0, irq=0: plain spin_lock flavour, non-exclusive waiter. */
515 #define wait_event_interruptible_locked(wq, condition)			\
516 	((condition)							\
517 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
518 
519 /**
520  * wait_event_interruptible_locked_irq - sleep until a condition gets true
521  * @wq: the waitqueue to wait on
522  * @condition: a C expression for the event to wait for
523  *
524  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
525  * @condition evaluates to true or a signal is received.
526  * The @condition is checked each time the waitqueue @wq is woken up.
527  *
528  * It must be called with wq.lock being held.  This spinlock is
529  * unlocked while sleeping but @condition testing is done while lock
530  * is held and when this macro exits the lock is held.
531  *
532  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
533  * functions which must match the way they are locked/unlocked outside
534  * of this macro.
535  *
536  * wake_up_locked() has to be called after changing any variable that could
537  * change the result of the wait condition.
538  *
539  * The function will return -ERESTARTSYS if it was interrupted by a
540  * signal and 0 if @condition evaluated to true.
541  */
/* exclusive=0, irq=1: spin_lock_irq flavour, non-exclusive waiter. */
542 #define wait_event_interruptible_locked_irq(wq, condition)		\
543 	((condition)							\
544 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
545 
546 /**
547  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
548  * @wq: the waitqueue to wait on
549  * @condition: a C expression for the event to wait for
550  *
551  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
552  * @condition evaluates to true or a signal is received.
553  * The @condition is checked each time the waitqueue @wq is woken up.
554  *
555  * It must be called with wq.lock being held.  This spinlock is
556  * unlocked while sleeping but @condition testing is done while lock
557  * is held and when this macro exits the lock is held.
558  *
559  * The lock is locked/unlocked using spin_lock()/spin_unlock()
560  * functions which must match the way they are locked/unlocked outside
561  * of this macro.
562  *
563  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
564  * set, so when a wake-up occurs and this process is woken, further
565  * waiting processes on the list are not considered.
566  *
567  * wake_up_locked() has to be called after changing any variable that could
568  * change the result of the wait condition.
569  *
570  * The function will return -ERESTARTSYS if it was interrupted by a
571  * signal and 0 if @condition evaluated to true.
572  */
/* exclusive=1, irq=0: plain spin_lock flavour, wake-one waiter. */
573 #define wait_event_interruptible_exclusive_locked(wq, condition)	\
574 	((condition)							\
575 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
576 
577 /**
578  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
579  * @wq: the waitqueue to wait on
580  * @condition: a C expression for the event to wait for
581  *
582  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
583  * @condition evaluates to true or a signal is received.
584  * The @condition is checked each time the waitqueue @wq is woken up.
585  *
586  * It must be called with wq.lock being held.  This spinlock is
587  * unlocked while sleeping but @condition testing is done while lock
588  * is held and when this macro exits the lock is held.
589  *
590  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
591  * functions which must match the way they are locked/unlocked outside
592  * of this macro.
593  *
594  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
595  * set, so when a wake-up occurs and this process is woken, further
596  * waiting processes on the list are not considered.
597  *
598  * wake_up_locked() has to be called after changing any variable that could
599  * change the result of the wait condition.
600  *
601  * The function will return -ERESTARTSYS if it was interrupted by a
602  * signal and 0 if @condition evaluated to true.
603  */
/* exclusive=1, irq=1: spin_lock_irq flavour, wake-one waiter. */
604 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
605 	((condition)							\
606 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
607 
608 
609 
/*
 * Slow path of wait_event_killable(): like the interruptible variant
 * but sleeps in TASK_KILLABLE, so only fatal signals abort the wait.
 */
610 #define __wait_event_killable(wq, condition, ret)			\
611 do {									\
612 	DEFINE_WAIT(__wait);						\
613 									\
614 	for (;;) {							\
615 		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
616 		if (condition)						\
617 			break;						\
618 		if (!fatal_signal_pending(current)) {			\
619 			schedule();					\
620 			continue;					\
621 		}							\
622 		ret = -ERESTARTSYS;					\
623 		break;							\
624 	}								\
625 	finish_wait(&wq, &__wait);					\
626 } while (0)
627 
628 /**
629  * wait_event_killable - sleep until a condition gets true
630  * @wq: the waitqueue to wait on
631  * @condition: a C expression for the event to wait for
632  *
633  * The process is put to sleep (TASK_KILLABLE) until the
634  * @condition evaluates to true or a signal is received.
635  * The @condition is checked each time the waitqueue @wq is woken up.
636  *
637  * wake_up() has to be called after changing any variable that could
638  * change the result of the wait condition.
639  *
640  * The function will return -ERESTARTSYS if it was interrupted by a
641  * signal and 0 if @condition evaluated to true.
642  */
/* 0 when @condition is true, -ERESTARTSYS on a fatal signal. */
643 #define wait_event_killable(wq, condition)				\
644 ({									\
645 	int __ret = 0;							\
646 	if (!(condition))						\
647 		__wait_event_killable(wq, condition, __ret);		\
648 	__ret;								\
649 })
650 
651 
/*
 * Slow path for wait_event_lock_irq[_cmd]().  @lock is held by the
 * caller on entry and exit; it is dropped (spin_unlock_irq) around
 * @cmd and schedule() so @condition is always evaluated under @lock.
 */
652 #define __wait_event_lock_irq(wq, condition, lock, cmd)			\
653 do {									\
654 	DEFINE_WAIT(__wait);						\
655 									\
656 	for (;;) {							\
657 		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
658 		if (condition)						\
659 			break;						\
660 		spin_unlock_irq(&lock);					\
661 		cmd;							\
662 		schedule();						\
663 		spin_lock_irq(&lock);					\
664 	}								\
665 	finish_wait(&wq, &__wait);					\
666 } while (0)
667 
668 /**
669  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
670  *			     condition is checked under the lock. This
671  *			     is expected to be called with the lock
672  *			     taken.
673  * @wq: the waitqueue to wait on
674  * @condition: a C expression for the event to wait for
675  * @lock: a locked spinlock_t, which will be released before cmd
676  *	  and schedule() and reacquired afterwards.
677  * @cmd: a command which is invoked outside the critical section before
678  *	 sleep
679  *
680  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
681  * @condition evaluates to true. The @condition is checked each time
682  * the waitqueue @wq is woken up.
683  *
684  * wake_up() has to be called after changing any variable that could
685  * change the result of the wait condition.
686  *
687  * This is supposed to be called while holding the lock. The lock is
688  * dropped before invoking the cmd and going to sleep and is reacquired
689  * afterwards.
690  */
/* Fast path: break leaves the do-while(0) if @condition already holds. */
691 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
692 do {									\
693 	if (condition)							\
694 		break;							\
695 	__wait_event_lock_irq(wq, condition, lock, cmd);		\
696 } while (0)
697 
698 /**
699  * wait_event_lock_irq - sleep until a condition gets true. The
700  *			 condition is checked under the lock. This
701  *			 is expected to be called with the lock
702  *			 taken.
703  * @wq: the waitqueue to wait on
704  * @condition: a C expression for the event to wait for
705  * @lock: a locked spinlock_t, which will be released before schedule()
706  *	  and reacquired afterwards.
707  *
708  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
709  * @condition evaluates to true. The @condition is checked each time
710  * the waitqueue @wq is woken up.
711  *
712  * wake_up() has to be called after changing any variable that could
713  * change the result of the wait condition.
714  *
715  * This is supposed to be called while holding the lock. The lock is
716  * dropped before going to sleep and is reacquired afterwards.
717  */
/* Same as wait_event_lock_irq_cmd() with an empty @cmd. */
718 #define wait_event_lock_irq(wq, condition, lock)			\
719 do {									\
720 	if (condition)							\
721 		break;							\
722 	__wait_event_lock_irq(wq, condition, lock, );			\
723 } while (0)
724 
725 
/*
 * Interruptible flavour of __wait_event_lock_irq(): additionally bails
 * out with @ret = -ERESTARTSYS on a pending signal, checked under
 * @lock before dropping it.
 */
726 #define __wait_event_interruptible_lock_irq(wq, condition,		\
727 					    lock, ret, cmd)		\
728 do {									\
729 	DEFINE_WAIT(__wait);						\
730 									\
731 	for (;;) {							\
732 		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
733 		if (condition)						\
734 			break;						\
735 		if (signal_pending(current)) {				\
736 			ret = -ERESTARTSYS;				\
737 			break;						\
738 		}							\
739 		spin_unlock_irq(&lock);					\
740 		cmd;							\
741 		schedule();						\
742 		spin_lock_irq(&lock);					\
743 	}								\
744 	finish_wait(&wq, &__wait);					\
745 } while (0)
746 
747 /**
748  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
749  *		The condition is checked under the lock. This is expected to
750  *		be called with the lock taken.
751  * @wq: the waitqueue to wait on
752  * @condition: a C expression for the event to wait for
753  * @lock: a locked spinlock_t, which will be released before cmd and
754  *	  schedule() and reacquired afterwards.
755  * @cmd: a command which is invoked outside the critical section before
756  *	 sleep
757  *
758  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
759  * @condition evaluates to true or a signal is received. The @condition is
760  * checked each time the waitqueue @wq is woken up.
761  *
762  * wake_up() has to be called after changing any variable that could
763  * change the result of the wait condition.
764  *
765  * This is supposed to be called while holding the lock. The lock is
766  * dropped before invoking the cmd and going to sleep and is reacquired
767  * afterwards.
768  *
769  * The macro will return -ERESTARTSYS if it was interrupted by a signal
770  * and 0 if @condition evaluated to true.
771  */
/* 0 when @condition is true, -ERESTARTSYS on signal; @cmd runs unlocked. */
772 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
773 ({									\
774 	int __ret = 0;							\
775 									\
776 	if (!(condition))						\
777 		__wait_event_interruptible_lock_irq(wq, condition,	\
778 						    lock, __ret, cmd);	\
779 	__ret;								\
780 })
781 
782 /**
783  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
784  *		The condition is checked under the lock. This is expected
785  *		to be called with the lock taken.
786  * @wq: the waitqueue to wait on
787  * @condition: a C expression for the event to wait for
788  * @lock: a locked spinlock_t, which will be released before schedule()
789  *	  and reacquired afterwards.
790  *
791  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
792  * @condition evaluates to true or signal is received. The @condition is
793  * checked each time the waitqueue @wq is woken up.
794  *
795  * wake_up() has to be called after changing any variable that could
796  * change the result of the wait condition.
797  *
798  * This is supposed to be called while holding the lock. The lock is
799  * dropped before going to sleep and is reacquired afterwards.
800  *
801  * The macro will return -ERESTARTSYS if it was interrupted by a signal
802  * and 0 if @condition evaluated to true.
803  */
/* Same as wait_event_interruptible_lock_irq_cmd() with an empty @cmd. */
804 #define wait_event_interruptible_lock_irq(wq, condition, lock)		\
805 ({									\
806 	int __ret = 0;							\
807 									\
808 	if (!(condition))						\
809 		__wait_event_interruptible_lock_irq(wq, condition,	\
810 						    lock, __ret, );	\
811 	__ret;								\
812 })
813 
/*
 * Timeout flavour: @ret carries the remaining jiffies in and out via
 * schedule_timeout() (called with @lock dropped), or -ERESTARTSYS on a
 * signal; 0 means the timeout elapsed.
 */
814 #define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
815 						    lock, ret)		\
816 do {									\
817 	DEFINE_WAIT(__wait);						\
818 									\
819 	for (;;) {							\
820 		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
821 		if (condition)						\
822 			break;						\
823 		if (signal_pending(current)) {				\
824 			ret = -ERESTARTSYS;				\
825 			break;						\
826 		}							\
827 		spin_unlock_irq(&lock);					\
828 		ret = schedule_timeout(ret);				\
829 		spin_lock_irq(&lock);					\
830 		if (!ret)						\
831 			break;						\
832 	}								\
833 	finish_wait(&wq, &__wait);					\
834 } while (0)
835 
836 /**
837  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
838  *		The condition is checked under the lock. This is expected
839  *		to be called with the lock taken.
840  * @wq: the waitqueue to wait on
841  * @condition: a C expression for the event to wait for
842  * @lock: a locked spinlock_t, which will be released before schedule()
843  *	  and reacquired afterwards.
844  * @timeout: timeout, in jiffies
845  *
846  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
847  * @condition evaluates to true or signal is received. The @condition is
848  * checked each time the waitqueue @wq is woken up.
849  *
850  * wake_up() has to be called after changing any variable that could
851  * change the result of the wait condition.
852  *
853  * This is supposed to be called while holding the lock. The lock is
854  * dropped before going to sleep and is reacquired afterwards.
855  *
856  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
857  * was interrupted by a signal, and the remaining jiffies otherwise
858  * if the condition evaluated to true before the timeout elapsed.
859  */
/*
 * Returns the remaining jiffies (>= 1) if @condition became true, 0 if
 * the timeout elapsed, or -ERESTARTSYS on a signal.
 *
 * __ret must be long, not int: jiffies timeouts are signed long and
 * schedule_timeout() takes and returns long, so an int here truncates
 * large timeouts on 64-bit.  This also matches the sibling
 * wait_event_timeout() / wait_event_interruptible_timeout() macros.
 */
860 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
861 						  timeout)		\
862 ({									\
863 	long __ret = timeout;						\
864 									\
865 	if (!(condition))						\
866 		__wait_event_interruptible_lock_irq_timeout(		\
867 					wq, condition, lock, __ret);	\
868 	__ret;								\
869 })
870 
871 
872 /*
873  * These are the old interfaces to sleep waiting for an event.
874  * They are racy.  DO NOT use them, use the wait_event* interfaces above.
875  * We plan to remove these interfaces.
876  */
/* Deprecated, inherently racy interfaces — see the comment above. */
877 extern void sleep_on(wait_queue_head_t *q);
878 extern long sleep_on_timeout(wait_queue_head_t *q,
879 				      signed long timeout);
880 extern void interruptible_sleep_on(wait_queue_head_t *q);
881 extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
882 					   signed long timeout);
883 
884 /*
885  * Waitqueues which are removed from the waitqueue_head at wakeup time
886  */
887 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
888 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
889 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
890 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
891 			unsigned int mode, void *key);
892 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
893 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
894 
/*
 * Declare and initialise an on-stack wait queue entry for the current
 * task, using @function as its wake callback.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}
901 
902 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
903 
/*
 * Declare and initialise an on-stack wait_bit_queue for the current
 * task, keyed on bit @bit of @word and woken via wake_bit_function.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
914 
/*
 * Runtime initialisation of an existing wait_queue_t for the current
 * task: autoremove_wake_function callback, empty list link, no flags.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
922 
923 /**
924  * wait_on_bit - wait for a bit to be cleared
925  * @word: the word being waited on, a kernel virtual address
926  * @bit: the bit of the word being waited on
927  * @action: the function used to sleep, which may take special actions
928  * @mode: the task state to sleep in
929  *
930  * There is a standard hashed waitqueue table for generic use. This
931  * is the part of the hashtable's accessor API that waits on a bit.
932  * For instance, if one were to have waiters on a bitflag, one would
933  * call wait_on_bit() in threads waiting for the bit to clear.
934  * One uses wait_on_bit() where one is waiting for the bit to clear,
935  * but has no intention of setting it.
936  */
937 static inline int wait_on_bit(void *word, int bit,
938 				int (*action)(void *), unsigned mode)
939 {
940 	if (!test_bit(bit, word))
941 		return 0;
942 	return out_of_line_wait_on_bit(word, bit, action, mode);
943 }
944 
945 /**
946  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
947  * @word: the word being waited on, a kernel virtual address
948  * @bit: the bit of the word being waited on
949  * @action: the function used to sleep, which may take special actions
950  * @mode: the task state to sleep in
951  *
952  * There is a standard hashed waitqueue table for generic use. This
953  * is the part of the hashtable's accessor API that waits on a bit
954  * when one intends to set it, for instance, trying to lock bitflags.
955  * For instance, if one were to have waiters trying to set bitflag
956  * and waiting for it to clear before setting it, one would call
957  * wait_on_bit() in threads waiting to be able to set the bit.
958  * One uses wait_on_bit_lock() where one is waiting for the bit to
959  * clear with the intention of setting it, and when done, clearing it.
960  */
961 static inline int wait_on_bit_lock(void *word, int bit,
962 				int (*action)(void *), unsigned mode)
963 {
964 	if (!test_and_set_bit(bit, word))
965 		return 0;
966 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
967 }
968 
969 /**
970  * wait_on_atomic_t - Wait for an atomic_t to become 0
971  * @val: The atomic value being waited on, a kernel virtual address
972  * @action: the function used to sleep, which may take special actions
973  * @mode: the task state to sleep in
974  *
975  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
976  * the purpose of getting a waitqueue, but we set the key to a bit number
977  * outside of the target 'word'.
978  */
979 static inline
980 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
981 {
982 	if (atomic_read(val) == 0)
983 		return 0;
984 	return out_of_line_wait_on_atomic_t(val, action, mode);
985 }
986 
987 #endif
988