/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_BIT_H
#define _LINUX_WAIT_BIT_H

/*
 * Linux wait-bit related types and methods:
 */
#include <linux/wait.h>

struct wait_bit_key {
	unsigned long		*flags;
	int			bit_nr;
	unsigned long		timeout;
};

struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	struct wait_queue_entry	wq_entry;
};

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)					\
	{ .flags = word, .bit_nr = bit, }

typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);

void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(unsigned long *word, int bit);
int out_of_line_wait_on_bit(unsigned long *word, int bit, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(unsigned long *word, int bit, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(unsigned long *word, int bit, wait_bit_action_f *action, unsigned int mode);
struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit);
extern void __init wait_bit_init(void);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_BIT(name, word, bit)					\
	struct wait_bit_queue_entry name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),			\
		.wq_entry = {							\
			.private	= current,				\
			.func		= wake_bit_function,			\
			.entry		=					\
				LIST_HEAD_INIT((name).wq_entry.entry),		\
		},								\
	}

extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared.  The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool.  It will only be woken on a wake_up for the target bit, even
 * if other processes on the same queue are waiting for other bits.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}
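
/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * waits for a slot's busy bit to clear before reusing it.  struct foo,
 * foo->flags and FOO_BUSY are made-up names for the example.
 *
 *	if (wait_on_bit(&foo->flags, FOO_BUSY, TASK_INTERRUPTIBLE))
 *		return -EINTR;		// a signal arrived first
 *	reuse_slot(foo);		// bit seen clear, ACQUIRE ordered
 *
 * The waker side pairs with this via clear_and_wake_up_bit(); see the
 * sketch next to that helper below.
 */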

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared.  The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), but calls io_schedule() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared, or for a timeout to expire.  The
 * clearing of the bit must be signalled with wake_up_bit(), often as
 * clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), except it also takes a timeout
 * parameter.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal, or %-EAGAIN if the
 * timeout elapsed.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}
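
/*
 * Illustrative sketch with made-up names: bound the wait to one second
 * and translate the timeout into a driver-specific error.
 *
 *	int err = wait_on_bit_timeout(&foo->flags, FOO_BUSY,
 *				      TASK_UNINTERRUPTIBLE, HZ);
 *	if (err == -EAGAIN)
 *		return -ETIMEDOUT;	// bit stayed set for a full second
 */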

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared.  The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), but calls @action() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or the error code returned by @action if
 * that call returned non-zero.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
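
/*
 * Illustrative sketch with made-up names: an @action that behaves like
 * bit_wait() but nudges hypothetical hardware before each sleep.
 * foo_poke_hw() is an assumed driver-specific helper.
 *
 *	static int foo_bit_wait(struct wait_bit_key *key, int mode)
 *	{
 *		foo_poke_hw();
 *		schedule();
 *		if (signal_pending_state(mode, current))
 *			return -EINTR;
 *		return 0;
 *	}
 *
 *	err = wait_on_bit_action(&foo->flags, FOO_BUSY, foo_bit_wait,
 *				 TASK_INTERRUPTIBLE);
 */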

/**
 * wait_on_bit_lock - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared.  The clearing of the bit must be
 * signalled with wake_up_bit(), often as clear_and_wake_up_bit().  As
 * soon as it is clear, atomically set it and return.
 *
 * This is similar to wait_on_bit(), but sets the bit before returning.
 *
 * Returned value will be zero if the bit was successfully set in which
 * case the call has the same memory sequencing semantics as
 * test_and_set_bit(), or %-EINTR if the process received a signal and
 * the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
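
/*
 * Illustrative sketch with made-up names: using a flag bit as a simple
 * sleeping lock around rarely-contended state.
 *
 *	if (wait_on_bit_lock(&foo->flags, FOO_LOCKED, TASK_KILLABLE))
 *		return -EINTR;		// fatal signal while waiting
 *	update_foo(foo);		// exclusive access here
 *	clear_and_wake_up_bit(FOO_LOCKED, &foo->flags);
 */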

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared.  The clearing of the bit must be
 * signalled with wake_up_bit(), often as clear_and_wake_up_bit().  As
 * soon as it is clear, atomically set it and return.
 *
 * This is similar to wait_on_bit_lock(), but calls io_schedule() instead
 * of schedule().
 *
 * Returned value will be zero if the bit was successfully set in which
 * case the call has the same memory sequencing semantics as
 * test_and_set_bit(), or %-EINTR if the process received a signal and
 * the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * This is similar to wait_on_bit_lock(), but calls @action() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was successfully set in which
 * case the call has the same memory sequencing semantics as
 * test_and_set_bit(), or the error code returned by @action if that
 * call returned non-zero.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})

#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())
#define __wait_var_event_io(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  io_schedule())

/**
 * wait_var_event - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true, only re-checking when a wake up is
 * received for the given @var (an arbitrary kernel address which need
 * not be directly related to the given condition, but usually is).
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool.  It will only be woken on a wake_up for the given address.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event(var, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event(var, condition);				\
} while (0)
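
/*
 * Illustrative sketch with made-up names: wait for a flag that another
 * context publishes with store_release_wake_up() (defined later in this
 * header).
 *
 *	// waiter
 *	wait_var_event(&foo->done, smp_load_acquire(&foo->done));
 *
 *	// waker
 *	store_release_wake_up(&foo->done, 1);
 */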

/**
 * wait_var_event_io - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for an IO related @condition to be true, only re-checking when a
 * wake up is received for the given @var (an arbitrary kernel address
 * which need not be directly related to the given condition, but
 * usually is).
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool.  It will only be woken on a wake_up for the given address.
 *
 * This is similar to wait_var_event(), but calls io_schedule() instead
 * of schedule().
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_io(var, condition)				\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event_io(var, condition);				\
} while (0)

#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())

/**
 * wait_var_event_killable - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true or a fatal signal to be received,
 * only re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is
 * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal
 * was received.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})

#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout(condition),		\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))

/**
 * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 * @timeout: maximum time to wait in jiffies
 *
 * Wait for a @condition to be true or a timeout to expire, only
 * re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is 0 if
 * the timeout expired and the condition was still false, or the
 * remaining time left in the timeout (but at least 1) if the condition
 * was found to be true.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})
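
/*
 * Illustrative sketch with made-up names: give outstanding requests five
 * seconds to drain before complaining.
 *
 *	if (!wait_var_event_timeout(&foo->pending,
 *				    atomic_read(&foo->pending) == 0, 5 * HZ))
 *		dev_warn(foo->dev, "requests did not drain\n");
 */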

#define __wait_var_event_interruptible(var, condition)			\
	___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0,	\
			  schedule())

/**
 * wait_var_event_interruptible - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true or a signal to be received, only
 * re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is 0 if
 * the condition became true, or %-ERESTARTSYS if a signal was received.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_interruptible(var, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_interruptible(var, condition);	\
	__ret;								\
})

/**
 * wait_var_event_any_lock - wait for a variable to be updated under a lock
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the object that is locked to protect updates to the variable
 * @type: prefix on lock and unlock operations
 * @state: waiting state, %TASK_UNINTERRUPTIBLE etc.
 *
 * Wait for a condition which can only be reliably tested while holding
 * a lock.  The variables assessed in the condition will normally be updated
 * under the same lock, and the wake up should be signalled with
 * wake_up_var_locked() under the same lock.
 *
 * This is similar to wait_var_event(), but assumes a lock is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given lock is held and the lock will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reacquired before testing the condition again.  The functions used to
 * unlock and lock the object are constructed by appending _unlock and _lock
 * to @type.
 *
 * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt
 * the wait according to @state.
 */
#define wait_var_event_any_lock(var, condition, lock, type, state)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = ___wait_var_event(var, condition, state, 0, 0,	\
					  type ## _unlock(lock);	\
					  schedule();			\
					  type ## _lock(lock));		\
	__ret;								\
})

/**
 * wait_var_event_spinlock - wait for a variable to be updated under a spinlock
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the spinlock which protects updates to the variable
 *
 * Wait for a condition which can only be reliably tested while holding
 * a spinlock.  The variables assessed in the condition will normally be
 * updated under the same spinlock, and the wake up should be signalled
 * with wake_up_var_locked() under the same spinlock.
 *
 * This is similar to wait_var_event(), but assumes a spinlock is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given lock is held and the lock will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reacquired before testing the condition again.
 */
#define wait_var_event_spinlock(var, condition, lock)			\
	wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE)
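
/*
 * Illustrative sketch with made-up names: both sides run under the same
 * spinlock, so no extra barriers are needed.
 *
 *	// waiter
 *	spin_lock(&foo->lock);
 *	wait_var_event_spinlock(&foo->state, foo->state == FOO_IDLE,
 *				&foo->lock);
 *	...
 *	spin_unlock(&foo->lock);
 *
 *	// waker, under the same lock
 *	foo->state = FOO_IDLE;
 *	wake_up_var_locked(&foo->state, &foo->lock);
 */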

/**
 * wait_var_event_mutex - wait for a variable to be updated under a mutex
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the mutex which protects updates to the variable
 *
 * Wait for a condition which can only be reliably tested while holding
 * a mutex.  The variables assessed in the condition will normally be
 * updated under the same mutex, and the wake up should be signalled
 * with wake_up_var_locked() under the same mutex.
 *
 * This is similar to wait_var_event(), but assumes a mutex is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given mutex is held and the mutex will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reacquired before testing the condition again.
 */
#define wait_var_event_mutex(var, condition, lock)			\
	wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE)

/**
 * wake_up_var_protected - wake up waiters for a variable asserting that it is safe
 * @var: the address of the variable being waited on
 * @cond: the condition which affirms this is safe
 *
 * When waking waiters which use wait_var_event_any_lock() the waker must be
 * holding the relevant lock to avoid races.  This version of wake_up_var()
 * asserts that the relevant lock is held and so no barrier is needed.
 * The @cond is only tested when CONFIG_LOCKDEP is enabled.
 */
#define wake_up_var_protected(var, cond)				\
do {									\
	lockdep_assert(cond);						\
	wake_up_var(var);						\
} while (0)

/**
 * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex
 * @var: the address of the variable being waited on
 * @lock: the spinlock or mutex that protects the variable
 *
 * Send a wake up for the given variable which should be waited for with
 * wait_var_event_spinlock() or wait_var_event_mutex().  Unlike wake_up_var(),
 * no extra barriers are needed as the locking provides sufficient sequencing.
 */
#define wake_up_var_locked(var, lock)					\
	wake_up_var_protected(var, lockdep_is_held(lock))

/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 * @bit: the bit of the word being waited on
 * @word: the address containing the bit being waited on
 *
 * The designated bit is cleared and any tasks waiting in wait_on_bit()
 * or similar will be woken.  This call has RELEASE semantics so that
 * any changes to memory made before this call are guaranteed to be visible
 * after the corresponding wait_on_bit() completes.
 */
static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
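
/*
 * Illustrative sketch with made-up names: the waker side of the
 * wait_on_bit() example earlier in this header.
 *
 *	set_bit(FOO_BUSY, &foo->flags);
 *	do_slow_work(foo);
 *	clear_and_wake_up_bit(FOO_BUSY, &foo->flags);
 */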

/**
 * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
 * @bit: the bit of the word being waited on
 * @word: the address of memory containing that bit
 *
 * If the bit is set and can be atomically cleared, any tasks waiting in
 * wait_on_bit() or similar will be woken.  This call has the same
 * complete ordering semantics as test_and_clear_bit().  Any changes to
 * memory made before this call are guaranteed to be visible after the
 * corresponding wait_on_bit() completes.
 *
 * Returns %true if the bit was successfully cleared and the wake up was sent.
 */
static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
{
	if (!test_and_clear_bit(bit, word))
		return false;
	/* no extra barrier required */
	wake_up_bit(word, bit);
	return true;
}

/**
 * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
 * @var: the variable to dec and test
 *
 * Decrements the atomic variable and, if it reaches zero, sends a wake_up
 * to any processes waiting on the variable.
 *
 * This function has the same complete ordering semantics as
 * atomic_dec_and_test().
 *
 * Returns %true if the variable reaches zero and the wake up was sent.
 */
static inline bool atomic_dec_and_wake_up(atomic_t *var)
{
	if (!atomic_dec_and_test(var))
		return false;
	/* No extra barrier required */
	wake_up_var(var);
	return true;
}
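
/*
 * Illustrative sketch with made-up names: each user drops a reference
 * and the last one wakes the teardown path.
 *
 *	// each user, when finished
 *	atomic_dec_and_wake_up(&foo->users);
 *
 *	// teardown
 *	wait_var_event(&foo->users, atomic_read(&foo->users) == 0);
 */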

/**
 * store_release_wake_up - update a variable and send a wake_up
 * @var: the address of the variable to be updated and woken
 * @val: the value to store in the variable.
 *
 * Store the given value in the variable and send a wake up to any tasks
 * waiting on the variable.  All necessary barriers are included to ensure
 * the task calling wait_var_event() sees the new value and all values
 * written to memory before this call.
 */
#define store_release_wake_up(var, val)					\
do {									\
	smp_store_release(var, val);					\
	smp_mb();							\
	wake_up_var(var);						\
} while (0)
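
/*
 * Illustrative sketch with made-up names: publish a result for the
 * wait_var_event() waiter shown earlier in this header.
 *
 *	foo->result = compute_result();	// ordered before the store below
 *	store_release_wake_up(&foo->done, 1);
 */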

#endif /* _LINUX_WAIT_BIT_H */