xref: /linux-6.15/include/linux/rcupdate_wait.h (revision 5a562b8b)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2f9411ebeSIngo Molnar #ifndef _LINUX_SCHED_RCUPDATE_WAIT_H
3f9411ebeSIngo Molnar #define _LINUX_SCHED_RCUPDATE_WAIT_H
4f9411ebeSIngo Molnar 
5f9411ebeSIngo Molnar /*
6f9411ebeSIngo Molnar  * RCU synchronization types and methods:
7f9411ebeSIngo Molnar  */
8f9411ebeSIngo Molnar 
9f9411ebeSIngo Molnar #include <linux/rcupdate.h>
10f9411ebeSIngo Molnar #include <linux/completion.h>
111e2f2d31SKent Overstreet #include <linux/sched.h>
12f9411ebeSIngo Molnar 
/*
 * Structure allowing asynchronous waiting on RCU: the rcu_head is
 * handed to a call_rcu()-style function and the waiter sleeps on the
 * completion until the corresponding grace period has elapsed.
 */
struct rcu_synchronize {
	struct rcu_head head;		/* Queued via a call_rcu()-style function. */
	struct completion completion;	/* Signalled when the grace period ends. */

	/* This is for debugging. */
	struct rcu_gp_oldstate oldstate;
};

/* RCU callback paired with struct rcu_synchronize; wakes up the waiter. */
void wakeme_after_rcu(struct rcu_head *head);
24f9411ebeSIngo Molnar 
/*
 * Wait for the grace periods of @n RCU flavors, whose call_rcu()-style
 * functions are listed in @crcu_array, using @rs_array for per-flavor
 * rcu_synchronize state.  @checktiny tells Tiny RCU not to bother
 * waiting (see synchronize_rcu_mult() below); @state is the task state
 * used while sleeping, e.g. TASK_UNINTERRUPTIBLE.
 */
void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array);
27f9411ebeSIngo Molnar 
/*
 * Build on-stack arrays sized to the number of call_rcu()-style
 * functions passed in, then hand them to __wait_rcu_gp().  A macro so
 * that ARRAY_SIZE() can count the variadic arguments at compile time.
 */
#define _wait_rcu_gp(checktiny, state, ...) \
do {												\
	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };					\
	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];				\
	__wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array);	\
} while (0)
34f9411ebeSIngo Molnar 
/* Wait uninterruptibly for the grace periods of the listed RCU flavors. */
#define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__)

/* As wait_rcu_gp(), but sleeping in the caller-specified task @state. */
#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__)
37f9411ebeSIngo Molnar 
/**
 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
 * @...: List of call_rcu() functions for different grace periods to wait on
 *
 * This macro waits concurrently for multiple types of RCU grace periods.
 * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
 * on concurrent RCU and RCU-tasks grace periods.  Waiting on a given SRCU
 * domain requires you to write a wrapper function for that SRCU domain's
 * call_srcu() function, with this wrapper supplying the pointer to the
 * corresponding srcu_struct.
 *
 * Note that call_rcu_hurry() should be used instead of call_rcu()
 * because in kernels built with CONFIG_RCU_LAZY=y the delay between the
 * invocation of call_rcu() and that of the corresponding RCU callback
 * can be multiple seconds.
 *
 * The first argument tells Tiny RCU's _wait_rcu_gp() not to
 * bother waiting for RCU.  The reason for this is because anywhere
 * synchronize_rcu_mult() can be called is automatically already a full
 * grace period.
 */
#define synchronize_rcu_mult(...) \
	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__)
61b3d73156SPaul E. McKenney 
/*
 * Momentarily exit the RCU read-side critical section so that
 * cond_resched() may legally be called, then re-enter it.  Compiles to
 * a no-op unless CONFIG_DEBUG_ATOMIC_SLEEP is set or the kernel is
 * built without CONFIG_PREEMPT_RCU.  Callers must be prepared for RCU
 * state to change across this call: the unlock/lock pair means any
 * RCU-protected pointers held beforehand are no longer guaranteed.
 */
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();	/* Must drop the read lock before sleeping. */
	cond_resched();
	rcu_read_lock();
#endif
}
701e2f2d31SKent Overstreet 
// Has the current task blocked within its current RCU read-side
// critical section?
static inline bool has_rcu_reader_blocked(void)
{
#ifdef CONFIG_PREEMPT_RCU
	/*
	 * NOTE(review): presumably preemptible RCU links a reader that
	 * blocks onto a list via current->rcu_node_entry, so a non-empty
	 * entry implies the task blocked inside its critical section —
	 * confirm against kernel/rcu/tree_plugin.h.
	 */
	return !list_empty(&current->rcu_node_entry);
#else
	/* Readers cannot block inside non-preemptible RCU critical sections. */
	return false;
#endif
}
810f38c06cSPaul E. McKenney 
82f9411ebeSIngo Molnar #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
83