/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <[email protected]>
 *	   Paul E. McKenney <[email protected]> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
1664db4cffSPaul E. McKenney 
1764db4cffSPaul E. McKenney #ifndef __LINUX_RCUTREE_H
1864db4cffSPaul E. McKenney #define __LINUX_RCUTREE_H
1964db4cffSPaul E. McKenney 
20d28139c4SPaul E. McKenney void rcu_softirq_qs(void);
21bcbfdd01SPaul E. McKenney void rcu_note_context_switch(bool preempt);
2229845399SFrederic Weisbecker int rcu_needs_cpu(void);
23584dc4ceSTeodora Baluta void rcu_cpu_stall_reset(void);
2443a89baeSPaul E. McKenney void rcu_request_urgent_qs_task(struct task_struct *t);
2564db4cffSPaul E. McKenney 
/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.  The caller must have disabled interrupts.
 *
 * Passing false indicates this is a voluntary (non-preemption)
 * context switch from the guest's point of view.
 */
static inline void rcu_virt_note_context_switch(void)
{
	rcu_note_context_switch(false);
}
3529ce8310SGleb Natapov 
36584dc4ceSTeodora Baluta void synchronize_rcu_expedited(void);
3704a522b7SUladzislau Rezki (Sony) 
382b55d6a4SUladzislau Rezki (Sony) void rcu_barrier(void);
39486e2593SPaul E. McKenney void rcu_momentary_eqs(void);
40584dc4ceSTeodora Baluta 
/*
 * Full-fidelity RCU grace-period polling state: tracks both the normal
 * (rgos_norm) and expedited (rgos_exp) grace-period sequence cookies.
 */
struct rcu_gp_oldstate {
	unsigned long rgos_norm;
	unsigned long rgos_exp;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4

/**
 * same_state_synchronize_rcu_full - Are two old-state values identical?
 * @rgosp1: First old-state value.
 * @rgosp2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or get_completed_synchronize_rcu_full().  Returns @true if the two
 * values are identical and @false otherwise.  This allows structures
 * whose lifetimes are tracked by old-state values to push these values
 * to a list header, allowing those structures to be slightly smaller.
 *
 * Note that equality is judged on a bitwise basis, so that an
 * @rcu_gp_oldstate structure with an already-completed state in one field
 * will compare not-equal to a structure with an already-completed state
 * in the other field.  After all, the @rcu_gp_oldstate structure is opaque
 * so how did such a situation come to pass in the first place?
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
}
7318538248SPaul E. McKenney 
7418538248SPaul E. McKenney unsigned long start_poll_synchronize_rcu_expedited(void);
7518538248SPaul E. McKenney void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
7618538248SPaul E. McKenney void cond_synchronize_rcu_expedited(unsigned long oldstate);
77d96c52feSPaul E. McKenney void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
786c502b14SPaul E. McKenney unsigned long get_state_synchronize_rcu(void);
79d96c52feSPaul E. McKenney void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
808df13f01SPaul E. McKenney unsigned long start_poll_synchronize_rcu(void);
81765a3f4fSPaul E. McKenney void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
823fdefca9SPaul E. McKenney bool poll_state_synchronize_rcu(unsigned long oldstate);
837abb18bdSPaul E. McKenney bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
8476ea3641SPaul E. McKenney void cond_synchronize_rcu(unsigned long oldstate);
857abb18bdSPaul E. McKenney void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
8691a967fdSPaul E. McKenney 
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
/* No-op stub when PROVE_RCU debug checking is not configured. */
static inline void rcu_irq_exit_check_preempt(void) { }
#endif
9207325d4aSThomas Gleixner 
9307325d4aSThomas Gleixner struct task_struct;
9407325d4aSThomas Gleixner void rcu_preempt_deferred_qs(struct task_struct *t);
9507325d4aSThomas Gleixner 
9617211455SFrederic Weisbecker void exit_rcu(void);
9717211455SFrederic Weisbecker 
9817211455SFrederic Weisbecker void rcu_scheduler_starting(void);
99584dc4ceSTeodora Baluta extern int rcu_scheduler_active;
1002439b696SPaul E. McKenney void rcu_end_inkernel_boot(void);
101584dc4ceSTeodora Baluta bool rcu_inkernel_boot_has_ended(void);
102e6339d3bSIngo Molnar bool rcu_is_watching(void);
103d2b1654fSPaul E. McKenney #ifndef CONFIG_PREEMPT_RCU
10459ee0326SPaul E. McKenney void rcu_all_qs(void);
105584dc4ceSTeodora Baluta #endif
106*ad6b5b73SAnkur Arora 
1075cd37193SPaul E. McKenney /* RCUtree hotplug events */
108395a2f09SPaul E. McKenney int rcutree_prepare_cpu(unsigned int cpu);
1095cd37193SPaul E. McKenney int rcutree_online_cpu(unsigned int cpu);
1104df83742SThomas Gleixner void rcutree_report_cpu_starting(unsigned int cpu);
1114df83742SThomas Gleixner 
1124df83742SThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU
113448e9f34SFrederic Weisbecker int rcutree_dead_cpu(unsigned int cpu);
1142cb1f6e9SFrederic Weisbecker int rcutree_dying_cpu(unsigned int cpu);
1152cb1f6e9SFrederic Weisbecker int rcutree_offline_cpu(unsigned int cpu);
1164df83742SThomas Gleixner #else
1174df83742SThomas Gleixner #define rcutree_dead_cpu NULL
1182cb1f6e9SFrederic Weisbecker #define rcutree_dying_cpu NULL
1192cb1f6e9SFrederic Weisbecker #define rcutree_offline_cpu NULL
1202cb1f6e9SFrederic Weisbecker #endif
1212cb1f6e9SFrederic Weisbecker 
1222cb1f6e9SFrederic Weisbecker void rcutree_migrate_callbacks(int cpu);
1232cb1f6e9SFrederic Weisbecker 
1244df83742SThomas Gleixner /* Called from hotplug and also arm64 early secondary boot failure */
125448e9f34SFrederic Weisbecker void rcutree_report_cpu_dead(void);
126448e9f34SFrederic Weisbecker 
127448e9f34SFrederic Weisbecker #endif /* __LINUX_RCUTREE_H */
128448e9f34SFrederic Weisbecker