xref: /linux-6.15/include/linux/rcutiny.h (revision b14ff274)
16c442127SPaul E. McKenney /* SPDX-License-Identifier: GPL-2.0+ */
29b1d82faSPaul E. McKenney /*
39b1d82faSPaul E. McKenney  * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
49b1d82faSPaul E. McKenney  *
59b1d82faSPaul E. McKenney  * Copyright IBM Corporation, 2008
69b1d82faSPaul E. McKenney  *
76c442127SPaul E. McKenney  * Author: Paul E. McKenney <[email protected]>
89b1d82faSPaul E. McKenney  *
99b1d82faSPaul E. McKenney  * For detailed explanation of Read-Copy Update mechanism see -
109b1d82faSPaul E. McKenney  *		Documentation/RCU
119b1d82faSPaul E. McKenney  */
129b1d82faSPaul E. McKenney #ifndef __LINUX_TINY_H
139b1d82faSPaul E. McKenney #define __LINUX_TINY_H
149b1d82faSPaul E. McKenney 
1524691069SChristoph Hellwig #include <asm/param.h> /* for HZ */
169b1d82faSPaul E. McKenney 
/* Full-fledged polled grace-period state, Tiny RCU version. */
struct rcu_gp_oldstate {
	unsigned long rgos_norm;	/* Cookie from get_state_synchronize_rcu(). */
};
2091a967fdSPaul E. McKenney 
// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
2418538248SPaul E. McKenney 
/*
 * Are the two oldstate values the same?  See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	/* Tiny RCU carries only the normal grace-period cookie, so compare just that. */
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}
3418538248SPaul E. McKenney 
350909fc2bSPaul E. McKenney unsigned long get_state_synchronize_rcu(void);
363fdefca9SPaul E. McKenney 
/* Snapshot the current grace-period state into *rgosp.  Tiny RCU version. */
static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}
413fdefca9SPaul E. McKenney 
420909fc2bSPaul E. McKenney unsigned long start_poll_synchronize_rcu(void);
4376ea3641SPaul E. McKenney 
/*
 * Snapshot the current grace-period state into *rgosp, ensuring (via
 * start_poll_synchronize_rcu()) that a future grace period is started
 * if needed.  Tiny RCU version.
 */
static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}
4876ea3641SPaul E. McKenney 
490909fc2bSPaul E. McKenney bool poll_state_synchronize_rcu(unsigned long oldstate);
50765a3f4fSPaul E. McKenney 
/*
 * Has a full grace period elapsed since the snapshot in *rgosp?
 * Tiny RCU version: delegate to the plain-cookie check.
 */
static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
5591a967fdSPaul E. McKenney 
/*
 * With only one CPU (see the comment above rcu_virt_note_context_switch()
 * below), the grace period corresponding to any previously obtained cookie
 * has necessarily completed, so only the blocking-API contract's
 * might_sleep() check remains.
 */
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}
60765a3f4fSPaul E. McKenney 
/* Full-state variant of cond_synchronize_rcu().  Tiny RCU version. */
static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}
65b6fe4917SPaul E. McKenney 
/* Tiny RCU: expedited polling is the same as normal polling. */
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}
70d96c52feSPaul E. McKenney 
/* Full-state variant of start_poll_synchronize_rcu_expedited(). */
static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}
756c502b14SPaul E. McKenney 
/* Tiny RCU: the expedited conditional wait is the same as the normal one. */
static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}
80d96c52feSPaul E. McKenney 
/* Full-state variant of cond_synchronize_rcu_expedited(). */
static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}
858df13f01SPaul E. McKenney 
86709fdce7SPaul E. McKenney extern void rcu_barrier(void);
872c42818eSPaul E. McKenney 
/* With only one CPU, an expedited grace period is just a normal one. */
static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}
926ebb237bSPaul E. McKenney 
93709fdce7SPaul E. McKenney void rcu_qs(void);
9445975c7dSPaul E. McKenney 
/* Report a quiescent state from softirq context. */
static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}
99d28139c4SPaul E. McKenney 
/*
 * A context switch is a quiescent state for normal RCU (rcu_qs()) and
 * is also reported to RCU-tasks via rcu_tasks_qs().
 */
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)
105a57eb940SPaul E. McKenney 
/*
 * Does RCU need this CPU to stay out of dyntick-idle mode?
 * Tiny RCU never does, so the answer is unconditionally "no".
 */
static inline int rcu_needs_cpu(void)
{
	int needed = 0;

	return needed;
}
1105f192ab0SPaul E. McKenney 
rcu_request_urgent_qs_task(struct task_struct * t)11143a89baeSPaul E. McKenney static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
11243a89baeSPaul E. McKenney 
/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
rcu_cpu_stall_reset(void)11871c40fd0SPaul E. McKenney static inline void rcu_cpu_stall_reset(void) { }
rcu_jiffies_till_stall_check(void)1191b27291bSPaul E. McKenney static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
rcu_irq_exit_check_preempt(void)12007325d4aSThomas Gleixner static inline void rcu_irq_exit_check_preempt(void) { }
exit_rcu(void)12171c40fd0SPaul E. McKenney static inline void exit_rcu(void) { }
/* Tiny RCU never defers quiescent states, so there is never one pending. */
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
rcu_preempt_deferred_qs(struct task_struct * t)1263e310098SPaul E. McKenney static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
127584dc4ceSTeodora Baluta void rcu_scheduler_starting(void);
rcu_end_inkernel_boot(void)128d2b1654fSPaul E. McKenney static inline void rcu_end_inkernel_boot(void) { }
rcu_inkernel_boot_has_ended(void)12959ee0326SPaul E. McKenney static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
rcu_is_watching(void)13071c40fd0SPaul E. McKenney static inline bool rcu_is_watching(void) { return true; }
rcu_momentary_eqs(void)131*32a9f26eSValentin Schneider static inline void rcu_momentary_eqs(void) { }
1325c173eb8SPaul E. McKenney 
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
1355cd37193SPaul E. McKenney 
/*
 * RCUtree hotplug events: Tiny RCU needs no CPU-hotplug callbacks,
 * so the Tree RCU hooks are all NULL (or empty) here.
 */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
1434df83742SThomas Gleixner 
1449b1d82faSPaul E. McKenney #endif /* __LINUX_RCUTINY_H */
145