/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <[email protected]>
 *	   Paul E. McKenney <[email protected]> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_note_context_switch(int cpu);
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch(cpu);
}

void synchronize_rcu_bh(void);
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);

void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));

/**
 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
 *
 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs, is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_rcu_bh_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu_bh() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched_expedited();
}
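
/*
 * Illustrative sketch (not part of this header's API): the kerneldoc
 * above suggests batching updates rather than expediting each one.
 * The update_entry() helper and entries[] array below are hypothetical,
 * shown only to contrast the two patterns.
 *
 *	// Unfriendly: forces one expedited grace period per update.
 *	for (i = 0; i < n; i++) {
 *		update_entry(&entries[i]);
 *		synchronize_rcu_bh_expedited();
 *	}
 *
 *	// Preferred: apply all updates, then wait for one grace period.
 *	for (i = 0; i < n; i++)
 *		update_entry(&entries[i]);
 *	synchronize_rcu_bh();
 */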

void rcu_barrier(void);
void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);

extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
long rcu_batches_completed(void);
long rcu_batches_completed_bh(void);
long rcu_batches_completed_sched(void);

void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;

bool rcu_is_watching(void);

#endif /* __LINUX_RCUTREE_H */