/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <[email protected]>
 *	   Paul E. McKenney <[email protected]> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 * 26 * For detailed explanation of Read-Copy Update mechanism see - 27 * Documentation/RCU 28 */ 29 30 #ifndef __LINUX_RCUTREE_H 31 #define __LINUX_RCUTREE_H 32 33 struct notifier_block; 34 35 extern void rcu_sched_qs(int cpu); 36 extern void rcu_bh_qs(int cpu); 37 extern void rcu_note_context_switch(int cpu); 38 extern int rcu_needs_cpu(int cpu); 39 40 #ifdef CONFIG_TREE_PREEMPT_RCU 41 42 extern void __rcu_read_lock(void); 43 extern void __rcu_read_unlock(void); 44 extern void synchronize_rcu(void); 45 extern void exit_rcu(void); 46 47 /* 48 * Defined as macro as it is a very low level header 49 * included from areas that don't even know about current 50 */ 51 #define rcu_preempt_depth() (current->rcu_read_lock_nesting) 52 53 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 54 55 static inline void __rcu_read_lock(void) 56 { 57 preempt_disable(); 58 } 59 60 static inline void __rcu_read_unlock(void) 61 { 62 preempt_enable(); 63 } 64 65 #define synchronize_rcu synchronize_sched 66 67 static inline void exit_rcu(void) 68 { 69 } 70 71 static inline int rcu_preempt_depth(void) 72 { 73 return 0; 74 } 75 76 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 77 78 static inline void __rcu_read_lock_bh(void) 79 { 80 local_bh_disable(); 81 } 82 static inline void __rcu_read_unlock_bh(void) 83 { 84 local_bh_enable(); 85 } 86 87 extern void call_rcu_sched(struct rcu_head *head, 88 void (*func)(struct rcu_head *rcu)); 89 extern void synchronize_rcu_bh(void); 90 extern void synchronize_sched(void); 91 extern void synchronize_rcu_expedited(void); 92 93 static inline void synchronize_rcu_bh_expedited(void) 94 { 95 synchronize_sched_expedited(); 96 } 97 98 extern void rcu_check_callbacks(int cpu, int user); 99 100 extern long rcu_batches_completed(void); 101 extern long rcu_batches_completed_bh(void); 102 extern long rcu_batches_completed_sched(void); 103 extern void rcu_force_quiescent_state(void); 104 extern void rcu_bh_force_quiescent_state(void); 105 extern void 
rcu_sched_force_quiescent_state(void); 106 107 #ifdef CONFIG_NO_HZ 108 void rcu_enter_nohz(void); 109 void rcu_exit_nohz(void); 110 #else /* CONFIG_NO_HZ */ 111 static inline void rcu_enter_nohz(void) 112 { 113 } 114 static inline void rcu_exit_nohz(void) 115 { 116 } 117 #endif /* CONFIG_NO_HZ */ 118 119 /* A context switch is a grace period for RCU-sched and RCU-bh. */ 120 static inline int rcu_blocking_is_gp(void) 121 { 122 return num_online_cpus() == 1; 123 } 124 125 extern void rcu_scheduler_starting(void); 126 extern int rcu_scheduler_active __read_mostly; 127 128 #endif /* __LINUX_RCUTREE_H */ 129