/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

static inline bool rcu_eqs_special_set(int cpu)
{
	return false;  /* Never flag non-existent other CPUs! */
}

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_sched_qs(); \
		rcu_note_voluntary_context_switch_lite(current); \
	} while (0)

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}
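/*
 * Usage sketch for the grace-period cookie API above (illustrative
 * only, not part of this header; free_old_copy() is a hypothetical
 * caller-supplied helper):
 *
 *	unsigned long gp_cookie = get_state_synchronize_rcu();
 *
 *	... unlink the old copy so that new readers cannot find it ...
 *
 *	cond_synchronize_rcu(gp_cookie);	(may sleep)
 *	free_old_copy();
 *
 * In this Tiny RCU implementation the cookie is always zero and
 * cond_synchronize_rcu() reduces to might_sleep():  On a single CPU,
 * any task that can block cannot be racing with an in-flight
 * read-side critical section.
 */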
/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline void rcu_request_urgent_qs_task(struct task_struct *t)
{
}

static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL

#endif /* __LINUX_TINY_H */
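/*
 * Illustrative sketch (not part of this header) of the publish/read
 * pattern that these Tiny RCU stubs serve.  "struct foo", global_ptr,
 * my_lock, and do_something() are all hypothetical:
 *
 *	struct foo { int val; };
 *	struct foo __rcu *global_ptr;
 *
 *	Reader:
 *		rcu_read_lock();
 *		p = rcu_dereference(global_ptr);
 *		if (p)
 *			do_something(p->val);
 *		rcu_read_unlock();
 *
 *	Updater (holding my_lock):
 *		old = rcu_dereference_protected(global_ptr,
 *						lockdep_is_held(&my_lock));
 *		rcu_assign_pointer(global_ptr, new);
 *		synchronize_rcu();	(trivial: only one CPU)
 *		kfree(old);
 *
 * With but one CPU, the updater cannot block while a read-side
 * critical section is in flight, which is why most of the functions
 * above can be empty stubs or return zero.
 */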