/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

/*
 * There is but one CPU, so all RCU flavors share a single set of
 * callbacks, and the barrier and expedited primitives collapse onto
 * their _sched counterparts.
 */
static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  void (*func)(struct rcu_head *rcu))
{
	call_rcu(head, func);
}

/* Tiny RCU never needs to keep a CPU out of dyntick-idle mode. */
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return 0;
}

static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Quiescent-state forcing is a no-op: with only one CPU, there is
 * never another CPU to prod.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */
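
/*
 * A minimal update-side usage sketch (not part of the original header),
 * assuming the usual reader/updater API declared in <linux/rcupdate.h>.
 * "struct foo", "foo_mutex", "global_foo", and "update_foo()" are
 * hypothetical names used only for illustration.  On Tiny RCU the
 * grace-period primitives above all reduce to synchronize_sched(), but
 * the calling idiom is identical to Tree RCU, so updaters are written
 * the same way regardless of which implementation is configured.
 */
#if 0	/* illustration only, excluded from compilation */
struct foo {
	int a;
	struct rcu_head rcu;
};

static DEFINE_MUTEX(foo_mutex);		/* serializes updaters */
static struct foo __rcu *global_foo;

static int update_foo(int new_a)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return -ENOMEM;
	newp->a = new_a;

	mutex_lock(&foo_mutex);
	oldp = rcu_dereference_protected(global_foo,
					 lockdep_is_held(&foo_mutex));
	rcu_assign_pointer(global_foo, newp);	/* publish new version */
	mutex_unlock(&foo_mutex);

	synchronize_rcu();	/* wait for pre-existing readers to finish */
	kfree(oldp);
	/*
	 * Asynchronous alternative: kfree_rcu(oldp, rcu), which on this
	 * configuration lands in the kfree_call_rcu() stub above.
	 */
	return 0;
}
#endif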