/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <[email protected]>
 *	   Lai Jiangshan <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>

struct srcu_struct;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(ssp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#else
#error "Unknown SRCU implementation specified to kernel configuration"
#endif

void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2

#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	__srcu_read_unlock(ssp, idx);
}
#endif /* CONFIG_NEED_SRCU_NMI_SAFE */

void srcu_init(void);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in an SRCU read-side critical section?
 * @ssp: The srcu_struct structure to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not rely on
 * normal RCU; it may therefore be invoked even from a CPU that, from an
 * RCU point of view, is idle or offline.
 */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&ssp->dep_map);
}
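
/*
 * Example: a helper that is only safe inside an SRCU read-side critical
 * section can assert as much with srcu_read_lock_held().  This is an
 * illustrative sketch only; my_srcu and my_helper() are hypothetical
 * names, not part of this header:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	static void my_helper(void)
 *	{
 *		RCU_LOCKDEP_WARN(!srcu_read_lock_held(&my_srcu),
 *				 "my_helper() called without srcu_read_lock()");
 *		... access state protected by my_srcu ...
 *	}
 */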

/*
 * Annotations provide deadlock detection for SRCU.
 *
 * Similar to other lockdep annotations, except there is an additional
 * srcu_lock_sync(), which is basically an empty *write*-side critical section,
 * see lock_sync() for more information.
 */

/* Annotates a srcu_read_lock() */
static inline void srcu_lock_acquire(struct lockdep_map *map)
{
	lock_map_acquire_read(map);
}

/* Annotates a srcu_read_unlock() */
static inline void srcu_lock_release(struct lockdep_map *map)
{
	lock_map_release(map);
}

/* Annotates a synchronize_srcu() */
static inline void srcu_lock_sync(struct lockdep_map *map)
{
	lock_map_sync(map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	return 1;
}

#define srcu_lock_acquire(m) do { } while (0)
#define srcu_lock_release(m) do { } while (0)
#define srcu_lock_sync(m) do { } while (0)

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#define SRCU_NMI_UNKNOWN	0x0
#define SRCU_NMI_UNSAFE		0x1
#define SRCU_NMI_SAFE		0x2

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
#else
static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
					 bool nmi_safe) { }
#endif

/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)

/**
 * srcu_dereference_notrace - no tracing and no lockdep calls from here
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 */
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
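
/*
 * Example of srcu_dereference() in a reader (an illustrative sketch;
 * my_srcu, struct my_config, and curr_config are hypothetical names,
 * not part of this header).  The pointer fetched under srcu_read_lock()
 * (defined below) remains safe to use until the matching
 * srcu_read_unlock():
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *	static struct my_config __rcu *curr_config;
 *
 *	static int my_read_config_value(void)
 *	{
 *		struct my_config *cfg;
 *		int idx, ret;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		cfg = srcu_dereference(curr_config, &my_srcu);
 *		ret = cfg ? cfg->value : -ENOENT;
 *		srcu_read_unlock(&my_srcu, idx);
 *		return ret;
 *	}
 */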

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context: for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_nmi_safety(ssp, false);
	retval = __srcu_read_lock(ssp);
	srcu_lock_acquire(&ssp->dep_map);
	return retval;
}

/**
 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but in an NMI-safe manner.
 * See srcu_read_lock() for more information.
 */
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_nmi_safety(ssp, true);
	retval = __srcu_read_lock_nmisafe(ssp);
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}

/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_nmi_safety(ssp, false);
	retval = __srcu_read_lock(ssp);
	return retval;
}

/**
 * srcu_down_read - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter a semaphore-like SRCU read-side critical section.  Note that
 * SRCU read-side critical sections may be nested.  However, it is
 * illegal to call anything that waits on an SRCU grace period for the
 * same srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().  But if you want lockdep to help you
 * keep this stuff straight, you should instead use srcu_read_lock().
 *
 * The semaphore-like nature of srcu_down_read() means that the matching
 * srcu_up_read() can be invoked from some other context, for example,
 * from some other task or from an irq handler.  However, neither
 * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
 *
 * Calls to srcu_down_read() may be nested, similar to the manner in
 * which calls to down_read() may be nested.
 */
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
	WARN_ON_ONCE(in_nmi());
	srcu_check_nmi_safety(ssp, false);
	return __srcu_read_lock(ssp);
}
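
/*
 * Example of the semaphore-like pattern (an illustrative sketch; my_srcu,
 * struct my_request, and the functions shown are hypothetical names, not
 * part of this header).  Unlike srcu_read_lock(), the critical section
 * entered by srcu_down_read() may be exited by srcu_up_read() (defined
 * below) from another context, such as an irq-driven completion path:
 *
 *	static void my_submit(struct my_request *req)
 *	{
 *		req->srcu_idx = srcu_down_read(&my_srcu);
 *		... hand req off to hardware or to another task ...
 *	}
 *
 *	static void my_complete(struct my_request *req)  // e.g., from irq
 *	{
 *		srcu_up_read(&my_srcu, req->srcu_idx);
 *	}
 */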

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_nmi_safety(ssp, false);
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock_nmisafe().
 *
 * Exit an SRCU read-side critical section, but in an NMI-safe manner.
 */
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_nmi_safety(ssp, true);
	rcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_nmisafe(ssp, idx);
}

/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
	srcu_check_nmi_safety(ssp, false);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_up_read - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_down_read().
 *
 * Exit an SRCU read-side critical section, but not necessarily from
 * the same context as the matching srcu_down_read().
 */
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	WARN_ON_ONCE(in_nmi());
	srcu_check_nmi_safety(ssp, false);
	__srcu_read_unlock(ssp, idx);
}

/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock() into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock(), to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock() will appear to happen
 * after the preceding srcu_read_unlock().
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)

#endif /* _LINUX_SRCU_H */
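
/*
 * Example of the scope-based guard defined above (an illustrative sketch;
 * my_srcu and my_data are hypothetical names, not part of this header).
 * guard(srcu)(...) takes srcu_read_lock() on entry to the scope, and the
 * matching srcu_read_unlock() runs automatically at every scope exit:
 *
 *	static int my_get_data(void)
 *	{
 *		guard(srcu)(&my_srcu);
 *		return READ_ONCE(my_data);
 *	}
 */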