/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <[email protected]>
 *	   Lai Jiangshan <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>

struct srcu_struct;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key);

/* Allocate a static lockdep key per srcu_struct so lockdep can tell them apart. */
#define init_srcu_struct(ssp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Pull in the implementation-specific srcu_struct layout and fast paths. */
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#else
#error "Unknown SRCU implementation specified to kernel configuration"
#endif

void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
		void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);

#define SRCU_GET_STATE_COMPLETED 0x1

/**
 * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
 *
 * Returns a value that poll_state_synchronize_srcu() will always treat
 * as a cookie whose grace period has already completed.
 */
static inline unsigned long get_completed_synchronize_srcu(void)
{
	return SRCU_GET_STATE_COMPLETED;
}

unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2

/**
 * same_state_synchronize_srcu - Are two old-state values identical?
 * @oldstate1: First old-state value.
 * @oldstate2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
 * get_completed_synchronize_srcu().  Returns %true if the two values are
 * identical and %false otherwise.  This allows structures whose lifetimes
 * are tracked by old-state values to push these values to a list header,
 * allowing those structures to be slightly smaller.
 */
static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
{
	return oldstate1 == oldstate2;
}

#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
#else
/* Architectures whose plain SRCU readers are already NMI-safe use them directly. */
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	__srcu_read_unlock(ssp, idx);
}
#endif /* CONFIG_NEED_SRCU_NMI_SAFE */

void srcu_init(void);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @ssp: The srcu_struct structure to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and it does not
 * rely on normal RCU, so it can be called from a CPU which is in the
 * idle loop from an RCU point of view or offline.
 */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&ssp->dep_map);
}

/*
 * Annotations provide deadlock detection for SRCU.
 *
 * Similar to other lockdep annotations, except there is an additional
 * srcu_lock_sync(), which is basically an empty *write*-side critical section,
 * see lock_sync() for more information.
 */

/* Annotates a srcu_read_lock() */
static inline void srcu_lock_acquire(struct lockdep_map *map)
{
	lock_map_acquire_read(map);
}

/* Annotates a srcu_read_unlock() */
static inline void srcu_lock_release(struct lockdep_map *map)
{
	lock_map_release(map);
}

/* Annotates a synchronize_srcu() */
static inline void srcu_lock_sync(struct lockdep_map *map)
{
	lock_map_sync(map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Without lockdep, assume we are in a read-side critical section. */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	return 1;
}

#define srcu_lock_acquire(m) do { } while (0)
#define srcu_lock_release(m) do { } while (0)
#define srcu_lock_sync(m) do { } while (0)

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Complain if multiple incompatible SRCU reader flavors are mixed on one srcu_struct. */
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
#else
static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor) { }
#endif


/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)

/**
 * srcu_dereference_notrace - no tracing and no lockdep calls from here
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 */
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * The return value from srcu_read_lock() must be passed unaltered
 * to the matching srcu_read_unlock().  Note that srcu_read_lock() and
 * the matching srcu_read_unlock() must occur in the same context, for
 * example, it is illegal to invoke srcu_read_unlock() in an irq handler
 * if the matching srcu_read_lock() was invoked in process context.  Or,
 * for that matter to invoke srcu_read_unlock() from one task and the
 * matching srcu_read_lock() from another.
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	srcu_lock_acquire(&ssp->dep_map);
	return retval;
}

/**
 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but in an NMI-safe manner.
 * See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.
 */
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	retval = __srcu_read_lock_nmisafe(ssp);
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}

/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	return retval;
}

/**
 * srcu_down_read - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter a semaphore-like SRCU read-side critical section.  Note that
 * SRCU read-side critical sections may be nested.  However, it is
 * illegal to call anything that waits on an SRCU grace period for the
 * same srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().  But if you want lockdep to help you
 * keep this stuff straight, you should instead use srcu_read_lock().
 *
 * The semaphore-like nature of srcu_down_read() means that the matching
 * srcu_up_read() can be invoked from some other context, for example,
 * from some other task or from an irq handler.  However, neither
 * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
 *
 * Calls to srcu_down_read() may be nested, similar to the manner in
 * which calls to down_read() may be nested.
 */
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	return __srcu_read_lock(ssp);
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section, but in an NMI-safe manner.
 */
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	rcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_nmisafe(ssp, idx);
}

/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_up_read - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_down_read().
 *
 * Exit an SRCU read-side critical section, but not necessarily from
 * the same context as the matching srcu_down_read().
 */
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}

/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

/**
 * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
 *
 * Converts the preceding srcu_read_lock into a two-way memory barrier.
 *
 * Call this after srcu_read_lock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_lock will appear to happen after
 * the preceding srcu_read_lock.
 */
static inline void smp_mb__after_srcu_read_lock(void)
{
	/* __srcu_read_lock has smp_mb() internally so nothing to do here. */
}

/* Scoped-guard support: guard(srcu)(&ss) takes srcu_read_lock(&ss) and
 * releases it with the saved idx when the guard goes out of scope. */
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)

#endif